Diffstat (limited to 'include')
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acnames.h3
-rw-r--r--include/acpi/acoutput.h21
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h47
-rw-r--r--include/acpi/acpi_io.h4
-rw-r--r--include/acpi/acpi_lpat.h65
-rw-r--r--include/acpi/acpiosxf.h4
-rw-r--r--include/acpi/acpixf.h56
-rw-r--r--include/acpi/acrestyp.h42
-rw-r--r--include/acpi/actbl.h20
-rw-r--r--include/acpi/actbl1.h189
-rw-r--r--include/acpi/actbl2.h294
-rw-r--r--include/acpi/actbl3.h100
-rw-r--r--include/acpi/actypes.h104
-rw-r--r--include/acpi/acuuid.h89
-rw-r--r--include/acpi/platform/acenv.h45
-rw-r--r--include/acpi/platform/acenvex.h11
-rw-r--r--include/acpi/platform/acgcc.h6
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/acpi/processor.h6
-rw-r--r--include/acpi/video.h21
-rw-r--r--include/asm-generic/4level-fixup.h1
-rw-r--r--include/asm-generic/asm-offsets.h1
-rw-r--r--include/asm-generic/barrier.h32
-rw-r--r--include/asm-generic/cmpxchg.h3
-rw-r--r--include/asm-generic/dma-mapping-common.h5
-rw-r--r--include/asm-generic/futex.h7
-rw-r--r--include/asm-generic/gpio.h53
-rw-r--r--include/asm-generic/io.h17
-rw-r--r--include/asm-generic/iomap.h4
-rw-r--r--include/asm-generic/mm-arch-hooks.h16
-rw-r--r--include/asm-generic/pci.h13
-rw-r--r--include/asm-generic/pci_iomap.h10
-rw-r--r--include/asm-generic/pgtable.h222
-rw-r--r--include/asm-generic/preempt.h7
-rw-r--r--include/asm-generic/qspinlock.h139
-rw-r--r--include/asm-generic/qspinlock_types.h79
-rw-r--r--include/asm-generic/scatterlist.h34
-rw-r--r--include/asm-generic/seccomp.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h19
-rw-r--r--include/clocksource/arm_arch_timer.h2
-rw-r--r--include/clocksource/timer-sp804.h28
-rw-r--r--include/crypto/aead.h533
-rw-r--r--include/crypto/akcipher.h340
-rw-r--r--include/crypto/algapi.h37
-rw-r--r--include/crypto/compress.h8
-rw-r--r--include/crypto/cryptd.h1
-rw-r--r--include/crypto/drbg.h59
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/if_alg.h8
-rw-r--r--include/crypto/internal/aead.h102
-rw-r--r--include/crypto/internal/akcipher.h60
-rw-r--r--include/crypto/internal/geniv.h24
-rw-r--r--include/crypto/internal/rng.h21
-rw-r--r--include/crypto/internal/rsa.h27
-rw-r--r--include/crypto/md5.h5
-rw-r--r--include/crypto/null.h3
-rw-r--r--include/crypto/rng.h103
-rw-r--r--include/crypto/scatterwalk.h14
-rw-r--r--include/crypto/sha.h15
-rw-r--r--include/crypto/sha1_base.h106
-rw-r--r--include/crypto/sha256_base.h128
-rw-r--r--include/crypto/sha512_base.h131
-rw-r--r--include/drm/bridge/dw_hdmi.h62
-rw-r--r--include/drm/bridge/ptn3460.h37
-rw-r--r--include/drm/drmP.h47
-rw-r--r--include/drm/drm_atomic.h108
-rw-r--r--include/drm/drm_atomic_helper.h68
-rw-r--r--include/drm/drm_crtc.h206
-rw-r--r--include/drm/drm_crtc_helper.h68
-rw-r--r--include/drm/drm_dp_helper.h183
-rw-r--r--include/drm/drm_dp_mst_helper.h6
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_fb_helper.h21
-rw-r--r--include/drm/drm_gem.h14
-rw-r--r--include/drm/drm_mem_util.h5
-rw-r--r--include/drm/drm_mm.h52
-rw-r--r--include/drm/drm_modes.h15
-rw-r--r--include/drm/drm_panel.h5
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/drm/drm_plane_helper.h17
-rw-r--r--include/drm/i915_component.h39
-rw-r--r--include/drm/i915_pciids.h81
-rw-r--r--include/drm/i915_powerwell.h37
-rw-r--r--include/drm/ttm/ttm_bo_api.h2
-rw-r--r--include/drm/ttm/ttm_bo_driver.h2
-rw-r--r--include/dt-bindings/clock/alphascale,asm9260.h97
-rw-r--r--include/dt-bindings/clock/bcm-cygnus.h68
-rw-r--r--include/dt-bindings/clock/exynos3250.h61
-rw-r--r--include/dt-bindings/clock/exynos4.h7
-rw-r--r--include/dt-bindings/clock/exynos5420.h6
-rw-r--r--include/dt-bindings/clock/exynos5433.h1403
-rw-r--r--include/dt-bindings/clock/exynos7-clk.h88
-rw-r--r--include/dt-bindings/clock/hi6220-clock.h173
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h5
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h450
-rw-r--r--include/dt-bindings/clock/jz4740-cgu.h37
-rw-r--r--include/dt-bindings/clock/jz4780-cgu.h88
-rw-r--r--include/dt-bindings/clock/lpc18xx-ccu.h74
-rw-r--r--include/dt-bindings/clock/lpc18xx-cgu.h41
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h3
-rw-r--r--include/dt-bindings/clock/marvell,pxa1928.h57
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h4
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h25
-rw-r--r--include/dt-bindings/clock/mt8135-clk.h194
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h235
-rw-r--r--include/dt-bindings/clock/pistachio-clk.h183
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8916.h156
-rw-r--r--include/dt-bindings/clock/qcom,lcc-ipq806x.h30
-rw-r--r--include/dt-bindings/clock/qcom,lcc-msm8960.h50
-rw-r--r--include/dt-bindings/clock/r8a73a4-clock.h63
-rw-r--r--include/dt-bindings/clock/r8a7778-clock.h71
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h7
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h8
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h20
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h7
-rw-r--r--include/dt-bindings/clock/samsung,s2mps11.h23
-rw-r--r--include/dt-bindings/clock/sh73a0-clock.h82
-rw-r--r--include/dt-bindings/clock/stih418-clks.h34
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h345
-rw-r--r--include/dt-bindings/clock/tegra124-car.h345
-rw-r--r--include/dt-bindings/clock/vf610-clock.h4
-rw-r--r--include/dt-bindings/clock/zx296702-clock.h170
-rw-r--r--include/dt-bindings/dma/jz4780-dma.h49
-rw-r--r--include/dt-bindings/dma/sun4i-a10.h56
-rw-r--r--include/dt-bindings/gpio/meson8-gpio.h157
-rw-r--r--include/dt-bindings/gpio/meson8b-gpio.h32
-rw-r--r--include/dt-bindings/iio/qcom,spmi-vadc.h119
-rw-r--r--include/dt-bindings/interrupt-controller/irq-st.h30
-rw-r--r--include/dt-bindings/leds/common.h21
-rw-r--r--include/dt-bindings/media/omap3-isp.h22
-rw-r--r--include/dt-bindings/media/xilinx-vip.h39
-rw-r--r--include/dt-bindings/mfd/arizona.h (renamed from include/linux/mfd/arizona/gpio.h)85
-rw-r--r--include/dt-bindings/mfd/qcom-rpm.h160
-rw-r--r--include/dt-bindings/mfd/st-lpc.h15
-rw-r--r--include/dt-bindings/net/ti-dp83867.h45
-rw-r--r--include/dt-bindings/phy/phy-miphy365x.h14
-rw-r--r--include/dt-bindings/phy/phy-pistachio-usb.h16
-rw-r--r--include/dt-bindings/pinctrl/am33xx.h3
-rw-r--r--include/dt-bindings/pinctrl/am43xx.h4
-rw-r--r--include/dt-bindings/pinctrl/bcm2835.h27
-rw-r--r--include/dt-bindings/pinctrl/mt6397-pinfunc.h256
-rw-r--r--include/dt-bindings/pinctrl/mt65xx.h40
-rw-r--r--include/dt-bindings/pinctrl/omap.h1
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-gpio.h15
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h4
-rw-r--r--include/dt-bindings/pinctrl/sun4i-a10.h62
-rw-r--r--include/dt-bindings/reset-controller/mt8135-resets.h64
-rw-r--r--include/dt-bindings/reset-controller/mt8173-resets.h63
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq806x.h43
-rw-r--r--include/dt-bindings/reset/qcom,gcc-msm8916.h108
-rw-r--r--include/dt-bindings/sound/apq8016-lpass.h9
-rw-r--r--include/dt-bindings/sound/audio-jack-events.h9
-rw-r--r--include/dt-bindings/sound/samsung-i2s.h8
-rw-r--r--include/dt-bindings/sound/tas2552.h18
-rw-r--r--include/dt-bindings/thermal/thermal_exynos.h (renamed from include/linux/clk/sunxi.h)20
-rw-r--r--include/kvm/arm_arch_timer.h31
-rw-r--r--include/kvm/arm_vgic.h145
-rw-r--r--include/kvm/iodev.h76
-rw-r--r--include/linux/a.out.h67
-rw-r--r--include/linux/acpi.h132
-rw-r--r--include/linux/acpi_irq.h10
-rw-r--r--include/linux/ahci_platform.h6
-rw-r--r--include/linux/aio.h70
-rw-r--r--include/linux/alarmtimer.h4
-rw-r--r--include/linux/amba/bus.h13
-rw-r--r--include/linux/amba/sp810.h2
-rw-r--r--include/linux/arm-cci.h9
-rw-r--r--include/linux/async_tx.h3
-rw-r--r--include/linux/ata.h41
-rw-r--r--include/linux/ata_platform.h5
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/backing-dev-defs.h256
-rw-r--r--include/linux/backing-dev.h608
-rw-r--r--include/linux/backlight.h8
-rw-r--r--include/linux/basic_mmio_gpio.h1
-rw-r--r--include/linux/bcm47xx_nvram.h49
-rw-r--r--include/linux/bcm47xx_wdt.h1
-rw-r--r--include/linux/bcma/bcma.h31
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h11
-rw-r--r--include/linux/bcma/bcma_driver_gmac_cmn.h6
-rw-r--r--include/linux/bcma/bcma_driver_mips.h15
-rw-r--r--include/linux/bcma/bcma_driver_pci.h23
-rw-r--r--include/linux/bcma/bcma_driver_pcie2.h4
-rw-r--r--include/linux/bcma/bcma_regs.h2
-rw-r--r--include/linux/bcma/bcma_soc.h2
-rw-r--r--include/linux/bio.h32
-rw-r--r--include/linux/bitmap.h75
-rw-r--r--include/linux/bitops.h4
-rw-r--r--include/linux/bitrev.h77
-rw-r--r--include/linux/blk-cgroup.h650
-rw-r--r--include/linux/blk-mq.h23
-rw-r--r--include/linux/blk_types.h29
-rw-r--r--include/linux/blkdev.h90
-rw-r--r--include/linux/bootmem.h8
-rw-r--r--include/linux/bottom_half.h1
-rw-r--r--include/linux/bpf.h97
-rw-r--r--include/linux/brcmphy.h11
-rw-r--r--include/linux/buffer_head.h7
-rw-r--r--include/linux/cacheinfo.h2
-rw-r--r--include/linux/can/dev.h2
-rw-r--r--include/linux/can/led.h6
-rw-r--r--include/linux/can/skb.h9
-rw-r--r--include/linux/capability.h29
-rw-r--r--include/linux/cdev.h2
-rw-r--r--include/linux/ceph/ceph_features.h16
-rw-r--r--include/linux/ceph/ceph_fs.h38
-rw-r--r--include/linux/ceph/debugfs.h8
-rw-r--r--include/linux/ceph/libceph.h26
-rw-r--r--include/linux/ceph/messenger.h7
-rw-r--r--include/linux/ceph/mon_client.h9
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/ceph/osdmap.h5
-rw-r--r--include/linux/cgroup-defs.h501
-rw-r--r--include/linux/cgroup.h992
-rw-r--r--include/linux/cgroup_subsys.h8
-rw-r--r--include/linux/cleancache.h13
-rw-r--r--include/linux/clk-private.h220
-rw-r--r--include/linux/clk-provider.h76
-rw-r--r--include/linux/clk.h90
-rw-r--r--include/linux/clk/at91_pmc.h4
-rw-r--r--include/linux/clk/shmobile.h1
-rw-r--r--include/linux/clk/tegra.h2
-rw-r--r--include/linux/clk/ti.h29
-rw-r--r--include/linux/clkdev.h11
-rw-r--r--include/linux/clockchips.h185
-rw-r--r--include/linux/clocksource.h134
-rw-r--r--include/linux/cma.h12
-rw-r--r--include/linux/compaction.h87
-rw-r--r--include/linux/compat.h11
-rw-r--r--include/linux/compiler-gcc.h222
-rw-r--r--include/linux/compiler-gcc3.h23
-rw-r--r--include/linux/compiler-gcc4.h87
-rw-r--r--include/linux/compiler-gcc5.h65
-rw-r--r--include/linux/compiler-intel.h5
-rw-r--r--include/linux/compiler.h86
-rw-r--r--include/linux/configfs.h4
-rw-r--r--include/linux/console.h4
-rw-r--r--include/linux/console_struct.h1
-rw-r--r--include/linux/context_tracking.h25
-rw-r--r--include/linux/context_tracking_state.h10
-rw-r--r--include/linux/coresight.h22
-rw-r--r--include/linux/cpu.h27
-rw-r--r--include/linux/cpu_cooling.h39
-rw-r--r--include/linux/cpufreq.h15
-rw-r--r--include/linux/cpuidle.h35
-rw-r--r--include/linux/cpumask.h231
-rw-r--r--include/linux/crc-itu-t.h2
-rw-r--r--include/linux/crc-t10dif.h1
-rw-r--r--include/linux/cred.h23
-rw-r--r--include/linux/crush/crush.h52
-rw-r--r--include/linux/crush/hash.h6
-rw-r--r--include/linux/crush/mapper.h2
-rw-r--r--include/linux/crypto.h508
-rw-r--r--include/linux/cryptohash.h2
-rw-r--r--include/linux/dcache.h165
-rw-r--r--include/linux/dccp.h4
-rw-r--r--include/linux/debugfs.h19
-rw-r--r--include/linux/devfreq-event.h196
-rw-r--r--include/linux/device-mapper.h16
-rw-r--r--include/linux/device.h153
-rw-r--r--include/linux/dma-buf.h40
-rw-r--r--include/linux/dma-mapping.h4
-rw-r--r--include/linux/dma/hsu.h48
-rw-r--r--include/linux/dma/pxa-dma.h27
-rw-r--r--include/linux/dma/xilinx_dma.h (renamed from include/linux/amba/xilinx_dma.h)0
-rw-r--r--include/linux/dmaengine.h216
-rw-r--r--include/linux/dmapool.h4
-rw-r--r--include/linux/dmar.h85
-rw-r--r--include/linux/dmi.h4
-rw-r--r--include/linux/dqblk_v1.h3
-rw-r--r--include/linux/efi.h21
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/elf-randomize.h22
-rw-r--r--include/linux/enclosure.h13
-rw-r--r--include/linux/etherdevice.h47
-rw-r--r--include/linux/exportfs.h23
-rw-r--r--include/linux/extcon.h134
-rw-r--r--include/linux/extcon/extcon-adc-jack.h5
-rw-r--r--include/linux/f2fs_fs.h18
-rw-r--r--include/linux/falloc.h6
-rw-r--r--include/linux/fb.h12
-rw-r--r--include/linux/fdtable.h3
-rw-r--r--include/linux/fec.h1
-rw-r--r--include/linux/filter.h37
-rw-r--r--include/linux/fixp-arith.h145
-rw-r--r--include/linux/frontswap.h14
-rw-r--r--include/linux/fs.h372
-rw-r--r--include/linux/fs_pin.h27
-rw-r--r--include/linux/fscache-cache.h55
-rw-r--r--include/linux/fsl_devices.h1
-rw-r--r--include/linux/fsnotify.h6
-rw-r--r--include/linux/fsnotify_backend.h2
-rw-r--r--include/linux/fwnode.h27
-rw-r--r--include/linux/genalloc.h6
-rw-r--r--include/linux/gfp.h43
-rw-r--r--include/linux/goldfish.h19
-rw-r--r--include/linux/gpio.h7
-rw-r--r--include/linux/gpio/consumer.h152
-rw-r--r--include/linux/gpio/driver.h61
-rw-r--r--include/linux/hardirq.h4
-rw-r--r--include/linux/hdmi.h37
-rw-r--r--include/linux/hid-sensor-hub.h61
-rw-r--r--include/linux/hid-sensor-ids.h2
-rw-r--r--include/linux/hid.h14
-rw-r--r--include/linux/highmem.h2
-rw-r--r--include/linux/host1x.h19
-rw-r--r--include/linux/hrtimer.h167
-rw-r--r--include/linux/hsi/hsi.h6
-rw-r--r--include/linux/htirq.h22
-rw-r--r--include/linux/huge_mm.h12
-rw-r--r--include/linux/hugetlb.h47
-rw-r--r--include/linux/hw_random.h8
-rw-r--r--include/linux/hwspinlock.h7
-rw-r--r--include/linux/hyperv.h117
-rw-r--r--include/linux/i2c.h69
-rw-r--r--include/linux/i2c/twl.h1
-rw-r--r--include/linux/i2o.h988
-rw-r--r--include/linux/ide.h27
-rw-r--r--include/linux/ieee80211.h27
-rw-r--r--include/linux/ieee802154.h30
-rw-r--r--include/linux/if_bridge.h19
-rw-r--r--include/linux/if_link.h10
-rw-r--r--include/linux/if_macvlan.h2
-rw-r--r--include/linux/if_pppox.h4
-rw-r--r--include/linux/if_vlan.h171
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/iio/buffer.h87
-rw-r--r--include/linux/iio/common/ssp_sensors.h82
-rw-r--r--include/linux/iio/consumer.h12
-rw-r--r--include/linux/iio/dac/max517.h2
-rw-r--r--include/linux/iio/events.h30
-rw-r--r--include/linux/iio/iio.h27
-rw-r--r--include/linux/iio/kfifo_buf.h5
-rw-r--r--include/linux/iio/types.h68
-rw-r--r--include/linux/inet_diag.h44
-rw-r--r--include/linux/inetdevice.h3
-rw-r--r--include/linux/init.h127
-rw-r--r--include/linux/init_task.h24
-rw-r--r--include/linux/input/mt.h3
-rw-r--r--include/linux/input/touchscreen.h5
-rw-r--r--include/linux/intel-iommu.h29
-rw-r--r--include/linux/intel_mid_dma.h76
-rw-r--r--include/linux/interrupt.h36
-rw-r--r--include/linux/io-mapping.h2
-rw-r--r--include/linux/io.h18
-rw-r--r--include/linux/iommu-common.h51
-rw-r--r--include/linux/iommu.h79
-rw-r--r--include/linux/iopoll.h144
-rw-r--r--include/linux/ioport.h8
-rw-r--r--include/linux/iova.h41
-rw-r--r--include/linux/ipv6.h17
-rw-r--r--include/linux/irq.h96
-rw-r--r--include/linux/irq_work.h3
-rw-r--r--include/linux/irqchip.h14
-rw-r--r--include/linux/irqchip/arm-gic-acpi.h31
-rw-r--r--include/linux/irqchip/arm-gic-v3.h66
-rw-r--r--include/linux/irqchip/arm-gic.h9
-rw-r--r--include/linux/irqchip/ingenic.h23
-rw-r--r--include/linux/irqchip/irq-crossbar.h11
-rw-r--r--include/linux/irqchip/irq-omap-intc.h2
-rw-r--r--include/linux/irqchip/irq-sa11x0.h17
-rw-r--r--include/linux/irqchip/mips-gic.h10
-rw-r--r--include/linux/irqdesc.h69
-rw-r--r--include/linux/irqdomain.h8
-rw-r--r--include/linux/irqflags.h43
-rw-r--r--include/linux/irqnr.h6
-rw-r--r--include/linux/jbd.h9
-rw-r--r--include/linux/jbd2.h13
-rw-r--r--include/linux/jhash.h17
-rw-r--r--include/linux/jiffies.h130
-rw-r--r--include/linux/jump_label.h21
-rw-r--r--include/linux/jz4780-nemc.h43
-rw-r--r--include/linux/kasan.h88
-rw-r--r--include/linux/kconfig.h24
-rw-r--r--include/linux/kdb.h8
-rw-r--r--include/linux/kernel.h57
-rw-r--r--include/linux/kernfs.h15
-rw-r--r--include/linux/kexec.h26
-rw-r--r--include/linux/kmemleak.h6
-rw-r--r--include/linux/kobject.h5
-rw-r--r--include/linux/kprobes.h3
-rw-r--r--include/linux/ksm.h17
-rw-r--r--include/linux/ktime.h34
-rw-r--r--include/linux/kvm_host.h177
-rw-r--r--include/linux/kvm_types.h1
-rw-r--r--include/linux/lcm.h1
-rw-r--r--include/linux/led-class-flash.h192
-rw-r--r--include/linux/leds.h31
-rw-r--r--include/linux/lglock.h5
-rw-r--r--include/linux/lguest.h4
-rw-r--r--include/linux/lguest_launcher.h61
-rw-r--r--include/linux/libata.h26
-rw-r--r--include/linux/libfdt_env.h4
-rw-r--r--include/linux/libnvdimm.h151
-rw-r--r--include/linux/list_lru.h82
-rw-r--r--include/linux/list_nulls.h6
-rw-r--r--include/linux/livepatch.h139
-rw-r--r--include/linux/lockdep.h21
-rw-r--r--include/linux/lockref.h3
-rw-r--r--include/linux/lsm_hooks.h1888
-rw-r--r--include/linux/mailbox_client.h2
-rw-r--r--include/linux/mailbox_controller.h2
-rw-r--r--include/linux/mbus.h5
-rw-r--r--include/linux/mdio-gpio.h3
-rw-r--r--include/linux/mei_cl_bus.h42
-rw-r--r--include/linux/memblock.h75
-rw-r--r--include/linux/memcontrol.h125
-rw-r--r--include/linux/memory_hotplug.h6
-rw-r--r--include/linux/mempool.h5
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h1
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h11
-rw-r--r--include/linux/mfd/arizona/core.h12
-rw-r--r--include/linux/mfd/arizona/pdata.h32
-rw-r--r--include/linux/mfd/arizona/registers.h27
-rw-r--r--include/linux/mfd/axp20x.h141
-rw-r--r--include/linux/mfd/cros_ec.h93
-rw-r--r--include/linux/mfd/cros_ec_commands.h277
-rw-r--r--include/linux/mfd/da9055/core.h2
-rw-r--r--include/linux/mfd/da9063/core.h1
-rw-r--r--include/linux/mfd/da9063/pdata.h1
-rw-r--r--include/linux/mfd/da9150/core.h68
-rw-r--r--include/linux/mfd/da9150/registers.h1155
-rw-r--r--include/linux/mfd/max77686-private.h1
-rw-r--r--include/linux/mfd/max77686.h33
-rw-r--r--include/linux/mfd/max77693-private.h113
-rw-r--r--include/linux/mfd/max77693.h25
-rw-r--r--include/linux/mfd/max77843-private.h454
-rw-r--r--include/linux/mfd/menelaus.h7
-rw-r--r--include/linux/mfd/mt6397/core.h64
-rw-r--r--include/linux/mfd/mt6397/registers.h362
-rw-r--r--include/linux/mfd/palmas.h4
-rw-r--r--include/linux/mfd/qcom_rpm.h13
-rw-r--r--include/linux/mfd/rk808.h3
-rw-r--r--include/linux/mfd/rt5033-private.h260
-rw-r--r--include/linux/mfd/rt5033.h62
-rw-r--r--include/linux/mfd/rtsx_pci.h1116
-rw-r--r--include/linux/mfd/samsung/core.h9
-rw-r--r--include/linux/mfd/samsung/irq.h2
-rw-r--r--include/linux/mfd/samsung/rtc.h2
-rw-r--r--include/linux/mfd/sky81452.h31
-rw-r--r--include/linux/mfd/stmpe.h60
-rw-r--r--include/linux/mfd/stw481x.h4
-rw-r--r--include/linux/mfd/syscon/atmel-matrix.h117
-rw-r--r--include/linux/mfd/syscon/atmel-mc.h144
-rw-r--r--include/linux/mfd/syscon/atmel-smc.h173
-rw-r--r--include/linux/mfd/syscon/atmel-st.h49
-rw-r--r--include/linux/mfd/syscon/exynos4-pmu.h21
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h3
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h1
-rw-r--r--include/linux/mfd/tc3589x.h35
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h4
-rw-r--r--include/linux/mfd/tmio.h30
-rw-r--r--include/linux/mfd/wm8350/supply.h6
-rw-r--r--include/linux/migrate.h9
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/cmd.h48
-rw-r--r--include/linux/mlx4/device.h135
-rw-r--r--include/linux/mlx4/driver.h19
-rw-r--r--include/linux/mlx4/qp.h21
-rw-r--r--include/linux/mlx5/cmd.h2
-rw-r--r--include/linux/mlx5/cq.h10
-rw-r--r--include/linux/mlx5/device.h217
-rw-r--r--include/linux/mlx5/doorbell.h2
-rw-r--r--include/linux/mlx5/driver.h189
-rw-r--r--include/linux/mlx5/flow_table.h54
-rw-r--r--include/linux/mlx5/mlx5_ifc.h6584
-rw-r--r--include/linux/mlx5/qp.h27
-rw-r--r--include/linux/mlx5/srq.h2
-rw-r--r--include/linux/mlx5/vport.h55
-rw-r--r--include/linux/mm-arch-hooks.h25
-rw-r--r--include/linux/mm.h309
-rw-r--r--include/linux/mm_types.h45
-rw-r--r--include/linux/mmc/card.h18
-rw-r--r--include/linux/mmc/core.h6
-rw-r--r--include/linux/mmc/dw_mmc.h18
-rw-r--r--include/linux/mmc/host.h38
-rw-r--r--include/linux/mmc/mmc.h14
-rw-r--r--include/linux/mmc/sdhci-pci-data.h2
-rw-r--r--include/linux/mmc/sdhci-spear.h34
-rw-r--r--include/linux/mmc/sdhci.h208
-rw-r--r--include/linux/mmc/sdio_ids.h8
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h25
-rw-r--r--include/linux/mmc/slot-gpio.h5
-rw-r--r--include/linux/mmiotrace.h2
-rw-r--r--include/linux/mmu_notifier.h12
-rw-r--r--include/linux/mmzone.h53
-rw-r--r--include/linux/mod_devicetable.h42
-rw-r--r--include/linux/module.h165
-rw-r--r--include/linux/moduleloader.h8
-rw-r--r--include/linux/moduleparam.h111
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/mpi.h15
-rw-r--r--include/linux/mtd/cfi.h188
-rw-r--r--include/linux/mtd/map.h54
-rw-r--r--include/linux/mtd/mtd.h3
-rw-r--r--include/linux/mtd/nand.h6
-rw-r--r--include/linux/mtd/spi-nor.h12
-rw-r--r--include/linux/mtd/ubi.h53
-rw-r--r--include/linux/mutex.h1
-rw-r--r--include/linux/namei.h41
-rw-r--r--include/linux/nbd.h46
-rw-r--r--include/linux/nd.h151
-rw-r--r--include/linux/net.h15
-rw-r--r--include/linux/netdev_features.h1
-rw-r--r--include/linux/netdevice.h348
-rw-r--r--include/linux/netfilter.h144
-rw-r--r--include/linux/netfilter/ipset/ip_set.h66
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h38
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h27
-rw-r--r--include/linux/netfilter/x_tables.h60
-rw-r--r--include/linux/netfilter_arp/arp_tables.h3
-rw-r--r--include/linux/netfilter_bridge.h111
-rw-r--r--include/linux/netfilter_bridge/ebtables.h5
-rw-r--r--include/linux/netfilter_defs.h9
-rw-r--r--include/linux/netfilter_ingress.h41
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h3
-rw-r--r--include/linux/netfilter_ipv6.h3
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h3
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nfs4.h11
-rw-r--r--include/linux/nfs_fs.h13
-rw-r--r--include/linux/nfs_fs_sb.h10
-rw-r--r--include/linux/nfs_idmap.h77
-rw-r--r--include/linux/nfs_page.h23
-rw-r--r--include/linux/nfs_xdr.h95
-rw-r--r--include/linux/nilfs2_fs.h2
-rw-r--r--include/linux/nmi.h24
-rw-r--r--include/linux/nodemask.h67
-rw-r--r--include/linux/ntb.h970
-rw-r--r--include/linux/ntb_transport.h85
-rw-r--r--include/linux/nvme.h46
-rw-r--r--include/linux/nx842.h11
-rw-r--r--include/linux/of.h78
-rw-r--r--include/linux/of_device.h10
-rw-r--r--include/linux/of_dma.h21
-rw-r--r--include/linux/of_fdt.h6
-rw-r--r--include/linux/of_gpio.h1
-rw-r--r--include/linux/of_graph.h28
-rw-r--r--include/linux/of_iommu.h6
-rw-r--r--include/linux/of_irq.h9
-rw-r--r--include/linux/of_mdio.h7
-rw-r--r--include/linux/of_net.h8
-rw-r--r--include/linux/of_pci.h3
-rw-r--r--include/linux/of_platform.h2
-rw-r--r--include/linux/omap-gpmc.h3
-rw-r--r--include/linux/oom.h27
-rw-r--r--include/linux/osq_lock.h17
-rw-r--r--include/linux/page-flags.h110
-rw-r--r--include/linux/page_counter.h3
-rw-r--r--include/linux/page_ext.h2
-rw-r--r--include/linux/page_owner.h13
-rw-r--r--include/linux/pagemap.h9
-rw-r--r--include/linux/parport.h43
-rw-r--r--include/linux/pata_arasan_cf_data.h2
-rw-r--r--include/linux/pci-acpi.h5
-rw-r--r--include/linux/pci-aspm.h4
-rw-r--r--include/linux/pci.h87
-rw-r--r--include/linux/pci_ids.h9
-rw-r--r--include/linux/percpu-refcount.h34
-rw-r--r--include/linux/percpu_counter.h13
-rw-r--r--include/linux/perf_event.h229
-rw-r--r--include/linux/personality.h40
-rw-r--r--include/linux/phy.h26
-rw-r--r--include/linux/phy/phy-qcom-ufs.h59
-rw-r--r--include/linux/phy/phy-sun4i-usb.h26
-rw-r--r--include/linux/phy/phy.h9
-rw-r--r--include/linux/phy_fixed.h9
-rw-r--r--include/linux/pid_namespace.h4
-rw-r--r--include/linux/pinctrl/consumer.h2
-rw-r--r--include/linux/pinctrl/pinconf-generic.h29
-rw-r--r--include/linux/pinctrl/pinctrl.h14
-rw-r--r--include/linux/pinctrl/pinmux.h6
-rw-r--r--include/linux/platform_data/bfin_rotary.h117
-rw-r--r--include/linux/platform_data/cpuidle-exynos.h20
-rw-r--r--include/linux/platform_data/dma-dw.h6
-rw-r--r--include/linux/platform_data/dma-hsu.h25
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h3
-rw-r--r--include/linux/platform_data/dma-mmp_tdma.h7
-rw-r--r--include/linux/platform_data/dma-rcar-audmapp.h34
-rw-r--r--include/linux/platform_data/gpio-ath79.h19
-rw-r--r--include/linux/platform_data/gpio-omap.h12
-rw-r--r--include/linux/platform_data/hsmmc-omap.h6
-rw-r--r--include/linux/platform_data/i2c-davinci.h1
-rw-r--r--include/linux/platform_data/ipmmu-vmsa.h24
-rw-r--r--include/linux/platform_data/irda-sa11x0.h20
-rw-r--r--include/linux/platform_data/keyboard-spear.h2
-rw-r--r--include/linux/platform_data/mmc-msm_sdcc.h27
-rw-r--r--include/linux/platform_data/mmc-omap.h4
-rw-r--r--include/linux/platform_data/msm_serial_hs.h49
-rw-r--r--include/linux/platform_data/nfcmrvl.h40
-rw-r--r--include/linux/platform_data/ntc_thermistor.h1
-rw-r--r--include/linux/platform_data/nxp-nci.h27
-rw-r--r--include/linux/platform_data/regulator-haptic.h29
-rw-r--r--include/linux/platform_data/serial-imx.h5
-rw-r--r--include/linux/platform_data/si5351.h4
-rw-r--r--include/linux/platform_data/sky81452-backlight.h46
-rw-r--r--include/linux/platform_data/st-nci.h (renamed from include/linux/platform_data/st21nfcb.h)16
-rw-r--r--include/linux/platform_data/st21nfca.h2
-rw-r--r--include/linux/platform_data/st33zp24.h (renamed from include/linux/platform_data/irq-renesas-irqc.h)23
-rw-r--r--include/linux/platform_data/st_nci.h29
-rw-r--r--include/linux/platform_data/usb-rcar-gen2-phy.h22
-rw-r--r--include/linux/platform_data/video-msm_fb.h146
-rw-r--r--include/linux/platform_data/vsp1.h27
-rw-r--r--include/linux/platform_data/wkup_m3.h30
-rw-r--r--include/linux/platform_device.h25
-rw-r--r--include/linux/pm-trace.h (renamed from include/linux/resume-trace.h)9
-rw-r--r--include/linux/pm.h24
-rw-r--r--include/linux/pm_clock.h10
-rw-r--r--include/linux/pm_domain.h10
-rw-r--r--include/linux/pm_wakeirq.h51
-rw-r--r--include/linux/pm_wakeup.h9
-rw-r--r--include/linux/pmem.h152
-rw-r--r--include/linux/pnp.h24
-rw-r--r--include/linux/power/charger-manager.h35
-rw-r--r--include/linux/power/max17042_battery.h13
-rw-r--r--include/linux/power_supply.h89
-rw-r--r--include/linux/preempt.h161
-rw-r--r--include/linux/preempt_mask.h117
-rw-r--r--include/linux/printk.h19
-rw-r--r--include/linux/property.h46
-rw-r--r--include/linux/pstore.h2
-rw-r--r--include/linux/pstore_ram.h1
-rw-r--r--include/linux/ptp_clock_kernel.h12
-rw-r--r--include/linux/pwm.h12
-rw-r--r--include/linux/pxa2xx_ssp.h4
-rw-r--r--include/linux/qcom_scm.h39
-rw-r--r--include/linux/quota.h157
-rw-r--r--include/linux/quotaops.h21
-rw-r--r--include/linux/raid/pq.h1
-rw-r--r--include/linux/random.h9
-rw-r--r--include/linux/rbtree.h18
-rw-r--r--include/linux/rbtree_augmented.h21
-rw-r--r--include/linux/rbtree_latch.h212
-rw-r--r--include/linux/rculist.h26
-rw-r--r--include/linux/rcupdate.h138
-rw-r--r--include/linux/rcutiny.h61
-rw-r--r--include/linux/rcutree.h20
-rw-r--r--include/linux/reboot.h3
-rw-r--r--include/linux/regmap.h16
-rw-r--r--include/linux/regulator/act8865.h14
-rw-r--r--include/linux/regulator/consumer.h23
-rw-r--r--include/linux/regulator/da9211.h2
-rw-r--r--include/linux/regulator/driver.h30
-rw-r--r--include/linux/regulator/machine.h22
-rw-r--r--include/linux/regulator/max8973-regulator.h4
-rw-r--r--include/linux/regulator/mt6397-regulator.h49
-rw-r--r--include/linux/regulator/pfuze100.h14
-rw-r--r--include/linux/remoteproc.h11
-rw-r--r--include/linux/reset/bcm63xx_pmb.h88
-rw-r--r--include/linux/resource_ext.h77
-rw-r--r--include/linux/rhashtable.h827
-rw-r--r--include/linux/rio.h2
-rw-r--r--include/linux/rmap.h12
-rw-r--r--include/linux/rtc.h23
-rw-r--r--include/linux/rtc/ds1685.h375
-rw-r--r--include/linux/rtc/sirfsoc_rtciobrg.h4
-rw-r--r--include/linux/rtnetlink.h13
-rw-r--r--include/linux/scatterlist.h47
-rw-r--r--include/linux/sched.h304
-rw-r--r--include/linux/sched/rt.h7
-rw-r--r--include/linux/sched/sysctl.h12
-rw-r--r--include/linux/scif.h993
-rw-r--r--include/linux/security.h1645
-rw-r--r--include/linux/seq_buf.h3
-rw-r--r--include/linux/seq_file.h26
-rw-r--r--include/linux/seqlock.h134
-rw-r--r--include/linux/serial_8250.h27
-rw-r--r--include/linux/serial_core.h61
-rw-r--r--include/linux/serial_mfd.h47
-rw-r--r--include/linux/serial_s3c.h28
-rw-r--r--include/linux/serial_sci.h86
-rw-r--r--include/linux/shdma-base.h1
-rw-r--r--include/linux/shrinker.h6
-rw-r--r--include/linux/skbuff.h179
-rw-r--r--include/linux/slab.h51
-rw-r--r--include/linux/slab_def.h2
-rw-r--r--include/linux/slub_def.h21
-rw-r--r--include/linux/smp.h9
-rw-r--r--include/linux/smpboot.h6
-rw-r--r--include/linux/soc/sunxi/sunxi_sram.h19
-rw-r--r--include/linux/sock_diag.h46
-rw-r--r--include/linux/socket.h15
-rw-r--r--include/linux/spi/at86rf230.h5
-rw-r--r--include/linux/spi/l4f00242t03.h4
-rw-r--r--include/linux/spi/lms283gf05.h4
-rw-r--r--include/linux/spi/mxs-spi.h4
-rw-r--r--include/linux/spi/pxa2xx_spi.h5
-rw-r--r--include/linux/spi/rspi.h5
-rw-r--r--include/linux/spi/sh_hspi.h4
-rw-r--r--include/linux/spi/sh_msiof.h2
-rw-r--r--include/linux/spi/spi.h16
-rw-r--r--include/linux/spi/tle62x0.h4
-rw-r--r--include/linux/spi/tsc2005.h5
-rw-r--r--include/linux/spinlock.h10
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/spinlock_api_up.h1
-rw-r--r--include/linux/srcu.h16
-rw-r--r--include/linux/ssb/ssb.h8
-rw-r--r--include/linux/ssb/ssb_regs.h1
-rw-r--r--include/linux/stacktrace.h2
-rw-r--r--include/linux/stddef.h15
-rw-r--r--include/linux/stmmac.h3
-rw-r--r--include/linux/string.h7
-rw-r--r--include/linux/string_helpers.h12
-rw-r--r--include/linux/sunrpc/bc_xprt.h1
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/debug.h18
-rw-r--r--include/linux/sunrpc/metrics.h7
-rw-r--r--include/linux/sunrpc/msg_prot.h8
-rw-r--r--include/linux/sunrpc/rpc_rdma.h14
-rw-r--r--include/linux/sunrpc/sched.h19
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svc_rdma.h26
-rw-r--r--include/linux/sunrpc/xprt.h45
-rw-r--r--include/linux/sunrpc/xprtrdma.h8
-rw-r--r--include/linux/suspend.h16
-rw-r--r--include/linux/sw842.h12
-rw-r--r--include/linux/swap.h18
-rw-r--r--include/linux/swapops.h8
-rw-r--r--include/linux/syscalls.h26
-rw-r--r--include/linux/sysctl.h6
-rw-r--r--include/linux/sysfs.h30
-rw-r--r--include/linux/syslog.h6
-rw-r--r--include/linux/tcp.h34
-rw-r--r--include/linux/thermal.h153
-rw-r--r--include/linux/ti_wilink_st.h13
-rw-r--r--include/linux/tick.h202
-rw-r--r--include/linux/time64.h2
-rw-r--r--include/linux/timecounter.h139
-rw-r--r--include/linux/timekeeper_internal.h35
-rw-r--r--include/linux/timekeeping.h42
-rw-r--r--include/linux/timer.h63
-rw-r--r--include/linux/timerqueue.h8
-rw-r--r--include/linux/topology.h6
-rw-r--r--include/linux/trace_events.h (renamed from include/linux/ftrace_event.h)199
-rw-r--r--include/linux/tracefs.h45
-rw-r--r--include/linux/tracepoint.h10
-rw-r--r--include/linux/tty.h28
-rw-r--r--include/linux/types.h26
-rw-r--r--include/linux/u64_stats_sync.h7
-rw-r--r--include/linux/uaccess.h48
-rw-r--r--include/linux/udp.h18
-rw-r--r--include/linux/uidgid.h16
-rw-r--r--include/linux/uio.h33
-rw-r--r--include/linux/ulpi/driver.h60
-rw-r--r--include/linux/ulpi/interface.h23
-rw-r--r--include/linux/ulpi/regs.h130
-rw-r--r--include/linux/usb.h33
-rw-r--r--include/linux/usb/cdc_ncm.h7
-rw-r--r--include/linux/usb/chipidea.h2
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/ehci_pdriver.h4
-rw-r--r--include/linux/usb/gadget.h11
-rw-r--r--include/linux/usb/hcd.h5
-rw-r--r--include/linux/usb/msm_hsusb.h26
-rw-r--r--include/linux/usb/msm_hsusb_hw.h9
-rw-r--r--include/linux/usb/net2280.h3
-rw-r--r--include/linux/usb/otg-fsm.h2
-rw-r--r--include/linux/usb/phy.h12
-rw-r--r--include/linux/usb/renesas_usbhs.h5
-rw-r--r--include/linux/usb/serial.h3
-rw-r--r--include/linux/usb/ulpi.h134
-rw-r--r--include/linux/usb/usb338x.h4
-rw-r--r--include/linux/usb/usb_phy_generic.h2
-rw-r--r--include/linux/usb/usbnet.h16
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/util_macros.h40
-rw-r--r--include/linux/uwb/umc.h2
-rw-r--r--include/linux/vfio.h40
-rw-r--r--include/linux/vgaarb.h5
-rw-r--r--include/linux/virtio.h2
-rw-r--r--include/linux/virtio_byteorder.h24
-rw-r--r--include/linux/virtio_config.h34
-rw-r--r--include/linux/virtio_mmio.h44
-rw-r--r--include/linux/virtio_ring.h23
-rw-r--r--include/linux/vmalloc.h14
-rw-r--r--include/linux/vme.h3
-rw-r--r--include/linux/vmw_vmci_api.h2
-rw-r--r--include/linux/vringh.h18
-rw-r--r--include/linux/vt_buffer.h4
-rw-r--r--include/linux/wait.h70
-rw-r--r--include/linux/watchdog.h11
-rw-r--r--include/linux/wl12xx.h49
-rw-r--r--include/linux/workqueue.h43
-rw-r--r--include/linux/writeback.h224
-rw-r--r--include/linux/zpool.h8
-rw-r--r--include/linux/zsmalloc.h3
-rw-r--r--include/media/adp1653.h8
-rw-r--r--include/media/adv7511.h7
-rw-r--r--include/media/adv7604.h82
-rw-r--r--include/media/adv7842.h142
-rw-r--r--include/media/atmel-isi.h4
-rw-r--r--include/media/davinci/vpfe_capture.h2
-rw-r--r--include/media/media-entity.h21
-rw-r--r--include/media/mt9p031.h2
-rw-r--r--include/media/omap3isp.h38
-rw-r--r--include/media/ov2659.h34
-rw-r--r--include/media/rc-core.h9
-rw-r--r--include/media/rc-map.h4
-rw-r--r--include/media/saa7146_vv.h4
-rw-r--r--include/media/smiapp.h10
-rw-r--r--include/media/tea575x.h5
-rw-r--r--include/media/v4l2-clk.h10
-rw-r--r--include/media/v4l2-dev.h4
-rw-r--r--include/media/v4l2-device.h2
-rw-r--r--include/media/v4l2-dv-timings.h6
-rw-r--r--include/media/v4l2-flash-led-class.h148
-rw-r--r--include/media/v4l2-ioctl.h21
-rw-r--r--include/media/v4l2-mediabus.h2
-rw-r--r--include/media/v4l2-mem2mem.h4
-rw-r--r--include/media/v4l2-of.h50
-rw-r--r--include/media/v4l2-subdev.h81
-rw-r--r--include/media/videobuf-dma-sg.h8
-rw-r--r--include/media/videobuf-dvb.h6
-rw-r--r--include/media/videobuf2-core.h33
-rw-r--r--include/misc/cxl-base.h48
-rw-r--r--include/misc/cxl.h207
-rw-r--r--include/net/9p/client.h8
-rw-r--r--include/net/9p/transport.h2
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/af_unix.h1
-rw-r--r--include/net/af_vsock.h6
-rw-r--r--include/net/arp.h20
-rw-r--r--include/net/ax25.h21
-rw-r--r--include/net/bluetooth/bluetooth.h56
-rw-r--r--include/net/bluetooth/hci.h150
-rw-r--r--include/net/bluetooth/hci_core.h316
-rw-r--r--include/net/bluetooth/l2cap.h1
-rw-r--r--include/net/bluetooth/mgmt.h105
-rw-r--r--include/net/bluetooth/rfcomm.h2
-rw-r--r--include/net/bond_3ad.h30
-rw-r--r--include/net/bond_options.h3
-rw-r--r--include/net/bonding.h28
-rw-r--r--include/net/caif/cfpkt.h2
-rw-r--r--include/net/cfg80211.h428
-rw-r--r--include/net/cfg802154.h82
-rw-r--r--include/net/checksum.h9
-rw-r--r--include/net/cipso_ipv4.h25
-rw-r--r--include/net/codel.h22
-rw-r--r--include/net/compat.h2
-rw-r--r--include/net/dcbnl.h3
-rw-r--r--include/net/dn_neigh.h5
-rw-r--r--include/net/dsa.h27
-rw-r--r--include/net/dst.h19
-rw-r--r--include/net/dst_ops.h1
-rw-r--r--include/net/fib_rules.h14
-rw-r--r--include/net/flow_dissector.h220
-rw-r--r--include/net/flow_keys.h45
-rw-r--r--include/net/genetlink.h25
-rw-r--r--include/net/geneve.h12
-rw-r--r--include/net/gro_cells.h29
-rw-r--r--include/net/ieee802154_netdev.h38
-rw-r--r--include/net/if_inet6.h4
-rw-r--r--include/net/inet6_connection_sock.h3
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_common.h9
-rw-r--r--include/net/inet_connection_sock.h45
-rw-r--r--include/net/inet_frag.h2
-rw-r--r--include/net/inet_hashtables.h85
-rw-r--r--include/net/inet_sock.h60
-rw-r--r--include/net/inet_timewait_sock.h108
-rw-r--r--include/net/inetpeer.h3
-rw-r--r--include/net/ip.h79
-rw-r--r--include/net/ip6_fib.h55
-rw-r--r--include/net/ip6_route.h27
-rw-r--r--include/net/ip6_tunnel.h7
-rw-r--r--include/net/ip_fib.h88
-rw-r--r--include/net/ip_tunnels.h7
-rw-r--r--include/net/ip_vs.h69
-rw-r--r--include/net/ipv6.h75
-rw-r--r--include/net/iw_handler.h22
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/mac80211.h587
-rw-r--r--include/net/mac802154.h268
-rw-r--r--include/net/ndisc.h19
-rw-r--r--include/net/neighbour.h67
-rw-r--r--include/net/net_namespace.h67
-rw-r--r--include/net/netfilter/br_netfilter.h60
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h8
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h13
-rw-r--r--include/net/netfilter/nf_conntrack.h7
-rw-r--r--include/net/netfilter/nf_log.h10
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h48
-rw-r--r--include/net/netfilter/nf_queue.h8
-rw-r--r--include/net/netfilter/nf_tables.h660
-rw-r--r--include/net/netfilter/nf_tables_core.h3
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h5
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h5
-rw-r--r--include/net/netfilter/nft_meta.h4
-rw-r--r--include/net/netlink.h60
-rw-r--r--include/net/netns/generic.h2
-rw-r--r--include/net/netns/hash.h4
-rw-r--r--include/net/netns/ipv4.h22
-rw-r--r--include/net/netns/ipv6.h4
-rw-r--r--include/net/netns/mpls.h17
-rw-r--r--include/net/netns/netfilter.h4
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/netns/sctp.h1
-rw-r--r--include/net/netns/x_tables.h3
-rw-r--r--include/net/nfc/hci.h36
-rw-r--r--include/net/nfc/nci.h98
-rw-r--r--include/net/nfc/nci_core.h213
-rw-r--r--include/net/nfc/nfc.h49
-rw-r--r--include/net/nl802154.h126
-rw-r--r--include/net/ping.h9
-rw-r--r--include/net/pkt_sched.h12
-rw-r--r--include/net/regulatory.h19
-rw-r--r--include/net/request_sock.h132
-rw-r--r--include/net/route.h2
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sch_generic.h40
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sctp/structs.h4
-rw-r--r--include/net/sock.h169
-rw-r--r--include/net/switchdev.h253
-rw-r--r--include/net/tc_act/tc_bpf.h29
-rw-r--r--include/net/tc_act/tc_connmark.h14
-rw-r--r--include/net/tcp.h238
-rw-r--r--include/net/tcp_states.h4
-rw-r--r--include/net/udp.h24
-rw-r--r--include/net/udp_tunnel.h17
-rw-r--r--include/net/udplite.h3
-rw-r--r--include/net/vxlan.h106
-rw-r--r--include/net/xfrm.h25
-rw-r--r--include/ras/ras_event.h85
-rw-r--r--include/rdma/ib_addr.h7
-rw-r--r--include/rdma/ib_cache.h8
-rw-r--r--include/rdma/ib_cm.h7
-rw-r--r--include/rdma/ib_mad.h41
-rw-r--r--include/rdma/ib_verbs.h413
-rw-r--r--include/rdma/iw_cm.h1
-rw-r--r--include/rdma/iw_portmap.h25
-rw-r--r--include/rdma/opa_smi.h106
-rw-r--r--include/rdma/rdma_cm.h2
-rw-r--r--include/rxrpc/packet.h3
-rw-r--r--include/scsi/scsi.h288
-rw-r--r--include/scsi/scsi_common.h64
-rw-r--r--include/scsi/scsi_dbg.h74
-rw-r--r--include/scsi/scsi_device.h22
-rw-r--r--include/scsi/scsi_devinfo.h1
-rw-r--r--include/scsi/scsi_eh.h32
-rw-r--r--include/scsi/scsi_host.h3
-rw-r--r--include/scsi/scsi_proto.h281
-rw-r--r--include/scsi/scsi_tcq.h3
-rw-r--r--include/scsi/scsi_transport_fc.h1
-rw-r--r--include/scsi/scsi_transport_srp.h1
-rw-r--r--include/scsi/srp.h7
-rw-r--r--include/soc/at91/at91rm9200_sdramc.h63
-rw-r--r--include/soc/at91/at91sam9_ddrsdr.h2
-rw-r--r--include/soc/imx/revision.h37
-rw-r--r--include/soc/imx/timer.h26
-rw-r--r--include/soc/sa1100/pwer.h15
-rw-r--r--include/soc/tegra/emc.h19
-rw-r--r--include/soc/tegra/fuse.h2
-rw-r--r--include/soc/tegra/mc.h20
-rw-r--r--include/soc/tegra/pm.h2
-rw-r--r--include/soc/tegra/pmc.h2
-rw-r--r--include/sound/ac97_codec.h4
-rw-r--r--include/sound/ad1816a.h5
-rw-r--r--include/sound/ak4113.h11
-rw-r--r--include/sound/ak4114.h11
-rw-r--r--include/sound/compress_driver.h9
-rw-r--r--include/sound/control.h15
-rw-r--r--include/sound/core.h51
-rw-r--r--include/sound/designware_i2s.h2
-rw-r--r--include/sound/dmaengine_pcm.h5
-rw-r--r--include/sound/emu10k1.h30
-rw-r--r--include/sound/emux_synth.h2
-rw-r--r--include/sound/es1688.h3
-rw-r--r--include/sound/gus.h6
-rw-r--r--include/sound/hda_i915.h36
-rw-r--r--include/sound/hda_register.h244
-rw-r--r--include/sound/hda_regmap.h219
-rw-r--r--include/sound/hdaudio.h550
-rw-r--r--include/sound/hdaudio_ext.h132
-rw-r--r--include/sound/hwdep.h3
-rw-r--r--include/sound/info.h37
-rw-r--r--include/sound/jack.h13
-rw-r--r--include/sound/pcm.h97
-rw-r--r--include/sound/pcm_drm_eld.h6
-rw-r--r--include/sound/pcm_iec958.h9
-rw-r--r--include/sound/pcm_params.h103
-rw-r--r--include/sound/rawmidi.h4
-rw-r--r--include/sound/rcar_snd.h1
-rw-r--r--include/sound/rt5645.h6
-rw-r--r--include/sound/rt5670.h1
-rw-r--r--include/sound/rt5677.h3
-rw-r--r--include/sound/sb.h8
-rw-r--r--include/sound/seq_device.h46
-rw-r--r--include/sound/seq_kernel.h15
-rw-r--r--include/sound/simple_card.h2
-rw-r--r--include/sound/soc-dapm.h61
-rw-r--r--include/sound/soc-dpcm.h2
-rw-r--r--include/sound/soc-topology.h168
-rw-r--r--include/sound/soc.h160
-rw-r--r--include/sound/spear_dma.h2
-rw-r--r--include/sound/sta32x.h18
-rw-r--r--include/sound/tlv.h15
-rw-r--r--include/sound/wss.h6
-rw-r--r--include/target/iscsi/iscsi_target_core.h893
-rw-r--r--include/target/iscsi/iscsi_target_stat.h64
-rw-r--r--include/target/iscsi/iscsi_transport.h2
-rw-r--r--include/target/target_core_backend.h79
-rw-r--r--include/target/target_core_backend_configfs.h118
-rw-r--r--include/target/target_core_base.h213
-rw-r--r--include/target/target_core_configfs.h56
-rw-r--r--include/target/target_core_fabric.h104
-rw-r--r--include/target/target_core_fabric_configfs.h5
-rw-r--r--include/trace/define_trace.h3
-rw-r--r--include/trace/events/9p.h157
-rw-r--r--include/trace/events/btrfs.h63
-rw-r--r--include/trace/events/clk.h198
-rw-r--r--include/trace/events/cma.h66
-rw-r--r--include/trace/events/compaction.h209
-rw-r--r--include/trace/events/ext3.h18
-rw-r--r--include/trace/events/ext4.h87
-rw-r--r--include/trace/events/f2fs.h373
-rw-r--r--include/trace/events/filemap.h8
-rw-r--r--include/trace/events/intel-sst.h7
-rw-r--r--include/trace/events/iommu.h31
-rw-r--r--include/trace/events/irq.h39
-rw-r--r--include/trace/events/kmem.h103
-rw-r--r--include/trace/events/kvm.h19
-rw-r--r--include/trace/events/libata.h325
-rw-r--r--include/trace/events/migrate.h42
-rw-r--r--include/trace/events/module.h4
-rw-r--r--include/trace/events/net.h8
-rw-r--r--include/trace/events/power.h27
-rw-r--r--include/trace/events/random.h10
-rw-r--r--include/trace/events/regmap.h252
-rw-r--r--include/trace/events/sched.h3
-rw-r--r--include/trace/events/sunrpc.h62
-rw-r--r--include/trace/events/target.h2
-rw-r--r--include/trace/events/thermal.h58
-rw-r--r--include/trace/events/thermal_power_allocator.h87
-rw-r--r--include/trace/events/timer.h12
-rw-r--r--include/trace/events/tlb.h36
-rw-r--r--include/trace/events/v4l2.h78
-rw-r--r--include/trace/events/vmscan.h8
-rw-r--r--include/trace/events/writeback.h121
-rw-r--r--include/trace/events/xen.h2
-rw-r--r--include/trace/perf.h350
-rw-r--r--include/trace/syscall.h6
-rw-r--r--include/trace/trace_events.h (renamed from include/trace/ftrace.h)448
-rw-r--r--include/uapi/asm-generic/errno.h11
-rw-r--r--include/uapi/drm/amdgpu_drm.h643
-rw-r--r--include/uapi/drm/drm.h11
-rw-r--r--include/uapi/drm/drm_fourcc.h96
-rw-r--r--include/uapi/drm/drm_mode.h67
-rw-r--r--include/uapi/drm/i915_drm.h44
-rw-r--r--include/uapi/drm/msm_drm.h76
-rw-r--r--include/uapi/drm/nouveau_drm.h1
-rw-r--r--include/uapi/drm/radeon_drm.h6
-rw-r--r--include/uapi/drm/tegra_drm.h3
-rw-r--r--include/uapi/linux/Kbuild10
-rw-r--r--include/uapi/linux/am437x-vpfe.h124
-rw-r--r--include/uapi/linux/bpf.h106
-rw-r--r--include/uapi/linux/btrfs.h3
-rw-r--r--include/uapi/linux/can.h6
-rw-r--r--include/uapi/linux/can/gw.h5
-rw-r--r--include/uapi/linux/can/raw.h1
-rw-r--r--include/uapi/linux/cryptouser.h (renamed from include/linux/cryptouser.h)6
-rw-r--r--include/uapi/linux/dcbnl.h76
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/dvb/dmx.h10
-rw-r--r--include/uapi/linux/dvb/frontend.h223
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/ethtool.h41
-rw-r--r--include/uapi/linux/falloc.h17
-rw-r--r--include/uapi/linux/filter.h10
-rw-r--r--include/uapi/linux/fou.h2
-rw-r--r--include/uapi/linux/fs.h4
-rw-r--r--include/uapi/linux/fuse.h3
-rw-r--r--include/uapi/linux/gsmmux.h (renamed from include/linux/gsmmux.h)3
-rw-r--r--include/uapi/linux/hsi/Kbuild2
-rw-r--r--include/uapi/linux/hsi/cs-protocol.h119
-rw-r--r--include/uapi/linux/hyperv.h8
-rw-r--r--include/uapi/linux/i2c.h1
-rw-r--r--include/uapi/linux/if_addr.h2
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_link.h48
-rw-r--r--include/uapi/linux/if_packet.h8
-rw-r--r--include/uapi/linux/if_tun.h6
-rw-r--r--include/uapi/linux/iio/Kbuild3
-rw-r--r--include/uapi/linux/iio/events.h42
-rw-r--r--include/uapi/linux/iio/types.h94
-rw-r--r--include/uapi/linux/in.h20
-rw-r--r--include/uapi/linux/inet_diag.h8
-rw-r--r--include/uapi/linux/input.h11
-rw-r--r--include/uapi/linux/ip.h1
-rw-r--r--include/uapi/linux/ip_vs.h7
-rw-r--r--include/uapi/linux/ipv6.h8
-rw-r--r--include/uapi/linux/ipv6_route.h1
-rw-r--r--include/uapi/linux/kernel-page-flags.h1
-rw-r--r--include/uapi/linux/kexec.h6
-rw-r--r--include/uapi/linux/kfd_ioctl.h135
-rw-r--r--include/uapi/linux/kvm.h84
-rw-r--r--include/uapi/linux/l2tp.h1
-rw-r--r--include/uapi/linux/libc-compat.h28
-rw-r--r--include/uapi/linux/magic.h2
-rw-r--r--include/uapi/linux/media-bus-format.h28
-rw-r--r--include/uapi/linux/media.h52
-rw-r--r--include/uapi/linux/mempolicy.h2
-rw-r--r--include/uapi/linux/mic_common.h12
-rw-r--r--include/uapi/linux/mpls.h10
-rw-r--r--include/uapi/linux/msdos_fs.h4
-rw-r--r--include/uapi/linux/nbd.h2
-rw-r--r--include/uapi/linux/ndctl.h197
-rw-r--r--include/uapi/linux/neighbour.h2
-rw-r--r--include/uapi/linux/net_namespace.h23
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netfilter.h9
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_tcp.h3
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h82
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h4
-rw-r--r--include/uapi/linux/netfilter/xt_socket.h8
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h4
-rw-r--r--include/uapi/linux/netlink.h16
-rw-r--r--include/uapi/linux/nfc.h11
-rw-r--r--include/uapi/linux/nfs4.h14
-rw-r--r--include/uapi/linux/nfs_idmap.h2
-rw-r--r--include/uapi/linux/nfsd/debug.h9
-rw-r--r--include/uapi/linux/nfsd/export.h7
-rw-r--r--include/uapi/linux/nl80211.h265
-rw-r--r--include/uapi/linux/nvme.h31
-rw-r--r--include/uapi/linux/openvswitch.h57
-rw-r--r--include/uapi/linux/pci_regs.h4
-rw-r--r--include/uapi/linux/perf_event.h133
-rw-r--r--include/uapi/linux/pkt_cls.h59
-rw-r--r--include/uapi/linux/pkt_sched.h9
-rw-r--r--include/uapi/linux/prctl.h5
-rw-r--r--include/uapi/linux/quota.h20
-rw-r--r--include/uapi/linux/raid/md_p.h7
-rw-r--r--include/uapi/linux/raid/md_u.h1
-rw-r--r--include/uapi/linux/rds.h10
-rw-r--r--include/uapi/linux/rtnetlink.h27
-rw-r--r--include/uapi/linux/scif_ioctl.h130
-rw-r--r--include/uapi/linux/serial.h4
-rw-r--r--include/uapi/linux/serial_core.h15
-rw-r--r--include/uapi/linux/serial_reg.h25
-rw-r--r--include/uapi/linux/snmp.h8
-rw-r--r--include/uapi/linux/sock_diag.h10
-rw-r--r--include/uapi/linux/som.h154
-rw-r--r--include/uapi/linux/target_core_user.h44
-rw-r--r--include/uapi/linux/tc_act/Kbuild2
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h33
-rw-r--r--include/uapi/linux/tc_act/tc_connmark.h22
-rw-r--r--include/uapi/linux/tcp.h7
-rw-r--r--include/uapi/linux/tipc_config.h20
-rw-r--r--include/uapi/linux/tipc_netlink.h9
-rw-r--r--include/uapi/linux/tty.h1
-rw-r--r--include/uapi/linux/tty_flags.h2
-rw-r--r--include/uapi/linux/usb/functionfs.h1
-rw-r--r--include/uapi/linux/usbdevice_fs.h3
-rw-r--r--include/uapi/linux/v4l2-controls.h4
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h64
-rw-r--r--include/uapi/linux/v4l2-mediabus.h4
-rw-r--r--include/uapi/linux/v4l2-subdev.h12
-rw-r--r--include/uapi/linux/vfio.h105
-rw-r--r--include/uapi/linux/vhost.h14
-rw-r--r--include/uapi/linux/videodev2.h118
-rw-r--r--include/uapi/linux/virtio_balloon.h36
-rw-r--r--include/uapi/linux/virtio_blk.h25
-rw-r--r--include/uapi/linux/virtio_config.h2
-rw-r--r--include/uapi/linux/virtio_gpu.h206
-rw-r--r--include/uapi/linux/virtio_ids.h2
-rw-r--r--include/uapi/linux/virtio_input.h76
-rw-r--r--include/uapi/linux/virtio_net.h42
-rw-r--r--include/uapi/linux/virtio_pci.h93
-rw-r--r--include/uapi/linux/virtio_ring.h2
-rw-r--r--include/uapi/linux/virtio_scsi.h12
-rw-r--r--include/uapi/linux/xfrm.h2
-rw-r--r--include/uapi/linux/xilinx-v4l2-controls.h73
-rw-r--r--include/uapi/misc/cxl.h22
-rw-r--r--include/uapi/rdma/ib_user_verbs.h25
-rw-r--r--include/uapi/rdma/rdma_netlink.h1
-rw-r--r--include/uapi/sound/asequencer.h1
-rw-r--r--include/uapi/sound/asoc.h388
-rw-r--r--include/uapi/sound/asound.h42
-rw-r--r--include/uapi/sound/compress_offload.h2
-rw-r--r--include/uapi/sound/emu10k1.h3
-rw-r--r--include/uapi/sound/hdspm.h6
-rw-r--r--include/uapi/sound/tlv.h31
-rw-r--r--include/uapi/sound/usb_stream.h76
-rw-r--r--include/video/exynos5433_decon.h165
-rw-r--r--include/video/exynos7_decon.h349
-rw-r--r--include/video/imx-ipu-v3.h25
-rw-r--r--include/video/neomagic.h5
-rw-r--r--include/video/omapdss.h9
-rw-r--r--include/video/samsung_fimd.h11
-rw-r--r--include/video/tdfx.h2
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/grant_table.h44
-rw-r--r--include/xen/interface/features.h6
-rw-r--r--include/xen/interface/grant_table.h7
-rw-r--r--include/xen/interface/xen.h6
-rw-r--r--include/xen/page.h5
-rw-r--r--include/xen/xen-ops.h74
-rw-r--r--include/xen/xenbus.h24
1208 files changed, 58619 insertions, 18030 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index d5ec6c8..6b040f4 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 5a0a3e5..03aacfb 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 8b06e4c..11c3a01 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 7461327..b52c0dc 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,7 @@
#define METHOD_NAME__BBN "_BBN"
#define METHOD_NAME__CBA "_CBA"
#define METHOD_NAME__CID "_CID"
+#define METHOD_NAME__CLS "_CLS"
#define METHOD_NAME__CRS "_CRS"
#define METHOD_NAME__DDN "_DDN"
#define METHOD_NAME__HID "_HID"
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 1baae6e..f56de8c 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -240,7 +240,7 @@
/*
* If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
* define it now. This is the case where there the compiler does not support
- * a __FUNCTION__ macro or equivalent.
+ * a __func__ macro or equivalent.
*/
#ifndef ACPI_GET_FUNCTION_NAME
#define ACPI_GET_FUNCTION_NAME _acpi_function_name
@@ -249,12 +249,12 @@
* The Name parameter should be the procedure name as a quoted string.
* The function name is also used by the function exit macros below.
* Note: (const char) is used to be compatible with the debug interfaces
- * and macros such as __FUNCTION__.
+ * and macros such as __func__.
*/
#define ACPI_FUNCTION_NAME(name) static const char _acpi_function_name[] = #name;
#else
-/* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */
+/* Compiler supports __func__ (or equivalent) -- Ignore this macro */
#define ACPI_FUNCTION_NAME(name)
#endif /* ACPI_GET_FUNCTION_NAME */
@@ -294,8 +294,12 @@
/* DEBUG_PRINT functions */
-#define ACPI_DEBUG_PRINT(plist) ACPI_ACTUAL_DEBUG plist
-#define ACPI_DEBUG_PRINT_RAW(plist) ACPI_ACTUAL_DEBUG_RAW plist
+#ifndef COMPILER_VA_MACRO
+
+#define ACPI_DEBUG_PRINT(plist) acpi_debug_print plist
+#define ACPI_DEBUG_PRINT_RAW(plist) acpi_debug_print_raw plist
+
+#else
/* Helper macros for DEBUG_PRINT */
@@ -315,6 +319,11 @@
ACPI_DO_DEBUG_PRINT (acpi_debug_print_raw, level, line, \
filename, modulename, component, __VA_ARGS__)
+#define ACPI_DEBUG_PRINT(plist) ACPI_ACTUAL_DEBUG plist
+#define ACPI_DEBUG_PRINT_RAW(plist) ACPI_ACTUAL_DEBUG_RAW plist
+
+#endif
+
/*
* Function entry tracing
*
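The fallback above relies on ACPICA's double-parenthesis calling convention: every call site hands its whole argument list to ACPI_DEBUG_PRINT as a single parenthesized group, so the macro can expand either to the variadic ACPI_ACTUAL_DEBUG helper or, without COMPILER_VA_MACRO, to a direct acpi_debug_print call. A minimal sketch of such a call site (the function, object and flag names are illustrative, and the usual per-file _COMPONENT and ACPI_MODULE_NAME definitions are assumed):

    static void example_trace(void *obj, u32 flags)
    {
            ACPI_FUNCTION_NAME(example_trace)

            /* The extra parentheses pass the whole list as one macro argument */
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                              "Object %p, flags %X\n", obj, flags));
    }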
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index a08e55a..b0bb30e 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 61e32ec..83061ca 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -208,7 +208,10 @@ struct acpi_device_flags {
u32 visited:1;
u32 hotplug_notify:1;
u32 is_dock_station:1;
- u32 reserved:23;
+ u32 of_compatible_ok:1;
+ u32 coherent_dma:1;
+ u32 cca_seen:1;
+ u32 reserved:20;
};
/* File System */
@@ -252,6 +255,7 @@ struct acpi_device_pnp {
#define acpi_device_bid(d) ((d)->pnp.bus_id)
#define acpi_device_adr(d) ((d)->pnp.bus_address)
const char *acpi_device_hid(struct acpi_device *device);
+#define acpi_device_uid(d) ((d)->pnp.unique_id)
#define acpi_device_name(d) ((d)->pnp.device_name)
#define acpi_device_class(d) ((d)->pnp.device_class)
@@ -270,7 +274,6 @@ struct acpi_device_power_flags {
struct acpi_device_power_state {
struct {
u8 valid:1;
- u8 os_accessible:1;
u8 explicit_set:1; /* _PSx present? */
u8 reserved:6;
} flags;
@@ -379,14 +382,48 @@ struct acpi_device {
void (*remove)(struct acpi_device *);
};
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+ bool ret = false;
+
+ if (!adev)
+ return ret;
+
+ /**
+ * Currently, we only support _CCA=1 (i.e. coherent_dma=1)
+ * This should be equivalent to specifyig dma-coherent for
+ * a device in OF.
+ *
+ * For the case when _CCA=0 (i.e. coherent_dma=0 && cca_seen=1),
+ * There are two cases:
+ * case 1. Do not support and disable DMA.
+ * case 2. Support but rely on arch-specific cache maintenance for
+ * non-coherence DMA operations.
+ * Currently, we implement case 1 above.
+ *
+ * For the case when _CCA is missing (i.e. cca_seen=0) and
+ * platform specifies ACPI_CCA_REQUIRED, we do not support DMA,
+ * and fallback to arch-specific default handling.
+ *
+ * See acpi_init_coherency() for more info.
+ */
+ if (adev->flags.coherent_dma) {
+ ret = true;
+ if (coherent)
+ *coherent = adev->flags.coherent_dma;
+ }
+ return ret;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_ACPI;
}
-static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode)
{
- return fwnode ? container_of(fwnode, struct acpi_device, fwnode) : NULL;
+ return is_acpi_node(fwnode) ?
+ container_of(fwnode, struct acpi_device, fwnode) : NULL;
}
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
@@ -599,7 +636,7 @@ static inline bool acpi_device_can_wakeup(struct acpi_device *adev)
static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
{
- return adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible;
+ return adev->power.states[ACPI_STATE_D3_COLD].flags.valid;
}
#else /* CONFIG_ACPI */
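A minimal sketch of how a caller could consume the new acpi_check_dma() helper when deciding whether to enable DMA for an enumerated device; the helper name example_dma_supported() and the error code are illustrative assumptions, not part of the patch:

    #include <linux/errno.h>
    #include <linux/acpi.h>

    /* Hypothetical: report whether DMA may be used for this ACPI device */
    static int example_dma_supported(struct acpi_device *adev)
    {
            bool coherent = false;

            /*
             * acpi_check_dma() returns true only for _CCA = 1 (coherent DMA);
             * _CCA = 0, or a missing _CCA on a platform that requires it,
             * yields false and the device should not be used for DMA.
             */
            if (!acpi_check_dma(adev, &coherent))
                    return -ENOTSUPP;

            /* 'coherent' could now be passed on to the arch DMA setup code */
            return 0;
    }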
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index 444671e..dd86c5f 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -3,11 +3,15 @@
#include <linux/io.h>
+#include <asm/acpi.h>
+
+#ifndef acpi_os_ioremap
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
return ioremap_cache(phys, size);
}
+#endif
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
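
The new guard lets an architecture supply its own mapping policy from asm/acpi.h; a hypothetical override (path and mapping choice are illustrative, not taken from the patch) would look like:

/* hypothetical arch/<arch>/include/asm/acpi.h fragment */
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
					    acpi_size size)
{
	return ioremap(phys, size);	/* non-cacheable mapping on this arch */
}
#define acpi_os_ioremap acpi_os_ioremap
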
diff --git a/include/acpi/acpi_lpat.h b/include/acpi/acpi_lpat.h
new file mode 100644
index 0000000..da37e12
--- /dev/null
+++ b/include/acpi/acpi_lpat.h
@@ -0,0 +1,65 @@
+/*
+ * acpi_lpat.h - LPAT table processing functions
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ACPI_LPAT_H
+#define ACPI_LPAT_H
+
+struct acpi_lpat {
+ int temp;
+ int raw;
+};
+
+struct acpi_lpat_conversion_table {
+ struct acpi_lpat *lpat;
+ int lpat_count;
+};
+
+#ifdef CONFIG_ACPI
+
+int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+ int raw);
+int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+ int temp);
+struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
+ handle);
+void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+ *lpat_table);
+
+#else
+static inline int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+					int raw)
+{
+	return 0;
+}
+
+static inline int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+					 int temp)
+{
+	return 0;
+}
+
+static inline struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(
+							acpi_handle handle)
+{
+	return NULL;
+}
+
+static inline void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+						    *lpat_table)
+{
+}
+
+#endif
+#endif
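
A short sketch of the intended call sequence, assuming "handle" refers to a device (e.g. a PMIC) whose namespace carries an LPAT package; the fallback on a missing table is just one possible policy:

static int example_raw_to_temp(acpi_handle handle, int raw)
{
	struct acpi_lpat_conversion_table *lpat;
	int temp;

	lpat = acpi_lpat_get_conversion_table(handle);
	if (!lpat)
		return raw;	/* no LPAT: report the raw reading */

	temp = acpi_lpat_raw_to_temp(lpat, raw);
	acpi_lpat_free_conversion_table(lpat);
	return temp;
}
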
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 03b3e6d..d02df0a 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -95,7 +95,7 @@ acpi_physical_address acpi_os_get_root_pointer(void);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
- acpi_string * new_val);
+ char **new_val);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 5ba7846..e8ec18a 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20141107
+#define ACPI_CA_VERSION 0x20150619
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -195,9 +195,18 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
* address. Although ACPICA adheres to the ACPI specification which
* requires the use of the corresponding 64-bit address if it is non-zero,
* some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is TRUE, favor the 32-bit addresses.
+ * address. Default is FALSE, do not favor the 32-bit addresses.
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+
+/*
+ * Optionally use 32-bit FACS table addresses.
+ * It is reported that some platforms fail to resume from system suspend
+ * if the 64-bit FACS table address is used:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74021
+ * Default is TRUE, favor the 32-bit addresses.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_facs_addresses, TRUE);
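
Both use32_bit globals are ACPI_INIT_GLOBAL defaults that the OSPM may override before the tables are installed; a minimal sketch of early platform code doing so (the policy shown and the table count are purely illustrative):

static acpi_status __init example_early_table_init(void)
{
	/* Prefer the 64-bit FADT addresses, keep 32-bit FACS for reliable resume */
	acpi_gbl_use32_bit_fadt_addresses = FALSE;
	acpi_gbl_use32_bit_facs_addresses = TRUE;

	/* NULL lets ACPICA allocate its own root table list */
	return acpi_initialize_tables(NULL, 16, TRUE);
}
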
/*
* Optionally truncate I/O addresses to 16 bits. Provides compatibility
@@ -220,6 +229,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_install, FALSE);
/*
+ * Optionally enable runtime namespace override.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_runtime_namespace_override, TRUE);
+
+/*
* We keep track of the latest version of Windows that has been requested by
* the BIOS. ACPI 5.0.
*/
@@ -431,13 +445,13 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
- acpi_find_root_pointer(acpi_size * rsdp_address))
-
+ acpi_find_root_pointer(acpi_physical_address *
+ rsdp_address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_table_header(acpi_string signature,
- u32 instance,
- struct acpi_table_header
- *out_table_header))
+ acpi_get_table_header(acpi_string signature,
+ u32 instance,
+ struct acpi_table_header
+ *out_table_header))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table(acpi_string signature, u32 instance,
struct acpi_table_header
@@ -569,6 +583,14 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
address,
void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_gpe_raw_handler(acpi_handle
+ gpe_device,
+ u32 gpe_number,
+ u32 type,
+ acpi_gpe_handler
+ address,
+ void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
u32 gpe_number,
acpi_gpe_handler
@@ -806,8 +828,12 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_set_firmware_waking_vector(u32
- physical_address))
+ acpi_set_firmware_waking_vectors
+ (acpi_physical_address physical_address,
+ acpi_physical_address physical_address64))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_set_firmware_waking_vector(u32
+ physical_address))
#if ACPI_MACHINE_WIDTH == 64
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vector64(u64
@@ -891,12 +917,6 @@ ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_get_id(acpi_handle object,
- acpi_owner_id * out_type))
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_table_id(acpi_owner_id id))
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table_with_size(acpi_string signature,
u32 instance,
struct acpi_table_header
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index eb760ca..ebe2426 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -305,43 +305,51 @@ struct acpi_resource_source {
u8 max_address_fixed; \
union acpi_resource_attribute info;
-struct acpi_resource_address {
-ACPI_RESOURCE_ADDRESS_COMMON};
-
-struct acpi_resource_address16 {
- ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
+struct acpi_address16_attribute {
+ u16 granularity;
u16 minimum;
u16 maximum;
u16 translation_offset;
u16 address_length;
- struct acpi_resource_source resource_source;
};
-struct acpi_resource_address32 {
- ACPI_RESOURCE_ADDRESS_COMMON u32 granularity;
+struct acpi_address32_attribute {
+ u32 granularity;
u32 minimum;
u32 maximum;
u32 translation_offset;
u32 address_length;
- struct acpi_resource_source resource_source;
};
-struct acpi_resource_address64 {
- ACPI_RESOURCE_ADDRESS_COMMON u64 granularity;
+struct acpi_address64_attribute {
+ u64 granularity;
u64 minimum;
u64 maximum;
u64 translation_offset;
u64 address_length;
+};
+
+struct acpi_resource_address {
+ACPI_RESOURCE_ADDRESS_COMMON};
+
+struct acpi_resource_address16 {
+ ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address16_attribute address;
+ struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address32 {
+ ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address32_attribute address;
+ struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address64 {
+ ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address64_attribute address;
struct acpi_resource_source resource_source;
};
struct acpi_resource_extended_address64 {
ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID;
- u64 granularity;
- u64 minimum;
- u64 maximum;
- u64 translation_offset;
- u64 address_length;
+ struct acpi_address64_attribute address;
u64 type_specific;
};
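
Since the word/dword/qword address fields now live behind a nested "address" member, resource consumers need one more level of indirection; an illustrative accessor using the updated layout:

static u64 example_res_minimum(struct acpi_resource *res)
{
	switch (res->type) {
	case ACPI_RESOURCE_TYPE_ADDRESS16:
		return res->data.address16.address.minimum;
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		return res->data.address32.address.minimum;
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		return res->data.address64.address.minimum;
	default:
		return 0;
	}
}
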
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index bee19d8..2d5faf5 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,6 +65,7 @@
#define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */
#define ACPI_SIG_FADT "FACP" /* Fixed ACPI Description Table */
#define ACPI_SIG_FACS "FACS" /* Firmware ACPI Control Structure */
+#define ACPI_SIG_OSDT "OSDT" /* Override System Description Table */
#define ACPI_SIG_PSDT "PSDT" /* Persistent System Description Table */
#define ACPI_SIG_RSDP "RSD PTR " /* Root System Description Pointer */
#define ACPI_SIG_RSDT "RSDT" /* Root System Description Table */
@@ -284,6 +285,7 @@ struct acpi_table_fadt {
struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */
struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */
+ u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */
};
/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */
@@ -341,7 +343,7 @@ enum acpi_preferred_pm_profiles {
PM_TABLET = 8
};
-/* Values for sleep_status and sleep_control registers (V5 FADT) */
+/* Values for sleep_status and sleep_control registers (V5+ FADT) */
#define ACPI_X_WAKE_STATUS 0x80
#define ACPI_X_SLEEP_TYPE_MASK 0x1C
@@ -398,15 +400,17 @@ struct acpi_table_desc {
* FADT is the bottom line as to what the version really is.
*
* For reference, the values below are as follows:
- * FADT V1 size: 0x074
- * FADT V2 size: 0x084
- * FADT V3 size: 0x0F4
- * FADT V4 size: 0x0F4
- * FADT V5 size: 0x10C
+ * FADT V1 size: 0x074
+ * FADT V2 size: 0x084
+ * FADT V3 size: 0x0F4
+ * FADT V4 size: 0x0F4
+ * FADT V5 size: 0x10C
+ * FADT V6 size: 0x114
*/
#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control))
-#define ACPI_FADT_V5_SIZE (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
+#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
#endif /* __ACTBL_H__ */
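
A rough sketch of how the size macros map a table length back to an FADT version; note that V3 and V4 share the same size, so they cannot be told apart by length alone:

static u8 example_fadt_version(struct acpi_table_fadt *fadt)
{
	u32 length = fadt->header.length;

	if (length >= ACPI_FADT_V6_SIZE)
		return 6;
	if (length >= ACPI_FADT_V5_SIZE)
		return 5;
	if (length >= ACPI_FADT_V3_SIZE)
		return 3;	/* V3 or V4 */
	if (length >= ACPI_FADT_V2_SIZE)
		return 2;
	return 1;
}
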
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 29e7937..fcd5709 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,7 @@
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
+#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -673,7 +674,8 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
- ACPI_MADT_TYPE_RESERVED = 15 /* 15 and greater are reserved */
+ ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
+ ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */
};
/*
@@ -794,7 +796,7 @@ struct acpi_madt_local_x2apic_nmi {
u8 reserved[3]; /* reserved - must be zero */
};
-/* 11: Generic Interrupt (ACPI 5.0) */
+/* 11: Generic Interrupt (ACPI 5.0 + ACPI 6.0 changes) */
struct acpi_madt_generic_interrupt {
struct acpi_subtable_header header;
@@ -811,6 +813,8 @@ struct acpi_madt_generic_interrupt {
u32 vgic_interrupt;
u64 gicr_base_address;
u64 arm_mpidr;
+ u8 efficiency_class;
+ u8 reserved2[3];
};
/* Masks for Flags field above */
@@ -819,7 +823,7 @@ struct acpi_madt_generic_interrupt {
#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
-/* 12: Generic Distributor (ACPI 5.0) */
+/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
struct acpi_madt_generic_distributor {
struct acpi_subtable_header header;
@@ -827,7 +831,19 @@ struct acpi_madt_generic_distributor {
u32 gic_id;
u64 base_address;
u32 global_irq_base;
- u32 reserved2; /* reserved - must be zero */
+ u8 version;
+ u8 reserved2[3]; /* reserved - must be zero */
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_gic_version {
+ ACPI_MADT_GIC_VERSION_NONE = 0,
+ ACPI_MADT_GIC_VERSION_V1 = 1,
+ ACPI_MADT_GIC_VERSION_V2 = 2,
+ ACPI_MADT_GIC_VERSION_V3 = 3,
+ ACPI_MADT_GIC_VERSION_V4 = 4,
+ ACPI_MADT_GIC_VERSION_RESERVED = 5 /* 5 and greater are reserved */
};
/* 13: Generic MSI Frame (ACPI 5.1) */
@@ -855,6 +871,16 @@ struct acpi_madt_generic_redistributor {
u32 length;
};
+/* 15: Generic Translator (ACPI 6.0) */
+
+struct acpi_madt_generic_translator {
+ struct acpi_subtable_header header;
+ u16 reserved; /* reserved - must be zero */
+ u32 translation_id;
+ u64 base_address;
+ u32 reserved2;
+};
+
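
An illustrative walker for the new GIC-related subtables; struct acpi_table_madt and acpi_subtable_header come from these headers, and the printouts are only placeholders for real handling:

static void example_scan_madt(struct acpi_table_madt *madt)
{
	struct acpi_subtable_header *entry;
	u8 *end = (u8 *)madt + madt->header.length;

	entry = ACPI_ADD_PTR(struct acpi_subtable_header, madt,
			     sizeof(struct acpi_table_madt));
	while ((u8 *)entry < end && entry->length) {
		if (entry->type == ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR) {
			struct acpi_madt_generic_distributor *dist =
				(struct acpi_madt_generic_distributor *)entry;
			/* 0 means "probe the hardware", 1-4 map to GICv1-v4 */
			pr_info("GIC distributor version %u\n", dist->version);
		} else if (entry->type == ACPI_MADT_TYPE_GENERIC_TRANSLATOR) {
			struct acpi_madt_generic_translator *its =
				(struct acpi_madt_generic_translator *)entry;
			pr_info("GIC ITS %u at %#llx\n", its->translation_id,
				(unsigned long long)its->base_address);
		}
		entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
				     entry->length);
	}
}
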
/*
* Common flags fields for MADT subtables
*/
@@ -908,6 +934,159 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
+ * NFIT - NVDIMM Firmware Interface Table (ACPI 6.0)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_nfit {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved; /* Reserved, must be zero */
+};
+
+/* Subtable header for NFIT */
+
+struct acpi_nfit_header {
+ u16 type;
+ u16 length;
+};
+
+/* Values for subtable type in struct acpi_nfit_header */
+
+enum acpi_nfit_type {
+ ACPI_NFIT_TYPE_SYSTEM_ADDRESS = 0,
+ ACPI_NFIT_TYPE_MEMORY_MAP = 1,
+ ACPI_NFIT_TYPE_INTERLEAVE = 2,
+ ACPI_NFIT_TYPE_SMBIOS = 3,
+ ACPI_NFIT_TYPE_CONTROL_REGION = 4,
+ ACPI_NFIT_TYPE_DATA_REGION = 5,
+ ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
+ ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
+};
+
+/*
+ * NFIT Subtables
+ */
+
+/* 0: System Physical Address Range Structure */
+
+struct acpi_nfit_system_address {
+ struct acpi_nfit_header header;
+ u16 range_index;
+ u16 flags;
+	u32 reserved;		/* Reserved, must be zero */
+ u32 proximity_domain;
+ u8 range_guid[16];
+ u64 address;
+ u64 length;
+ u64 memory_mapping;
+};
+
+/* Flags */
+
+#define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */
+#define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */
+
+/* Range Type GUIDs appear in the include/acuuid.h file */
+
+/* 1: Memory Device to System Address Range Map Structure */
+
+struct acpi_nfit_memory_map {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 physical_id;
+ u16 region_id;
+ u16 range_index;
+ u16 region_index;
+ u64 region_size;
+ u64 region_offset;
+ u64 address;
+ u16 interleave_index;
+ u16 interleave_ways;
+ u16 flags;
+ u16 reserved; /* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */
+#define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */
+#define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */
+#define ACPI_NFIT_MEM_ARMED (1<<3) /* 03: Memory Device observed to be not armed */
+#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */
+#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */
+
+/* 2: Interleave Structure */
+
+struct acpi_nfit_interleave {
+ struct acpi_nfit_header header;
+ u16 interleave_index;
+ u16 reserved; /* Reserved, must be zero */
+ u32 line_count;
+ u32 line_size;
+ u32 line_offset[1]; /* Variable length */
+};
+
+/* 3: SMBIOS Management Information Structure */
+
+struct acpi_nfit_smbios {
+ struct acpi_nfit_header header;
+ u32 reserved; /* Reserved, must be zero */
+ u8 data[1]; /* Variable length */
+};
+
+/* 4: NVDIMM Control Region Structure */
+
+struct acpi_nfit_control_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_revision_id;
+ u8 reserved[6]; /* Reserved, must be zero */
+ u32 serial_number;
+ u16 code;
+ u16 windows;
+ u64 window_size;
+ u64 command_offset;
+ u64 command_size;
+ u64 status_offset;
+ u64 status_size;
+ u16 flags;
+ u8 reserved1[6]; /* Reserved, must be zero */
+};
+
+/* Flags */
+
+#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
+
+/* 5: NVDIMM Block Data Window Region Structure */
+
+struct acpi_nfit_data_region {
+ struct acpi_nfit_header header;
+ u16 region_index;
+ u16 windows;
+ u64 offset;
+ u64 size;
+ u64 capacity;
+ u64 start_address;
+};
+
+/* 6: Flush Hint Address Structure */
+
+struct acpi_nfit_flush_address {
+ struct acpi_nfit_header header;
+ u32 device_handle;
+ u16 hint_count;
+ u8 reserved[6]; /* Reserved, must be zero */
+ u64 hint_address[1]; /* Variable length */
+};
+
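
A hedged sketch of walking the NFIT by its common type/length header to pick out System Physical Address ranges (error handling trimmed, printout is a placeholder):

static void example_scan_nfit(struct acpi_table_nfit *nfit)
{
	struct acpi_nfit_header *hdr;
	u8 *end = (u8 *)nfit + nfit->header.length;

	hdr = (struct acpi_nfit_header *)(nfit + 1);
	while ((u8 *)hdr + sizeof(*hdr) <= end && hdr->length) {
		if (hdr->type == ACPI_NFIT_TYPE_SYSTEM_ADDRESS) {
			struct acpi_nfit_system_address *spa =
				(struct acpi_nfit_system_address *)hdr;
			pr_info("NFIT SPA %u: %#llx + %#llx\n",
				spa->range_index,
				(unsigned long long)spa->address,
				(unsigned long long)spa->length);
		}
		hdr = (struct acpi_nfit_header *)((u8 *)hdr + hdr->length);
	}
}
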
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ecff624..a948fc5 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,8 +51,8 @@
* These tables are not consumed directly by the ACPICA subsystem, but are
* included here to support device drivers and the AML disassembler.
*
- * The tables in this file are defined by third-party specifications, and are
- * not defined directly by the ACPI specification itself.
+ * Generally, the tables in this file are defined by third-party specifications,
+ * and are not defined directly by the ACPI specification itself.
*
******************************************************************************/
@@ -69,15 +69,18 @@
#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
+#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
+#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */
#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */
#define ACPI_SIG_VRTC "VRTC" /* Virtual Real Time Clock Table */
#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */
@@ -649,6 +652,131 @@ struct acpi_ibft_target {
/*******************************************************************************
*
+ * IORT - IO Remapping Table
+ *
+ * Conforms to "IO Remapping Table System Software on ARM Platforms",
+ * Document number: ARM DEN 0049A, 2015
+ *
+ ******************************************************************************/
+
+struct acpi_table_iort {
+ struct acpi_table_header header;
+ u32 node_count;
+ u32 node_offset;
+ u32 reserved;
+};
+
+/*
+ * IORT subtables
+ */
+struct acpi_iort_node {
+ u8 type;
+ u16 length;
+ u8 revision;
+ u32 reserved;
+ u32 mapping_count;
+ u32 mapping_offset;
+ char node_data[1];
+};
+
+/* Values for subtable Type above */
+
+enum acpi_iort_node_type {
+ ACPI_IORT_NODE_ITS_GROUP = 0x00,
+ ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
+ ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
+ ACPI_IORT_NODE_SMMU = 0x03
+};
+
+struct acpi_iort_id_mapping {
+ u32 input_base; /* Lowest value in input range */
+ u32 id_count; /* Number of IDs */
+ u32 output_base; /* Lowest value in output range */
+ u32 output_reference; /* A reference to the output node */
+ u32 flags;
+};
+
+/* Masks for Flags field above for IORT subtable */
+
+#define ACPI_IORT_ID_SINGLE_MAPPING (1)
+
+struct acpi_iort_memory_access {
+ u32 cache_coherency;
+ u8 hints;
+ u16 reserved;
+ u8 memory_flags;
+};
+
+/* Values for cache_coherency field above */
+
+#define ACPI_IORT_NODE_COHERENT 0x00000001 /* The device node is fully coherent */
+#define ACPI_IORT_NODE_NOT_COHERENT 0x00000000 /* The device node is not coherent */
+
+/* Masks for Hints field above */
+
+#define ACPI_IORT_HT_TRANSIENT (1)
+#define ACPI_IORT_HT_WRITE (1<<1)
+#define ACPI_IORT_HT_READ (1<<2)
+#define ACPI_IORT_HT_OVERRIDE (1<<3)
+
+/* Masks for memory_flags field above */
+
+#define ACPI_IORT_MF_COHERENCY (1)
+#define ACPI_IORT_MF_ATTRIBUTES (1<<1)
+
+/*
+ * IORT node specific subtables
+ */
+struct acpi_iort_its_group {
+ u32 its_count;
+	u32 identifiers[1];	/* GIC ITS identifier array */
+};
+
+struct acpi_iort_named_component {
+ u32 node_flags;
+ u64 memory_properties; /* Memory access properties */
+ u8 memory_address_limit; /* Memory address size limit */
+ char device_name[1]; /* Path of namespace object */
+};
+
+struct acpi_iort_root_complex {
+ u64 memory_properties; /* Memory access properties */
+ u32 ats_attribute;
+ u32 pci_segment_number;
+};
+
+/* Values for ats_attribute field above */
+
+#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */
+#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */
+
+struct acpi_iort_smmu {
+ u64 base_address; /* SMMU base address */
+ u64 span; /* Length of memory range */
+ u32 model;
+ u32 flags;
+ u32 global_interrupt_offset;
+ u32 context_interrupt_count;
+ u32 context_interrupt_offset;
+ u32 pmu_interrupt_count;
+ u32 pmu_interrupt_offset;
+ u64 interrupts[1]; /* Interrupt array */
+};
+
+/* Values for Model field above */
+
+#define ACPI_IORT_SMMU_V1 0x00000000 /* Generic SMMUv1 */
+#define ACPI_IORT_SMMU_V2 0x00000001 /* Generic SMMUv2 */
+#define ACPI_IORT_SMMU_CORELINK_MMU400 0x00000002 /* ARM Corelink MMU-400 */
+#define ACPI_IORT_SMMU_CORELINK_MMU500 0x00000003 /* ARM Corelink MMU-500 */
+
+/* Masks for Flags field above */
+
+#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
+#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
+
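
IORT nodes are chained by their length field starting at node_offset; a minimal scan might look like the following (SMMU handling only, purely illustrative):

static void example_scan_iort(struct acpi_table_iort *iort)
{
	struct acpi_iort_node *node;
	u32 i;

	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
	for (i = 0; i < iort->node_count; i++) {
		if (node->type == ACPI_IORT_NODE_SMMU) {
			struct acpi_iort_smmu *smmu =
				(struct acpi_iort_smmu *)node->node_data;
			pr_info("SMMU model %u at %#llx\n", smmu->model,
				(unsigned long long)smmu->base_address);
		}
		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
	}
}
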
+/*******************************************************************************
+ *
* IVRS - I/O Virtualization Reporting Structure
* Version 1
*
@@ -823,7 +951,7 @@ struct acpi_ivrs_memory {
*
* LPIT - Low Power Idle Table
*
- * Conforms to "ACPI Low Power Idle Table (LPIT) and _LPD Proposal (DRAFT)"
+ * Conforms to "ACPI Low Power Idle Table (LPIT)" July 2014.
*
******************************************************************************/
@@ -845,7 +973,7 @@ struct acpi_lpit_header {
enum acpi_lpit_type {
ACPI_LPIT_TYPE_NATIVE_CSTATE = 0x00,
- ACPI_LPIT_TYPE_SIMPLE_IO = 0x01
+ ACPI_LPIT_TYPE_RESERVED = 0x01 /* 1 and above are reserved */
};
/* Masks for Flags field above */
@@ -868,21 +996,6 @@ struct acpi_lpit_native {
u64 counter_frequency;
};
-/* 0x01: Simple I/O based LPI structure */
-
-struct acpi_lpit_io {
- struct acpi_lpit_header header;
- struct acpi_generic_address entry_trigger;
- u32 trigger_action;
- u64 trigger_value;
- u64 trigger_mask;
- struct acpi_generic_address minimum_idle_state;
- u32 residency;
- u32 latency;
- struct acpi_generic_address residency_counter;
- u64 counter_frequency;
-};
-
/*******************************************************************************
*
* MCFG - PCI Memory Mapped Configuration table and subtable
@@ -935,6 +1048,21 @@ struct acpi_table_mchi {
/*******************************************************************************
*
+ * MSDM - Microsoft Data Management table
+ *
+ * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
+ * November 29, 2011. Copyright 2011 Microsoft
+ *
+ ******************************************************************************/
+
+/* Basic MSDM table is only the common ACPI header */
+
+struct acpi_table_msdm {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/*******************************************************************************
+ *
* MTMR - MID Timer Table
* Version 1
*
@@ -959,10 +1087,9 @@ struct acpi_mtmr_entry {
/*******************************************************************************
*
* SLIC - Software Licensing Description Table
- * Version 1
*
- * Conforms to "OEM Activation 2.0 for Windows Vista Operating Systems",
- * Copyright 2006
+ * Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
+ * November 29, 2011. Copyright 2011 Microsoft
*
******************************************************************************/
@@ -972,52 +1099,6 @@ struct acpi_table_slic {
struct acpi_table_header header; /* Common ACPI table header */
};
-/* Common SLIC subtable header */
-
-struct acpi_slic_header {
- u32 type;
- u32 length;
-};
-
-/* Values for Type field above */
-
-enum acpi_slic_type {
- ACPI_SLIC_TYPE_PUBLIC_KEY = 0,
- ACPI_SLIC_TYPE_WINDOWS_MARKER = 1,
- ACPI_SLIC_TYPE_RESERVED = 2 /* 2 and greater are reserved */
-};
-
-/*
- * SLIC Subtables, correspond to Type in struct acpi_slic_header
- */
-
-/* 0: Public Key Structure */
-
-struct acpi_slic_key {
- struct acpi_slic_header header;
- u8 key_type;
- u8 version;
- u16 reserved;
- u32 algorithm;
- char magic[4];
- u32 bit_length;
- u32 exponent;
- u8 modulus[128];
-};
-
-/* 1: Windows Marker Structure */
-
-struct acpi_slic_marker {
- struct acpi_slic_header header;
- u32 version;
- char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
- char windows_flag[8];
- u32 slic_version;
- u8 reserved[16];
- u8 signature[128];
-};
-
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
@@ -1099,20 +1180,85 @@ enum acpi_spmi_interface_types {
/*******************************************************************************
*
* TCPA - Trusted Computing Platform Alliance table
- * Version 1
+ * Version 2
+ *
+ * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
+ * December 19, 2014
*
- * Conforms to "TCG PC Specific Implementation Specification",
- * Version 1.1, August 18, 2003
+ * NOTE: There are two versions of the table with the same signature --
+ * the client version and the server version.
*
******************************************************************************/
-struct acpi_table_tcpa {
+struct acpi_table_tcpa_client {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 platform_class;
+ u32 minimum_log_length; /* Minimum length for the event log area */
+ u64 log_address; /* Address of the event log area */
+};
+
+struct acpi_table_tcpa_server {
struct acpi_table_header header; /* Common ACPI table header */
+ u16 platform_class;
u16 reserved;
- u32 max_log_length; /* Maximum length for the event log area */
+ u64 minimum_log_length; /* Minimum length for the event log area */
u64 log_address; /* Address of the event log area */
+ u16 spec_revision;
+ u8 device_flags;
+ u8 interrupt_flags;
+ u8 gpe_number;
+ u8 reserved2[3];
+ u32 global_interrupt;
+ struct acpi_generic_address address;
+ u32 reserved3;
+ struct acpi_generic_address config_address;
+ u8 group;
+ u8 bus; /* PCI Bus/Segment/Function numbers */
+ u8 device;
+ u8 function;
+};
+
+/* Values for device_flags above */
+
+#define ACPI_TCPA_PCI_DEVICE (1)
+#define ACPI_TCPA_BUS_PNP (1<<1)
+#define ACPI_TCPA_ADDRESS_VALID (1<<2)
+
+/* Values for interrupt_flags above */
+
+#define ACPI_TCPA_INTERRUPT_MODE (1)
+#define ACPI_TCPA_INTERRUPT_POLARITY (1<<1)
+#define ACPI_TCPA_SCI_VIA_GPE (1<<2)
+#define ACPI_TCPA_GLOBAL_INTERRUPT (1<<3)
+
+/*******************************************************************************
+ *
+ * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
+ * Version 4
+ *
+ * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
+ * December 19, 2014
+ *
+ ******************************************************************************/
+
+struct acpi_table_tpm2 {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 platform_class;
+ u16 reserved;
+ u64 control_address;
+ u32 start_method;
+
+ /* Platform-specific data follows */
};
+/* Values for start_method above */
+
+#define ACPI_TPM2_NOT_ALLOWED 0
+#define ACPI_TPM2_START_METHOD 2
+#define ACPI_TPM2_MEMORY_MAPPED 6
+#define ACPI_TPM2_COMMAND_BUFFER 7
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
+
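
A sketch of how a TPM driver might key its transport off the new start_method values; the policy shown is an assumption for illustration, not a reference implementation:

static int example_probe_tpm2(struct acpi_table_tpm2 *tpm2)
{
	switch (tpm2->start_method) {
	case ACPI_TPM2_MEMORY_MAPPED:
	case ACPI_TPM2_COMMAND_BUFFER:
	case ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD:
		/* these transports need a usable control area */
		return tpm2->control_address ? 0 : -ENODEV;
	case ACPI_TPM2_START_METHOD:
		return 0;	/* driven via the ACPI start method instead */
	default:
		return -ENODEV;
	}
}
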
/*******************************************************************************
*
* UEFI - UEFI Boot optimization Table
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 5480cb2..1df8916 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,7 +51,8 @@
* These tables are not consumed directly by the ACPICA subsystem, but are
* included here to support device drivers and the AML disassembler.
*
- * The tables in this file are fully defined within the ACPI specification.
+ * In general, the tables in this file are fully defined within the ACPI
+ * specification.
*
******************************************************************************/
@@ -68,7 +69,9 @@
#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
-#define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
+#define ACPI_SIG_STAO "STAO" /* Status Override table */
+#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
+#define ACPI_SIG_XENV "XENV" /* Xen Environment table */
#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
@@ -77,7 +80,6 @@
#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
-#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -117,6 +119,8 @@ struct acpi_table_bgrt {
/*******************************************************************************
*
* DRTM - Dynamic Root of Trust for Measurement table
+ * Conforms to "TCG D-RTM Architecture" June 17 2013, Version 1.0.0
+ * Table version 1
*
******************************************************************************/
@@ -133,22 +137,40 @@ struct acpi_table_drtm {
u32 flags;
};
-/* 1) Validated Tables List */
+/* Flag Definitions for above */
+
+#define ACPI_DRTM_ACCESS_ALLOWED (1)
+#define ACPI_DRTM_ENABLE_GAP_CODE (1<<1)
+#define ACPI_DRTM_INCOMPLETE_MEASUREMENTS (1<<2)
+#define ACPI_DRTM_AUTHORITY_ORDER (1<<3)
+
+/* 1) Validated Tables List (64-bit addresses) */
-struct acpi_drtm_vtl_list {
- u32 validated_table_list_count;
+struct acpi_drtm_vtable_list {
+ u32 validated_table_count;
+ u64 validated_tables[1];
};
-/* 2) Resources List */
+/* 2) Resources List (of Resource Descriptors) */
+
+/* Resource Descriptor */
+
+struct acpi_drtm_resource {
+ u8 size[7];
+ u8 type;
+ u64 address;
+};
struct acpi_drtm_resource_list {
- u32 resource_list_count;
+ u32 resource_count;
+ struct acpi_drtm_resource resources[1];
};
/* 3) Platform-specific Identifiers List */
-struct acpi_drtm_id_list {
- u32 id_list_count;
+struct acpi_drtm_dps_id {
+ u32 dps_id_length;
+ u8 dps_id[16];
};
/*******************************************************************************
@@ -685,32 +707,52 @@ enum acpi_rasf_status {
/*******************************************************************************
*
- * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
- * Version 3
+ * STAO - Status Override Table (_STA override) - ACPI 6.0
+ * Version 1
*
- * Conforms to "TPM 2.0 Hardware Interface Table (TPM2)" 29 November 2011
+ * Conforms to "ACPI Specification for Status Override Table"
+ * 6 January 2015
*
******************************************************************************/
-struct acpi_table_tpm2 {
+struct acpi_table_stao {
struct acpi_table_header header; /* Common ACPI table header */
- u32 flags;
- u64 control_address;
- u32 start_method;
+ u8 ignore_uart;
+};
+
+/*******************************************************************************
+ *
+ * WPBT - Windows Platform Binary Table (ACPI 6.0)
+ * Version 1
+ *
+ * Conforms to "Windows Platform Binary Table (WPBT)" 29 November 2011
+ *
+ ******************************************************************************/
+
+struct acpi_table_wpbt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 handoff_size;
+ u64 handoff_address;
+ u8 layout;
+ u8 type;
+ u16 arguments_length;
};
-/* Control area structure (not part of table, pointed to by control_address) */
+/*******************************************************************************
+ *
+ * XENV - Xen Environment Table (ACPI 6.0)
+ * Version 1
+ *
+ * Conforms to "ACPI Specification for Xen Environment Table" 4 January 2015
+ *
+ ******************************************************************************/
-struct acpi_tpm2_control {
- u32 reserved;
- u32 error;
- u32 cancel;
- u32 start;
- u64 interrupt_control;
- u32 command_size;
- u64 command_address;
- u32 response_size;
- u64 response_address;
+struct acpi_table_xenv {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u64 grant_table_address;
+ u64 grant_table_size;
+ u32 event_interrupt;
+ u8 event_flags;
};
/* Reset to default packing */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index bbef173..c2a41d2 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -124,7 +124,6 @@
#ifndef ACPI_USE_SYSTEM_INTTYPES
typedef unsigned char u8;
-typedef unsigned char u8;
typedef unsigned short u16;
typedef short s16;
typedef COMPILER_DEPENDENT_UINT64 u64;
@@ -199,9 +198,29 @@ typedef int s32;
typedef s32 acpi_native_int;
typedef u32 acpi_size;
+
+#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
+
+/*
+ * OSPMs can define this to shrink the size of the structures for
+ * 32-bit, non-PAE environments. The ASL compiler may always define it
+ * to generate 32-bit OSPM compliant tables.
+ */
typedef u32 acpi_io_address;
typedef u32 acpi_physical_address;
+#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
+/*
+ * It is reported that, after some calculations, physical addresses can
+ * wrap over the 32-bit boundary in 32-bit PAE environments.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
+ */
+typedef u64 acpi_io_address;
+typedef u64 acpi_physical_address;
+
+#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
#define ACPI_MAX_PTR ACPI_UINT32_MAX
#define ACPI_SIZE_MAX ACPI_UINT32_MAX
@@ -452,11 +471,6 @@ typedef u8 acpi_owner_id;
#define ACPI_INTEGER_BIT_SIZE 64
#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */
-
-#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */
-#endif
-
#define ACPI_MAX64_DECIMAL_DIGITS 20
#define ACPI_MAX32_DECIMAL_DIGITS 10
#define ACPI_MAX16_DECIMAL_DIGITS 5
@@ -511,6 +525,7 @@ typedef u64 acpi_integer;
#define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p))
#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
+#define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b)))
#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
/* Pointer/Integer type conversions */
@@ -527,14 +542,14 @@ typedef u64 acpi_integer;
#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
#else
-#define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
-#define ACPI_MOVE_NAME(dest,src) (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
+#define ACPI_COMPARE_NAME(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
+#define ACPI_MOVE_NAME(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
#endif
/* Support for the special RSDP signature (8 characters) */
-#define ACPI_VALIDATE_RSDP_SIG(a) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
-#define ACPI_MAKE_RSDP_SIG(dest) (ACPI_MEMCPY (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
/*******************************************************************************
*
@@ -553,6 +568,7 @@ typedef u64 acpi_integer;
#define ACPI_NO_ACPI_ENABLE 0x10
#define ACPI_NO_DEVICE_INIT 0x20
#define ACPI_NO_OBJECT_INIT 0x40
+#define ACPI_NO_FACS_INIT 0x80
/*
* Initialization state
@@ -713,38 +729,37 @@ typedef u32 acpi_event_type;
* The encoding of acpi_event_status is illustrated below.
* Note that a set bit (1) indicates the property is TRUE
* (e.g. if bit 0 is set then the event is enabled).
- * +-------------+-+-+-+-+
- * | Bits 31:4 |3|2|1|0|
- * +-------------+-+-+-+-+
- * | | | | |
- * | | | | +- Enabled?
- * | | | +--- Enabled for wake?
- * | | +----- Set?
- * | +------- Has a handler?
- * +------------- <Reserved>
+ * +-------------+-+-+-+-+-+
+ * | Bits 31:5 |4|3|2|1|0|
+ * +-------------+-+-+-+-+-+
+ * | | | | | |
+ * | | | | | +- Enabled?
+ * | | | | +--- Enabled for wake?
+ * | | | +----- Status bit set?
+ * | | +------- Enable bit set?
+ * | +--------- Has a handler?
+ * +--------------- <Reserved>
*/
typedef u32 acpi_event_status;
#define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00
#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01
#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02
-#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04
-#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x08
+#define ACPI_EVENT_FLAG_STATUS_SET (acpi_event_status) 0x04
+#define ACPI_EVENT_FLAG_ENABLE_SET (acpi_event_status) 0x08
+#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x10
+#define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET
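
With the STATUS/ENABLE split, callers can now observe both hardware bits of a GPE; a minimal sketch using acpi_get_gpe_status() (the printout is only illustrative):

static void example_dump_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_event_status status;

	if (ACPI_FAILURE(acpi_get_gpe_status(gpe_device, gpe_number, &status)))
		return;

	pr_info("GPE %u: enable=%d status=%d handler=%d\n", gpe_number,
		!!(status & ACPI_EVENT_FLAG_ENABLE_SET),
		!!(status & ACPI_EVENT_FLAG_STATUS_SET),
		!!(status & ACPI_EVENT_FLAG_HAS_HANDLER));
}
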
/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
#define ACPI_GPE_CONDITIONAL_ENABLE 2
-#define ACPI_GPE_SAVE_MASK 4
-
-#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
-#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)
/*
* GPE info flags - Per GPE
* +-------+-+-+---+
- * | 7:4 |3|2|1:0|
+ * | 7:5 |4|3|2:0|
* +-------+-+-+---+
* | | | |
* | | | +-- Type of dispatch:to method, handler, notify, or none
@@ -756,13 +771,15 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01
#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02
#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03
-#define ACPI_GPE_DISPATCH_MASK (u8) 0x03
+#define ACPI_GPE_DISPATCH_RAW_HANDLER (u8) 0x04
+#define ACPI_GPE_DISPATCH_MASK (u8) 0x07
+#define ACPI_GPE_DISPATCH_TYPE(flags) ((u8) ((flags) & ACPI_GPE_DISPATCH_MASK))
-#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x04
+#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x08
#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00
-#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x04
+#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08
-#define ACPI_GPE_CAN_WAKE (u8) 0x08
+#define ACPI_GPE_CAN_WAKE (u8) 0x10
/*
* Flags for GPE and Lock interfaces
@@ -1124,6 +1141,10 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
#define ACPI_UUID_LENGTH 16
+/* Length of 3-byte PCI class code values when converted back to a string */
+
+#define ACPI_PCICLS_STRING_SIZE 7 /* Includes null terminator */
+
/* Structures used for device/processor HID, UID, CID, and SUB */
struct acpi_pnp_device_id {
@@ -1146,7 +1167,7 @@ struct acpi_device_info {
u32 name; /* ACPI object Name */
acpi_object_type type; /* ACPI object Type */
u8 param_count; /* If a method, required parameter count */
- u8 valid; /* Indicates which optional fields are valid */
+ u16 valid; /* Indicates which optional fields are valid */
u8 flags; /* Miscellaneous info */
u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */
@@ -1155,6 +1176,7 @@ struct acpi_device_info {
struct acpi_pnp_device_id hardware_id; /* _HID value */
struct acpi_pnp_device_id unique_id; /* _UID value */
struct acpi_pnp_device_id subsystem_id; /* _SUB value */
+ struct acpi_pnp_device_id class_code; /* _CLS value */
struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */
};
@@ -1164,14 +1186,15 @@ struct acpi_device_info {
/* Flags for Valid field above (acpi_get_object_info) */
-#define ACPI_VALID_STA 0x01
-#define ACPI_VALID_ADR 0x02
-#define ACPI_VALID_HID 0x04
-#define ACPI_VALID_UID 0x08
-#define ACPI_VALID_SUB 0x10
-#define ACPI_VALID_CID 0x20
-#define ACPI_VALID_SXDS 0x40
-#define ACPI_VALID_SXWS 0x80
+#define ACPI_VALID_STA 0x0001
+#define ACPI_VALID_ADR 0x0002
+#define ACPI_VALID_HID 0x0004
+#define ACPI_VALID_UID 0x0008
+#define ACPI_VALID_SUB 0x0010
+#define ACPI_VALID_CID 0x0020
+#define ACPI_VALID_CLS 0x0040
+#define ACPI_VALID_SXDS 0x0100
+#define ACPI_VALID_SXWS 0x0200
/* Flags for _STA return value (current_status above) */
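
The widened valid mask is consumed the same way as before; a sketch that checks the new _CLS bit before touching class_code:

static void example_print_class(acpi_handle handle)
{
	struct acpi_device_info *info;

	if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
		return;

	if (info->valid & ACPI_VALID_CLS)
		pr_info("PCI-style class code: %s\n", info->class_code.string);

	ACPI_FREE(info);
}
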
@@ -1249,6 +1272,7 @@ struct acpi_memory_list {
#define ACPI_OSI_WIN_VISTA_SP2 0x0A
#define ACPI_OSI_WIN_7 0x0B
#define ACPI_OSI_WIN_8 0x0C
+#define ACPI_OSI_WIN_10 0x0D
/* Definitions of file IO */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
new file mode 100644
index 0000000..80fe8cf
--- /dev/null
+++ b/include/acpi/acuuid.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Name: acuuid.h - ACPI-related UUID/GUID definitions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACUUID_H__
+#define __ACUUID_H__
+
+/*
+ * Note1: UUIDs and GUIDs are defined to be identical in ACPI.
+ *
+ * Note2: This file is standalone and should remain that way.
+ */
+
+/* Controllers */
+
+#define UUID_GPIO_CONTROLLER "4f248f40-d5e2-499f-834c-27758ea1cd3f"
+#define UUID_USB_CONTROLLER "ce2ee385-00e6-48cb-9f05-2edb927c4899"
+#define UUID_SATA_CONTROLLER "e4db149b-fcfe-425b-a6d8-92357d78fc7f"
+
+/* Devices */
+
+#define UUID_PCI_HOST_BRIDGE "33db4d5b-1ff7-401c-9657-7441c03dd766"
+#define UUID_I2C_DEVICE "3cdff6f7-4267-4555-ad05-b30a3d8938de"
+#define UUID_POWER_BUTTON "dfbcf3c5-e7a5-44e6-9c1f-29c76f6e059c"
+
+/* Interfaces */
+
+#define UUID_DEVICE_LABELING "e5c937d0-3553-4d7a-9117-ea4d19c3434d"
+#define UUID_PHYSICAL_PRESENCE "3dddfaa6-361b-4eb4-a424-8d10089d1653"
+
+/* NVDIMM - NFIT table */
+
+#define UUID_VOLATILE_MEMORY "7305944f-fdda-44e3-b16c-3f22d252e5d0"
+#define UUID_PERSISTENT_MEMORY "66f0d379-b4f3-4074-ac43-0d3318b78cdb"
+#define UUID_CONTROL_REGION "92f701f6-13b4-405d-910b-299367e8234c"
+#define UUID_DATA_REGION "91af0530-5d86-470e-a6b0-0a2db9408249"
+#define UUID_VOLATILE_VIRTUAL_DISK "77ab535a-45fc-624b-5560-f7b281d1f96e"
+#define UUID_VOLATILE_VIRTUAL_CD "3d5abd30-4175-87ce-6d64-d2ade523c4bb"
+#define UUID_PERSISTENT_VIRTUAL_DISK "5cea02c9-4d07-69d3-269f-4496fbe096f9"
+#define UUID_PERSISTENT_VIRTUAL_CD "08018188-42cd-bb48-100f-5387d53ded3d"
+
+/* Miscellaneous */
+
+#define UUID_PLATFORM_CAPABILITIES "0811b06e-4a27-44f9-8d60-3cbbc22e7b48"
+#define UUID_DYNAMIC_ENUMERATION "d8c1a3a6-be9b-4c9b-91bf-c3cb81fc5daf"
+#define UUID_BATTERY_THERMAL_LIMIT "4c2067e3-887d-475c-9720-4af1d3ed602e"
+#define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500"
+#define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301"
+
+#endif /* __ACUUID_H__ */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 5f8cc1f..3cedd43 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,6 +76,7 @@
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
#define ACPI_SINGLE_THREADED
+#define ACPI_32BIT_PHYSICAL_ADDRESS
#endif
/* acpi_exec configuration. Multithreaded with full AML debugger */
@@ -174,6 +175,9 @@
#elif defined(_APPLE) || defined(__APPLE__)
#include "acmacosx.h"
+#elif defined(__DragonFly__)
+#include "acdragonfly.h"
+
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include "acfreebsd.h"
@@ -342,29 +346,6 @@
/* We will be linking to the standard Clib functions */
-#define ACPI_STRSTR(s1,s2) strstr((s1), (s2))
-#define ACPI_STRCHR(s1,c) strchr((s1), (c))
-#define ACPI_STRLEN(s) (acpi_size) strlen((s))
-#define ACPI_STRCPY(d,s) (void) strcpy((d), (s))
-#define ACPI_STRNCPY(d,s,n) (void) strncpy((d), (s), (acpi_size)(n))
-#define ACPI_STRNCMP(d,s,n) strncmp((d), (s), (acpi_size)(n))
-#define ACPI_STRCMP(d,s) strcmp((d), (s))
-#define ACPI_STRCAT(d,s) (void) strcat((d), (s))
-#define ACPI_STRNCAT(d,s,n) strncat((d), (s), (acpi_size)(n))
-#define ACPI_STRTOUL(d,s,n) strtoul((d), (s), (acpi_size)(n))
-#define ACPI_MEMCMP(s1,s2,n) memcmp((const char *)(s1), (const char *)(s2), (acpi_size)(n))
-#define ACPI_MEMCPY(d,s,n) (void) memcpy((d), (s), (acpi_size)(n))
-#define ACPI_MEMSET(d,s,n) (void) memset((d), (s), (acpi_size)(n))
-
-#define ACPI_TOUPPER(i) toupper((int) (i))
-#define ACPI_TOLOWER(i) tolower((int) (i))
-#define ACPI_IS_XDIGIT(i) isxdigit((int) (i))
-#define ACPI_IS_DIGIT(i) isdigit((int) (i))
-#define ACPI_IS_SPACE(i) isspace((int) (i))
-#define ACPI_IS_UPPER(i) isupper((int) (i))
-#define ACPI_IS_PRINT(i) isprint((int) (i))
-#define ACPI_IS_ALPHA(i) isalpha((int) (i))
-
#else
/******************************************************************************
@@ -402,22 +383,6 @@ typedef char *va_list;
/* Use the local (ACPICA) definitions of the clib functions */
-#define ACPI_STRSTR(s1,s2) acpi_ut_strstr ((s1), (s2))
-#define ACPI_STRCHR(s1,c) acpi_ut_strchr ((s1), (c))
-#define ACPI_STRLEN(s) (acpi_size) acpi_ut_strlen ((s))
-#define ACPI_STRCPY(d,s) (void) acpi_ut_strcpy ((d), (s))
-#define ACPI_STRNCPY(d,s,n) (void) acpi_ut_strncpy ((d), (s), (acpi_size)(n))
-#define ACPI_STRNCMP(d,s,n) acpi_ut_strncmp ((d), (s), (acpi_size)(n))
-#define ACPI_STRCMP(d,s) acpi_ut_strcmp ((d), (s))
-#define ACPI_STRCAT(d,s) (void) acpi_ut_strcat ((d), (s))
-#define ACPI_STRNCAT(d,s,n) acpi_ut_strncat ((d), (s), (acpi_size)(n))
-#define ACPI_STRTOUL(d,s,n) acpi_ut_strtoul ((d), (s), (acpi_size)(n))
-#define ACPI_MEMCMP(s1,s2,n) acpi_ut_memcmp((const char *)(s1), (const char *)(s2), (acpi_size)(n))
-#define ACPI_MEMCPY(d,s,n) (void) acpi_ut_memcpy ((d), (s), (acpi_size)(n))
-#define ACPI_MEMSET(d,v,n) (void) acpi_ut_memset ((d), (v), (acpi_size)(n))
-#define ACPI_TOUPPER(c) acpi_ut_to_upper ((int) (c))
-#define ACPI_TOLOWER(c) acpi_ut_to_lower ((int) (c))
-
#endif /* ACPI_USE_SYSTEM_CLIBRARY */
#ifndef ACPI_FILE
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 2b61238..0a7dc8e 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,6 +56,15 @@
#if defined(_LINUX) || defined(__linux__)
#include <acpi/platform/aclinuxex.h>
+#elif defined(_AED_EFI)
+#include "acefiex.h"
+
+#elif defined(_GNU_EFI)
+#include "acefiex.h"
+
+#elif defined(__DragonFly__)
+#include "acdragonflyex.h"
+
#endif
/*! [End] no source code translation !*/
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 384875d..5457a06 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,4 +75,8 @@
#undef strchr
#endif
+/* GCC supports __VA_ARGS__ in macros */
+
+#define COMPILER_VA_MACRO 1
+
#endif /* __ACGCC_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 1ba7c19..74ba46c 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 568d4b8..acedc3f 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index b95dc32..4188a4d 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,7 +196,7 @@ struct acpi_processor_flags {
struct acpi_processor {
acpi_handle handle;
u32 acpi_id;
- u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */
+ phys_cpuid_t phys_id; /* CPU hardware ID such as APIC ID for x86 */
u32 id; /* CPU logical ID allocated by OS */
u32 pblk;
int performance_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
#endif /* CONFIG_CPU_FREQ */
/* in processor_core.c */
-int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int phys_id, u32 acpi_id);
+phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
/* in processor_pdc.c */
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 843ef1a..e840b29 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -16,23 +16,36 @@ struct acpi_device;
#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110
#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
+enum acpi_backlight_type {
+ acpi_backlight_undef = -1,
+ acpi_backlight_none = 0,
+ acpi_backlight_video,
+ acpi_backlight_vendor,
+ acpi_backlight_native,
+};
+
#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
-extern void acpi_video_unregister_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
-extern bool acpi_video_verify_backlight_support(void);
+extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
+extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
-static inline void acpi_video_unregister_backlight(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
{
return -ENODEV;
}
-static inline bool acpi_video_verify_backlight_support(void) { return false; }
+static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
+{
+ return acpi_backlight_vendor;
+}
+static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+{
+}
#endif
#endif
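
A sketch of how a vendor backlight driver might consult the new selector before registering anything; the function name is illustrative only:

static int example_register_vendor_backlight(void)
{
	if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
		return -ENODEV;	/* firmware or native control wins */

	/* ... register the vendor-specific backlight device here ... */
	return 0;
}
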
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 77ff547..5bdab6b 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -4,6 +4,7 @@
#define __ARCH_HAS_4LEVEL_HACK
#define __PAGETABLE_PUD_FOLDED
+#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE PGDIR_SIZE
#define PUD_MASK PGDIR_MASK
#define PTRS_PER_PUD 1
diff --git a/include/asm-generic/asm-offsets.h b/include/asm-generic/asm-offsets.h
new file mode 100644
index 0000000..d370ee3
--- /dev/null
+++ b/include/asm-generic/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index f5c40b0..55e3abc 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -55,19 +55,45 @@
#endif
#ifdef CONFIG_SMP
+
+#ifndef smp_mb
#define smp_mb() mb()
+#endif
+
+#ifndef smp_rmb
#define smp_rmb() rmb()
+#endif
+
+#ifndef smp_wmb
#define smp_wmb() wmb()
+#endif
+
+#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() read_barrier_depends()
-#else
+#endif
+
+#else /* !CONFIG_SMP */
+
+#ifndef smp_mb
#define smp_mb() barrier()
+#endif
+
+#ifndef smp_rmb
#define smp_rmb() barrier()
+#endif
+
+#ifndef smp_wmb
#define smp_wmb() barrier()
+#endif
+
+#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() do { } while (0)
#endif
-#ifndef set_mb
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#endif /* CONFIG_SMP */
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#endif
#ifndef smp_mb__before_atomic
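set_mb() becomes smp_store_mb() and now funnels the store through WRITE_ONCE(); the SMP barriers also gain #ifndef guards so architectures can override them selectively. As a hedged illustration of the store-then-full-barrier pattern the helper is meant for (the flag variables and functions are hypothetical), the classic store-buffering outcome is forbidden:

static int demo_a, demo_b;		/* hypothetical shared flags */

static int cpu0_side(void)		/* runs on one CPU */
{
	smp_store_mb(demo_a, 1);	/* WRITE_ONCE(demo_a, 1); then mb() */
	return READ_ONCE(demo_b);
}

static int cpu1_side(void)		/* runs on another CPU */
{
	smp_store_mb(demo_b, 1);
	return READ_ONCE(demo_a);
}

/*
 * With the full barrier implied by smp_store_mb() on both sides, the two
 * functions cannot both return 0 in the same execution.
 */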
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 811fb1e..3766ab3 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -86,9 +86,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
/*
* Atomic compare and exchange.
- *
- * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
- * a cmpxchg primitive faster than repeated local irq save/restore exists.
*/
#include <asm-generic/cmpxchg-local.h>
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 3378dcf..940d5ec 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -39,6 +39,10 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
debug_dma_unmap_page(dev, addr, size, dir, true);
}
+/*
+ * dma_map_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
@@ -51,6 +55,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
kmemcheck_mark_initialized(sg_virt(s), s->length);
BUG_ON(!valid_dma_direction(dir));
ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ BUG_ON(ents < 0);
debug_dma_map_sg(dev, sg, nents, ents, dir);
return ents;
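The new comment pins down the dma_map_sg_attrs() error convention, and the BUG_ON() enforces it against the ops implementation. A caller-side sketch under that convention (the driver helper is hypothetical):

static int example_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)			/* 0 is the only failure indication */
		return -ENOMEM;

	/* ... program the hardware with 'mapped' (not 'nents') entries ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}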
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index b59b5a5..e56272c 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -8,8 +8,7 @@
#ifndef CONFIG_SMP
/*
* The following implementation only for uniprocessor machines.
- * For UP, it's relies on the fact that pagefault_disable() also disables
- * preemption to ensure mutual exclusion.
+ * It relies on preempt_disable() ensuring mutual exclusion.
*
*/
@@ -38,6 +37,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
+ preempt_disable();
pagefault_disable();
ret = -EFAULT;
@@ -72,6 +72,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
out_pagefault_enable:
pagefault_enable();
+ preempt_enable();
if (ret == 0) {
switch (cmp) {
@@ -106,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
{
u32 val;
+ preempt_disable();
if (unlikely(get_user(val, uaddr) != 0))
return -EFAULT;
@@ -113,6 +115,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
*uval = val;
+ preempt_enable();
return 0;
}
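Since pagefault_disable() no longer implies disabled preemption, the UP paths above re-establish exclusion with an explicit preempt_disable()/preempt_enable() pair. The same bracketing pattern, shown as an illustrative sketch (the helper name is hypothetical):

static int up_rmw_user_word(u32 __user *uaddr, u32 newval, u32 *oldval)
{
	int ret = -EFAULT;

	preempt_disable();		/* sole source of exclusion on UP */
	pagefault_disable();		/* the user access must not sleep */

	if (__get_user(*oldval, uaddr) == 0 &&
	    __put_user(newval, uaddr) == 0)
		ret = 0;

	pagefault_enable();
	preempt_enable();
	return ret;
}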
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 383ade1..40ec143 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/of.h>
-#include <linux/pinctrl/pinctrl.h>
#ifdef CONFIG_GPIOLIB
@@ -129,63 +128,11 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return gpiod_export_link(dev, name, gpio_to_desc(gpio));
}
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
-}
-
static inline void gpio_unexport(unsigned gpio)
{
gpiod_unexport(gpio_to_desc(gpio));
}
-#ifdef CONFIG_PINCTRL
-
-/**
- * struct gpio_pin_range - pin range controlled by a gpio chip
- * @head: list for maintaining set of pin ranges, used internally
- * @pctldev: pinctrl device which handles corresponding pins
- * @range: actual range of pins controlled by a gpio controller
- */
-
-struct gpio_pin_range {
- struct list_head node;
- struct pinctrl_dev *pctldev;
- struct pinctrl_gpio_range range;
-};
-
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins);
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group);
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
-
-#else
-
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- return 0;
-}
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group)
-{
- return 0;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-}
-
-#endif /* CONFIG_PINCTRL */
-
#else /* !CONFIG_GPIOLIB */
static inline bool gpio_is_valid(int number)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 9db0423..f56094c 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -769,6 +769,14 @@ static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
@@ -777,8 +785,17 @@ static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_wt
+#define ioremap_wt ioremap_wt
+static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef iounmap
#define iounmap iounmap
+
static inline void iounmap(void __iomem *addr)
{
}
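ioremap_uc() and ioremap_wt() get generic fallbacks that simply alias the uncached mapping, so drivers can request the stricter or write-through semantics unconditionally. Illustrative fragment (the function name is hypothetical):

static void __iomem *example_map_fb(phys_addr_t base, size_t len)
{
	/*
	 * Ask for write-through; on architectures without a real
	 * implementation this transparently degrades to uncached.
	 */
	return ioremap_wt(base, len);
}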
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 1b41011..d8f8622 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -66,6 +66,10 @@ extern void ioport_unmap(void __iomem *);
#define ioremap_wc ioremap_nocache
#endif
+#ifndef ARCH_HAS_IOREMAP_WT
+#define ioremap_wt ioremap_nocache
+#endif
+
#ifdef CONFIG_PCI
/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
new file mode 100644
index 0000000..5ff0e51
--- /dev/null
+++ b/include/asm-generic/mm-arch-hooks.h
@@ -0,0 +1,16 @@
+/*
+ * Architecture specific mm hooks
+ */
+
+#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
+#define _ASM_GENERIC_MM_ARCH_HOOKS_H
+
+/*
+ * This file should be included through arch/../include/asm/Kbuild for
+ * the architecture which doesn't need specific mm hooks.
+ *
+ * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
+ * are used.
+ */
+
+#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index e80a049..f24bc51 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -6,19 +6,6 @@
#ifndef _ASM_GENERIC_PCI_H
#define _ASM_GENERIC_PCI_H
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
-{
- struct resource *root = NULL;
-
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- if (res->flags & IORESOURCE_MEM)
- root = &iomem_resource;
-
- return root;
-}
-
#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index ce37349..7389c87 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -15,6 +15,9 @@ struct pci_dev;
#ifdef CONFIG_PCI
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
@@ -30,6 +33,13 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned lon
{
return NULL;
}
+
+static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ return NULL;
+}
#endif
#endif /* __ASM_GENERIC_IO_H */
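pci_iomap_range() lets a driver map only part of a BAR instead of the whole thing, which matters when different offsets of one BAR belong to different consumers. A minimal sketch (hypothetical helper; BAR 0, first 4 KiB):

static void __iomem *example_map_regs(struct pci_dev *pdev)
{
	/* BAR 0, starting at offset 0, at most 4096 bytes */
	return pci_iomap_range(pdev, 0, 0, 4096);
}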
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 177d597..29c57b2 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -6,6 +6,12 @@
#include <linux/mm_types.h>
#include <linux/bug.h>
+#include <linux/errno.h>
+
+#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
+ CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
+#endif
/*
* On almost all architectures and configurations, 0 can be used as the
@@ -90,11 +96,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
}
#endif
-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp)
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
@@ -103,13 +109,13 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp,
int full)
{
- return pmdp_get_and_clear(mm, address, pmdp);
+ return pmdp_huge_get_and_clear(mm, address, pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
@@ -146,8 +152,8 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
pte_t *ptep);
#endif
-#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp);
#endif
@@ -183,6 +189,22 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#endif
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
@@ -244,10 +266,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
# define pte_accessible(mm, pte) ((void)(pte), 1)
#endif
-#ifndef pte_present_nonuma
-#define pte_present_nonuma(pte) pte_present(pte)
-#endif
-
#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif
@@ -260,6 +278,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#define pgprot_writecombine pgprot_noncached
#endif
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif
@@ -474,21 +496,6 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
return pte;
}
-
-static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pte_t pte_file_mksoft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline int pte_file_soft_dirty(pte_t pte)
-{
- return 0;
-}
#endif
#ifndef __HAVE_PFNMAP_TRACKING
@@ -688,158 +695,51 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
#endif
}
-#ifdef CONFIG_NUMA_BALANCING
-/*
- * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that
- * is protected for PROT_NONE and a NUMA hinting fault entry. If the
- * architecture defines __PAGE_PROTNONE then it should take that into account
- * but those that do not can rely on the fact that the NUMA hinting scanner
- * skips inaccessible VMAs.
- *
- * pte/pmd_present() returns true if pte/pmd_numa returns true. Page
- * fault triggers on those regions if pte/pmd_numa returns true
- * (because _PAGE_PRESENT is not set).
- */
-#ifndef pte_numa
-static inline int pte_numa(pte_t pte)
-{
- return ptenuma_flags(pte) == _PAGE_NUMA;
-}
-#endif
-
-#ifndef pmd_numa
-static inline int pmd_numa(pmd_t pmd)
-{
- return pmdnuma_flags(pmd) == _PAGE_NUMA;
-}
-#endif
-
+#ifndef CONFIG_NUMA_BALANCING
/*
- * pte/pmd_mknuma sets the _PAGE_ACCESSED bitflag automatically
- * because they're called by the NUMA hinting minor page fault. If we
- * wouldn't set the _PAGE_ACCESSED bitflag here, the TLB miss handler
- * would be forced to set it later while filling the TLB after we
- * return to userland. That would trigger a second write to memory
- * that we optimize away by setting _PAGE_ACCESSED here.
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
+ * the only case the kernel cares about is NUMA balancing, and it is only ever
+ * set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
+ * _PAGE_PROTNONE so, by default, implement the helper as "always no". It
+ * is the responsibility of the caller to distinguish between PROT_NONE
+ * protections and NUMA hinting fault protections.
*/
-#ifndef pte_mknonnuma
-static inline pte_t pte_mknonnuma(pte_t pte)
-{
- pteval_t val = pte_val(pte);
-
- val &= ~_PAGE_NUMA;
- val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
- return __pte(val);
-}
-#endif
-
-#ifndef pmd_mknonnuma
-static inline pmd_t pmd_mknonnuma(pmd_t pmd)
-{
- pmdval_t val = pmd_val(pmd);
-
- val &= ~_PAGE_NUMA;
- val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
-
- return __pmd(val);
-}
-#endif
-
-#ifndef pte_mknuma
-static inline pte_t pte_mknuma(pte_t pte)
-{
- pteval_t val = pte_val(pte);
-
- VM_BUG_ON(!(val & _PAGE_PRESENT));
-
- val &= ~_PAGE_PRESENT;
- val |= _PAGE_NUMA;
-
- return __pte(val);
-}
-#endif
-
-#ifndef ptep_set_numa
-static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- pte_t ptent = *ptep;
-
- ptent = pte_mknuma(ptent);
- set_pte_at(mm, addr, ptep, ptent);
- return;
-}
-#endif
-
-#ifndef pmd_mknuma
-static inline pmd_t pmd_mknuma(pmd_t pmd)
-{
- pmdval_t val = pmd_val(pmd);
-
- val &= ~_PAGE_PRESENT;
- val |= _PAGE_NUMA;
-
- return __pmd(val);
-}
-#endif
-
-#ifndef pmdp_set_numa
-static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp)
-{
- pmd_t pmd = *pmdp;
-
- pmd = pmd_mknuma(pmd);
- set_pmd_at(mm, addr, pmdp, pmd);
- return;
-}
-#endif
-#else
-static inline int pmd_numa(pmd_t pmd)
+static inline int pte_protnone(pte_t pte)
{
return 0;
}
-static inline int pte_numa(pte_t pte)
+static inline int pmd_protnone(pmd_t pmd)
{
return 0;
}
+#endif /* CONFIG_NUMA_BALANCING */
-static inline pte_t pte_mknonnuma(pte_t pte)
-{
- return pte;
-}
+#endif /* CONFIG_MMU */
-static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
- return pmd;
+ return 0;
}
-
-static inline pte_t pte_mknuma(pte_t pte)
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
- return pte;
-}
-
-static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- return;
+ return 0;
}
-
-
-static inline pmd_t pmd_mknuma(pmd_t pmd)
+static inline int pud_clear_huge(pud_t *pud)
{
- return pmd;
+ return 0;
}
-
-static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp)
+static inline int pmd_clear_huge(pmd_t *pmd)
{
- return ;
+ return 0;
}
-#endif /* CONFIG_NUMA_BALANCING */
-
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#endif /* !__ASSEMBLY__ */
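The pte_protnone()/pmd_protnone() helpers deliberately answer "no" outside NUMA balancing, so a caller that does care has to combine the check with VMA accessibility. Sketched below as an illustration only (the helper is hypothetical and mirrors how a NUMA hinting fault would be told apart from a genuine PROT_NONE fault):

static bool example_is_numa_hint(struct vm_area_struct *vma, pte_t pte)
{
	if (!pte_protnone(pte))
		return false;

	/* A protnone PTE in an accessible VMA is a NUMA hint, not PROT_NONE. */
	return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
}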
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index eb6f9e6..d0a7a47 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -79,11 +79,8 @@ static __always_inline bool should_resched(void)
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
-
-#ifdef CONFIG_CONTEXT_TRACKING
-extern asmlinkage void preempt_schedule_context(void);
-#define __preempt_schedule_context() preempt_schedule_context()
-#endif
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */
#endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
new file mode 100644
index 0000000..83bfb87
--- /dev/null
+++ b/include/asm-generic/qspinlock.h
@@ -0,0 +1,139 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_H
+#define __ASM_GENERIC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+/**
+ * queued_spin_is_locked - is the spinlock locked?
+ * @lock: Pointer to queued spinlock structure
+ * Return: 1 if it is locked, 0 otherwise
+ */
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val);
+}
+
+/**
+ * queued_spin_value_unlocked - is the spinlock structure unlocked?
+ * @lock: queued spinlock structure
+ * Return: 1 if it is unlocked, 0 otherwise
+ *
+ * N.B. Whenever there are tasks waiting for the lock, it is considered
+ * locked wrt the lockref code to avoid lock stealing by the lockref
+ * code and changing things underneath the lock. This also allows some
+ * optimizations to be applied without conflict with lockref.
+ */
+static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+{
+ return !atomic_read(&lock.val);
+}
+
+/**
+ * queued_spin_is_contended - check if the lock is contended
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
+}
+/**
+ * queued_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+ if (!atomic_read(&lock->val) &&
+ (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+ return 1;
+ return 0;
+}
+
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queued_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ u32 val;
+
+ val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+ if (likely(val == 0))
+ return;
+ queued_spin_lock_slowpath(lock, val);
+}
+
+#ifndef queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /*
+ * smp_mb__before_atomic() in order to guarantee release semantics
+ */
+ smp_mb__before_atomic_dec();
+ atomic_sub(_Q_LOCKED_VAL, &lock->val);
+}
+#endif
+
+/**
+ * queued_spin_unlock_wait - wait until current lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+static inline void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ cpu_relax();
+}
+
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+ return false;
+}
+#endif
+
+/*
+ * Initializer
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
+
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * queued spinlock functions.
+ */
+#define arch_spin_is_locked(l) queued_spin_is_locked(l)
+#define arch_spin_is_contended(l) queued_spin_is_contended(l)
+#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l)
+#define arch_spin_lock(l) queued_spin_lock(l)
+#define arch_spin_trylock(l) queued_spin_trylock(l)
+#define arch_spin_unlock(l) queued_spin_unlock(l)
+#define arch_spin_lock_flags(l, f) queued_spin_lock(l)
+#define arch_spin_unlock_wait(l) queued_spin_unlock_wait(l)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_H */
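Once an architecture opts in and includes this header from its asm/spinlock.h, the arch_spin_* remapping above makes the queued implementation the lock of record. Minimal usage sketch (names are illustrative):

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	arch_spin_lock(&demo_lock);	/* fast path: one cmpxchg 0 -> _Q_LOCKED_VAL */
	/* ... critical section ... */
	arch_spin_unlock(&demo_lock);	/* atomic_sub(_Q_LOCKED_VAL, &lock->val) */
}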
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
new file mode 100644
index 0000000..85f888e
--- /dev/null
+++ b/include/asm-generic/qspinlock_types.h
@@ -0,0 +1,79 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
+#define __ASM_GENERIC_QSPINLOCK_TYPES_H
+
+/*
+ * Including atomic.h with PARAVIRT on will cause compilation errors because
+ * of recursive header file inclusion via paravirt_types.h. So don't include
+ * it if PARAVIRT is on.
+ */
+#ifndef CONFIG_PARAVIRT
+#include <linux/types.h>
+#include <linux/atomic.h>
+#endif
+
+typedef struct qspinlock {
+ atomic_t val;
+} arch_spinlock_t;
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * When NR_CPUS < 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ *
+ * When NR_CPUS >= 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-10: tail index
+ * 11-31: tail cpu (+1)
+ */
+#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
+ << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET 0
+#define _Q_LOCKED_BITS 8
+#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#if CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS 8
+#else
+#define _Q_PENDING_BITS 1
+#endif
+#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS 2
+#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
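The tail word packs a CPU number and a per-CPU queue-node index into the upper bits documented above. The helpers below are illustrative only (the slow path itself lives in kernel/locking/qspinlock.c, outside this header) and show the expected encoding, with the CPU stored off by one so that zero means "no tail":

static inline u32 demo_encode_tail(int cpu, int idx)
{
	return ((u32)(cpu + 1) << _Q_TAIL_CPU_OFFSET) |
	       ((u32)idx << _Q_TAIL_IDX_OFFSET);
}

static inline int demo_tail_cpu(u32 tail)
{
	return (int)((tail & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET) - 1;
}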
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
deleted file mode 100644
index 5de0735..0000000
--- a/include/asm-generic/scatterlist.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ASM_GENERIC_SCATTERLIST_H
-#define __ASM_GENERIC_SCATTERLIST_H
-
-#include <linux/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
- dma_addr_t dma_address;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- unsigned int dma_length;
-#endif
-};
-
-/*
- * These macros should be used after a dma_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-#define sg_dma_len(sg) ((sg)->dma_length)
-#else
-#define sg_dma_len(sg) ((sg)->length)
-#endif
-
-#endif /* __ASM_GENERIC_SCATTERLIST_H */
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index 9fa1f65..c9ccafa 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -17,7 +17,9 @@
#define __NR_seccomp_read_32 __NR_read
#define __NR_seccomp_write_32 __NR_write
#define __NR_seccomp_exit_32 __NR_exit
+#ifndef __NR_seccomp_sigreturn_32
#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+#endif
#endif /* CONFIG_COMPAT && ! already defined */
#define __NR_seccomp_read __NR_read
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bee5d68..8bd374d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -124,7 +124,10 @@
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
- VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+ VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
+ VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
+ *(_ftrace_enum_map) \
+ VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif
@@ -150,6 +153,14 @@
#define TRACE_SYSCALLS()
#endif
+#ifdef CONFIG_SERIAL_EARLYCON
+#define EARLYCON_TABLE() STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__earlycon_table) = .; \
+ *(__earlycon_table) \
+ *(__earlycon_table_end)
+#else
+#define EARLYCON_TABLE()
+#endif
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
@@ -167,6 +178,7 @@
#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
+#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
#define KERNEL_DTB() \
@@ -401,7 +413,7 @@
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot) \
- *(.text) \
+ *(.text .text.fixup) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
@@ -478,6 +490,7 @@
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
*(.ctors) \
+ *(SORT(.init_array.*)) \
*(.init_array) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
@@ -500,8 +513,10 @@
CLKSRC_OF_TABLES() \
IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
+ CPUIDLE_METHOD_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \
+ EARLYCON_TABLE() \
EARLYCON_OF_TABLES()
#define INIT_TEXT \
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 6d26b40..9916d0e 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -16,7 +16,7 @@
#ifndef __CLKSOURCE_ARM_ARCH_TIMER_H
#define __CLKSOURCE_ARM_ARCH_TIMER_H
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#include <linux/types.h>
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h
new file mode 100644
index 0000000..1f8a1ca
--- /dev/null
+++ b/include/clocksource/timer-sp804.h
@@ -0,0 +1,28 @@
+#ifndef __CLKSOURCE_TIMER_SP804_H
+#define __CLKSOURCE_TIMER_SP804_H
+
+struct clk;
+
+void __sp804_clocksource_and_sched_clock_init(void __iomem *,
+ const char *, struct clk *, int);
+void __sp804_clockevents_init(void __iomem *, unsigned int,
+ struct clk *, const char *);
+void sp804_timer_disable(void __iomem *);
+
+static inline void sp804_clocksource_init(void __iomem *base, const char *name)
+{
+ __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0);
+}
+
+static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base,
+ const char *name)
+{
+ __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1);
+}
+
+static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name)
+{
+ __sp804_clockevents_init(base, irq, NULL, name);
+}
+#endif
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 94b19be..7169ad0 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -18,6 +18,65 @@
#include <linux/slab.h>
/**
+ * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
+ *
+ * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
+ * (listed as type "aead" in /proc/crypto)
+ *
+ * The most prominent examples for this type of encryption are GCM and CCM.
+ * However, the kernel supports other types of AEAD ciphers which are defined
+ * with the following cipher string:
+ *
+ * authenc(keyed message digest, block cipher)
+ *
+ * For example: authenc(hmac(sha256), cbc(aes))
+ *
+ * The example code provided for the asynchronous block cipher operation
+ * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
+ * for their *aead* counterparts discussed in the following. In addition, for the AEAD
+ * operation, the aead_request_set_assoc function must be used to set the
+ * pointer to the associated data memory location before performing the
+ * encryption or decryption operation. In case of an encryption, the associated
+ * data memory is filled during the encryption operation. For decryption, the
+ * associated data memory must contain data that is used to verify the integrity
+ * of the decrypted data. Another deviation from the asynchronous block cipher
+ * operation is that the caller should explicitly check for -EBADMSG of the
+ * crypto_aead_decrypt. That error indicates an authentication error, i.e.
+ * a breach in the integrity of the message. In essence, that -EBADMSG error
+ * code is the key bonus an AEAD cipher has over "standard" block chaining
+ * modes.
+ */
+
+/**
+ * struct aead_request - AEAD request
+ * @base: Common attributes for async crypto requests
+ * @old: Boolean whether the old or new AEAD API is used
+ * @assoclen: Length in bytes of associated data for authentication
+ * @cryptlen: Length of data to be encrypted or decrypted
+ * @iv: Initialisation vector
+ * @assoc: Associated data
+ * @src: Source data
+ * @dst: Destination data
+ * @__ctx: Start of private context data
+ */
+struct aead_request {
+ struct crypto_async_request base;
+
+ bool old;
+
+ unsigned int assoclen;
+ unsigned int cryptlen;
+
+ u8 *iv;
+
+ struct scatterlist *assoc;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
* struct aead_givcrypt_request - AEAD request with IV generation
* @seq: Sequence number for IV generation
* @giv: Space for generated IV
@@ -30,6 +89,474 @@ struct aead_givcrypt_request {
struct aead_request areq;
};
+/**
+ * struct aead_alg - AEAD cipher definition
+ * @maxauthsize: Set the maximum authentication tag size supported by the
+ * transformation. A transformation may support smaller tag sizes.
+ * As the authentication tag is a message digest to ensure the
+ * integrity of the encrypted data, a consumer typically wants the
+ * largest authentication tag possible as defined by this
+ * variable.
+ * @setauthsize: Set authentication size for the AEAD transformation. This
+ * function is used to specify the consumer requested size of the
+ * authentication tag to be either generated by the transformation
+ * during encryption or the size of the authentication tag to be
+ * supplied during the decryption operation. This function is also
+ * responsible for checking the authentication tag size for
+ * validity.
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ * @init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated. In case the
+ * cryptographic hardware has some special requirements which need to
+ * be handled by software, this function shall check for the precise
+ * requirement of the transformation and put any software fallbacks
+ * in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * All fields except @ivsize are mandatory and must be filled.
+ */
+struct aead_alg {
+ int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
+ int (*encrypt)(struct aead_request *req);
+ int (*decrypt)(struct aead_request *req);
+ int (*init)(struct crypto_aead *tfm);
+ void (*exit)(struct crypto_aead *tfm);
+
+ const char *geniv;
+
+ unsigned int ivsize;
+ unsigned int maxauthsize;
+
+ struct crypto_alg base;
+};
+
+struct crypto_aead {
+ int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
+ int (*encrypt)(struct aead_request *req);
+ int (*decrypt)(struct aead_request *req);
+ int (*givencrypt)(struct aead_givcrypt_request *req);
+ int (*givdecrypt)(struct aead_givcrypt_request *req);
+
+ struct crypto_aead *child;
+
+ unsigned int authsize;
+ unsigned int reqsize;
+
+ struct crypto_tfm base;
+};
+
+static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_aead, base);
+}
+
+/**
+ * crypto_alloc_aead() - allocate AEAD cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * AEAD cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an AEAD. The returned struct
+ * crypto_aead is the cipher handle that is required for any subsequent
+ * API invocation for that AEAD.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_aead() - zeroize and free aead handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_aead(struct crypto_aead *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
+}
+
+static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
+{
+ return tfm;
+}
+
+static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
+{
+ return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
+}
+
+static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
+{
+ return container_of(crypto_aead_tfm(tfm)->__crt_alg,
+ struct aead_alg, base);
+}
+
+static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
+{
+ return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize :
+ alg->ivsize;
+}
+
+/**
+ * crypto_aead_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the aead referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
+{
+ return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
+}
+
+/**
+ * crypto_aead_authsize() - obtain maximum authentication data size
+ * @tfm: cipher handle
+ *
+ * The maximum size of the authentication data for the AEAD cipher referenced
+ * by the AEAD cipher handle is returned. The authentication data size may be
+ * zero if the cipher implements a hard-coded maximum.
+ *
+ * The authentication data may also be known as "tag value".
+ *
+ * Return: authentication data size / tag size in bytes
+ */
+static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
+{
+ return tfm->authsize;
+}
+
+/**
+ * crypto_aead_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the AEAD referenced with the cipher handle is returned.
+ * The caller may use that information to allocate appropriate memory for the
+ * data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
+}
+
+static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
+}
+
+static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
+{
+ return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
+}
+
+static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
+}
+
+static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
+}
+
+/**
+ * crypto_aead_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the AEAD referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setkey(struct crypto_aead *tfm,
+ const u8 *key, unsigned int keylen);
+
+/**
+ * crypto_aead_setauthsize() - set authentication data size
+ * @tfm: cipher handle
+ * @authsize: size of the authentication data / tag in bytes
+ *
+ * Set the authentication data size / tag size. AEAD requires an authentication
+ * tag (or MAC) in addition to the associated data.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+
+static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
+{
+ return __crypto_aead_cast(req->base.tfm);
+}
+
+/**
+ * crypto_aead_encrypt() - encrypt plaintext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The encryption operation creates the authentication data /
+ * tag. That data is concatenated with the created ciphertext.
+ * The ciphertext memory size is therefore the given number of
+ * block cipher blocks + the size defined by the
+ * crypto_aead_setauthsize invocation. The caller must ensure
+ * that sufficient memory is available for the ciphertext and
+ * the authentication tag.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_aead_encrypt(struct aead_request *req)
+{
+ return crypto_aead_reqtfm(req)->encrypt(req);
+}
+
+/**
+ * crypto_aead_decrypt() - decrypt ciphertext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
+ * authentication data / tag. That authentication data / tag
+ * must have the size defined by the crypto_aead_setauthsize
+ * invocation.
+ *
+ *
+ * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
+ * cipher operation performs the authentication of the data during the
+ * decryption operation. Therefore, the function returns this error if
+ * the authentication of the ciphertext was unsuccessful (i.e. the
+ * integrity of the ciphertext or the associated data was violated);
+ * < 0 if an error occurred.
+ */
+static inline int crypto_aead_decrypt(struct aead_request *req)
+{
+ if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+ return -EINVAL;
+
+ return crypto_aead_reqtfm(req)->decrypt(req);
+}
+
+/**
+ * DOC: Asynchronous AEAD Request Handle
+ *
+ * The aead_request data structure contains all pointers to data required for
+ * the AEAD cipher operation. This includes the cipher handle (which can be
+ * used by multiple aead_request instances), pointer to plaintext and
+ * ciphertext, asynchronous callback function, etc. It acts as a handle to the
+ * aead_request_* API calls in a similar way as AEAD handle to the
+ * crypto_aead_* API calls.
+ */
+
+/**
+ * crypto_aead_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
+
+/**
+ * aead_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing aead handle in the request
+ * data structure with a different one.
+ */
+static inline void aead_request_set_tfm(struct aead_request *req,
+ struct crypto_aead *tfm)
+{
+ req->base.tfm = crypto_aead_tfm(tfm->child);
+}
+
+/**
+ * aead_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the AEAD
+ * encrypt and decrypt API calls. During the allocation, the provided aead
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
+ gfp_t gfp)
+{
+ struct aead_request *req;
+
+ req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
+
+ if (likely(req))
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * aead_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void aead_request_free(struct aead_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * aead_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * Setting the callback function that is triggered once the cipher operation
+ * completes
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void aead_request_set_callback(struct aead_request *req,
+ u32 flags,
+ crypto_completion_t compl,
+ void *data)
+{
+ req->base.complete = compl;
+ req->base.data = data;
+ req->base.flags = flags;
+}
+
+/**
+ * aead_request_set_crypt - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_aead_ivsize()
+ *
+ * Setting the source data and destination data scatter / gather lists which
+ * hold the associated data concatenated with the plaintext or ciphertext. See
+ * below for the authentication tag.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ *
+ * For both src/dst the layout is associated data, plain/cipher text,
+ * authentication tag.
+ *
+ * The content of the AD in the destination buffer after processing
+ * will either be untouched, or it will contain a copy of the AD
+ * from the source buffer. In order to ensure that it always has
+ * a copy of the AD, the user must copy the AD over either before
+ * or after processing. Of course this is not relevant if the user
+ * is doing in-place processing where src == dst.
+ *
+ * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
+ * the caller must concatenate the ciphertext followed by the
+ * authentication tag and provide the entire data stream to the
+ * decryption operation (i.e. the data length used for the
+ * initialization of the scatterlist and the data length for the
+ * decryption operation is identical). For encryption, however,
+ * the authentication tag is created while encrypting the data.
+ * The destination buffer must hold sufficient space for the
+ * ciphertext and the authentication tag while the encryption
+ * invocation must only point to the plaintext data size. The
+ * following code snippet illustrates the memory usage
+ * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
+ * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
+ * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ */
+static inline void aead_request_set_crypt(struct aead_request *req,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int cryptlen, u8 *iv)
+{
+ req->src = src;
+ req->dst = dst;
+ req->cryptlen = cryptlen;
+ req->iv = iv;
+}
+
+/**
+ * aead_request_set_assoc() - set the associated data scatter / gather list
+ * @req: request handle
+ * @assoc: associated data scatter / gather list
+ * @assoclen: number of bytes to process from @assoc
+ *
+ * Obsolete, do not use.
+ */
+static inline void aead_request_set_assoc(struct aead_request *req,
+ struct scatterlist *assoc,
+ unsigned int assoclen)
+{
+ req->assoc = assoc;
+ req->assoclen = assoclen;
+ req->old = true;
+}
+
+/**
+ * aead_request_set_ad - set associated data information
+ * @req: request handle
+ * @assoclen: number of bytes in associated data
+ *
+ * Setting the AD information. This function sets the length of
+ * the associated data.
+ */
+static inline void aead_request_set_ad(struct aead_request *req,
+ unsigned int assoclen)
+{
+ req->assoclen = assoclen;
+ req->old = false;
+}
+
static inline struct crypto_aead *aead_givcrypt_reqtfm(
struct aead_givcrypt_request *req)
{
@@ -38,14 +565,12 @@ static inline struct crypto_aead *aead_givcrypt_reqtfm(
static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
{
- struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
- return crt->givencrypt(req);
+ return aead_givcrypt_reqtfm(req)->givencrypt(req);
};
static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
{
- struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
- return crt->givdecrypt(req);
+ return aead_givcrypt_reqtfm(req)->givdecrypt(req);
};
static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
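Taken together, the new aead_request layout expects associated data and plaintext back-to-back in one scatterlist, with the AD length supplied via aead_request_set_ad() and room for the tag left in the destination. A minimal caller-side sketch (error handling trimmed; "gcm(aes)" is chosen only as an example, and a real caller must also handle -EINPROGRESS/-EBUSY from an asynchronous implementation):

static int demo_aead_encrypt(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	static u8 buf[16 + 64 + 16];	/* AD | plaintext | room for tag */
	static u8 key[16], iv[12];	/* hypothetical key/IV material */
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_aead_setkey(tfm, key, sizeof(key));
	crypto_aead_setauthsize(tfm, 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_aead(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_ad(req, 16);			/* 16 bytes of AD */
	aead_request_set_crypt(req, &sg, &sg, 64, iv);	/* 64 bytes of plaintext */

	ret = crypto_aead_encrypt(req);			/* tag written after the data */

	aead_request_free(req);
	crypto_free_aead(tfm);
	return ret;
}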
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
new file mode 100644
index 0000000..69d163e
--- /dev/null
+++ b/include/crypto/akcipher.h
@@ -0,0 +1,340 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AKCIPHER_H
+#define _CRYPTO_AKCIPHER_H
+#include <linux/crypto.h>
+
+/**
+ * struct akcipher_request - public key request
+ *
+ * @base: Common attributes for async crypto requests
+ * @src: Pointer to memory containing the input parameters
+ * The format of the parameter(s) is expected to be Octet String
+ * @dst: Pointer to memory where the result will be stored
+ * @src_len: Size of the input parameter
+ * @dst_len: Size of the output buffer. It needs to be at least
+ * as big as the expected result depending on the operation
+ * After the operation it will be updated with the actual size of the
+ * result. In case of error, where the dst_len was insufficient,
+ * it will be updated to the size required for the operation.
+ * @__ctx: Start of private context data
+ */
+struct akcipher_request {
+ struct crypto_async_request base;
+ void *src;
+ void *dst;
+ unsigned int src_len;
+ unsigned int dst_len;
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_akcipher - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_akcipher {
+ struct crypto_tfm base;
+};
+
+/**
+ * struct akcipher_alg - generic public key algorithm
+ *
+ * @sign: Function performs a sign operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @verify: Function performs a verify operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @encrypt: Function performs an encrypt operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @decrypt: Function performs a decrypt operation as defined by public key
+ * algorithm. In case of error, where the dst_len was insufficient,
+ * the req->dst_len will be updated to the size required for the
+ * operation
+ * @setkey: Function invokes the algorithm specific set key function, which
+ * knows how to decode and interpret the BER encoded key
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Request context size required by algorithm implementation
+ * @base: Common crypto API algorithm data structure
+ */
+struct akcipher_alg {
+ int (*sign)(struct akcipher_request *req);
+ int (*verify)(struct akcipher_request *req);
+ int (*encrypt)(struct akcipher_request *req);
+ int (*decrypt)(struct akcipher_request *req);
+ int (*setkey)(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen);
+ int (*init)(struct crypto_akcipher *tfm);
+ void (*exit)(struct crypto_akcipher *tfm);
+
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Public Key API
+ *
+ * The Public Key API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_akcipher() -- allocate AKCIPHER tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * public key algorithm e.g. "rsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for public key algorithm. The returned struct
+ * crypto_akcipher is the handle that is required for any subsequent
+ * API invocation for the public key operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
+ u32 mask);
+
+static inline struct crypto_tfm *crypto_akcipher_tfm(
+ struct crypto_akcipher *tfm)
+{
+ return &tfm->base;
+}
+
+static inline struct akcipher_alg *__crypto_akcipher_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct akcipher_alg, base);
+}
+
+static inline struct crypto_akcipher *__crypto_akcipher_tfm(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_akcipher, base);
+}
+
+static inline struct akcipher_alg *crypto_akcipher_alg(
+ struct crypto_akcipher *tfm)
+{
+ return __crypto_akcipher_alg(crypto_akcipher_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm)
+{
+ return crypto_akcipher_alg(tfm)->reqsize;
+}
+
+static inline void akcipher_request_set_tfm(struct akcipher_request *req,
+ struct crypto_akcipher *tfm)
+{
+ req->base.tfm = crypto_akcipher_tfm(tfm);
+}
+
+static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
+ struct akcipher_request *req)
+{
+ return __crypto_akcipher_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_akcipher() -- free AKCIPHER tfm handle
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ */
+static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_akcipher_tfm(tfm));
+}
+
+/**
+ * akcipher_request_alloc() -- allocates public key request
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @gfp: allocation flags
+ *
+ * Return: allocated handle in case of success or NULL in case of an error.
+ */
+static inline struct akcipher_request *akcipher_request_alloc(
+ struct crypto_akcipher *tfm, gfp_t gfp)
+{
+ struct akcipher_request *req;
+
+ req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp);
+ if (likely(req))
+ akcipher_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * akcipher_request_free() -- zeroize and free public key request
+ *
+ * @req: request to free
+ */
+static inline void akcipher_request_free(struct akcipher_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * akcipher_request_set_callback() -- Sets an asynchronous callback.
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req: request that the callback will be set for
+ * @flgs: flags specifying, for instance, whether the operation may backlog
+ * @cmpl: callback which will be called
+ * @data: private data used by the caller
+ */
+static inline void akcipher_request_set_callback(struct akcipher_request *req,
+ u32 flgs,
+ crypto_completion_t cmpl,
+ void *data)
+{
+ req->base.complete = cmpl;
+ req->base.data = data;
+ req->base.flags = flgs;
+}
+
+/**
+ * akcipher_request_set_crypt() -- Sets request parameters
+ *
+ * Sets the parameters required by the crypto operation
+ *
+ * @req: public key request
+ * @src: ptr to the input buffer
+ * @dst: ptr to the output buffer
+ * @src_len: size of the input buffer
+ * @dst_len: size of the output buffer. It will be updated by the
+ * implementation to reflect the actual size of the result
+ */
+static inline void akcipher_request_set_crypt(struct akcipher_request *req,
+ void *src, void *dst,
+ unsigned int src_len,
+ unsigned int dst_len)
+{
+ req->src = src;
+ req->dst = dst;
+ req->src_len = src_len;
+ req->dst_len = dst_len;
+}
+
+/**
+ * crypto_akcipher_encrypt() -- Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->encrypt(req);
+}
+
+/**
+ * crypto_akcipher_decrypt() -- Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->decrypt(req);
+}
+
+/**
+ * crypto_akcipher_sign() -- Invoke public key sign operation
+ *
+ * Function invokes the specific public key sign operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_sign(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->sign(req);
+}
+
+/**
+ * crypto_akcipher_verify() -- Invoke public key verify operation
+ *
+ * Function invokes the specific public key verify operation for a given
+ * public key algorithm
+ *
+ * @req: asymmetric key request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->verify(req);
+}
+
+/**
+ * crypto_akcipher_setkey() -- Invoke public key setkey operation
+ *
+ * Function invokes the algorithm-specific set key function, which knows
+ * how to decode and interpret the encoded key
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded private or public key
+ * @keylen: length of the key
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_akcipher_setkey(struct crypto_akcipher *tfm, void *key,
+ unsigned int keylen)
+{
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+ return alg->setkey(tfm, key, keylen);
+}
+#endif
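
For orientation, a minimal consumer-side sketch of the API declared above. It is only a sketch: the "rsa" algorithm name, the BER key buffer and the assumption that the implementation completes synchronously are all taken for granted, and error handling is compressed.

#include <crypto/akcipher.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical: encrypt in_len bytes of in[] into out[] with a BER-encoded key. */
static int example_akcipher_encrypt(const void *ber_key, unsigned int keylen,
				    void *in, unsigned int in_len,
				    void *out, unsigned int out_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	int err;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_setkey(tfm, (void *)ber_key, keylen);
	if (err)
		goto out_free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* No callback: assumes a synchronous implementation. */
	akcipher_request_set_callback(req, 0, NULL, NULL);
	akcipher_request_set_crypt(req, in, out, in_len, out_len);

	err = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}

A real caller would normally pass a completion callback and handle -EINPROGRESS / -EBUSY returns from asynchronous implementations.
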
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 623a59c..d4ebf6e 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
+struct crypto_aead;
struct module;
struct rtattr;
struct seq_file;
@@ -126,7 +127,6 @@ struct ablkcipher_walk {
};
extern const struct crypto_type crypto_ablkcipher_type;
-extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_blkcipher_type;
void crypto_mod_put(struct crypto_alg *alg);
@@ -137,13 +137,15 @@ struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
-int crypto_unregister_instance(struct crypto_alg *alg);
+int crypto_unregister_instance(struct crypto_instance *inst);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst,
const struct crypto_type *frontend);
+int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
+ u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
@@ -239,22 +241,6 @@ static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
return crypto_tfm_ctx_aligned(&tfm->base);
}
-static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
-}
-
-static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
-{
- return crypto_tfm_ctx(&tfm->base);
-}
-
-static inline struct crypto_instance *crypto_aead_alg_instance(
- struct crypto_aead *aead)
-{
- return crypto_tfm_alg_instance(&aead->base);
-}
-
static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
struct crypto_spawn *spawn)
{
@@ -363,21 +349,6 @@ static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}
-static inline void *aead_request_ctx(struct aead_request *req)
-{
- return req->__ctx;
-}
-
-static inline void aead_request_complete(struct aead_request *req, int err)
-{
- req->base.complete(&req->base, err);
-}
-
-static inline u32 aead_request_flags(struct aead_request *req)
-{
- return req->base.flags;
-}
-
static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
u32 type, u32 mask)
{
diff --git a/include/crypto/compress.h b/include/crypto/compress.h
index 86163ef..5b67af8 100644
--- a/include/crypto/compress.h
+++ b/include/crypto/compress.h
@@ -55,14 +55,14 @@ struct crypto_pcomp {
};
struct pcomp_alg {
- int (*compress_setup)(struct crypto_pcomp *tfm, void *params,
+ int (*compress_setup)(struct crypto_pcomp *tfm, const void *params,
unsigned int len);
int (*compress_init)(struct crypto_pcomp *tfm);
int (*compress_update)(struct crypto_pcomp *tfm,
struct comp_request *req);
int (*compress_final)(struct crypto_pcomp *tfm,
struct comp_request *req);
- int (*decompress_setup)(struct crypto_pcomp *tfm, void *params,
+ int (*decompress_setup)(struct crypto_pcomp *tfm, const void *params,
unsigned int len);
int (*decompress_init)(struct crypto_pcomp *tfm);
int (*decompress_update)(struct crypto_pcomp *tfm,
@@ -97,7 +97,7 @@ static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
}
static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
- void *params, unsigned int len)
+ const void *params, unsigned int len)
{
return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
}
@@ -120,7 +120,7 @@ static inline int crypto_compress_final(struct crypto_pcomp *tfm,
}
static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
- void *params, unsigned int len)
+ const void *params, unsigned int len)
{
return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
}
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index ba98918..1547f54 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -14,6 +14,7 @@
#include <linux/crypto.h>
#include <linux/kernel.h>
+#include <crypto/aead.h>
#include <crypto/hash.h>
struct cryptd_ablkcipher {
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 5186f75..9756c70 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -49,8 +49,9 @@
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/workqueue.h>
/*
* Concatenation Helper and string operation helper
@@ -104,12 +105,13 @@ struct drbg_test_data {
};
struct drbg_state {
- spinlock_t drbg_lock; /* lock around DRBG */
+ struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
unsigned char *C;
/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
size_t reseed_ctr;
+ size_t reseed_threshold;
/* some memory the DRBG can use for its operation */
unsigned char *scratchpad;
void *priv_data; /* Cipher handle */
@@ -119,9 +121,12 @@ struct drbg_state {
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
#endif
+ struct work_struct seed_work; /* asynchronous seeding support */
+ struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
- struct drbg_test_data *test_data;
+ struct drbg_string test_data;
+ struct random_ready_callback random_ready;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
@@ -177,19 +182,8 @@ static inline size_t drbg_max_requests(struct drbg_state *drbg)
}
/*
- * kernel crypto API input data structure for DRBG generate in case dlen
- * is set to 0
- */
-struct drbg_gen {
- unsigned char *outbuf; /* output buffer for random numbers */
- unsigned int outlen; /* size of output buffer */
- struct drbg_string *addtl; /* additional information string */
- struct drbg_test_data *test_data; /* test data */
-};
-
-/*
* This is a wrapper to the kernel crypto API function of
- * crypto_rng_get_bytes() to allow the caller to provide additional data.
+ * crypto_rng_generate() to allow the caller to provide additional data.
*
* @drng DRBG handle -- see crypto_rng_get_bytes
* @outbuf output buffer -- see crypto_rng_get_bytes
@@ -204,21 +198,15 @@ static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
unsigned char *outbuf, unsigned int outlen,
struct drbg_string *addtl)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = outbuf;
- genbuf.outlen = outlen;
- genbuf.addtl = addtl;
- genbuf.test_data = NULL;
- ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
- return ret;
+ return crypto_rng_generate(drng, addtl->buf, addtl->len,
+ outbuf, outlen);
}
/*
* TEST code
*
* This is a wrapper to the kernel crypto API function of
- * crypto_rng_get_bytes() to allow the caller to provide additional data and
+ * crypto_rng_generate() to allow the caller to provide additional data and
* allow furnishing of test_data
*
* @drng DRBG handle -- see crypto_rng_get_bytes
@@ -236,14 +224,10 @@ static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
struct drbg_string *addtl,
struct drbg_test_data *test_data)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = outbuf;
- genbuf.outlen = outlen;
- genbuf.addtl = addtl;
- genbuf.test_data = test_data;
- ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
- return ret;
+ crypto_rng_set_entropy(drng, test_data->testentropy->buf,
+ test_data->testentropy->len);
+ return crypto_rng_generate(drng, addtl->buf, addtl->len,
+ outbuf, outlen);
}
/*
@@ -264,14 +248,9 @@ static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
struct drbg_string *pers,
struct drbg_test_data *test_data)
{
- int ret;
- struct drbg_gen genbuf;
- genbuf.outbuf = NULL;
- genbuf.outlen = 0;
- genbuf.addtl = pers;
- genbuf.test_data = test_data;
- ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0);
- return ret;
+ crypto_rng_set_entropy(drng, test_data->testentropy->buf,
+ test_data->testentropy->len);
+ return crypto_rng_reset(drng, pers->buf, pers->len);
}
/* DRBG type flags */
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 98abda9..57c8a6e 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -66,7 +66,7 @@ struct ahash_request {
/**
* struct ahash_alg - asynchronous message digest definition
* @init: Initialize the transformation context. Intended only to initialize the
- * state of the HASH transformation at the begining. This shall fill in
+ * state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point.
* @update: Push a chunk of data into the driver for transformation. This
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index cd62bf4..018afb2 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -50,6 +50,7 @@ struct af_alg_type {
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
int (*accept)(void *private, struct sock *sk);
+ int (*setauthsize)(void *private, unsigned int authsize);
struct proto_ops *ops;
struct module *owner;
@@ -57,8 +58,9 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES];
+ struct scatterlist sg[ALG_MAX_PAGES + 1];
struct page *pages[ALG_MAX_PAGES];
+ unsigned int npages;
};
int af_alg_register_type(const struct af_alg_type *type);
@@ -67,9 +69,9 @@ int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
int af_alg_accept(struct sock *sk, struct socket *newsock);
-int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
- int write);
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 2eba340..4b25471 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -15,16 +15,75 @@
#include <crypto/aead.h>
#include <crypto/algapi.h>
+#include <linux/stddef.h>
#include <linux/types.h>
struct rtattr;
+struct aead_instance {
+ union {
+ struct {
+ char head[offsetof(struct aead_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct aead_alg alg;
+ };
+};
+
struct crypto_aead_spawn {
struct crypto_spawn base;
};
+extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_nivaead_type;
+static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *crypto_aead_alg_instance(
+ struct crypto_aead *aead)
+{
+ return crypto_tfm_alg_instance(&aead->base);
+}
+
+static inline struct crypto_instance *aead_crypto_instance(
+ struct aead_instance *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct aead_instance, alg.base);
+}
+
+static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
+{
+ return aead_instance(crypto_aead_alg_instance(aead));
+}
+
+static inline void *aead_instance_ctx(struct aead_instance *inst)
+{
+ return crypto_instance_ctx(aead_crypto_instance(inst));
+}
+
+static inline void *aead_request_ctx(struct aead_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void aead_request_complete(struct aead_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline u32 aead_request_flags(struct aead_request *req)
+{
+ return req->base.flags;
+}
+
static inline void crypto_set_aead_spawn(
struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
{
@@ -47,24 +106,27 @@ static inline struct crypto_alg *crypto_aead_spawn_alg(
return spawn->base.alg;
}
+static inline struct aead_alg *crypto_spawn_aead_alg(
+ struct crypto_aead_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct aead_alg, base);
+}
+
static inline struct crypto_aead *crypto_spawn_aead(
struct crypto_aead_spawn *spawn)
{
- return __crypto_aead_cast(
- crypto_spawn_tfm(&spawn->base, CRYPTO_ALG_TYPE_AEAD,
- CRYPTO_ALG_TYPE_MASK));
+ return crypto_spawn_tfm2(&spawn->base);
}
-struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type,
- u32 mask);
-void aead_geniv_free(struct crypto_instance *inst);
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type, u32 mask);
+void aead_geniv_free(struct aead_instance *inst);
int aead_geniv_init(struct crypto_tfm *tfm);
void aead_geniv_exit(struct crypto_tfm *tfm);
static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
{
- return crypto_aead_crt(geniv)->base;
+ return geniv->child;
}
static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
@@ -78,5 +140,29 @@ static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
aead_request_complete(&req->areq, err);
}
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+ unsigned int reqsize)
+{
+ crypto_aead_crt(aead)->reqsize = reqsize;
+}
+
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+{
+ return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize :
+ alg->maxauthsize;
+}
+
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+{
+ return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+}
+
+int crypto_register_aead(struct aead_alg *alg);
+void crypto_unregister_aead(struct aead_alg *alg);
+int crypto_register_aeads(struct aead_alg *algs, int count);
+void crypto_unregister_aeads(struct aead_alg *algs, int count);
+int aead_register_instance(struct crypto_template *tmpl,
+ struct aead_instance *inst);
+
#endif /* _CRYPTO_INTERNAL_AEAD_H */
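
To illustrate how an implementation might use the per-tfm and per-request helpers added above, here is a hedged sketch. The ->init() signature of the new aead_alg interface and everything prefixed my_ are assumptions for the example.

#include <crypto/internal/aead.h>

struct my_aead_ctx {		/* hypothetical per-tfm state */
	u32 key_handle;
};

struct my_aead_reqctx {		/* hypothetical per-request state */
	int status;
};

/* ->init() hook of a hypothetical aead_alg implementation */
static int my_aead_init(struct crypto_aead *aead)
{
	struct my_aead_ctx *ctx = crypto_aead_ctx(aead);

	ctx->key_handle = 0;
	/* reserve room for my_aead_reqctx behind every aead_request */
	crypto_aead_set_reqsize(aead, sizeof(struct my_aead_reqctx));
	return 0;
}

/* completion path of an asynchronous operation */
static void my_aead_done(struct aead_request *req, int err)
{
	struct my_aead_reqctx *rctx = aead_request_ctx(req);

	rctx->status = err;
	aead_request_complete(req, err);
}
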
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
new file mode 100644
index 0000000..9a2bda1
--- /dev/null
+++ b/include/crypto/internal/akcipher.h
@@ -0,0 +1,60 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AKCIPHER_INT_H
+#define _CRYPTO_AKCIPHER_INT_H
+#include <crypto/akcipher.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *akcipher_request_ctx(struct akcipher_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
+{
+ return tfm->base.__crt_ctx;
+}
+
+static inline void akcipher_request_complete(struct akcipher_request *req,
+ int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
+{
+ return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name;
+}
+
+/**
+ * crypto_register_akcipher() -- Register public key algorithm
+ *
+ * Function registers an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_akcipher(struct akcipher_alg *alg);
+
+/**
+ * crypto_unregister_akcipher() -- Unregister public key algorithm
+ *
+ * Function unregisters an implementation of a public key algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_akcipher(struct akcipher_alg *alg);
+#endif
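
A provider-side sketch to go with the registration functions above: fill in struct akcipher_alg (whose layout appears in the akcipher.h hunk earlier in this diff) and register it from module init. The "rsa-example" driver name and the stub handlers are placeholders, not a real implementation.

#include <crypto/internal/akcipher.h>
#include <linux/errno.h>
#include <linux/module.h>

static int my_rsa_op(struct akcipher_request *req)
{
	return -ENOSYS;		/* placeholder: the real math goes here */
}

static int my_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			 unsigned int keylen)
{
	return 0;		/* placeholder: parse and store the key */
}

static struct akcipher_alg my_rsa = {
	.encrypt	= my_rsa_op,
	.decrypt	= my_rsa_op,
	.sign		= my_rsa_op,
	.verify		= my_rsa_op,
	.setkey		= my_rsa_setkey,
	.base		= {
		.cra_name	 = "rsa",
		.cra_driver_name = "rsa-example",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
		.cra_ctxsize	 = 0,
	},
};

static int __init my_rsa_mod_init(void)
{
	return crypto_register_akcipher(&my_rsa);
}

static void __exit my_rsa_mod_exit(void)
{
	crypto_unregister_akcipher(&my_rsa);
}

module_init(my_rsa_mod_init);
module_exit(my_rsa_mod_exit);
MODULE_LICENSE("GPL");
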
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
new file mode 100644
index 0000000..9ca9b87
--- /dev/null
+++ b/include/crypto/internal/geniv.h
@@ -0,0 +1,24 @@
+/*
+ * geniv: IV generation
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_GENIV_H
+#define _CRYPTO_INTERNAL_GENIV_H
+
+#include <crypto/internal/aead.h>
+#include <linux/spinlock.h>
+
+struct aead_geniv_ctx {
+ spinlock_t lock;
+ struct crypto_aead *child;
+};
+
+#endif /* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h
index 8969733..a52ef34 100644
--- a/include/crypto/internal/rng.h
+++ b/include/crypto/internal/rng.h
@@ -2,6 +2,7 @@
* RNG: Random Number Generator algorithms under the crypto API
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -16,11 +17,29 @@
#include <crypto/algapi.h>
#include <crypto/rng.h>
-extern const struct crypto_type crypto_rng_type;
+int crypto_register_rng(struct rng_alg *alg);
+void crypto_unregister_rng(struct rng_alg *alg);
+int crypto_register_rngs(struct rng_alg *algs, int count);
+void crypto_unregister_rngs(struct rng_alg *algs, int count);
+
+#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
+int crypto_del_default_rng(void);
+#else
+static inline int crypto_del_default_rng(void)
+{
+ return 0;
+}
+#endif
static inline void *crypto_rng_ctx(struct crypto_rng *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
+static inline void crypto_rng_set_entropy(struct crypto_rng *tfm,
+ const u8 *data, unsigned int len)
+{
+ crypto_rng_alg(tfm)->set_ent(tfm, data, len);
+}
+
#endif
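
For the new RNG registration interface, a similarly hedged provider sketch; the generation logic is a stub, the seed size is assumed, and "stdrng" is used as the generic algorithm name in the same way the existing generic RNGs do. Registration would mirror the akcipher module example above, via crypto_register_rng(&my_rng) from module init.

#include <crypto/internal/rng.h>
#include <linux/module.h>
#include <linux/string.h>

static int my_rng_generate(struct crypto_rng *tfm, const u8 *src,
			   unsigned int slen, u8 *dst, unsigned int dlen)
{
	/* placeholder: mix in the additional data (src/slen) and fill dst */
	memset(dst, 0, dlen);
	return 0;
}

static int my_rng_seed(struct crypto_rng *tfm, const u8 *seed,
		       unsigned int slen)
{
	return 0;		/* placeholder: absorb the seed into the state */
}

static struct rng_alg my_rng = {
	.generate	= my_rng_generate,
	.seed		= my_rng_seed,
	.seedsize	= 32,	/* assumed seed size */
	.base		= {
		.cra_name	 = "stdrng",
		.cra_driver_name = "my_rng",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
	},
};
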
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
new file mode 100644
index 0000000..a8c8636
--- /dev/null
+++ b/include/crypto/internal/rsa.h
@@ -0,0 +1,27 @@
+/*
+ * RSA internal helpers
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _RSA_HELPER_
+#define _RSA_HELPER_
+#include <linux/mpi.h>
+
+struct rsa_key {
+ MPI n;
+ MPI e;
+ MPI d;
+};
+
+int rsa_parse_key(struct rsa_key *rsa_key, const void *key,
+ unsigned int key_len);
+
+void rsa_free_key(struct rsa_key *rsa_key);
+#endif
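
A small hedged sketch of the parser helper declared above; the raw buffer is assumed to carry the BER encoding that rsa_parse_key() expects.

#include <crypto/internal/rsa.h>

static int example_load_rsa_key(const void *ber, unsigned int len)
{
	struct rsa_key key = {};
	int err;

	err = rsa_parse_key(&key, ber, len);
	if (err)
		return err;

	/* key.n, key.e and (for private keys) key.d now hold MPIs
	 * ready for modular exponentiation. */

	rsa_free_key(&key);
	return 0;
}
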
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index 65f299b..146af82 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -8,6 +8,11 @@
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
+#define MD5_H0 0x67452301UL
+#define MD5_H1 0xefcdab89UL
+#define MD5_H2 0x98badcfeUL
+#define MD5_H3 0x10325476UL
+
struct md5_state {
u32 hash[MD5_HASH_WORDS];
u32 block[MD5_BLOCK_WORDS];
diff --git a/include/crypto/null.h b/include/crypto/null.h
index b7c864c..06dc30d 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -8,4 +8,7 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
+struct crypto_blkcipher *crypto_get_default_null_skcipher(void);
+void crypto_put_default_null_skcipher(void);
+
#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index a16fb10..b95ede3 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -2,6 +2,7 @@
* RNG: Random Number Generator algorithms under the crypto API
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -15,6 +16,50 @@
#include <linux/crypto.h>
+struct crypto_rng;
+
+/**
+ * struct rng_alg - random number generator definition
+ *
+ * @generate: The function defined by this variable obtains a
+ * random number. The random number generator transform
+ * must generate the random number out of the context
+ * provided with this call, plus any additional data
+ * if provided to the call.
+ * @seed: Seed or reseed the random number generator. With the
+ * invocation of this function call, the random number
+ * generator shall become ready for generation. If the
+ * random number generator requires a seed for setting
+ * up a new state, the seed must be provided by the
+ * consumer while invoking this function. The required
+ * size of the seed is defined with @seedsize.
+ * @set_ent: Set entropy that would otherwise be obtained from
+ * the entropy source. Internal use only.
+ * @seedsize: The seed size required to initialize the random
+ * number generator. Some random number generators do
+ * not require a seed, as the seeding is implemented
+ * internally without the need for support by the
+ * consumer. In this case, the seed size is set to zero.
+ * @base: Common crypto API algorithm data structure.
+ */
+struct rng_alg {
+ int (*generate)(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen);
+ int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+ void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
+ unsigned int len);
+
+ unsigned int seedsize;
+
+ struct crypto_alg base;
+};
+
+struct crypto_rng {
+ struct crypto_tfm base;
+};
+
extern struct crypto_rng *crypto_default_rng;
int crypto_get_default_rng(void);
@@ -27,11 +72,6 @@ void crypto_put_default_rng(void);
* CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
*/
-static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_rng *)tfm;
-}
-
/**
* crypto_alloc_rng() -- allocate RNG handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -52,15 +92,7 @@ static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
-static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_RNG;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_rng_cast(crypto_alloc_base(alg_name, type, mask));
-}
+struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask);
static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
{
@@ -77,12 +109,8 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
- return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
-}
-
-static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
-{
- return &crypto_rng_tfm(tfm)->crt_rng;
+ return container_of(crypto_rng_tfm(tfm)->__crt_alg,
+ struct rng_alg, base);
}
/**
@@ -91,7 +119,28 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
*/
static inline void crypto_free_rng(struct crypto_rng *tfm)
{
- crypto_free_tfm(crypto_rng_tfm(tfm));
+ crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
+}
+
+/**
+ * crypto_rng_generate() - get random number
+ * @tfm: cipher handle
+ * @src: Input buffer holding additional data, may be NULL
+ * @slen: Length of additional data
+ * @dst: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random
+ * numbers using the random number generator referenced by the
+ * cipher handle.
+ *
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen)
+{
+ return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
}
/**
@@ -103,13 +152,12 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
* This function fills the caller-allocated buffer with random numbers using the
* random number generator referenced by the cipher handle.
*
- * Return: > 0 function was successful and returns the number of generated
- * bytes; < 0 if an error occurred
+ * Return: 0 if the function was successful; < 0 if an error occurred
*/
static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
u8 *rdata, unsigned int dlen)
{
- return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
+ return crypto_rng_generate(tfm, NULL, 0, rdata, dlen);
}
/**
@@ -129,11 +177,8 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_rng_reset(struct crypto_rng *tfm,
- u8 *seed, unsigned int slen)
-{
- return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
-}
+int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen);
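
Putting the reworked consumer interface together, a hypothetical caller might look as follows; the "stdrng" name and the caller-supplied seed are assumptions and error paths are compressed.

#include <crypto/rng.h>
#include <linux/err.h>

static int example_get_random(u8 *buf, unsigned int len,
			      const u8 *seed, unsigned int slen)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_reset(rng, seed, slen);	/* (re)seed first */
	if (!err)
		/* crypto_rng_generate() could be used instead to pass
		 * additional input alongside the request */
		err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}
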
/**
* crypto_rng_seedsize() - obtain seed size of RNG
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 7ef512f..96670e7 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -33,21 +33,13 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
sg1[num - 1].page_link |= 0x01;
}
-static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
-{
- if (sg_is_last(sg))
- return NULL;
-
- return (++sg)->length ? sg : sg_chain_ptr(sg);
-}
-
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg,
int chain, int num)
{
if (chain) {
head->length += sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
if (sg)
@@ -110,4 +102,8 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);
+struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
+ struct scatterlist *src,
+ unsigned int len);
+
#endif /* _CRYPTO_SCATTERWALK_H */
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 190f8a0..dd7905a 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -65,20 +65,20 @@
#define SHA512_H7 0x5be0cd19137e2179ULL
struct sha1_state {
- u64 count;
u32 state[SHA1_DIGEST_SIZE / 4];
+ u64 count;
u8 buffer[SHA1_BLOCK_SIZE];
};
struct sha256_state {
- u64 count;
u32 state[SHA256_DIGEST_SIZE / 4];
+ u64 count;
u8 buf[SHA256_BLOCK_SIZE];
};
struct sha512_state {
- u64 count[2];
u64 state[SHA512_DIGEST_SIZE / 8];
+ u64 count[2];
u8 buf[SHA512_BLOCK_SIZE];
};
@@ -87,9 +87,18 @@ struct shash_desc;
extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
+
+extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
#endif
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
new file mode 100644
index 0000000..d0df431
--- /dev/null
+++ b/include/crypto/sha1_base.h
@@ -0,0 +1,106 @@
+/*
+ * sha1_base.h - core logic for SHA-1 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
+
+static inline int sha1_base_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA1_H0;
+ sctx->state[1] = SHA1_H1;
+ sctx->state[2] = SHA1_H2;
+ sctx->state[3] = SHA1_H3;
+ sctx->state[4] = SHA1_H4;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha1_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha1_block_fn *block_fn)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA1_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ blocks = len / SHA1_BLOCK_SIZE;
+ len %= SHA1_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA1_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha1_base_do_finalize(struct shash_desc *desc,
+ sha1_block_fn *block_fn)
+{
+ const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->buffer[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buffer, 1);
+
+ return 0;
+}
+
+static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sha1_state){};
+ return 0;
+}
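
The *_base headers carry the bookkeeping for arch-specific SHA drivers, so a glue layer only needs to supply the block function. A minimal sketch, assuming a hypothetical accelerated transform my_sha1_transform(); the sha256_base.h and sha512_base.h headers below follow the same pattern.

#include <crypto/sha1_base.h>

/* hypothetical: processes 'blocks' consecutive 64-byte blocks */
static void my_sha1_transform(struct sha1_state *sst, u8 const *src, int blocks)
{
	/* arch-specific SHA-1 rounds would go here */
}

static int my_sha1_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	return sha1_base_do_update(desc, data, len, my_sha1_transform);
}

static int my_sha1_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	if (len)
		sha1_base_do_update(desc, data, len, my_sha1_transform);
	sha1_base_do_finalize(desc, my_sha1_transform);
	return sha1_base_finish(desc, out);
}

In a real driver these helpers would be wired into a struct shash_alg's .update/.finup/.final hooks, with .init pointing at sha1_base_init().
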
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
new file mode 100644
index 0000000..d1f2195
--- /dev/null
+++ b/include/crypto/sha256_base.h
@@ -0,0 +1,128 @@
+/*
+ * sha256_base.h - core logic for SHA-256 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
+ int blocks);
+
+static inline int sha224_base_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha256_base_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha256_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA256_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ blocks = len / SHA256_BLOCK_SIZE;
+ len %= SHA256_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA256_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+ sha256_block_fn *block_fn)
+{
+ const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->buf[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ memset(sctx->buf + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buf, 1);
+
+ return 0;
+}
+
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
new file mode 100644
index 0000000..6c5341e
--- /dev/null
+++ b/include/crypto/sha512_base.h
@@ -0,0 +1,131 @@
+/*
+ * sha512_base.h - core logic for SHA-512 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
+static inline int sha384_base_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA384_H0;
+ sctx->state[1] = SHA384_H1;
+ sctx->state[2] = SHA384_H2;
+ sctx->state[3] = SHA384_H3;
+ sctx->state[4] = SHA384_H4;
+ sctx->state[5] = SHA384_H5;
+ sctx->state[6] = SHA384_H6;
+ sctx->state[7] = SHA384_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static inline int sha512_base_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA512_H0;
+ sctx->state[1] = SHA512_H1;
+ sctx->state[2] = SHA512_H2;
+ sctx->state[3] = SHA512_H3;
+ sctx->state[4] = SHA512_H4;
+ sctx->state[5] = SHA512_H5;
+ sctx->state[6] = SHA512_H6;
+ sctx->state[7] = SHA512_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static inline int sha512_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha512_block_fn *block_fn)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ sctx->count[0] += len;
+ if (sctx->count[0] < len)
+ sctx->count[1]++;
+
+ if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA512_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ blocks = len / SHA512_BLOCK_SIZE;
+ len %= SHA512_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA512_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha512_base_do_finalize(struct shash_desc *desc,
+ sha512_block_fn *block_fn)
+{
+ const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+ unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ sctx->buf[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ memset(sctx->buf + partial, 0x0, bit_offset - partial);
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ block_fn(sctx, sctx->buf, 1);
+
+ return 0;
+}
+
+static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ __be64 *digest = (__be64 *)out;
+ int i;
+
+ for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
+ put_unaligned_be64(sctx->state[i], digest++);
+
+ *sctx = (struct sha512_state){};
+ return 0;
+}
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
new file mode 100644
index 0000000..de13bfc
--- /dev/null
+++ b/include/drm/bridge/dw_hdmi.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DW_HDMI__
+#define __DW_HDMI__
+
+#include <drm/drmP.h>
+
+enum {
+ DW_HDMI_RES_8,
+ DW_HDMI_RES_10,
+ DW_HDMI_RES_12,
+ DW_HDMI_RES_MAX,
+};
+
+enum dw_hdmi_devtype {
+ IMX6Q_HDMI,
+ IMX6DL_HDMI,
+ RK3288_HDMI,
+};
+
+struct dw_hdmi_mpll_config {
+ unsigned long mpixelclock;
+ struct {
+ u16 cpce;
+ u16 gmp;
+ } res[DW_HDMI_RES_MAX];
+};
+
+struct dw_hdmi_curr_ctrl {
+ unsigned long mpixelclock;
+ u16 curr[DW_HDMI_RES_MAX];
+};
+
+struct dw_hdmi_phy_config {
+ unsigned long mpixelclock;
+ u16 sym_ctr; /* clock symbol and transmitter control */
+ u16 term; /* transmission termination value */
+ u16 vlev_ctr; /* voltage level control */
+};
+
+struct dw_hdmi_plat_data {
+ enum dw_hdmi_devtype dev_type;
+ const struct dw_hdmi_mpll_config *mpll_cfg;
+ const struct dw_hdmi_curr_ctrl *cur_ctr;
+ const struct dw_hdmi_phy_config *phy_config;
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+};
+
+void dw_hdmi_unbind(struct device *dev, struct device *master, void *data);
+int dw_hdmi_bind(struct device *dev, struct device *master,
+ void *data, struct drm_encoder *encoder,
+ struct resource *iores, int irq,
+ const struct dw_hdmi_plat_data *plat_data);
+#endif /* __DW_HDMI__ */
diff --git a/include/drm/bridge/ptn3460.h b/include/drm/bridge/ptn3460.h
deleted file mode 100644
index ff62344..0000000
--- a/include/drm/bridge/ptn3460.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _DRM_BRIDGE_PTN3460_H_
-#define _DRM_BRIDGE_PTN3460_H_
-
-struct drm_device;
-struct drm_encoder;
-struct i2c_client;
-struct device_node;
-
-#if defined(CONFIG_DRM_PTN3460) || defined(CONFIG_DRM_PTN3460_MODULE)
-
-int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
- struct i2c_client *client, struct device_node *node);
-#else
-
-static inline int ptn3460_init(struct drm_device *dev,
- struct drm_encoder *encoder, struct i2c_client *client,
- struct device_node *node)
-{
- return 0;
-}
-
-#endif
-
-#endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e1b2e8b..48db6a5 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,9 @@ struct dma_buf_attachment;
* PRIME: used in the prime code.
* This is the category used by the DRM_DEBUG_PRIME() macro.
*
+ * ATOMIC: used in the atomic code.
+ * This is the category used by the DRM_DEBUG_ATOMIC() macro.
+ *
* Enabling verbose debug messages is done through the drm.debug parameter,
* each category being enabled by a bit.
*
@@ -121,6 +124,7 @@ struct dma_buf_attachment;
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08
+#define DRM_UT_ATOMIC 0x10
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
@@ -143,6 +147,7 @@ void drm_err(const char *format, ...);
#define DRIVER_MODESET 0x2000
#define DRIVER_PRIME 0x4000
#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
/***********************************************************************/
/** \name Macros to make printk easier */
@@ -206,6 +211,11 @@ void drm_err(const char *format, ...);
if (unlikely(drm_debug & DRM_UT_PRIME)) \
drm_ut_debug_printk(__func__, fmt, ##args); \
} while (0)
+#define DRM_DEBUG_ATOMIC(fmt, args...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
/*@}*/
@@ -243,7 +253,6 @@ struct drm_ioctl_desc {
unsigned int cmd;
int flags;
drm_ioctl_t *func;
- unsigned int cmd_drv;
const char *name;
};
@@ -252,8 +261,13 @@ struct drm_ioctl_desc {
* ioctl, for use by drm_ioctl().
*/
-#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
+ .cmd = DRM_IOCTL_##ioctl, \
+ .func = _func, \
+ .flags = _flags, \
+ .name = #ioctl \
+ }
/* Event queued up for userspace to read */
struct drm_pending_event {
@@ -283,6 +297,8 @@ struct drm_file {
* in the plane list
*/
unsigned universal_planes:1;
+ /* true if client understands atomic properties */
+ unsigned atomic:1;
struct pid *pid;
kuid_t uid;
@@ -310,6 +326,10 @@ struct drm_file {
struct list_head fbs;
struct mutex fbs_lock;
+ /** User-created blob properties; this retains a reference on the
+ * property. */
+ struct list_head blobs;
+
wait_queue_head_t event_wait;
struct list_head event_list;
int event_space;
@@ -339,8 +359,7 @@ struct drm_lock_data {
* @minor: Link back to minor char device we are master for. Immutable.
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
- * @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
- * @magicfree: List of used authentication tokens. Protected by struct_mutex.
+ * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
* @driver_priv: Pointer to driver-private information.
*/
@@ -349,8 +368,7 @@ struct drm_master {
struct drm_minor *minor;
char *unique;
int unique_len;
- struct drm_open_hash magiclist;
- struct list_head magicfree;
+ struct idr magic_map;
struct drm_lock_data lock;
void *driver_priv;
};
@@ -670,9 +688,13 @@ struct drm_pending_vblank_event {
struct drm_vblank_crtc {
struct drm_device *dev; /* pointer to the drm_device */
wait_queue_head_t queue; /**< VBLANK wait queue */
- struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
struct timer_list disable_timer; /* delayed disable timer */
- atomic_t count; /**< number of VBLANK interrupts */
+
+ /* vblank counter, protected by dev->vblank_time_lock for writes */
+ unsigned long count;
+ /* vblank timestamps, protected by dev->vblank_time_lock for writes */
+ struct timeval time[DRM_VBLANKTIME_RBSIZE];
+
 atomic_t refcount; /* number of users of vblank interrupts per crtc */
u32 last; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
@@ -744,8 +766,6 @@ struct drm_device {
/** \name Context support */
/*@{ */
- bool irq_enabled; /**< True if irq handler is enabled */
- int irq;
__volatile__ long context_flag; /**< Context swapping flag */
int last_context; /**< Last current context */
@@ -753,6 +773,8 @@ struct drm_device {
/** \name VBLANK IRQ support */
/*@{ */
+ bool irq_enabled;
+ int irq;
/*
* At load time, disabling the vblank interrupt won't be allowed since
@@ -796,6 +818,7 @@ struct drm_device {
#endif
 struct platform_device *platformdev; /**< Platform device structure */
+ struct virtio_device *virtdev;
struct drm_sg_mem *sg; /**< Scatter gather memory */
unsigned int num_crtcs; /**< Number of CRTCs on this device */
@@ -919,6 +942,7 @@ extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
@@ -954,6 +978,7 @@ extern void drm_master_put(struct drm_master **master);
extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
+extern bool drm_atomic;
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ad22295..8a3a913 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -35,19 +35,89 @@ drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
void drm_atomic_state_free(struct drm_atomic_state *state);
+int __must_check
+drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
+void drm_atomic_state_default_clear(struct drm_atomic_state *state);
+void drm_atomic_state_default_release(struct drm_atomic_state *state);
+
struct drm_crtc_state * __must_check
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc);
+int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, struct drm_property *property,
+ uint64_t val);
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
+int drm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
+int drm_atomic_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state, struct drm_property *property,
+ uint64_t val);
+
+/**
+ * drm_atomic_get_existing_crtc_state - get crtc state, if it exists
+ * @state: global atomic state object
+ * @crtc: crtc to grab
+ *
+ * This function returns the crtc state for the given crtc, or NULL
+ * if the crtc is not part of the global atomic state.
+ */
+static inline struct drm_crtc_state *
+drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ return state->crtc_states[drm_crtc_index(crtc)];
+}
+
+/**
+ * drm_atomic_get_existing_plane_state - get plane state, if it exists
+ * @state: global atomic state object
+ * @plane: plane to grab
+ *
+ * This function returns the plane state for the given plane, or NULL
+ * if the plane is not part of the global atomic state.
+ */
+static inline struct drm_plane_state *
+drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ return state->plane_states[drm_plane_index(plane)];
+}
+
+/**
+ * drm_atomic_get_existing_connector_state - get connector state, if it exists
+ * @state: global atomic state object
+ * @connector: connector to grab
+ *
+ * This function returns the connector state for the given connector,
+ * or NULL if the connector is not part of the global atomic state.
+ */
+static inline struct drm_connector_state *
+drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ int index = drm_connector_index(connector);
+
+ if (index >= state->num_connector)
+ return NULL;
+ return state->connector_states[index];
+}
+
+int __must_check
+drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
+ struct drm_display_mode *mode);
+int __must_check
+drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
+ struct drm_property_blob *blob);
int __must_check
-drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
- struct drm_plane *plane, struct drm_crtc *crtc);
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
int __must_check
@@ -56,6 +126,10 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_planes(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+
int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc);
@@ -66,4 +140,34 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+#define for_each_connector_in_state(state, connector, connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (state)->num_connector && \
+ ((connector) = (state)->connectors[__i], \
+ (connector_state) = (state)->connector_states[__i], 1); \
+ (__i)++) \
+ if (connector)
+
+#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (state)->dev->mode_config.num_crtc && \
+ ((crtc) = (state)->crtcs[__i], \
+ (crtc_state) = (state)->crtc_states[__i], 1); \
+ (__i)++) \
+ if (crtc_state)
+
+#define for_each_plane_in_state(state, plane, plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (state)->dev->mode_config.num_total_plane && \
+ ((plane) = (state)->planes[__i], \
+ (plane_state) = (state)->plane_states[__i], 1); \
+ (__i)++) \
+ if (plane_state)
+static inline bool
+drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
+{
+ return state->mode_changed || state->active_changed;
+}
+
+
#endif /* DRM_ATOMIC_H_ */
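
To illustrate the iteration macros and the needs-modeset test added above, here is a hypothetical atomic_check-style walk over the CRTCs in a drm_atomic_state.

#include <drm/drm_atomic.h>

/* hypothetical: count how many CRTCs in this state require a full modeset */
static int example_count_modesets(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, modesets = 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			modesets++;
	}

	return modesets;
}
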
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index f956b41..cc1fee8 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -30,6 +30,10 @@
#include <drm/drm_crtc.h>
+int drm_atomic_helper_check_modeset(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_check_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
@@ -39,9 +43,13 @@ int drm_atomic_helper_commit(struct drm_device *dev,
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);
-void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
- struct drm_atomic_state *state);
-void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+void
+drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
@@ -50,6 +58,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state);
+void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
void drm_atomic_helper_swap_state(struct drm_device *dev,
struct drm_atomic_state *state);
@@ -78,23 +87,39 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags);
+void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+ int mode);
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
@@ -123,4 +148,41 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+/*
+ * drm_atomic_plane_disabling - check whether a plane is being disabled
+ * @plane: plane object
+ * @old_state: previous atomic state
+ *
+ * Checks the atomic state of a plane to determine whether it's being disabled
+ * or not. This also WARNs if it detects an invalid state (the CRTC and FB
+ * must either both be NULL or both be non-NULL).
+ *
+ * RETURNS:
+ * True if the plane is being disabled, false otherwise.
+ */
+static inline bool
+drm_atomic_plane_disabling(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ /*
+ * When disabling a plane, CRTC and FB should always be NULL together.
+ * Anything else should be considered a bug in the atomic core, so we
+ * gently warn about it.
+ */
+ WARN_ON((plane->state->crtc == NULL && plane->state->fb != NULL) ||
+ (plane->state->crtc != NULL && plane->state->fb == NULL));
+
+ /*
+ * When using the transitional helpers, old_state may be NULL. If so,
+ * we know nothing about the current state and have to assume that it
+ * might be enabled.
+ *
+ * When using the atomic helpers, old_state won't be NULL. Therefore
+ * this check assumes that either the driver will have reconstructed
+ * the correct state in ->reset() or that the driver will have taken
+ * appropriate measures to disable all planes.
+ */
+ return (!old_state || old_state->crtc) && !plane->state->crtc;
+}
+
#endif /* DRM_ATOMIC_HELPER_H_ */
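The newly exported __drm_atomic_helper_*_duplicate_state() and __drm_atomic_helper_*_destroy_state() variants exist so drivers can subclass the state structures while reusing the core copy and cleanup logic. A sketch for a CRTC; struct foo_crtc_state and its extra field are purely hypothetical:

struct foo_crtc_state {
    struct drm_crtc_state base;
    u32 scaler_config;        /* hypothetical driver-private field */
};

static struct drm_crtc_state *foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
    struct foo_crtc_state *state;

    state = kmemdup(container_of(crtc->state, struct foo_crtc_state, base),
                    sizeof(*state), GFP_KERNEL);
    if (!state)
        return NULL;

    /* copies the core CRTC state again and clears the per-commit flags */
    __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

    return &state->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
                                   struct drm_crtc_state *state)
{
    __drm_atomic_helper_crtc_destroy_state(crtc, state);
    kfree(container_of(state, struct foo_crtc_state, base));
}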
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b863298..57ca8cc 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -31,6 +31,7 @@
#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
+#include <linux/media-bus-format.h>
#include <uapi/drm/drm_mode.h>
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
@@ -52,7 +53,6 @@ struct fence;
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
-#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
#define DRM_MODE_OBJECT_ANY 0
struct drm_mode_object {
@@ -63,8 +63,16 @@ struct drm_mode_object {
#define DRM_OBJECT_MAX_PROPERTY 24
struct drm_object_properties {
- int count;
- uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+ int count, atomic_count;
+ /* NOTE: if we ever start dynamically destroying properties (ie.
+ * not at drm_mode_config_cleanup() time), then we'd have to do
+ * a better job of detaching property from mode objects to avoid
+ * dangling property pointers:
+ */
+ struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+ /* do not read/write values directly, but use drm_object_property_get_value()
+ * and drm_object_property_set_value():
+ */
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
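Since property values are no longer meant to be read straight out of the values[] array, lookups should go through the core accessor. A minimal sketch, assuming the driver has created the shared rotation_property; the foo_* naming is hypothetical and drm_object_property_get_value() is the existing core helper:

static unsigned int foo_plane_current_rotation(struct drm_plane *plane)
{
    uint64_t val = 0;

    /* read the cached value through the core accessor, not values[] */
    drm_object_property_get_value(&plane->base,
                                  plane->dev->mode_config.rotation_property,
                                  &val);
    return val;
}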
@@ -131,6 +139,9 @@ struct drm_display_info {
enum subpixel_order subpixel_order;
u32 color_formats;
+ const u32 *bus_formats;
+ unsigned int num_bus_formats;
+
/* Mask of supported hdmi deep color modes */
u8 edid_hdmi_dc_modes;
@@ -190,6 +201,7 @@ struct drm_framebuffer {
const struct drm_framebuffer_funcs *funcs;
unsigned int pitches[4];
unsigned int offsets[4];
+ uint64_t modifier[4];
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */
@@ -204,7 +216,10 @@ struct drm_framebuffer {
struct drm_property_blob {
struct drm_mode_object base;
- struct list_head head;
+ struct drm_device *dev;
+ struct kref refcount;
+ struct list_head head_global;
+ struct list_head head_file;
size_t length;
unsigned char data[];
};
@@ -237,8 +252,11 @@ struct drm_atomic_state;
/**
* struct drm_crtc_state - mutable CRTC state
+ * @crtc: backpointer to the CRTC
* @enable: whether the CRTC should be enabled, gates all other state
+ * @active: whether the CRTC is actively displaying (used for DPMS)
* @mode_changed: for use by helpers and drivers when computing state updates
+ * @active_changed: for use by helpers and drivers when computing state updates
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
@@ -248,13 +266,23 @@ struct drm_atomic_state;
* @event: optional pointer to a DRM event to signal upon completion of the
* state update
* @state: backpointer to global drm_atomic_state
+ *
+ * Note that the distinction between @enable and @active is rather subtle:
+ * Flipping @active while @enable is set, without changing anything else, must
+ * never fail in the ->atomic_check callback. Userspace assumes
+ * that a DPMS On will always succeed. In other words: @enable controls resource
+ * assignment, @active controls the actual hardware state.
*/
struct drm_crtc_state {
+ struct drm_crtc *crtc;
+
bool enable;
+ bool active;
/* computed state bits used by helpers and drivers */
bool planes_changed : 1;
bool mode_changed : 1;
+ bool active_changed : 1;
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
@@ -271,6 +299,9 @@ struct drm_crtc_state {
struct drm_display_mode mode;
+ /* blob property to expose current mode to atomic userspace */
+ struct drm_property_blob *mode_blob;
+
struct drm_pending_vblank_event *event;
struct drm_atomic_state *state;
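Because userspace expects DPMS-style @active flips to always succeed, a driver's CRTC atomic_check must not reject a commit that only toggles @active. A hedged sketch of how that distinction might look; the foo_* function is hypothetical and real validation is elided:

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_crtc_state *state)
{
    /*
     * @active flipped but @enable (resource assignment) unchanged:
     * this is the DPMS path and must never fail.
     */
    if (state->active_changed && !state->mode_changed)
        return 0;

    /* full validation for real modesets would go here */
    return 0;
}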
@@ -292,6 +323,9 @@ struct drm_crtc_state {
* @atomic_duplicate_state: duplicate the atomic state for this CRTC
* @atomic_destroy_state: destroy an atomic state for this CRTC
* @atomic_set_property: set a property on an atomic state for this CRTC
+ * (do not call directly, use drm_atomic_crtc_set_property())
+ * @atomic_get_property: get a property on an atomic state for this CRTC
+ * (do not call directly, use drm_atomic_crtc_get_property())
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -351,6 +385,10 @@ struct drm_crtc_funcs {
struct drm_crtc_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
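The new atomic_get_property hook only needs to resolve driver-private properties; the standard ones are handled by the core. A sketch with a hypothetical per-CRTC dithering property (struct foo_crtc_state, to_foo_crtc() and dither_property are all made up for illustration):

struct foo_crtc_state {
    struct drm_crtc_state base;    /* must stay the first member */
    bool dither;                   /* hypothetical driver setting */
};

static int foo_crtc_atomic_get_property(struct drm_crtc *crtc,
                                        const struct drm_crtc_state *state,
                                        struct drm_property *property,
                                        uint64_t *val)
{
    /* base is the first member, so this cast is equivalent to container_of */
    const struct foo_crtc_state *fstate =
        (const struct foo_crtc_state *)state;
    struct foo_crtc *foo = to_foo_crtc(crtc);    /* hypothetical */

    if (property == foo->dither_property) {
        *val = fstate->dither;
        return 0;
    }

    return -EINVAL;    /* standard properties are resolved by the core */
}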
/**
@@ -434,7 +472,7 @@ struct drm_crtc {
int framedur_ns, linedur_ns, pixeldur_ns;
/* if you are using the helper */
- void *helper_private;
+ const void *helper_private;
struct drm_object_properties properties;
@@ -449,11 +487,14 @@ struct drm_crtc {
/**
* struct drm_connector_state - mutable connector state
+ * @connector: backpointer to the connector
* @crtc: CRTC to connect connector to, NULL if disabled
* @best_encoder: can be used by helpers and drivers to select the encoder
* @state: backpointer to global drm_atomic_state
*/
struct drm_connector_state {
+ struct drm_connector *connector;
+
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
struct drm_encoder *best_encoder;
@@ -463,7 +504,7 @@ struct drm_connector_state {
/**
* struct drm_connector_funcs - control connectors on a given device
- * @dpms: set power state (see drm_crtc_funcs above)
+ * @dpms: set power state
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidated (e.g. resume)
@@ -475,6 +516,9 @@ struct drm_connector_state {
* @atomic_duplicate_state: duplicate the atomic state for this connector
* @atomic_destroy_state: destroy an atomic state for this connector
* @atomic_set_property: set a property on an atomic state for this connector
+ * (do not call directly, use drm_atomic_connector_set_property())
+ * @atomic_get_property: get a property on an atomic state for this connector
+ * (do not call directly, use drm_atomic_connector_get_property())
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
@@ -508,6 +552,10 @@ struct drm_connector_funcs {
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
/**
@@ -554,7 +602,7 @@ struct drm_encoder {
struct drm_crtc *crtc;
struct drm_bridge *bridge;
const struct drm_encoder_funcs *funcs;
- void *helper_private;
+ const void *helper_private;
};
/* should we poll this connector for connects and disconnects */
@@ -605,6 +653,7 @@ struct drm_encoder {
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @edid_corrupt: indicates whether the last read EDID was corrupt
* @debugfs_entry: debugfs directory for this connector
* @state: current atomic state for this connector
* @has_tile: is this connector connected to a tiled monitor
@@ -658,7 +707,7 @@ struct drm_connector {
/* requested DPMS state */
int dpms;
- void *helper_private;
+ const void *helper_private;
/* forced on connector */
struct drm_cmdline_mode cmdline_mode;
@@ -677,6 +726,11 @@ struct drm_connector {
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter;
+ /* Flag for raw EDID header corruption - used in Displayport
+ * compliance testing - Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+ */
+ bool edid_corrupt;
+
struct dentry *debugfs_entry;
struct drm_connector_state *state;
@@ -689,10 +743,13 @@ struct drm_connector {
uint8_t num_h_tile, num_v_tile;
uint8_t tile_h_loc, tile_v_loc;
uint16_t tile_h_size, tile_v_size;
+
+ struct list_head destroy_list;
};
/**
* struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
* @crtc: currently bound CRTC, NULL if disabled
* @fb: currently bound framebuffer
* @fence: optional fence to wait for before scanning out @fb
@@ -709,6 +766,8 @@ struct drm_connector {
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
+ struct drm_plane *plane;
+
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
struct fence *fence;
@@ -721,6 +780,9 @@ struct drm_plane_state {
uint32_t src_x, src_y;
uint32_t src_h, src_w;
+ /* Plane rotation */
+ unsigned int rotation;
+
struct drm_atomic_state *state;
};
@@ -735,6 +797,9 @@ struct drm_plane_state {
* @atomic_duplicate_state: duplicate the atomic state for this plane
* @atomic_destroy_state: destroy an atomic state for this plane
* @atomic_set_property: set a property on an atomic state for this plane
+ * (do not call directly, use drm_atomic_plane_set_property())
+ * @atomic_get_property: get a property on an atomic state for this plane
+ * (do not call directly, use drm_atomic_plane_get_property())
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
@@ -758,6 +823,10 @@ struct drm_plane_funcs {
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
+ int (*atomic_get_property)(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val);
};
enum drm_plane_type {
@@ -774,6 +843,7 @@ enum drm_plane_type {
* @possible_crtcs: pipes this plane can be bound to
* @format_types: array of formats supported by this plane
* @format_count: number of formats supported
+ * @format_default: driver hasn't supplied supported formats for the plane
* @crtc: currently bound CRTC
* @fb: currently bound fb
* @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
@@ -794,6 +864,7 @@ struct drm_plane {
uint32_t possible_crtcs;
uint32_t *format_types;
uint32_t format_count;
+ bool format_default;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
@@ -806,22 +877,23 @@ struct drm_plane {
enum drm_plane_type type;
- void *helper_private;
+ const void *helper_private;
struct drm_plane_state *state;
};
/**
* struct drm_bridge_funcs - drm_bridge control functions
+ * @attach: Called during drm_bridge_attach
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
* @disable: Called right before encoder prepare, disables the bridge
* @post_disable: Called right after encoder prepare, for lockstepped disable
* @mode_set: Set this mode to the bridge
* @pre_enable: Called right before encoder commit, for lockstepped commit
* @enable: Called right after encoder commit, enables the bridge
- * @destroy: make object go away
*/
struct drm_bridge_funcs {
+ int (*attach)(struct drm_bridge *bridge);
bool (*mode_fixup)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
@@ -832,31 +904,37 @@ struct drm_bridge_funcs {
struct drm_display_mode *adjusted_mode);
void (*pre_enable)(struct drm_bridge *bridge);
void (*enable)(struct drm_bridge *bridge);
- void (*destroy)(struct drm_bridge *bridge);
};
/**
* struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
- * @head: list management
+ * @encoder: encoder to which this bridge is connected
+ * @next: the next bridge in the encoder chain
+ * @of_node: device node pointer to the bridge
+ * @list: to keep track of all added bridges
* @base: base mode object
* @funcs: control functions
* @driver_private: pointer to the bridge driver's internal context
*/
struct drm_bridge {
struct drm_device *dev;
- struct list_head head;
-
- struct drm_mode_object base;
+ struct drm_encoder *encoder;
+ struct drm_bridge *next;
+#ifdef CONFIG_OF
+ struct device_node *of_node;
+#endif
+ struct list_head list;
const struct drm_bridge_funcs *funcs;
void *driver_private;
};
/**
- * struct struct drm_atomic_state - the global state object for atomic updates
+ * struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
- * @flags: state flags like async update
+ * @allow_modeset: allow full modeset
+ * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
@@ -868,7 +946,8 @@ struct drm_bridge {
*/
struct drm_atomic_state {
struct drm_device *dev;
- uint32_t flags;
+ bool allow_modeset : 1;
+ bool legacy_cursor_update : 1;
struct drm_plane **planes;
struct drm_plane_state **plane_states;
struct drm_crtc **crtcs;
@@ -912,9 +991,12 @@ struct drm_mode_set {
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
- * @atomic_check: check whether a give atomic state update is possible
+ * @atomic_check: check whether a given atomic state update is possible
* @atomic_commit: commit an atomic state update previously verified with
* atomic_check()
+ * @atomic_state_alloc: allocate a new atomic state
+ * @atomic_state_clear: clear the atomic state
+ * @atomic_state_free: free the atomic state
*
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
* involve drivers.
@@ -930,6 +1012,9 @@ struct drm_mode_config_funcs {
int (*atomic_commit)(struct drm_device *dev,
struct drm_atomic_state *a,
bool async);
+ struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
+ void (*atomic_state_clear)(struct drm_atomic_state *state);
+ void (*atomic_state_free)(struct drm_atomic_state *state);
};
/**
@@ -950,7 +1035,6 @@ struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
uint32_t num_connectors;
- uint32_t num_bridges;
/* list of object IDs for this group */
uint32_t *id_list;
@@ -969,8 +1053,6 @@ struct drm_mode_group {
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
* @connector_list: list of connector objects
- * @num_bridge: number of bridges on this device
- * @bridge_list: list of bridge objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
* @num_overlay_plane: number of overlay planes on this device
@@ -989,6 +1071,7 @@ struct drm_mode_group {
* @poll_running: track polling status for this device
* @output_poll_work: delayed work for polling in process context
* @property_blob_list: list of all the blob property objects
+ * @blob_lock: mutex for blob property allocation and management
* @*_property: core property tracking
* @preferred_depth: preferred RGB pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
@@ -1015,8 +1098,6 @@ struct drm_mode_config {
int num_connector;
struct list_head connector_list;
- int num_bridge;
- struct list_head bridge_list;
int num_encoder;
struct list_head encoder_list;
@@ -1043,8 +1124,11 @@ struct drm_mode_config {
/* output poll support */
bool poll_enabled;
bool poll_running;
+ bool delayed_event;
struct delayed_work output_poll_work;
+ struct mutex blob_lock;
+
/* pointers to standard properties */
struct list_head property_blob_list;
struct drm_property *edid_property;
@@ -1053,6 +1137,18 @@ struct drm_mode_config {
struct drm_property *tile_property;
struct drm_property *plane_type_property;
struct drm_property *rotation_property;
+ struct drm_property *prop_src_x;
+ struct drm_property *prop_src_y;
+ struct drm_property *prop_src_w;
+ struct drm_property *prop_src_h;
+ struct drm_property *prop_crtc_x;
+ struct drm_property *prop_crtc_y;
+ struct drm_property *prop_crtc_w;
+ struct drm_property *prop_crtc_h;
+ struct drm_property *prop_fb_id;
+ struct drm_property *prop_crtc_id;
+ struct drm_property *prop_active;
+ struct drm_property *prop_mode_id;
/* DVI-I properties */
struct drm_property *dvi_i_subconnector_property;
@@ -1088,6 +1184,9 @@ struct drm_mode_config {
/* whether async page flip is supported or not */
bool async_page_flip;
+ /* whether the driver supports fb modifiers */
+ bool allow_fb_modifiers;
+
/* cursor size */
uint32_t cursor_width, cursor_height;
};
@@ -1153,9 +1252,21 @@ extern unsigned int drm_connector_index(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
-extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
- const struct drm_bridge_funcs *funcs);
-extern void drm_bridge_cleanup(struct drm_bridge *bridge);
+extern int drm_bridge_add(struct drm_bridge *bridge);
+extern void drm_bridge_remove(struct drm_bridge *bridge);
+extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
+
+bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void drm_bridge_disable(struct drm_bridge *bridge);
+void drm_bridge_post_disable(struct drm_bridge *bridge);
+void drm_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void drm_bridge_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_enable(struct drm_bridge *bridge);
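The reworked bridge API splits registration (drm_bridge_add(), typically from the bridge driver's probe) from attachment to an encoder (drm_bridge_attach(), from the display driver). A hedged sketch of the display-driver side, roughly how existing drivers wire it up; the function name and error handling are illustrative only:

static int foo_attach_bridge(struct drm_device *dev,
                             struct drm_encoder *encoder,
                             struct device_node *remote_node)
{
    struct drm_bridge *bridge = of_drm_find_bridge(remote_node);

    if (!bridge)
        return -EPROBE_DEFER;    /* bridge driver not bound yet */

    /* wire up the encoder <-> bridge link, then attach to the device */
    encoder->bridge = bridge;
    bridge->encoder = encoder;
    return drm_bridge_attach(dev, bridge);
}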
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
@@ -1190,7 +1301,12 @@ extern int drm_plane_init(struct drm_device *dev,
bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
+extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
extern void drm_plane_force_disable(struct drm_plane *plane);
+extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
+ u32 format);
+extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
@@ -1206,6 +1322,8 @@ extern const char *drm_get_dvi_i_select_name(int val);
extern const char *drm_get_tv_subconnector_name(int val);
extern const char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
+extern void drm_property_destroy_user_blobs(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern void drm_mode_group_destroy(struct drm_mode_group *group);
extern void drm_reinit_primary_mode_group(struct drm_device *dev);
@@ -1224,6 +1342,10 @@ int drm_mode_connector_set_tile_property(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
+extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
+ const u32 *formats,
+ unsigned int num_formats);
+
static inline bool drm_property_type_is(struct drm_property *property,
uint32_t type)
{
@@ -1279,6 +1401,15 @@ struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
int64_t min, int64_t max);
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+ const char *name);
+struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
+ size_t length,
+ const void *data);
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+ uint32_t id);
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
+void drm_property_unreference_blob(struct drm_property_blob *blob);
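Blob properties are now refcounted objects created and released with the functions declared above. A sketch of wrapping a display mode in a blob and storing it in the CRTC state, roughly the pattern used for the new MODE_ID property; the function name is hypothetical and the error check is kept deliberately defensive since the exact failure convention (NULL vs. ERR_PTR) is version-dependent:

static int foo_set_mode_blob(struct drm_device *dev,
                             struct drm_crtc_state *crtc_state)
{
    struct drm_mode_modeinfo umode;
    struct drm_property_blob *blob;

    drm_mode_convert_to_umode(&umode, &crtc_state->mode);

    blob = drm_property_create_blob(dev, sizeof(umode), &umode);
    if (IS_ERR_OR_NULL(blob))
        return blob ? PTR_ERR(blob) : -ENOMEM;

    /* drop any previous blob, keep the reference on the new one */
    drm_property_unreference_blob(crtc_state->mode_blob);
    crtc_state->mode_blob = blob;

    return 0;
}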
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
@@ -1290,6 +1421,10 @@ extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
+extern bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value, struct drm_mode_object **ref);
+extern void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -1334,6 +1469,10 @@ extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+extern int drm_mode_createblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_destroyblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getencoder(struct drm_device *dev,
@@ -1355,7 +1494,8 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
-extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+ bool *edid_corrupt);
extern bool drm_edid_is_valid(struct edid *edid);
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
@@ -1381,6 +1521,8 @@ extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value);
+extern int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
@@ -1436,14 +1578,6 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
return mo ? obj_to_property(mo) : NULL;
}
-static inline struct drm_property_blob *
-drm_property_blob_find(struct drm_device *dev, uint32_t id)
-{
- struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
- return mo ? obj_to_blob(mo) : NULL;
-}
-
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, planelist) \
list_for_each_entry(plane, planelist, head) \
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 7adbb65..c8fc187 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -39,17 +39,38 @@
#include <linux/fb.h>
+#include <drm/drm_crtc.h>
+
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
ENTER_ATOMIC_MODE_SET,
};
/**
- * drm_crtc_helper_funcs - helper operations for CRTCs
- * @mode_fixup: try to fixup proposed mode for this connector
+ * struct drm_crtc_helper_funcs - helper operations for CRTCs
+ * @dpms: set power state
+ * @prepare: prepare the CRTC, called before @mode_set
+ * @commit: commit changes to CRTC, called after @mode_set
+ * @mode_fixup: try to fixup proposed mode for this CRTC
* @mode_set: set this mode
+ * @mode_set_nofb: set mode only (no scanout buffer attached)
+ * @mode_set_base: update the scanout buffer
+ * @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
+ * @load_lut: load color palette
+ * @disable: disable CRTC when no longer in use
+ * @enable: enable CRTC
+ * @atomic_check: check for validity of an atomic state
+ * @atomic_begin: begin atomic update
+ * @atomic_flush: flush atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
+ *
+ * Note that with atomic helpers the @dpms, @prepare and @commit hooks are
+ * deprecated. Use @enable and @disable exclusively instead.
+ *
+ * With legacy crtc helpers there's a big semantic difference between @disable
+ * and the other hooks: @disable also needs to release any resources acquired in
+ * @mode_set (like shared PLLs).
*/
struct drm_crtc_helper_funcs {
/*
@@ -68,6 +89,7 @@ struct drm_crtc_helper_funcs {
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
+ /* Actually set the mode for atomic helpers, optional */
void (*mode_set_nofb)(struct drm_crtc *crtc);
/* Move the crtc on the current fb to the given position *optional* */
@@ -80,8 +102,8 @@ struct drm_crtc_helper_funcs {
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
- /* disable crtc when not in use - more explicit than dpms off */
void (*disable)(struct drm_crtc *crtc);
+ void (*enable)(struct drm_crtc *crtc);
/* atomic helpers */
int (*atomic_check)(struct drm_crtc *crtc,
@@ -91,11 +113,28 @@ struct drm_crtc_helper_funcs {
};
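With the legacy @dpms/@prepare/@commit hooks deprecated for atomic drivers, a CRTC helper vtable typically reduces to the entries below; all foo_* callbacks are hypothetical:

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
    .mode_set_nofb  = foo_crtc_mode_set_nofb,  /* program timings, no fb */
    .atomic_check   = foo_crtc_atomic_check,
    .atomic_begin   = foo_crtc_atomic_begin,
    .atomic_flush   = foo_crtc_atomic_flush,
    .enable         = foo_crtc_enable,
    .disable        = foo_crtc_disable,
};

/* registered with drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs); */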
/**
- * drm_encoder_helper_funcs - helper operations for encoders
+ * struct drm_encoder_helper_funcs - helper operations for encoders
+ * @dpms: set power state
+ * @save: save connector state
+ * @restore: restore connector state
* @mode_fixup: try to fixup proposed mode for this connector
- * @mode_set: set this mode
+ * @prepare: part of the disable sequence, called before the CRTC modeset
+ * @commit: called after the CRTC modeset
+ * @mode_set: set this mode, optional for atomic helpers
+ * @get_crtc: return CRTC that the encoder is currently attached to
+ * @detect: connection status detection
+ * @disable: disable encoder when not in use (overrides DPMS off)
+ * @enable: enable encoder
+ * @atomic_check: check for validity of an atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
+ *
+ * Note that with atomic helpers the @dpms, @prepare and @commit hooks are
+ * deprecated. Use @enable and @disable exclusively instead.
+ *
+ * With legacy crtc helpers there's a big semantic difference between @disable
+ * and the other hooks: @disable also needs to release any resources acquired in
+ * @mode_set (like shared PLLs).
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
@@ -114,14 +153,21 @@ struct drm_encoder_helper_funcs {
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
- /* disable encoder when not in use - more explicit than dpms off */
void (*disable)(struct drm_encoder *encoder);
+
+ void (*enable)(struct drm_encoder *encoder);
+
+ /* atomic helpers */
+ int (*atomic_check)(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
};
/**
- * drm_connector_helper_funcs - helper operations for connectors
+ * struct drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
- * @mode_valid (optional): is this mode valid on the given connector?
+ * @mode_valid: is this mode valid on the given connector? (optional)
+ * @best_encoder: return the preferred encoder for this connector
*
* The helper operations are called by the mid-layer CRTC helper.
*/
@@ -151,19 +197,19 @@ extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
- crtc->helper_private = (void *)funcs;
+ crtc->helper_private = funcs;
}
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
- encoder->helper_private = (void *)funcs;
+ encoder->helper_private = funcs;
}
static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
- connector->helper_private = (void *)funcs;
+ connector->helper_private = funcs;
}
extern void drm_helper_resume_force_mode(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 11f8c84..2e86f64 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -42,6 +42,8 @@
* 1.2 formally includes both eDP and DPI definitions.
*/
+#define DP_AUX_MAX_PAYLOAD_BYTES 16
+
#define DP_AUX_I2C_WRITE 0x0
#define DP_AUX_I2C_READ 0x1
#define DP_AUX_I2C_STATUS 0x2
@@ -92,6 +94,15 @@
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
+#define DP_RECEIVE_PORT_0_CAP_0 0x008
+# define DP_LOCAL_EDID_PRESENT (1 << 1)
+# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+
+#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
+
+#define DP_RECEIVE_PORT_1_CAP_0 0x00a
+#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b
+
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
@@ -101,8 +112,19 @@
# define DP_I2C_SPEED_1M 0x20
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0)
+# define DP_FRAMING_CHANGE_CAP (1 << 1)
+# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
+
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+#define DP_ADAPTER_CAP 0x00f /* 1.2 */
+# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
+# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1)
+
+#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
+# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */
+
/* Multiple stream transport */
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
@@ -110,10 +132,56 @@
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
+#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
+
+/* AV_SYNC_DATA_BLOCK 1.2 */
+#define DP_AV_GRANULARITY 0x023
+# define DP_AG_FACTOR_MASK (0xf << 0)
+# define DP_AG_FACTOR_3MS (0 << 0)
+# define DP_AG_FACTOR_2MS (1 << 0)
+# define DP_AG_FACTOR_1MS (2 << 0)
+# define DP_AG_FACTOR_500US (3 << 0)
+# define DP_AG_FACTOR_200US (4 << 0)
+# define DP_AG_FACTOR_100US (5 << 0)
+# define DP_AG_FACTOR_10US (6 << 0)
+# define DP_AG_FACTOR_1US (7 << 0)
+# define DP_VG_FACTOR_MASK (0xf << 4)
+# define DP_VG_FACTOR_3MS (0 << 4)
+# define DP_VG_FACTOR_2MS (1 << 4)
+# define DP_VG_FACTOR_1MS (2 << 4)
+# define DP_VG_FACTOR_500US (3 << 4)
+# define DP_VG_FACTOR_200US (4 << 4)
+# define DP_VG_FACTOR_100US (5 << 4)
+
+#define DP_AUD_DEC_LAT0 0x024
+#define DP_AUD_DEC_LAT1 0x025
+
+#define DP_AUD_PP_LAT0 0x026
+#define DP_AUD_PP_LAT1 0x027
+
+#define DP_VID_INTER_LAT 0x028
+
+#define DP_VID_PROG_LAT 0x029
+
+#define DP_REP_LAT 0x02a
+
+#define DP_AUD_DEL_INS0 0x02b
+#define DP_AUD_DEL_INS1 0x02c
+#define DP_AUD_DEL_INS2 0x02d
+/* End of AV_SYNC_DATA_BLOCK */
+
+#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
+# define DP_ALPM_CAP (1 << 0)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
+
#define DP_GUID 0x030 /* 1.2 */
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
+# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
@@ -153,6 +221,7 @@
/* link configuration */
#define DP_LINK_BW_SET 0x100
+# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
@@ -168,11 +237,12 @@
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
-# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
-# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
-# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
-# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
-# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
+# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2)
# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
@@ -215,17 +285,63 @@
/* bitmask as for DP_I2C_SPEED_CAP */
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0)
+# define DP_FRAMING_CHANGE_ENABLE (1 << 1)
+# define DP_PANEL_SELF_TEST_ENABLE (1 << 7)
+
+#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */
+#define DP_LINK_QUAL_LANE1_SET 0x10c
+#define DP_LINK_QUAL_LANE2_SET 0x10d
+#define DP_LINK_QUAL_LANE3_SET 0x10e
+# define DP_LINK_QUAL_PATTERN_DISABLE 0
+# define DP_LINK_QUAL_PATTERN_D10_2 1
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
+# define DP_LINK_QUAL_PATTERN_PRBS7 3
+# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
+# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
+# define DP_LINK_QUAL_PATTERN_MASK 7
+
+#define DP_TRAINING_LANE0_1_SET2 0x10f
+#define DP_TRAINING_LANE2_3_SET2 0x110
+# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0)
+# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
+# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4)
+# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
+#define DP_AUDIO_DELAY0 0x112 /* 1.2 */
+#define DP_AUDIO_DELAY1 0x113
+#define DP_AUDIO_DELAY2 0x114
+
+#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */
+# define DP_LINK_RATE_SET_SHIFT 0
+# define DP_LINK_RATE_SET_MASK (7 << 0)
+
+#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
+# define DP_ALPM_ENABLE (1 << 0)
+# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
+# define DP_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
+# define DP_PWR_NOT_NEEDED (1 << 0)
+
+#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
+
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
+# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
+# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
@@ -332,6 +448,49 @@
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
+#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
+# define DP_EDP_11 0x00
+# define DP_EDP_12 0x01
+# define DP_EDP_13 0x02
+# define DP_EDP_14 0x03
+
+#define DP_EDP_GENERAL_CAP_1 0x701
+
+#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
+
+#define DP_EDP_GENERAL_CAP_2 0x703
+
+#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
+
+#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
+
+#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
+
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
+
+#define DP_EDP_PWMGEN_BIT_COUNT 0x724
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726
+
+#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727
+
+#define DP_EDP_BACKLIGHT_FREQ_SET 0x728
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f
+
+#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
+#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+
+#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
+#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
@@ -350,6 +509,7 @@
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
@@ -363,6 +523,9 @@
# define DP_PSR_SINK_INTERNAL_ERROR 7
# define DP_PSR_SINK_STATE_MASK 0x07
+#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
+# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -516,9 +679,12 @@ struct drm_dp_aux_msg {
* An AUX channel can also be used to transport I2C messages to a sink. A
* typical application of that is to access an EDID that's present in the
* sink device. The .transfer() function can also be used to execute such
- * transactions. The drm_dp_aux_register_i2c_bus() function registers an
- * I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
- * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
+ * transactions. The drm_dp_aux_register() function registers an I2C
+ * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
+ * should call drm_dp_aux_unregister() to remove the I2C adapter.
+ * The I2C adapter uses long transfers by default; if a partial response is
+ * received, the adapter will drop down to the size given by the partial
+ * response for this transaction only.
*
* Note that the aux helper code assumes that the .transfer() function
* only modifies the reply field of the drm_dp_aux_msg structure. The
@@ -586,6 +752,7 @@ struct drm_dp_link {
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_aux_register(struct drm_dp_aux *aux);
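The AUX channel is driven through struct drm_dp_aux: the driver fills in .transfer() and registers the channel, after which both native DPCD access and the bundled I2C adapter (e.g. for EDID via drm_probe_ddc()) are available. A hedged sketch; struct foo_dp and foo_aux_transfer() are hypothetical:

static int foo_dp_init_aux(struct foo_dp *foo)
{
    u8 dpcd[DP_RECEIVER_CAP_SIZE];
    int ret;

    foo->aux.name = "foo DP AUX";
    foo->aux.dev = foo->dev;
    foo->aux.transfer = foo_aux_transfer;    /* hardware-specific AUX xfer */

    ret = drm_dp_aux_register(&foo->aux);    /* also registers the I2C adapter */
    if (ret < 0)
        return ret;

    /* native AUX read of the receiver capability block */
    ret = drm_dp_dpcd_read(&foo->aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
    return ret < 0 ? ret : 0;
}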
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 00c1da9..86d0b25 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -463,6 +463,10 @@ struct drm_dp_mst_topology_mgr {
struct work_struct work;
struct work_struct tx_work;
+
+ struct list_head destroy_connector_list;
+ struct mutex destroy_connector_lock;
+ struct work_struct destroy_connector_work;
};
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
@@ -486,6 +490,8 @@ int drm_dp_calc_pbn_mode(int clock, int bpp);
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 87d85e8..7990501 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -215,6 +215,8 @@ struct detailed_timing {
#define DRM_ELD_VER 0
# define DRM_ELD_VER_SHIFT 3
# define DRM_ELD_VER_MASK (0x1f << 3)
+# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
+# define DRM_ELD_VER_CANNED (0x1f << 3)
#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index b597068..0dfd94def 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -44,6 +44,25 @@ struct drm_fb_helper_crtc {
int x, y;
};
+/**
+ * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
+ * @fb_width: fbdev width
+ * @fb_height: fbdev height
+ * @surface_width: scanout buffer width
+ * @surface_height: scanout buffer height
+ * @surface_bpp: scanout buffer bpp
+ * @surface_depth: scanout buffer depth
+ *
+ * Note that the scanout surface width/height may be larger than the fbdev
+ * width/height. In case of multiple displays, the scanout surface is sized
+ * according to the largest width/height (so it is large enough for all CRTCs
+ * to scanout). But the fbdev width/height is sized to the minimum width/
+ * height of all the displays. This ensures that fbcon fits on the smallest
+ * of the attached displays.
+ *
+ * So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height,
+ * rather than the surface size.
+ */
struct drm_fb_helper_surface_size {
u32 fb_width;
u32 fb_height;
@@ -125,7 +144,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
-bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
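The drm_fb_helper_surface_size comment boils down to: size the scanout buffer from the surface_* fields, but report fb_width/fb_height to fbcon. A sketch of the relevant part of a .fb_probe() implementation; buffer allocation and the rest of the fb_info setup are elided, and the function name is hypothetical:

static int foo_fbdev_probe(struct drm_fb_helper *helper,
                           struct drm_fb_helper_surface_size *sizes)
{
    struct fb_info *info = helper->fbdev;    /* assumed already allocated */
    u32 pitch = sizes->surface_width * DIV_ROUND_UP(sizes->surface_bpp, 8);

    /* the fixed info uses the (possibly larger) scanout surface geometry... */
    drm_fb_helper_fill_fix(info, pitch, sizes->surface_depth);
    /* ...but the variable info uses the smallest display's size */
    drm_fb_helper_fill_var(info, helper, sizes->fb_width, sizes->fb_height);

    return 0;
}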
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 1e6ae14..7a592d7 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -149,14 +149,16 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
- if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev;
+
+ if (!obj)
+ return;
- mutex_lock(&dev->struct_mutex);
- if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
- drm_gem_object_free(&obj->refcount);
+ dev = obj->dev;
+ if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
- }
+ else
+ might_lock(&dev->struct_mutex);
}
int drm_gem_handle_create(struct drm_file *file_priv,
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 19a2404..e42495a 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -56,10 +56,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
static __inline void drm_free_large(void *ptr)
{
- if (!is_vmalloc_addr(ptr))
- return kfree(ptr);
-
- vfree(ptr);
+ kvfree(ptr);
}
#endif
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index a24addf..0de6290 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -68,8 +68,8 @@ struct drm_mm_node {
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
unsigned long color;
- unsigned long start;
- unsigned long size;
+ u64 start;
+ u64 size;
struct drm_mm *mm;
};
@@ -82,16 +82,16 @@ struct drm_mm {
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
- unsigned long scan_size;
- unsigned long scan_hit_start;
- unsigned long scan_hit_end;
+ u64 scan_size;
+ u64 scan_hit_start;
+ u64 scan_hit_end;
unsigned scanned_blocks;
- unsigned long scan_start;
- unsigned long scan_end;
+ u64 scan_start;
+ u64 scan_end;
struct drm_mm_node *prev_scanned_node;
void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
- unsigned long *start, unsigned long *end);
+ u64 *start, u64 *end);
};
/**
@@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
return mm->hole_stack.next;
}
-static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
return hole_node->start + hole_node->size;
}
@@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no
* Returns:
* Start of the subsequent hole.
*/
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
BUG_ON(!hole_node->hole_follows);
return __drm_mm_hole_node_start(hole_node);
}
-static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return list_entry(hole_node->node_list.next,
struct drm_mm_node, node_list)->start;
@@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node
* Returns:
* End of the subsequent hole.
*/
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return __drm_mm_hole_node_end(hole_node);
}
@@ -222,7 +222,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_generic(struct drm_mm *mm,
struct drm_mm_node *node,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags sflags,
@@ -245,7 +245,7 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
*/
static inline int drm_mm_insert_node(struct drm_mm *mm,
struct drm_mm_node *node,
- unsigned long size,
+ u64 size,
unsigned alignment,
enum drm_mm_search_flags flags)
{
@@ -255,11 +255,11 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
struct drm_mm_node *node,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color,
- unsigned long start,
- unsigned long end,
+ u64 start,
+ u64 end,
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags);
/**
@@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
*/
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
struct drm_mm_node *node,
- unsigned long size,
+ u64 size,
unsigned alignment,
- unsigned long start,
- unsigned long end,
+ u64 start,
+ u64 end,
enum drm_mm_search_flags flags)
{
return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
@@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
- unsigned long start,
- unsigned long size);
+ u64 start,
+ u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);
void drm_mm_init_scan(struct drm_mm *mm,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
- unsigned long size,
+ u64 size,
unsigned alignment,
unsigned long color,
- unsigned long start,
- unsigned long end);
+ u64 start,
+ u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
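With the range arithmetic switched to u64, a drm_mm manager can span more than 4 GiB even on 32-bit hosts. A minimal usage sketch under that assumption; the range and node sizes are arbitrary examples and the function is hypothetical:

static int foo_mm_smoke_test(void)
{
    struct drm_mm mm;
    struct drm_mm_node node = {};
    int ret;

    drm_mm_init(&mm, 0, 8ULL << 30);    /* 8 GiB range, fine on 32-bit */

    ret = drm_mm_insert_node(&mm, &node, 64 << 10, PAGE_SIZE,
                             DRM_MM_SEARCH_DEFAULT);
    if (!ret)
        drm_mm_remove_node(&node);

    drm_mm_takedown(&mm);
    return ret;
}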
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 91d0582..08a8cac 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -90,6 +90,9 @@ enum drm_mode_status {
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
+#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
+#define CRTC_NO_VSCAN (1 << 3) /* don't adjust vscan */
+#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
@@ -179,6 +182,10 @@ struct drm_cmdline_mode;
struct drm_display_mode *drm_mode_create(struct drm_device *dev);
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
+ const struct drm_display_mode *in);
+int drm_mode_convert_umode(struct drm_display_mode *out,
+ const struct drm_mode_modeinfo *in);
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
@@ -197,6 +204,8 @@ struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
int GTF_K, int GTF_2J);
void drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode);
+void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
+ struct videomode *vm);
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode,
int index);
@@ -217,9 +226,9 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
/* for use by the crtc helper probe functions */
-void drm_mode_validate_size(struct drm_device *dev,
- struct list_head *mode_list,
- int maxX, int maxY);
+enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
+enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
+ int maxX, int maxY);
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 1fbcc96..13ff44b 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -29,6 +29,7 @@
struct drm_connector;
struct drm_device;
struct drm_panel;
+struct display_timing;
/**
* struct drm_panel_funcs - perform operations on a given panel
@@ -38,6 +39,8 @@ struct drm_panel;
* @enable: enable panel (turn on back light, etc.)
* @get_modes: add modes to the connector that the panel is attached to and
* return the number of modes added
+ * @get_timings: copy display timings into the provided array and return
+ * the number of display timings available
*
* The .prepare() function is typically called before the display controller
* starts to transmit video data. Panel drivers can use this to turn the panel
@@ -68,6 +71,8 @@ struct drm_panel_funcs {
int (*prepare)(struct drm_panel *panel);
int (*enable)(struct drm_panel *panel);
int (*get_modes)(struct drm_panel *panel);
+ int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
+ struct display_timing *timings);
};
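A panel driver advertising a single fixed timing could implement the new hook roughly as below; foo_panel_timing is a hypothetical static struct display_timing describing the panel:

static int foo_panel_get_timings(struct drm_panel *panel,
                                 unsigned int num_timings,
                                 struct display_timing *timings)
{
    if (timings && num_timings >= 1)
        timings[0] = foo_panel_timing;    /* copy the fixed timing out */

    return 1;    /* number of timings this panel provides */
}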
struct drm_panel {
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 2dd405c..45c39a3 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -186,6 +186,7 @@
{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index a185392..96e1628 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -52,26 +52,31 @@ extern int drm_crtc_init(struct drm_device *dev,
* @prepare_fb: prepare a framebuffer for use by the plane
* @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
* @atomic_check: check that a given atomic state is valid and can be applied
- * @atomic_update: apply an atomic state to the plane
+ * @atomic_update: apply an atomic state to the plane (mandatory)
+ * @atomic_disable: disable the plane
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_plane_helper_funcs {
int (*prepare_fb)(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state);
void (*cleanup_fb)(struct drm_plane *plane,
- struct drm_framebuffer *fb);
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state);
int (*atomic_check)(struct drm_plane *plane,
struct drm_plane_state *state);
void (*atomic_update)(struct drm_plane *plane,
struct drm_plane_state *old_state);
+ void (*atomic_disable)(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
};
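A typical atomic plane helper vtable after this change; note the new state arguments taken by prepare_fb/cleanup_fb and the optional atomic_disable. All foo_* callbacks are hypothetical:

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
    .prepare_fb     = foo_plane_prepare_fb,     /* pin/map the new fb */
    .cleanup_fb     = foo_plane_cleanup_fb,     /* unpin the old fb */
    .atomic_check   = foo_plane_atomic_check,
    .atomic_update  = foo_plane_atomic_update,  /* mandatory */
    .atomic_disable = foo_plane_atomic_disable, /* optional */
};

/* registered with drm_plane_helper_add(plane, &foo_plane_helper_funcs); */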
static inline void drm_plane_helper_add(struct drm_plane *plane,
const struct drm_plane_helper_funcs *funcs)
{
- plane->helper_private = (void *)funcs;
+ plane->helper_private = funcs;
}
extern int drm_plane_helper_check_update(struct drm_plane *plane,
@@ -95,10 +100,6 @@ extern int drm_primary_helper_update(struct drm_plane *plane,
extern int drm_primary_helper_disable(struct drm_plane *plane);
extern void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
-extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
- const uint32_t *formats,
- int num_formats);
-
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
new file mode 100644
index 0000000..c9a8b64
--- /dev/null
+++ b/include/drm/i915_component.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _I915_COMPONENT_H_
+#define _I915_COMPONENT_H_
+
+struct i915_audio_component {
+ struct device *dev;
+
+ const struct i915_audio_component_ops {
+ struct module *owner;
+ void (*get_power)(struct device *);
+ void (*put_power)(struct device *);
+ void (*codec_wake_override)(struct device *, bool enable);
+ int (*get_cdclk_freq)(struct device *);
+ } *ops;
+};
+
+#endif /* _I915_COMPONENT_H_ */
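
[Editor's note — illustrative sketch, not part of the patch above.] A hypothetical consumer of the new i915_audio_component interface: a driver that has been handed the component could take a display power reference and read the current CD clock through the ops table. Only names from the header are used; example_query_cdclk is invented for illustration.

#include <linux/errno.h>
#include <linux/device.h>
#include <drm/i915_component.h>

static int example_query_cdclk(struct i915_audio_component *acomp)
{
	int freq;

	if (!acomp || !acomp->ops)
		return -ENODEV;

	/* hold display power while touching display registers */
	acomp->ops->get_power(acomp->dev);
	freq = acomp->ops->get_cdclk_freq(acomp->dev);
	acomp->ops->put_power(acomp->dev);

	return freq;
}
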
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 180ad0e..17c4456 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -208,40 +208,41 @@
#define INTEL_VLV_D_IDS(info) \
INTEL_VGA_DEVICE(0x0155, info)
-#define _INTEL_BDW_M(gt, id, info) \
- INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
-#define _INTEL_BDW_D(gt, id, info) \
- INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
-
-#define _INTEL_BDW_M_IDS(gt, info) \
- _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
- _INTEL_BDW_M(gt, 0x160E, info) /* ULX */
-
-#define _INTEL_BDW_D_IDS(gt, info) \
- _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
- _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
-
-#define INTEL_BDW_GT12M_IDS(info) \
- _INTEL_BDW_M_IDS(1, info), \
- _INTEL_BDW_M_IDS(2, info)
+#define INTEL_BDW_GT12M_IDS(info) \
+ INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
+ INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
+ INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
+ INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
#define INTEL_BDW_GT12D_IDS(info) \
- _INTEL_BDW_D_IDS(1, info), \
- _INTEL_BDW_D_IDS(2, info)
+ INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
+ INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
+ INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
+ INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
#define INTEL_BDW_GT3M_IDS(info) \
- _INTEL_BDW_M_IDS(3, info)
+ INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x162E, info) /* ULX */
#define INTEL_BDW_GT3D_IDS(info) \
- _INTEL_BDW_D_IDS(3, info)
+ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
#define INTEL_BDW_RSVDM_IDS(info) \
- _INTEL_BDW_M_IDS(4, info)
+ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x163E, info) /* ULX */
#define INTEL_BDW_RSVDD_IDS(info) \
- _INTEL_BDW_D_IDS(4, info)
+ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_M_IDS(info) \
INTEL_BDW_GT12M_IDS(info), \
@@ -259,21 +260,35 @@
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
-#define INTEL_SKL_IDS(info) \
- INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+#define INTEL_SKL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
- INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+ INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
+
+#define INTEL_SKL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
- INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
- INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+#define INTEL_SKL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
+
+#define INTEL_SKL_IDS(info) \
+ INTEL_SKL_GT1_IDS(info), \
+ INTEL_SKL_GT2_IDS(info), \
+ INTEL_SKL_GT3_IDS(info)
+
+#define INTEL_BXT_IDS(info) \
+ INTEL_VGA_DEVICE(0x0A84, info), \
+ INTEL_VGA_DEVICE(0x1A84, info), \
+ INTEL_VGA_DEVICE(0x5A84, info)
+
#endif /* _I915_PCIIDS_H */
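
[Editor's note — illustrative sketch, not part of the patch above.] The regrouped Skylake macros and the new INTEL_BXT_IDS group still expand into pci_device_id initializers via INTEL_VGA_DEVICE (defined earlier in this header), so a device table can pull in whole GT groups at once. The example_device_info type and skl_info/bxt_info values below are hypothetical driver data, not anything defined by the header.

#include <linux/pci.h>
#include <drm/i915_pciids.h>

struct example_device_info { int gen; };

static const struct example_device_info skl_info = { .gen = 9 };
static const struct example_device_info bxt_info = { .gen = 9 };

static const struct pci_device_id example_ids[] = {
	INTEL_SKL_GT1_IDS(&skl_info),
	INTEL_SKL_GT2_IDS(&skl_info),
	INTEL_SKL_GT3_IDS(&skl_info),
	INTEL_BXT_IDS(&bxt_info),
	{ 0, 0, 0 },
};
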
diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h
deleted file mode 100644
index baa6f11..0000000
--- a/include/drm/i915_powerwell.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2013 Intel Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-
-#ifndef _I915_POWERWELL_H_
-#define _I915_POWERWELL_H_
-
-/* For use by hda_i915 driver */
-extern int i915_request_power_well(void);
-extern int i915_release_power_well(void);
-extern int i915_get_cdclk_freq(void);
-
-#endif /* _I915_POWERWELL_H_ */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0ccf7f2..c768ddf 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -249,7 +249,7 @@ struct ttm_buffer_object {
* either of these locks held.
*/
- unsigned long offset;
+ uint64_t offset; /* GPU address space is independent of CPU word size */
uint32_t cur_placement;
struct sg_table *sg;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 142d752..813042c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -277,7 +277,7 @@ struct ttm_mem_type_manager {
bool has_type;
bool use_type;
uint32_t flags;
- unsigned long gpu_offset;
+ uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
new file mode 100644
index 0000000..04e8db2
--- /dev/null
+++ b/include/dt-bindings/clock/alphascale,asm9260.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ASM9260_H
+#define _DT_BINDINGS_CLK_ASM9260_H
+
+/* ahb gate */
+#define CLKID_AHB_ROM 0
+#define CLKID_AHB_RAM 1
+#define CLKID_AHB_GPIO 2
+#define CLKID_AHB_MAC 3
+#define CLKID_AHB_EMI 4
+#define CLKID_AHB_USB0 5
+#define CLKID_AHB_USB1 6
+#define CLKID_AHB_DMA0 7
+#define CLKID_AHB_DMA1 8
+#define CLKID_AHB_UART0 9
+#define CLKID_AHB_UART1 10
+#define CLKID_AHB_UART2 11
+#define CLKID_AHB_UART3 12
+#define CLKID_AHB_UART4 13
+#define CLKID_AHB_UART5 14
+#define CLKID_AHB_UART6 15
+#define CLKID_AHB_UART7 16
+#define CLKID_AHB_UART8 17
+#define CLKID_AHB_UART9 18
+#define CLKID_AHB_I2S0 19
+#define CLKID_AHB_I2C0 20
+#define CLKID_AHB_I2C1 21
+#define CLKID_AHB_SSP0 22
+#define CLKID_AHB_IOCONFIG 23
+#define CLKID_AHB_WDT 24
+#define CLKID_AHB_CAN0 25
+#define CLKID_AHB_CAN1 26
+#define CLKID_AHB_MPWM 27
+#define CLKID_AHB_SPI0 28
+#define CLKID_AHB_SPI1 29
+#define CLKID_AHB_QEI 30
+#define CLKID_AHB_QUADSPI0 31
+#define CLKID_AHB_CAMIF 32
+#define CLKID_AHB_LCDIF 33
+#define CLKID_AHB_TIMER0 34
+#define CLKID_AHB_TIMER1 35
+#define CLKID_AHB_TIMER2 36
+#define CLKID_AHB_TIMER3 37
+#define CLKID_AHB_IRQ 38
+#define CLKID_AHB_RTC 39
+#define CLKID_AHB_NAND 40
+#define CLKID_AHB_ADC0 41
+#define CLKID_AHB_LED 42
+#define CLKID_AHB_DAC0 43
+#define CLKID_AHB_LCD 44
+#define CLKID_AHB_I2S1 45
+#define CLKID_AHB_MAC1 46
+
+/* divider */
+#define CLKID_SYS_CPU 47
+#define CLKID_SYS_AHB 48
+#define CLKID_SYS_I2S0M 49
+#define CLKID_SYS_I2S0S 50
+#define CLKID_SYS_I2S1M 51
+#define CLKID_SYS_I2S1S 52
+#define CLKID_SYS_UART0 53
+#define CLKID_SYS_UART1 54
+#define CLKID_SYS_UART2 55
+#define CLKID_SYS_UART3 56
+#define CLKID_SYS_UART4 56
+#define CLKID_SYS_UART5 57
+#define CLKID_SYS_UART6 58
+#define CLKID_SYS_UART7 59
+#define CLKID_SYS_UART8 60
+#define CLKID_SYS_UART9 61
+#define CLKID_SYS_SPI0 62
+#define CLKID_SYS_SPI1 63
+#define CLKID_SYS_QUADSPI 64
+#define CLKID_SYS_SSP0 65
+#define CLKID_SYS_NAND 66
+#define CLKID_SYS_TRACE 67
+#define CLKID_SYS_CAMM 68
+#define CLKID_SYS_WDT 69
+#define CLKID_SYS_CLKOUT 70
+#define CLKID_SYS_MAC 71
+#define CLKID_SYS_LCD 72
+#define CLKID_SYS_ADCANA 73
+
+#define MAX_CLKS 74
+#endif
diff --git a/include/dt-bindings/clock/bcm-cygnus.h b/include/dt-bindings/clock/bcm-cygnus.h
new file mode 100644
index 0000000..32fbc47
--- /dev/null
+++ b/include/dt-bindings/clock/bcm-cygnus.h
@@ -0,0 +1,68 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Broadcom Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CLOCK_BCM_CYGNUS_H
+#define _CLOCK_BCM_CYGNUS_H
+
+/* GENPLL clock ID */
+#define BCM_CYGNUS_GENPLL 0
+#define BCM_CYGNUS_GENPLL_AXI21_CLK 1
+#define BCM_CYGNUS_GENPLL_250MHZ_CLK 2
+#define BCM_CYGNUS_GENPLL_IHOST_SYS_CLK 3
+#define BCM_CYGNUS_GENPLL_ENET_SW_CLK 4
+#define BCM_CYGNUS_GENPLL_AUDIO_125_CLK 5
+#define BCM_CYGNUS_GENPLL_CAN_CLK 6
+
+/* LCPLL0 clock ID */
+#define BCM_CYGNUS_LCPLL0 0
+#define BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK 1
+#define BCM_CYGNUS_LCPLL0_DDR_PHY_CLK 2
+#define BCM_CYGNUS_LCPLL0_SDIO_CLK 3
+#define BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK 4
+#define BCM_CYGNUS_LCPLL0_SMART_CARD_CLK 5
+#define BCM_CYGNUS_LCPLL0_CH5_UNUSED 6
+
+/* MIPI PLL clock ID */
+#define BCM_CYGNUS_MIPIPLL 0
+#define BCM_CYGNUS_MIPIPLL_CH0_UNUSED 1
+#define BCM_CYGNUS_MIPIPLL_CH1_LCD 2
+#define BCM_CYGNUS_MIPIPLL_CH2_V3D 3
+#define BCM_CYGNUS_MIPIPLL_CH3_UNUSED 4
+#define BCM_CYGNUS_MIPIPLL_CH4_UNUSED 5
+#define BCM_CYGNUS_MIPIPLL_CH5_UNUSED 6
+
+/* ASIU clock ID */
+#define BCM_CYGNUS_ASIU_KEYPAD_CLK 0
+#define BCM_CYGNUS_ASIU_ADC_CLK 1
+#define BCM_CYGNUS_ASIU_PWM_CLK 2
+
+#endif /* _CLOCK_BCM_CYGNUS_H */
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index 961b9c1..aab088d 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -282,4 +282,65 @@
*/
#define NR_CLKS_DMC 21
+/*
+ * CMU ISP
+ */
+
+/* Dividers */
+
+#define CLK_DIV_ISP1 1
+#define CLK_DIV_ISP0 2
+#define CLK_DIV_MCUISP1 3
+#define CLK_DIV_MCUISP0 4
+#define CLK_DIV_MPWM 5
+
+/* Gates */
+
+#define CLK_UART_ISP 8
+#define CLK_WDT_ISP 9
+#define CLK_PWM_ISP 10
+#define CLK_I2C1_ISP 11
+#define CLK_I2C0_ISP 12
+#define CLK_MPWM_ISP 13
+#define CLK_MCUCTL_ISP 14
+#define CLK_PPMUISPX 15
+#define CLK_PPMUISPMX 16
+#define CLK_QE_LITE1 17
+#define CLK_QE_LITE0 18
+#define CLK_QE_FD 19
+#define CLK_QE_DRC 20
+#define CLK_QE_ISP 21
+#define CLK_CSIS1 22
+#define CLK_SMMU_LITE1 23
+#define CLK_SMMU_LITE0 24
+#define CLK_SMMU_FD 25
+#define CLK_SMMU_DRC 26
+#define CLK_SMMU_ISP 27
+#define CLK_GICISP 28
+#define CLK_CSIS0 29
+#define CLK_MCUISP 30
+#define CLK_LITE1 31
+#define CLK_LITE0 32
+#define CLK_FD 33
+#define CLK_DRC 34
+#define CLK_ISP 35
+#define CLK_QE_ISPCX 36
+#define CLK_QE_SCALERP 37
+#define CLK_QE_SCALERC 38
+#define CLK_SMMU_SCALERP 39
+#define CLK_SMMU_SCALERC 40
+#define CLK_SCALERP 41
+#define CLK_SCALERC 42
+#define CLK_SPI1_ISP 43
+#define CLK_SPI0_ISP 44
+#define CLK_SMMU_ISPCX 45
+#define CLK_ASYNCAXIM 46
+#define CLK_SCLK_MPWM_ISP 47
+
+/*
+ * Total number of clocks of CMU_ISP.
+ * NOTE: Must be equal to last clock ID increased by one.
+ */
+#define NR_CLKS_ISP 48
+
#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 34fe28c..c4b1676 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -262,8 +262,13 @@
#define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */
#define CLK_DIV_ACLK200 454 /* Exynos4x12 only */
#define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */
+#define CLK_DIV_ACP 456
+#define CLK_DIV_DMC 457
+#define CLK_DIV_C2C 458 /* Exynos4x12 only */
+#define CLK_DIV_GDL 459
+#define CLK_DIV_GDR 460
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 456
+#define CLK_NR_CLKS 461
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 8dc0913..99da0d1 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -204,6 +204,12 @@
#define CLK_MOUT_MAUDIO0 643
#define CLK_MOUT_USER_ACLK333 644
#define CLK_MOUT_SW_ACLK333 645
+#define CLK_MOUT_USER_ACLK200_DISP1 646
+#define CLK_MOUT_SW_ACLK200 647
+#define CLK_MOUT_USER_ACLK300_DISP1 648
+#define CLK_MOUT_SW_ACLK300 649
+#define CLK_MOUT_USER_ACLK400_DISP1 650
+#define CLK_MOUT_SW_ACLK400 651
/* divider clocks */
#define CLK_DOUT_PIXEL 768
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
new file mode 100644
index 0000000..5bd80d5
--- /dev/null
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -0,0 +1,1403 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H
+#define _DT_BINDINGS_CLOCK_EXYNOS5433_H
+
+/* CMU_TOP */
+#define CLK_FOUT_ISP_PLL 1
+#define CLK_FOUT_AUD_PLL 2
+
+#define CLK_MOUT_AUD_PLL 10
+#define CLK_MOUT_ISP_PLL 11
+#define CLK_MOUT_AUD_PLL_USER_T 12
+#define CLK_MOUT_MPHY_PLL_USER 13
+#define CLK_MOUT_MFC_PLL_USER 14
+#define CLK_MOUT_BUS_PLL_USER 15
+#define CLK_MOUT_ACLK_HEVC_400 16
+#define CLK_MOUT_ACLK_CAM1_333 17
+#define CLK_MOUT_ACLK_CAM1_552_B 18
+#define CLK_MOUT_ACLK_CAM1_552_A 19
+#define CLK_MOUT_ACLK_ISP_DIS_400 20
+#define CLK_MOUT_ACLK_ISP_400 21
+#define CLK_MOUT_ACLK_BUS0_400 22
+#define CLK_MOUT_ACLK_MSCL_400_B 23
+#define CLK_MOUT_ACLK_MSCL_400_A 24
+#define CLK_MOUT_ACLK_GSCL_333 25
+#define CLK_MOUT_ACLK_G2D_400_B 26
+#define CLK_MOUT_ACLK_G2D_400_A 27
+#define CLK_MOUT_SCLK_JPEG_C 28
+#define CLK_MOUT_SCLK_JPEG_B 29
+#define CLK_MOUT_SCLK_JPEG_A 30
+#define CLK_MOUT_SCLK_MMC2_B 31
+#define CLK_MOUT_SCLK_MMC2_A 32
+#define CLK_MOUT_SCLK_MMC1_B 33
+#define CLK_MOUT_SCLK_MMC1_A 34
+#define CLK_MOUT_SCLK_MMC0_D 35
+#define CLK_MOUT_SCLK_MMC0_C 36
+#define CLK_MOUT_SCLK_MMC0_B 37
+#define CLK_MOUT_SCLK_MMC0_A 38
+#define CLK_MOUT_SCLK_SPI4 39
+#define CLK_MOUT_SCLK_SPI3 40
+#define CLK_MOUT_SCLK_UART2 41
+#define CLK_MOUT_SCLK_UART1 42
+#define CLK_MOUT_SCLK_UART0 43
+#define CLK_MOUT_SCLK_SPI2 44
+#define CLK_MOUT_SCLK_SPI1 45
+#define CLK_MOUT_SCLK_SPI0 46
+#define CLK_MOUT_ACLK_MFC_400_C 47
+#define CLK_MOUT_ACLK_MFC_400_B 48
+#define CLK_MOUT_ACLK_MFC_400_A 49
+#define CLK_MOUT_SCLK_ISP_SENSOR2 50
+#define CLK_MOUT_SCLK_ISP_SENSOR1 51
+#define CLK_MOUT_SCLK_ISP_SENSOR0 52
+#define CLK_MOUT_SCLK_ISP_UART 53
+#define CLK_MOUT_SCLK_ISP_SPI1 54
+#define CLK_MOUT_SCLK_ISP_SPI0 55
+#define CLK_MOUT_SCLK_PCIE_100 56
+#define CLK_MOUT_SCLK_UFSUNIPRO 57
+#define CLK_MOUT_SCLK_USBHOST30 58
+#define CLK_MOUT_SCLK_USBDRD30 59
+#define CLK_MOUT_SCLK_SLIMBUS 60
+#define CLK_MOUT_SCLK_SPDIF 61
+#define CLK_MOUT_SCLK_AUDIO1 62
+#define CLK_MOUT_SCLK_AUDIO0 63
+#define CLK_MOUT_SCLK_HDMI_SPDIF 64
+
+#define CLK_DIV_ACLK_FSYS_200 100
+#define CLK_DIV_ACLK_IMEM_SSSX_266 101
+#define CLK_DIV_ACLK_IMEM_200 102
+#define CLK_DIV_ACLK_IMEM_266 103
+#define CLK_DIV_ACLK_PERIC_66_B 104
+#define CLK_DIV_ACLK_PERIC_66_A 105
+#define CLK_DIV_ACLK_PERIS_66_B 106
+#define CLK_DIV_ACLK_PERIS_66_A 107
+#define CLK_DIV_SCLK_MMC1_B 108
+#define CLK_DIV_SCLK_MMC1_A 109
+#define CLK_DIV_SCLK_MMC0_B 110
+#define CLK_DIV_SCLK_MMC0_A 111
+#define CLK_DIV_SCLK_MMC2_B 112
+#define CLK_DIV_SCLK_MMC2_A 113
+#define CLK_DIV_SCLK_SPI1_B 114
+#define CLK_DIV_SCLK_SPI1_A 115
+#define CLK_DIV_SCLK_SPI0_B 116
+#define CLK_DIV_SCLK_SPI0_A 117
+#define CLK_DIV_SCLK_SPI2_B 118
+#define CLK_DIV_SCLK_SPI2_A 119
+#define CLK_DIV_SCLK_UART2 120
+#define CLK_DIV_SCLK_UART1 121
+#define CLK_DIV_SCLK_UART0 122
+#define CLK_DIV_SCLK_SPI4_B 123
+#define CLK_DIV_SCLK_SPI4_A 124
+#define CLK_DIV_SCLK_SPI3_B 125
+#define CLK_DIV_SCLK_SPI3_A 126
+#define CLK_DIV_SCLK_I2S1 127
+#define CLK_DIV_SCLK_PCM1 128
+#define CLK_DIV_SCLK_AUDIO1 129
+#define CLK_DIV_SCLK_AUDIO0 130
+#define CLK_DIV_ACLK_GSCL_111 131
+#define CLK_DIV_ACLK_GSCL_333 132
+#define CLK_DIV_ACLK_HEVC_400 133
+#define CLK_DIV_ACLK_MFC_400 134
+#define CLK_DIV_ACLK_G2D_266 135
+#define CLK_DIV_ACLK_G2D_400 136
+#define CLK_DIV_ACLK_G3D_400 137
+#define CLK_DIV_ACLK_BUS0_400 138
+#define CLK_DIV_ACLK_BUS1_400 139
+#define CLK_DIV_SCLK_PCIE_100 140
+#define CLK_DIV_SCLK_USBHOST30 141
+#define CLK_DIV_SCLK_UFSUNIPRO 142
+#define CLK_DIV_SCLK_USBDRD30 143
+#define CLK_DIV_SCLK_JPEG 144
+#define CLK_DIV_ACLK_MSCL_400 145
+#define CLK_DIV_ACLK_ISP_DIS_400 146
+#define CLK_DIV_ACLK_ISP_400 147
+#define CLK_DIV_ACLK_CAM0_333 148
+#define CLK_DIV_ACLK_CAM0_400 149
+#define CLK_DIV_ACLK_CAM0_552 150
+#define CLK_DIV_ACLK_CAM1_333 151
+#define CLK_DIV_ACLK_CAM1_400 152
+#define CLK_DIV_ACLK_CAM1_552 153
+#define CLK_DIV_SCLK_ISP_UART 154
+#define CLK_DIV_SCLK_ISP_SPI1_B 155
+#define CLK_DIV_SCLK_ISP_SPI1_A 156
+#define CLK_DIV_SCLK_ISP_SPI0_B 157
+#define CLK_DIV_SCLK_ISP_SPI0_A 158
+#define CLK_DIV_SCLK_ISP_SENSOR2_B 159
+#define CLK_DIV_SCLK_ISP_SENSOR2_A 160
+#define CLK_DIV_SCLK_ISP_SENSOR1_B 161
+#define CLK_DIV_SCLK_ISP_SENSOR1_A 162
+#define CLK_DIV_SCLK_ISP_SENSOR0_B 163
+#define CLK_DIV_SCLK_ISP_SENSOR0_A 164
+
+#define CLK_ACLK_PERIC_66 200
+#define CLK_ACLK_PERIS_66 201
+#define CLK_ACLK_FSYS_200 202
+#define CLK_SCLK_MMC2_FSYS 203
+#define CLK_SCLK_MMC1_FSYS 204
+#define CLK_SCLK_MMC0_FSYS 205
+#define CLK_SCLK_SPI4_PERIC 206
+#define CLK_SCLK_SPI3_PERIC 207
+#define CLK_SCLK_UART2_PERIC 208
+#define CLK_SCLK_UART1_PERIC 209
+#define CLK_SCLK_UART0_PERIC 210
+#define CLK_SCLK_SPI2_PERIC 211
+#define CLK_SCLK_SPI1_PERIC 212
+#define CLK_SCLK_SPI0_PERIC 213
+#define CLK_SCLK_SPDIF_PERIC 214
+#define CLK_SCLK_I2S1_PERIC 215
+#define CLK_SCLK_PCM1_PERIC 216
+#define CLK_SCLK_SLIMBUS 217
+#define CLK_SCLK_AUDIO1 218
+#define CLK_SCLK_AUDIO0 219
+#define CLK_ACLK_G2D_266 220
+#define CLK_ACLK_G2D_400 221
+#define CLK_ACLK_G3D_400 222
+#define CLK_ACLK_IMEM_SSX_266 223
+#define CLK_ACLK_BUS0_400 224
+#define CLK_ACLK_BUS1_400 225
+#define CLK_ACLK_IMEM_200 226
+#define CLK_ACLK_IMEM_266 227
+#define CLK_SCLK_PCIE_100_FSYS 228
+#define CLK_SCLK_UFSUNIPRO_FSYS 229
+#define CLK_SCLK_USBHOST30_FSYS 230
+#define CLK_SCLK_USBDRD30_FSYS 231
+#define CLK_ACLK_GSCL_111 232
+#define CLK_ACLK_GSCL_333 233
+#define CLK_SCLK_JPEG_MSCL 234
+#define CLK_ACLK_MSCL_400 235
+#define CLK_ACLK_MFC_400 236
+#define CLK_ACLK_HEVC_400 237
+#define CLK_ACLK_ISP_DIS_400 238
+#define CLK_ACLK_ISP_400 239
+#define CLK_ACLK_CAM0_333 240
+#define CLK_ACLK_CAM0_400 241
+#define CLK_ACLK_CAM0_552 242
+#define CLK_ACLK_CAM1_333 243
+#define CLK_ACLK_CAM1_400 244
+#define CLK_ACLK_CAM1_552 245
+#define CLK_SCLK_ISP_SENSOR2 246
+#define CLK_SCLK_ISP_SENSOR1 247
+#define CLK_SCLK_ISP_SENSOR0 248
+#define CLK_SCLK_ISP_MCTADC_CAM1 249
+#define CLK_SCLK_ISP_UART_CAM1 250
+#define CLK_SCLK_ISP_SPI1_CAM1 251
+#define CLK_SCLK_ISP_SPI0_CAM1 252
+#define CLK_SCLK_HDMI_SPDIF_DISP 253
+
+#define TOP_NR_CLK 254
+
+/* CMU_CPIF */
+#define CLK_FOUT_MPHY_PLL 1
+
+#define CLK_MOUT_MPHY_PLL 2
+
+#define CLK_DIV_SCLK_MPHY 10
+
+#define CLK_SCLK_MPHY_PLL 11
+#define CLK_SCLK_UFS_MPHY 11
+
+#define CPIF_NR_CLK 12
+
+/* CMU_MIF */
+#define CLK_FOUT_MEM0_PLL 1
+#define CLK_FOUT_MEM1_PLL 2
+#define CLK_FOUT_BUS_PLL 3
+#define CLK_FOUT_MFC_PLL 4
+#define CLK_DOUT_MFC_PLL 5
+#define CLK_DOUT_BUS_PLL 6
+#define CLK_DOUT_MEM1_PLL 7
+#define CLK_DOUT_MEM0_PLL 8
+
+#define CLK_MOUT_MFC_PLL_DIV2 10
+#define CLK_MOUT_BUS_PLL_DIV2 11
+#define CLK_MOUT_MEM1_PLL_DIV2 12
+#define CLK_MOUT_MEM0_PLL_DIV2 13
+#define CLK_MOUT_MFC_PLL 14
+#define CLK_MOUT_BUS_PLL 15
+#define CLK_MOUT_MEM1_PLL 16
+#define CLK_MOUT_MEM0_PLL 17
+#define CLK_MOUT_CLK2X_PHY_C 18
+#define CLK_MOUT_CLK2X_PHY_B 19
+#define CLK_MOUT_CLK2X_PHY_A 20
+#define CLK_MOUT_CLKM_PHY_C 21
+#define CLK_MOUT_CLKM_PHY_B 22
+#define CLK_MOUT_CLKM_PHY_A 23
+#define CLK_MOUT_ACLK_MIFNM_200 24
+#define CLK_MOUT_ACLK_MIFNM_400 25
+#define CLK_MOUT_ACLK_DISP_333_B 26
+#define CLK_MOUT_ACLK_DISP_333_A 27
+#define CLK_MOUT_SCLK_DECON_VCLK_C 28
+#define CLK_MOUT_SCLK_DECON_VCLK_B 29
+#define CLK_MOUT_SCLK_DECON_VCLK_A 30
+#define CLK_MOUT_SCLK_DECON_ECLK_C 31
+#define CLK_MOUT_SCLK_DECON_ECLK_B 32
+#define CLK_MOUT_SCLK_DECON_ECLK_A 33
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_C 34
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_B 35
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_A 36
+#define CLK_MOUT_SCLK_DSD_C 37
+#define CLK_MOUT_SCLK_DSD_B 38
+#define CLK_MOUT_SCLK_DSD_A 39
+#define CLK_MOUT_SCLK_DSIM0_C 40
+#define CLK_MOUT_SCLK_DSIM0_B 41
+#define CLK_MOUT_SCLK_DSIM0_A 42
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_C 46
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_B 47
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_A 48
+#define CLK_MOUT_SCLK_DSIM1_C 49
+#define CLK_MOUT_SCLK_DSIM1_B 50
+#define CLK_MOUT_SCLK_DSIM1_A 51
+
+#define CLK_DIV_SCLK_HPM_MIF 55
+#define CLK_DIV_ACLK_DREX1 56
+#define CLK_DIV_ACLK_DREX0 57
+#define CLK_DIV_CLK2XPHY 58
+#define CLK_DIV_ACLK_MIF_266 59
+#define CLK_DIV_ACLK_MIFND_133 60
+#define CLK_DIV_ACLK_MIF_133 61
+#define CLK_DIV_ACLK_MIFNM_200 62
+#define CLK_DIV_ACLK_MIF_200 63
+#define CLK_DIV_ACLK_MIF_400 64
+#define CLK_DIV_ACLK_BUS2_400 65
+#define CLK_DIV_ACLK_DISP_333 66
+#define CLK_DIV_ACLK_CPIF_200 67
+#define CLK_DIV_SCLK_DSIM1 68
+#define CLK_DIV_SCLK_DECON_TV_VCLK 69
+#define CLK_DIV_SCLK_DSIM0 70
+#define CLK_DIV_SCLK_DSD 71
+#define CLK_DIV_SCLK_DECON_TV_ECLK 72
+#define CLK_DIV_SCLK_DECON_VCLK 73
+#define CLK_DIV_SCLK_DECON_ECLK 74
+#define CLK_DIV_MIF_PRE 75
+
+#define CLK_CLK2X_PHY1 80
+#define CLK_CLK2X_PHY0 81
+#define CLK_CLKM_PHY1 82
+#define CLK_CLKM_PHY0 83
+#define CLK_RCLK_DREX1 84
+#define CLK_RCLK_DREX0 85
+#define CLK_ACLK_DREX1_TZ 86
+#define CLK_ACLK_DREX0_TZ 87
+#define CLK_ACLK_DREX1_PEREV 88
+#define CLK_ACLK_DREX0_PEREV 89
+#define CLK_ACLK_DREX1_MEMIF 90
+#define CLK_ACLK_DREX0_MEMIF 91
+#define CLK_ACLK_DREX1_SCH 92
+#define CLK_ACLK_DREX0_SCH 93
+#define CLK_ACLK_DREX1_BUSIF 94
+#define CLK_ACLK_DREX0_BUSIF 95
+#define CLK_ACLK_DREX1_BUSIF_RD 96
+#define CLK_ACLK_DREX0_BUSIF_RD 97
+#define CLK_ACLK_DREX1 98
+#define CLK_ACLK_DREX0 99
+#define CLK_ACLK_ASYNCAXIM_ATLAS_CCIX 100
+#define CLK_ACLK_ASYNCAXIS_ATLAS_MIF 101
+#define CLK_ACLK_ASYNCAXIM_ATLAS_MIF 102
+#define CLK_ACLK_ASYNCAXIS_MIF_IMEM 103
+#define CLK_ACLK_ASYNCAXIS_NOC_P_CCI 104
+#define CLK_ACLK_ASYNCAXIM_NOC_P_CCI 105
+#define CLK_ACLK_ASYNCAXIS_CP1 106
+#define CLK_ACLK_ASYNCAXIM_CP1 107
+#define CLK_ACLK_ASYNCAXIS_CP0 108
+#define CLK_ACLK_ASYNCAXIM_CP0 109
+#define CLK_ACLK_ASYNCAXIS_DREX1_3 110
+#define CLK_ACLK_ASYNCAXIM_DREX1_3 111
+#define CLK_ACLK_ASYNCAXIS_DREX1_1 112
+#define CLK_ACLK_ASYNCAXIM_DREX1_1 113
+#define CLK_ACLK_ASYNCAXIS_DREX1_0 114
+#define CLK_ACLK_ASYNCAXIM_DREX1_0 115
+#define CLK_ACLK_ASYNCAXIS_DREX0_3 116
+#define CLK_ACLK_ASYNCAXIM_DREX0_3 117
+#define CLK_ACLK_ASYNCAXIS_DREX0_1 118
+#define CLK_ACLK_ASYNCAXIM_DREX0_1 119
+#define CLK_ACLK_ASYNCAXIS_DREX0_0 120
+#define CLK_ACLK_ASYNCAXIM_DREX0_0 121
+#define CLK_ACLK_AHB2APB_MIF2P 122
+#define CLK_ACLK_AHB2APB_MIF1P 123
+#define CLK_ACLK_AHB2APB_MIF0P 124
+#define CLK_ACLK_IXIU_CCI 125
+#define CLK_ACLK_XIU_MIFSFRX 126
+#define CLK_ACLK_MIFNP_133 127
+#define CLK_ACLK_MIFNM_200 128
+#define CLK_ACLK_MIFND_133 129
+#define CLK_ACLK_MIFND_400 130
+#define CLK_ACLK_CCI 131
+#define CLK_ACLK_MIFND_266 132
+#define CLK_ACLK_PPMU_DREX1S3 133
+#define CLK_ACLK_PPMU_DREX1S1 134
+#define CLK_ACLK_PPMU_DREX1S0 135
+#define CLK_ACLK_PPMU_DREX0S3 136
+#define CLK_ACLK_PPMU_DREX0S1 137
+#define CLK_ACLK_PPMU_DREX0S0 138
+#define CLK_ACLK_BTS_APOLLO 139
+#define CLK_ACLK_BTS_ATLAS 140
+#define CLK_ACLK_ACE_SEL_APOLL 141
+#define CLK_ACLK_ACE_SEL_ATLAS 142
+#define CLK_ACLK_AXIDS_CCI_MIFSFRX 143
+#define CLK_ACLK_AXIUS_ATLAS_CCI 144
+#define CLK_ACLK_AXISYNCDNS_CCI 145
+#define CLK_ACLK_AXISYNCDN_CCI 146
+#define CLK_ACLK_AXISYNCDN_NOC_D 147
+#define CLK_ACLK_ASYNCACEM_APOLLO_CCI 148
+#define CLK_ACLK_ASYNCACEM_ATLAS_CCI 149
+#define CLK_ACLK_ASYNCAPBS_MIF_CSSYS 150
+#define CLK_ACLK_BUS2_400 151
+#define CLK_ACLK_DISP_333 152
+#define CLK_ACLK_CPIF_200 153
+#define CLK_PCLK_PPMU_DREX1S3 154
+#define CLK_PCLK_PPMU_DREX1S1 155
+#define CLK_PCLK_PPMU_DREX1S0 156
+#define CLK_PCLK_PPMU_DREX0S3 157
+#define CLK_PCLK_PPMU_DREX0S1 158
+#define CLK_PCLK_PPMU_DREX0S0 159
+#define CLK_PCLK_BTS_APOLLO 160
+#define CLK_PCLK_BTS_ATLAS 161
+#define CLK_PCLK_ASYNCAXI_NOC_P_CCI 162
+#define CLK_PCLK_ASYNCAXI_CP1 163
+#define CLK_PCLK_ASYNCAXI_CP0 164
+#define CLK_PCLK_ASYNCAXI_DREX1_3 165
+#define CLK_PCLK_ASYNCAXI_DREX1_1 166
+#define CLK_PCLK_ASYNCAXI_DREX1_0 167
+#define CLK_PCLK_ASYNCAXI_DREX0_3 168
+#define CLK_PCLK_ASYNCAXI_DREX0_1 169
+#define CLK_PCLK_ASYNCAXI_DREX0_0 170
+#define CLK_PCLK_MIFSRVND_133 171
+#define CLK_PCLK_PMU_MIF 172
+#define CLK_PCLK_SYSREG_MIF 173
+#define CLK_PCLK_GPIO_ALIVE 174
+#define CLK_PCLK_ABB 175
+#define CLK_PCLK_PMU_APBIF 176
+#define CLK_PCLK_DDR_PHY1 177
+#define CLK_PCLK_DREX1 178
+#define CLK_PCLK_DDR_PHY0 179
+#define CLK_PCLK_DREX0 180
+#define CLK_PCLK_DREX0_TZ 181
+#define CLK_PCLK_DREX1_TZ 182
+#define CLK_PCLK_MONOTONIC_CNT 183
+#define CLK_PCLK_RTC 184
+#define CLK_SCLK_DSIM1_DISP 185
+#define CLK_SCLK_DECON_TV_VCLK_DISP 186
+#define CLK_SCLK_FREQ_DET_BUS_PLL 187
+#define CLK_SCLK_FREQ_DET_MFC_PLL 188
+#define CLK_SCLK_FREQ_DET_MEM0_PLL 189
+#define CLK_SCLK_FREQ_DET_MEM1_PLL 190
+#define CLK_SCLK_DSIM0_DISP 191
+#define CLK_SCLK_DSD_DISP 192
+#define CLK_SCLK_DECON_TV_ECLK_DISP 193
+#define CLK_SCLK_DECON_VCLK_DISP 194
+#define CLK_SCLK_DECON_ECLK_DISP 195
+#define CLK_SCLK_HPM_MIF 196
+#define CLK_SCLK_MFC_PLL 197
+#define CLK_SCLK_BUS_PLL 198
+#define CLK_SCLK_BUS_PLL_APOLLO 199
+#define CLK_SCLK_BUS_PLL_ATLAS 200
+
+#define MIF_NR_CLK 201
+
+/* CMU_PERIC */
+#define CLK_PCLK_SPI2 1
+#define CLK_PCLK_SPI1 2
+#define CLK_PCLK_SPI0 3
+#define CLK_PCLK_UART2 4
+#define CLK_PCLK_UART1 5
+#define CLK_PCLK_UART0 6
+#define CLK_PCLK_HSI2C3 7
+#define CLK_PCLK_HSI2C2 8
+#define CLK_PCLK_HSI2C1 9
+#define CLK_PCLK_HSI2C0 10
+#define CLK_PCLK_I2C7 11
+#define CLK_PCLK_I2C6 12
+#define CLK_PCLK_I2C5 13
+#define CLK_PCLK_I2C4 14
+#define CLK_PCLK_I2C3 15
+#define CLK_PCLK_I2C2 16
+#define CLK_PCLK_I2C1 17
+#define CLK_PCLK_I2C0 18
+#define CLK_PCLK_SPI4 19
+#define CLK_PCLK_SPI3 20
+#define CLK_PCLK_HSI2C11 21
+#define CLK_PCLK_HSI2C10 22
+#define CLK_PCLK_HSI2C9 23
+#define CLK_PCLK_HSI2C8 24
+#define CLK_PCLK_HSI2C7 25
+#define CLK_PCLK_HSI2C6 26
+#define CLK_PCLK_HSI2C5 27
+#define CLK_PCLK_HSI2C4 28
+#define CLK_SCLK_SPI4 29
+#define CLK_SCLK_SPI3 30
+#define CLK_SCLK_SPI2 31
+#define CLK_SCLK_SPI1 32
+#define CLK_SCLK_SPI0 33
+#define CLK_SCLK_UART2 34
+#define CLK_SCLK_UART1 35
+#define CLK_SCLK_UART0 36
+#define CLK_ACLK_AHB2APB_PERIC2P 37
+#define CLK_ACLK_AHB2APB_PERIC1P 38
+#define CLK_ACLK_AHB2APB_PERIC0P 39
+#define CLK_ACLK_PERICNP_66 40
+#define CLK_PCLK_SCI 41
+#define CLK_PCLK_GPIO_FINGER 42
+#define CLK_PCLK_GPIO_ESE 43
+#define CLK_PCLK_PWM 44
+#define CLK_PCLK_SPDIF 45
+#define CLK_PCLK_PCM1 46
+#define CLK_PCLK_I2S1 47
+#define CLK_PCLK_ADCIF 48
+#define CLK_PCLK_GPIO_TOUCH 49
+#define CLK_PCLK_GPIO_NFC 50
+#define CLK_PCLK_GPIO_PERIC 51
+#define CLK_PCLK_PMU_PERIC 52
+#define CLK_PCLK_SYSREG_PERIC 53
+#define CLK_SCLK_IOCLK_SPI4 54
+#define CLK_SCLK_IOCLK_SPI3 55
+#define CLK_SCLK_SCI 56
+#define CLK_SCLK_SC_IN 57
+#define CLK_SCLK_PWM 58
+#define CLK_SCLK_IOCLK_SPI2 59
+#define CLK_SCLK_IOCLK_SPI1 60
+#define CLK_SCLK_IOCLK_SPI0 61
+#define CLK_SCLK_IOCLK_I2S1_BCLK 62
+#define CLK_SCLK_SPDIF 63
+#define CLK_SCLK_PCM1 64
+#define CLK_SCLK_I2S1 65
+
+#define CLK_DIV_SCLK_SCI 70
+#define CLK_DIV_SCLK_SC_IN 71
+
+#define PERIC_NR_CLK 72
+
+/* CMU_PERIS */
+#define CLK_PCLK_HPM_APBIF 1
+#define CLK_PCLK_TMU1_APBIF 2
+#define CLK_PCLK_TMU0_APBIF 3
+#define CLK_PCLK_PMU_PERIS 4
+#define CLK_PCLK_SYSREG_PERIS 5
+#define CLK_PCLK_CMU_TOP_APBIF 6
+#define CLK_PCLK_WDT_APOLLO 7
+#define CLK_PCLK_WDT_ATLAS 8
+#define CLK_PCLK_MCT 9
+#define CLK_PCLK_HDMI_CEC 10
+#define CLK_ACLK_AHB2APB_PERIS1P 11
+#define CLK_ACLK_AHB2APB_PERIS0P 12
+#define CLK_ACLK_PERISNP_66 13
+#define CLK_PCLK_TZPC12 14
+#define CLK_PCLK_TZPC11 15
+#define CLK_PCLK_TZPC10 16
+#define CLK_PCLK_TZPC9 17
+#define CLK_PCLK_TZPC8 18
+#define CLK_PCLK_TZPC7 19
+#define CLK_PCLK_TZPC6 20
+#define CLK_PCLK_TZPC5 21
+#define CLK_PCLK_TZPC4 22
+#define CLK_PCLK_TZPC3 23
+#define CLK_PCLK_TZPC2 24
+#define CLK_PCLK_TZPC1 25
+#define CLK_PCLK_TZPC0 26
+#define CLK_PCLK_SECKEY_APBIF 27
+#define CLK_PCLK_CHIPID_APBIF 28
+#define CLK_PCLK_TOPRTC 29
+#define CLK_PCLK_CUSTOM_EFUSE_APBIF 30
+#define CLK_PCLK_ANTIRBK_CNT_APBIF 31
+#define CLK_PCLK_OTP_CON_APBIF 32
+#define CLK_SCLK_ASV_TB 33
+#define CLK_SCLK_TMU1 34
+#define CLK_SCLK_TMU0 35
+#define CLK_SCLK_SECKEY 36
+#define CLK_SCLK_CHIPID 37
+#define CLK_SCLK_TOPRTC 38
+#define CLK_SCLK_CUSTOM_EFUSE 39
+#define CLK_SCLK_ANTIRBK_CNT 40
+#define CLK_SCLK_OTP_CON 41
+
+#define PERIS_NR_CLK 42
+
+/* CMU_FSYS */
+#define CLK_MOUT_ACLK_FSYS_200_USER 1
+#define CLK_MOUT_SCLK_MMC2_USER 2
+#define CLK_MOUT_SCLK_MMC1_USER 3
+#define CLK_MOUT_SCLK_MMC0_USER 4
+#define CLK_MOUT_SCLK_UFS_MPHY_USER 5
+#define CLK_MOUT_SCLK_PCIE_100_USER 6
+#define CLK_MOUT_SCLK_UFSUNIPRO_USER 7
+#define CLK_MOUT_SCLK_USBHOST30_USER 8
+#define CLK_MOUT_SCLK_USBDRD30_USER 9
+#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_USER 10
+#define CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_USER 11
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_HSIC1_USER 12
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_CLK48MOHCI_USER 13
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHYCLOCK_USER 14
+#define CLK_MOUT_PHYCLK_USBHOST20_PHY_PHY_FREECLK_USER 15
+#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_USER 16
+#define CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_USER 17
+#define CLK_MOUT_PHYCLK_UFS_RX1_SYMBOL_USER 18
+#define CLK_MOUT_PHYCLK_UFS_RX0_SYMBOL_USER 19
+#define CLK_MOUT_PHYCLK_UFS_TX1_SYMBOL_USER 20
+#define CLK_MOUT_PHYCLK_UFS_TX0_SYMBOL_USER 21
+#define CLK_MOUT_PHYCLK_LLI_MPHY_TO_UFS_USER 22
+#define CLK_MOUT_SCLK_MPHY 23
+
+#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY 25
+#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY 26
+#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY 27
+#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY 28
+#define CLK_PHYCLK_USBHOST20_PHY_FREECLK_PHY 29
+#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK_PHY 30
+#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI_PHY 31
+#define CLK_PHYCLK_USBHOST20_PHY_HSIC1_PHY 32
+#define CLK_PHYCLK_UFS_TX0_SYMBOL_PHY 33
+#define CLK_PHYCLK_UFS_RX0_SYMBOL_PHY 34
+#define CLK_PHYCLK_UFS_TX1_SYMBOL_PHY 35
+#define CLK_PHYCLK_UFS_RX1_SYMBOL_PHY 36
+#define CLK_PHYCLK_LLI_MPHY_TO_UFS_PHY 37
+
+#define CLK_ACLK_PCIE 50
+#define CLK_ACLK_PDMA1 51
+#define CLK_ACLK_TSI 52
+#define CLK_ACLK_MMC2 53
+#define CLK_ACLK_MMC1 54
+#define CLK_ACLK_MMC0 55
+#define CLK_ACLK_UFS 56
+#define CLK_ACLK_USBHOST20 57
+#define CLK_ACLK_USBHOST30 58
+#define CLK_ACLK_USBDRD30 59
+#define CLK_ACLK_PDMA0 60
+#define CLK_SCLK_MMC2 61
+#define CLK_SCLK_MMC1 62
+#define CLK_SCLK_MMC0 63
+#define CLK_PDMA1 64
+#define CLK_PDMA0 65
+#define CLK_ACLK_XIU_FSYSPX 66
+#define CLK_ACLK_AHB_USBLINKH1 67
+#define CLK_ACLK_SMMU_PDMA1 68
+#define CLK_ACLK_BTS_PCIE 69
+#define CLK_ACLK_AXIUS_PDMA1 70
+#define CLK_ACLK_SMMU_PDMA0 71
+#define CLK_ACLK_BTS_UFS 72
+#define CLK_ACLK_BTS_USBHOST30 73
+#define CLK_ACLK_BTS_USBDRD30 74
+#define CLK_ACLK_AXIUS_PDMA0 75
+#define CLK_ACLK_AXIUS_USBHS 76
+#define CLK_ACLK_AXIUS_FSYSSX 77
+#define CLK_ACLK_AHB2APB_FSYSP 78
+#define CLK_ACLK_AHB2AXI_USBHS 79
+#define CLK_ACLK_AHB_USBLINKH0 80
+#define CLK_ACLK_AHB_USBHS 81
+#define CLK_ACLK_AHB_FSYSH 82
+#define CLK_ACLK_XIU_FSYSX 83
+#define CLK_ACLK_XIU_FSYSSX 84
+#define CLK_ACLK_FSYSNP_200 85
+#define CLK_ACLK_FSYSND_200 86
+#define CLK_PCLK_PCIE_CTRL 87
+#define CLK_PCLK_SMMU_PDMA1 88
+#define CLK_PCLK_PCIE_PHY 89
+#define CLK_PCLK_BTS_PCIE 90
+#define CLK_PCLK_SMMU_PDMA0 91
+#define CLK_PCLK_BTS_UFS 92
+#define CLK_PCLK_BTS_USBHOST30 93
+#define CLK_PCLK_BTS_USBDRD30 94
+#define CLK_PCLK_GPIO_FSYS 95
+#define CLK_PCLK_PMU_FSYS 96
+#define CLK_PCLK_SYSREG_FSYS 97
+#define CLK_SCLK_PCIE_100 98
+#define CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK 99
+#define CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK 100
+#define CLK_PHYCLK_UFS_RX1_SYMBOL 101
+#define CLK_PHYCLK_UFS_RX0_SYMBOL 102
+#define CLK_PHYCLK_UFS_TX1_SYMBOL 103
+#define CLK_PHYCLK_UFS_TX0_SYMBOL 104
+#define CLK_PHYCLK_USBHOST20_PHY_HSIC1 105
+#define CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI 106
+#define CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK 107
+#define CLK_PHYCLK_USBHOST20_PHY_FREECLK 108
+#define CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 109
+#define CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK 110
+#define CLK_SCLK_MPHY 111
+#define CLK_SCLK_UFSUNIPRO 112
+#define CLK_SCLK_USBHOST30 113
+#define CLK_SCLK_USBDRD30 114
+
+#define FSYS_NR_CLK 115
+
+/* CMU_G2D */
+#define CLK_MUX_ACLK_G2D_266_USER 1
+#define CLK_MUX_ACLK_G2D_400_USER 2
+
+#define CLK_DIV_PCLK_G2D 3
+
+#define CLK_ACLK_SMMU_MDMA1 4
+#define CLK_ACLK_BTS_MDMA1 5
+#define CLK_ACLK_BTS_G2D 6
+#define CLK_ACLK_ALB_G2D 7
+#define CLK_ACLK_AXIUS_G2DX 8
+#define CLK_ACLK_ASYNCAXI_SYSX 9
+#define CLK_ACLK_AHB2APB_G2D1P 10
+#define CLK_ACLK_AHB2APB_G2D0P 11
+#define CLK_ACLK_XIU_G2DX 12
+#define CLK_ACLK_G2DNP_133 13
+#define CLK_ACLK_G2DND_400 14
+#define CLK_ACLK_MDMA1 15
+#define CLK_ACLK_G2D 16
+#define CLK_ACLK_SMMU_G2D 17
+#define CLK_PCLK_SMMU_MDMA1 18
+#define CLK_PCLK_BTS_MDMA1 19
+#define CLK_PCLK_BTS_G2D 20
+#define CLK_PCLK_ALB_G2D 21
+#define CLK_PCLK_ASYNCAXI_SYSX 22
+#define CLK_PCLK_PMU_G2D 23
+#define CLK_PCLK_SYSREG_G2D 24
+#define CLK_PCLK_G2D 25
+#define CLK_PCLK_SMMU_G2D 26
+
+#define G2D_NR_CLK 27
+
+/* CMU_DISP */
+#define CLK_FOUT_DISP_PLL 1
+
+#define CLK_MOUT_DISP_PLL 2
+#define CLK_MOUT_SCLK_DSIM1_USER 3
+#define CLK_MOUT_SCLK_DSIM0_USER 4
+#define CLK_MOUT_SCLK_DSD_USER 5
+#define CLK_MOUT_SCLK_DECON_TV_ECLK_USER 6
+#define CLK_MOUT_SCLK_DECON_VCLK_USER 7
+#define CLK_MOUT_SCLK_DECON_ECLK_USER 8
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_USER 9
+#define CLK_MOUT_ACLK_DISP_333_USER 10
+#define CLK_MOUT_PHYCLK_MIPIDPHY1_BITCLKDIV8_USER 11
+#define CLK_MOUT_PHYCLK_MIPIDPHY1_RXCLKESC0_USER 12
+#define CLK_MOUT_PHYCLK_MIPIDPHY0_BITCLKDIV8_USER 13
+#define CLK_MOUT_PHYCLK_MIPIDPHY0_RXCLKESC0_USER 14
+#define CLK_MOUT_PHYCLK_HDMIPHY_TMDS_CLKO_USER 15
+#define CLK_MOUT_PHYCLK_HDMIPHY_PIXEL_CLKO_USER 16
+#define CLK_MOUT_SCLK_DSIM0 17
+#define CLK_MOUT_SCLK_DECON_TV_ECLK 18
+#define CLK_MOUT_SCLK_DECON_VCLK 19
+#define CLK_MOUT_SCLK_DECON_ECLK 20
+#define CLK_MOUT_SCLK_DSIM1_B_DISP 21
+#define CLK_MOUT_SCLK_DSIM1_A_DISP 22
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_C_DISP 23
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_B_DISP 24
+#define CLK_MOUT_SCLK_DECON_TV_VCLK_A_DISP 25
+
+#define CLK_DIV_SCLK_DSIM1_DISP 30
+#define CLK_DIV_SCLK_DECON_TV_VCLK_DISP 31
+#define CLK_DIV_SCLK_DSIM0_DISP 32
+#define CLK_DIV_SCLK_DECON_TV_ECLK_DISP 33
+#define CLK_DIV_SCLK_DECON_VCLK_DISP 34
+#define CLK_DIV_SCLK_DECON_ECLK_DISP 35
+#define CLK_DIV_PCLK_DISP 36
+
+#define CLK_ACLK_DECON_TV 40
+#define CLK_ACLK_DECON 41
+#define CLK_ACLK_SMMU_TV1X 42
+#define CLK_ACLK_SMMU_TV0X 43
+#define CLK_ACLK_SMMU_DECON1X 44
+#define CLK_ACLK_SMMU_DECON0X 45
+#define CLK_ACLK_BTS_DECON_TV_M3 46
+#define CLK_ACLK_BTS_DECON_TV_M2 47
+#define CLK_ACLK_BTS_DECON_TV_M1 48
+#define CLK_ACLK_BTS_DECON_TV_M0 49
+#define CLK_ACLK_BTS_DECON_NM4 50
+#define CLK_ACLK_BTS_DECON_NM3 51
+#define CLK_ACLK_BTS_DECON_NM2 52
+#define CLK_ACLK_BTS_DECON_NM1 53
+#define CLK_ACLK_BTS_DECON_NM0 54
+#define CLK_ACLK_AHB2APB_DISPSFR2P 55
+#define CLK_ACLK_AHB2APB_DISPSFR1P 56
+#define CLK_ACLK_AHB2APB_DISPSFR0P 57
+#define CLK_ACLK_AHB_DISPH 58
+#define CLK_ACLK_XIU_TV1X 59
+#define CLK_ACLK_XIU_TV0X 60
+#define CLK_ACLK_XIU_DECON1X 61
+#define CLK_ACLK_XIU_DECON0X 62
+#define CLK_ACLK_XIU_DISP1X 63
+#define CLK_ACLK_XIU_DISPNP_100 64
+#define CLK_ACLK_DISP1ND_333 65
+#define CLK_ACLK_DISP0ND_333 66
+#define CLK_PCLK_SMMU_TV1X 67
+#define CLK_PCLK_SMMU_TV0X 68
+#define CLK_PCLK_SMMU_DECON1X 69
+#define CLK_PCLK_SMMU_DECON0X 70
+#define CLK_PCLK_BTS_DECON_TV_M3 71
+#define CLK_PCLK_BTS_DECON_TV_M2 72
+#define CLK_PCLK_BTS_DECON_TV_M1 73
+#define CLK_PCLK_BTS_DECON_TV_M0 74
+#define CLK_PCLK_BTS_DECONM4 75
+#define CLK_PCLK_BTS_DECONM3 76
+#define CLK_PCLK_BTS_DECONM2 77
+#define CLK_PCLK_BTS_DECONM1 78
+#define CLK_PCLK_BTS_DECONM0 79
+#define CLK_PCLK_MIC1 80
+#define CLK_PCLK_PMU_DISP 81
+#define CLK_PCLK_SYSREG_DISP 82
+#define CLK_PCLK_HDMIPHY 83
+#define CLK_PCLK_HDMI 84
+#define CLK_PCLK_MIC0 85
+#define CLK_PCLK_DSIM1 86
+#define CLK_PCLK_DSIM0 87
+#define CLK_PCLK_DECON_TV 88
+#define CLK_PHYCLK_MIPIDPHY1_BITCLKDIV8 89
+#define CLK_PHYCLK_MIPIDPHY1_RXCLKESC0 90
+#define CLK_SCLK_RGB_TV_VCLK_TO_DSIM1 91
+#define CLK_SCLK_RGB_TV_VCLK_TO_MIC1 92
+#define CLK_SCLK_DSIM1 93
+#define CLK_SCLK_DECON_TV_VCLK 94
+#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8 95
+#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0 96
+#define CLK_PHYCLK_HDMIPHY_TMDS_CLKO 97
+#define CLK_PHYCLK_HDMI_PIXEL 98
+#define CLK_SCLK_RGB_VCLK_TO_SMIES 99
+#define CLK_SCLK_FREQ_DET_DISP_PLL 100
+#define CLK_SCLK_RGB_VCLK_TO_DSIM0 101
+#define CLK_SCLK_RGB_VCLK_TO_MIC0 102
+#define CLK_SCLK_DSD 103
+#define CLK_SCLK_HDMI_SPDIF 104
+#define CLK_SCLK_DSIM0 105
+#define CLK_SCLK_DECON_TV_ECLK 106
+#define CLK_SCLK_DECON_VCLK 107
+#define CLK_SCLK_DECON_ECLK 108
+#define CLK_SCLK_RGB_VCLK 109
+#define CLK_SCLK_RGB_TV_VCLK 110
+
+#define DISP_NR_CLK 111
+
+/* CMU_AUD */
+#define CLK_MOUT_AUD_PLL_USER 1
+#define CLK_MOUT_SCLK_AUD_PCM 2
+#define CLK_MOUT_SCLK_AUD_I2S 3
+
+#define CLK_DIV_ATCLK_AUD 4
+#define CLK_DIV_PCLK_DBG_AUD 5
+#define CLK_DIV_ACLK_AUD 6
+#define CLK_DIV_AUD_CA5 7
+#define CLK_DIV_SCLK_AUD_SLIMBUS 8
+#define CLK_DIV_SCLK_AUD_UART 9
+#define CLK_DIV_SCLK_AUD_PCM 10
+#define CLK_DIV_SCLK_AUD_I2S 11
+
+#define CLK_ACLK_INTR_CTRL 12
+#define CLK_ACLK_AXIDS2_LPASSP 13
+#define CLK_ACLK_AXIDS1_LPASSP 14
+#define CLK_ACLK_AXI2APB1_LPASSP 15
+#define CLK_ACLK_AXI2APH_LPASSP 16
+#define CLK_ACLK_SMMU_LPASSX 17
+#define CLK_ACLK_AXIDS0_LPASSP 18
+#define CLK_ACLK_AXI2APB0_LPASSP 19
+#define CLK_ACLK_XIU_LPASSX 20
+#define CLK_ACLK_AUDNP_133 21
+#define CLK_ACLK_AUDND_133 22
+#define CLK_ACLK_SRAMC 23
+#define CLK_ACLK_DMAC 24
+#define CLK_PCLK_WDT1 25
+#define CLK_PCLK_WDT0 26
+#define CLK_PCLK_SFR1 27
+#define CLK_PCLK_SMMU_LPASSX 28
+#define CLK_PCLK_GPIO_AUD 29
+#define CLK_PCLK_PMU_AUD 30
+#define CLK_PCLK_SYSREG_AUD 31
+#define CLK_PCLK_AUD_SLIMBUS 32
+#define CLK_PCLK_AUD_UART 33
+#define CLK_PCLK_AUD_PCM 34
+#define CLK_PCLK_AUD_I2S 35
+#define CLK_PCLK_TIMER 36
+#define CLK_PCLK_SFR0_CTRL 37
+#define CLK_ATCLK_AUD 38
+#define CLK_PCLK_DBG_AUD 39
+#define CLK_SCLK_AUD_CA5 40
+#define CLK_SCLK_JTAG_TCK 41
+#define CLK_SCLK_SLIMBUS_CLKIN 42
+#define CLK_SCLK_AUD_SLIMBUS 43
+#define CLK_SCLK_AUD_UART 44
+#define CLK_SCLK_AUD_PCM 45
+#define CLK_SCLK_I2S_BCLK 46
+#define CLK_SCLK_AUD_I2S 47
+
+#define AUD_NR_CLK 48
+
+/* CMU_BUS{0|1|2} */
+#define CLK_DIV_PCLK_BUS_133 1
+
+#define CLK_ACLK_AHB2APB_BUSP 2
+#define CLK_ACLK_BUSNP_133 3
+#define CLK_ACLK_BUSND_400 4
+#define CLK_PCLK_BUSSRVND_133 5
+#define CLK_PCLK_PMU_BUS 6
+#define CLK_PCLK_SYSREG_BUS 7
+
+#define CLK_MOUT_ACLK_BUS2_400_USER 8 /* Only CMU_BUS2 */
+#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */
+#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */
+
+#define BUSx_NR_CLK 11
+
+/* CMU_G3D */
+#define CLK_FOUT_G3D_PLL 1
+
+#define CLK_MOUT_ACLK_G3D_400 2
+#define CLK_MOUT_G3D_PLL 3
+
+#define CLK_DIV_SCLK_HPM_G3D 4
+#define CLK_DIV_PCLK_G3D 5
+#define CLK_DIV_ACLK_G3D 6
+#define CLK_ACLK_BTS_G3D1 7
+#define CLK_ACLK_BTS_G3D0 8
+#define CLK_ACLK_ASYNCAPBS_G3D 9
+#define CLK_ACLK_ASYNCAPBM_G3D 10
+#define CLK_ACLK_AHB2APB_G3DP 11
+#define CLK_ACLK_G3DNP_150 12
+#define CLK_ACLK_G3DND_600 13
+#define CLK_ACLK_G3D 14
+#define CLK_PCLK_BTS_G3D1 15
+#define CLK_PCLK_BTS_G3D0 16
+#define CLK_PCLK_PMU_G3D 17
+#define CLK_PCLK_SYSREG_G3D 18
+#define CLK_SCLK_HPM_G3D 19
+
+#define G3D_NR_CLK 20
+
+/* CMU_GSCL */
+#define CLK_MOUT_ACLK_GSCL_111_USER 1
+#define CLK_MOUT_ACLK_GSCL_333_USER 2
+
+#define CLK_ACLK_BTS_GSCL2 3
+#define CLK_ACLK_BTS_GSCL1 4
+#define CLK_ACLK_BTS_GSCL0 5
+#define CLK_ACLK_AHB2APB_GSCLP 6
+#define CLK_ACLK_XIU_GSCLX 7
+#define CLK_ACLK_GSCLNP_111 8
+#define CLK_ACLK_GSCLRTND_333 9
+#define CLK_ACLK_GSCLBEND_333 10
+#define CLK_ACLK_GSD 11
+#define CLK_ACLK_GSCL2 12
+#define CLK_ACLK_GSCL1 13
+#define CLK_ACLK_GSCL0 14
+#define CLK_ACLK_SMMU_GSCL0 15
+#define CLK_ACLK_SMMU_GSCL1 16
+#define CLK_ACLK_SMMU_GSCL2 17
+#define CLK_PCLK_BTS_GSCL2 18
+#define CLK_PCLK_BTS_GSCL1 19
+#define CLK_PCLK_BTS_GSCL0 20
+#define CLK_PCLK_PMU_GSCL 21
+#define CLK_PCLK_SYSREG_GSCL 22
+#define CLK_PCLK_GSCL2 23
+#define CLK_PCLK_GSCL1 24
+#define CLK_PCLK_GSCL0 25
+#define CLK_PCLK_SMMU_GSCL0 26
+#define CLK_PCLK_SMMU_GSCL1 27
+#define CLK_PCLK_SMMU_GSCL2 28
+
+#define GSCL_NR_CLK 29
+
+/* CMU_APOLLO */
+#define CLK_FOUT_APOLLO_PLL 1
+
+#define CLK_MOUT_APOLLO_PLL 2
+#define CLK_MOUT_BUS_PLL_APOLLO_USER 3
+#define CLK_MOUT_APOLLO 4
+
+#define CLK_DIV_CNTCLK_APOLLO 5
+#define CLK_DIV_PCLK_DBG_APOLLO 6
+#define CLK_DIV_ATCLK_APOLLO 7
+#define CLK_DIV_PCLK_APOLLO 8
+#define CLK_DIV_ACLK_APOLLO 9
+#define CLK_DIV_APOLLO2 10
+#define CLK_DIV_APOLLO1 11
+#define CLK_DIV_SCLK_HPM_APOLLO 12
+#define CLK_DIV_APOLLO_PLL 13
+
+#define CLK_ACLK_ATBDS_APOLLO_3 14
+#define CLK_ACLK_ATBDS_APOLLO_2 15
+#define CLK_ACLK_ATBDS_APOLLO_1 16
+#define CLK_ACLK_ATBDS_APOLLO_0 17
+#define CLK_ACLK_ASATBSLV_APOLLO_3_CSSYS 18
+#define CLK_ACLK_ASATBSLV_APOLLO_2_CSSYS 19
+#define CLK_ACLK_ASATBSLV_APOLLO_1_CSSYS 20
+#define CLK_ACLK_ASATBSLV_APOLLO_0_CSSYS 21
+#define CLK_ACLK_ASYNCACES_APOLLO_CCI 22
+#define CLK_ACLK_AHB2APB_APOLLOP 23
+#define CLK_ACLK_APOLLONP_200 24
+#define CLK_PCLK_ASAPBMST_CSSYS_APOLLO 25
+#define CLK_PCLK_PMU_APOLLO 26
+#define CLK_PCLK_SYSREG_APOLLO 27
+#define CLK_CNTCLK_APOLLO 28
+#define CLK_SCLK_HPM_APOLLO 29
+#define CLK_SCLK_APOLLO 30
+
+#define APOLLO_NR_CLK 31
+
+/* CMU_ATLAS */
+#define CLK_FOUT_ATLAS_PLL 1
+
+#define CLK_MOUT_ATLAS_PLL 2
+#define CLK_MOUT_BUS_PLL_ATLAS_USER 3
+#define CLK_MOUT_ATLAS 4
+
+#define CLK_DIV_CNTCLK_ATLAS 5
+#define CLK_DIV_PCLK_DBG_ATLAS 6
+#define CLK_DIV_ATCLK_ATLASO 7
+#define CLK_DIV_PCLK_ATLAS 8
+#define CLK_DIV_ACLK_ATLAS 9
+#define CLK_DIV_ATLAS2 10
+#define CLK_DIV_ATLAS1 11
+#define CLK_DIV_SCLK_HPM_ATLAS 12
+#define CLK_DIV_ATLAS_PLL 13
+
+#define CLK_ACLK_ATB_AUD_CSSYS 14
+#define CLK_ACLK_ATB_APOLLO3_CSSYS 15
+#define CLK_ACLK_ATB_APOLLO2_CSSYS 16
+#define CLK_ACLK_ATB_APOLLO1_CSSYS 17
+#define CLK_ACLK_ATB_APOLLO0_CSSYS 18
+#define CLK_ACLK_ASYNCAHBS_CSSYS_SSS 19
+#define CLK_ACLK_ASYNCAXIS_CSSYS_CCIX 20
+#define CLK_ACLK_ASYNCACES_ATLAS_CCI 21
+#define CLK_ACLK_AHB2APB_ATLASP 22
+#define CLK_ACLK_ATLASNP_200 23
+#define CLK_PCLK_ASYNCAPB_AUD_CSSYS 24
+#define CLK_PCLK_ASYNCAPB_ISP_CSSYS 25
+#define CLK_PCLK_ASYNCAPB_APOLLO_CSSYS 26
+#define CLK_PCLK_PMU_ATLAS 27
+#define CLK_PCLK_SYSREG_ATLAS 28
+#define CLK_PCLK_SECJTAG 29
+#define CLK_CNTCLK_ATLAS 30
+#define CLK_SCLK_FREQ_DET_ATLAS_PLL 31
+#define CLK_SCLK_HPM_ATLAS 32
+#define CLK_TRACECLK 33
+#define CLK_CTMCLK 34
+#define CLK_HCLK_CSSYS 35
+#define CLK_PCLK_DBG_CSSYS 36
+#define CLK_PCLK_DBG 37
+#define CLK_ATCLK 38
+#define CLK_SCLK_ATLAS 39
+
+#define ATLAS_NR_CLK 40
+
+/* CMU_MSCL */
+#define CLK_MOUT_SCLK_JPEG_USER 1
+#define CLK_MOUT_ACLK_MSCL_400_USER 2
+#define CLK_MOUT_SCLK_JPEG 3
+
+#define CLK_DIV_PCLK_MSCL 4
+
+#define CLK_ACLK_BTS_JPEG 5
+#define CLK_ACLK_BTS_M2MSCALER1 6
+#define CLK_ACLK_BTS_M2MSCALER0 7
+#define CLK_ACLK_AHB2APB_MSCL0P 8
+#define CLK_ACLK_XIU_MSCLX 9
+#define CLK_ACLK_MSCLNP_100 10
+#define CLK_ACLK_MSCLND_400 11
+#define CLK_ACLK_JPEG 12
+#define CLK_ACLK_M2MSCALER1 13
+#define CLK_ACLK_M2MSCALER0 14
+#define CLK_ACLK_SMMU_M2MSCALER0 15
+#define CLK_ACLK_SMMU_M2MSCALER1 16
+#define CLK_ACLK_SMMU_JPEG 17
+#define CLK_PCLK_BTS_JPEG 18
+#define CLK_PCLK_BTS_M2MSCALER1 19
+#define CLK_PCLK_BTS_M2MSCALER0 20
+#define CLK_PCLK_PMU_MSCL 21
+#define CLK_PCLK_SYSREG_MSCL 22
+#define CLK_PCLK_JPEG 23
+#define CLK_PCLK_M2MSCALER1 24
+#define CLK_PCLK_M2MSCALER0 25
+#define CLK_PCLK_SMMU_M2MSCALER0 26
+#define CLK_PCLK_SMMU_M2MSCALER1 27
+#define CLK_PCLK_SMMU_JPEG 28
+#define CLK_SCLK_JPEG 29
+
+#define MSCL_NR_CLK 30
+
+/* CMU_MFC */
+#define CLK_MOUT_ACLK_MFC_400_USER 1
+
+#define CLK_DIV_PCLK_MFC 2
+
+#define CLK_ACLK_BTS_MFC_1 3
+#define CLK_ACLK_BTS_MFC_0 4
+#define CLK_ACLK_AHB2APB_MFCP 5
+#define CLK_ACLK_XIU_MFCX 6
+#define CLK_ACLK_MFCNP_100 7
+#define CLK_ACLK_MFCND_400 8
+#define CLK_ACLK_MFC 9
+#define CLK_ACLK_SMMU_MFC_1 10
+#define CLK_ACLK_SMMU_MFC_0 11
+#define CLK_PCLK_BTS_MFC_1 12
+#define CLK_PCLK_BTS_MFC_0 13
+#define CLK_PCLK_PMU_MFC 14
+#define CLK_PCLK_SYSREG_MFC 15
+#define CLK_PCLK_MFC 16
+#define CLK_PCLK_SMMU_MFC_1 17
+#define CLK_PCLK_SMMU_MFC_0 18
+
+#define MFC_NR_CLK 19
+
+/* CMU_HEVC */
+#define CLK_MOUT_ACLK_HEVC_400_USER 1
+
+#define CLK_DIV_PCLK_HEVC 2
+
+#define CLK_ACLK_BTS_HEVC_1 3
+#define CLK_ACLK_BTS_HEVC_0 4
+#define CLK_ACLK_AHB2APB_HEVCP 5
+#define CLK_ACLK_XIU_HEVCX 6
+#define CLK_ACLK_HEVCNP_100 7
+#define CLK_ACLK_HEVCND_400 8
+#define CLK_ACLK_HEVC 9
+#define CLK_ACLK_SMMU_HEVC_1 10
+#define CLK_ACLK_SMMU_HEVC_0 11
+#define CLK_PCLK_BTS_HEVC_1 12
+#define CLK_PCLK_BTS_HEVC_0 13
+#define CLK_PCLK_PMU_HEVC 14
+#define CLK_PCLK_SYSREG_HEVC 15
+#define CLK_PCLK_HEVC 16
+#define CLK_PCLK_SMMU_HEVC_1 17
+#define CLK_PCLK_SMMU_HEVC_0 18
+
+#define HEVC_NR_CLK 19
+
+/* CMU_ISP */
+#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1
+#define CLK_MOUT_ACLK_ISP_400_USER 2
+
+#define CLK_DIV_PCLK_ISP_DIS 3
+#define CLK_DIV_PCLK_ISP 4
+#define CLK_DIV_ACLK_ISP_D_200 5
+#define CLK_DIV_ACLK_ISP_C_200 6
+
+#define CLK_ACLK_ISP_D_GLUE 7
+#define CLK_ACLK_SCALERP 8
+#define CLK_ACLK_3DNR 9
+#define CLK_ACLK_DIS 10
+#define CLK_ACLK_SCALERC 11
+#define CLK_ACLK_DRC 12
+#define CLK_ACLK_ISP 13
+#define CLK_ACLK_AXIUS_SCALERP 14
+#define CLK_ACLK_AXIUS_SCALERC 15
+#define CLK_ACLK_AXIUS_DRC 16
+#define CLK_ACLK_ASYNCAHBM_ISP2P 17
+#define CLK_ACLK_ASYNCAHBM_ISP1P 18
+#define CLK_ACLK_ASYNCAXIS_DIS1 19
+#define CLK_ACLK_ASYNCAXIS_DIS0 20
+#define CLK_ACLK_ASYNCAXIM_DIS1 21
+#define CLK_ACLK_ASYNCAXIM_DIS0 22
+#define CLK_ACLK_ASYNCAXIM_ISP2P 23
+#define CLK_ACLK_ASYNCAXIM_ISP1P 24
+#define CLK_ACLK_AHB2APB_ISP2P 25
+#define CLK_ACLK_AHB2APB_ISP1P 26
+#define CLK_ACLK_AXI2APB_ISP2P 27
+#define CLK_ACLK_AXI2APB_ISP1P 28
+#define CLK_ACLK_XIU_ISPEX1 29
+#define CLK_ACLK_XIU_ISPEX0 30
+#define CLK_ACLK_ISPND_400 31
+#define CLK_ACLK_SMMU_SCALERP 32
+#define CLK_ACLK_SMMU_3DNR 33
+#define CLK_ACLK_SMMU_DIS1 34
+#define CLK_ACLK_SMMU_DIS0 35
+#define CLK_ACLK_SMMU_SCALERC 36
+#define CLK_ACLK_SMMU_DRC 37
+#define CLK_ACLK_SMMU_ISP 38
+#define CLK_ACLK_BTS_SCALERP 39
+#define CLK_ACLK_BTS_3DR 40
+#define CLK_ACLK_BTS_DIS1 41
+#define CLK_ACLK_BTS_DIS0 42
+#define CLK_ACLK_BTS_SCALERC 43
+#define CLK_ACLK_BTS_DRC 44
+#define CLK_ACLK_BTS_ISP 45
+#define CLK_PCLK_SMMU_SCALERP 46
+#define CLK_PCLK_SMMU_3DNR 47
+#define CLK_PCLK_SMMU_DIS1 48
+#define CLK_PCLK_SMMU_DIS0 49
+#define CLK_PCLK_SMMU_SCALERC 50
+#define CLK_PCLK_SMMU_DRC 51
+#define CLK_PCLK_SMMU_ISP 52
+#define CLK_PCLK_BTS_SCALERP 53
+#define CLK_PCLK_BTS_3DNR 54
+#define CLK_PCLK_BTS_DIS1 55
+#define CLK_PCLK_BTS_DIS0 56
+#define CLK_PCLK_BTS_SCALERC 57
+#define CLK_PCLK_BTS_DRC 58
+#define CLK_PCLK_BTS_ISP 59
+#define CLK_PCLK_ASYNCAXI_DIS1 60
+#define CLK_PCLK_ASYNCAXI_DIS0 61
+#define CLK_PCLK_PMU_ISP 62
+#define CLK_PCLK_SYSREG_ISP 63
+#define CLK_PCLK_CMU_ISP_LOCAL 64
+#define CLK_PCLK_SCALERP 65
+#define CLK_PCLK_3DNR 66
+#define CLK_PCLK_DIS_CORE 67
+#define CLK_PCLK_DIS 68
+#define CLK_PCLK_SCALERC 69
+#define CLK_PCLK_DRC 70
+#define CLK_PCLK_ISP 71
+#define CLK_SCLK_PIXELASYNCS_DIS 72
+#define CLK_SCLK_PIXELASYNCM_DIS 73
+#define CLK_SCLK_PIXELASYNCS_SCALERP 74
+#define CLK_SCLK_PIXELASYNCM_ISPD 75
+#define CLK_SCLK_PIXELASYNCS_ISPC 76
+#define CLK_SCLK_PIXELASYNCM_ISPC 77
+
+#define ISP_NR_CLK 78
+
+/* CMU_CAM0 */
+#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1
+#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2
+
+#define CLK_MOUT_ACLK_CAM0_333_USER 3
+#define CLK_MOUT_ACLK_CAM0_400_USER 4
+#define CLK_MOUT_ACLK_CAM0_552_USER 5
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S4_USER 6
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2A_USER 7
+#define CLK_MOUT_ACLK_LITE_D_B 8
+#define CLK_MOUT_ACLK_LITE_D_A 9
+#define CLK_MOUT_ACLK_LITE_B_B 10
+#define CLK_MOUT_ACLK_LITE_B_A 11
+#define CLK_MOUT_ACLK_LITE_A_B 12
+#define CLK_MOUT_ACLK_LITE_A_A 13
+#define CLK_MOUT_ACLK_CAM0_400 14
+#define CLK_MOUT_ACLK_CSIS1_B 15
+#define CLK_MOUT_ACLK_CSIS1_A 16
+#define CLK_MOUT_ACLK_CSIS0_B 17
+#define CLK_MOUT_ACLK_CSIS0_A 18
+#define CLK_MOUT_ACLK_3AA1_B 19
+#define CLK_MOUT_ACLK_3AA1_A 20
+#define CLK_MOUT_ACLK_3AA0_B 21
+#define CLK_MOUT_ACLK_3AA0_A 22
+#define CLK_MOUT_SCLK_LITE_FREECNT_C 23
+#define CLK_MOUT_SCLK_LITE_FREECNT_B 24
+#define CLK_MOUT_SCLK_LITE_FREECNT_A 25
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_B 26
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_A 27
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_B 28
+#define CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_A 29
+
+#define CLK_DIV_PCLK_CAM0_50 30
+#define CLK_DIV_ACLK_CAM0_200 31
+#define CLK_DIV_ACLK_CAM0_BUS_400 32
+#define CLK_DIV_PCLK_LITE_D 33
+#define CLK_DIV_ACLK_LITE_D 34
+#define CLK_DIV_PCLK_LITE_B 35
+#define CLK_DIV_ACLK_LITE_B 36
+#define CLK_DIV_PCLK_LITE_A 37
+#define CLK_DIV_ACLK_LITE_A 38
+#define CLK_DIV_ACLK_CSIS1 39
+#define CLK_DIV_ACLK_CSIS0 40
+#define CLK_DIV_PCLK_3AA1 41
+#define CLK_DIV_ACLK_3AA1 42
+#define CLK_DIV_PCLK_3AA0 43
+#define CLK_DIV_ACLK_3AA0 44
+#define CLK_DIV_SCLK_PIXELASYNC_LITE_C 45
+#define CLK_DIV_PCLK_PIXELASYNC_LITE_C 46
+#define CLK_DIV_SCLK_PIXELASYNC_LITE_C_INIT 47
+
+#define CLK_ACLK_CSIS1 50
+#define CLK_ACLK_CSIS0 51
+#define CLK_ACLK_3AA1 52
+#define CLK_ACLK_3AA0 53
+#define CLK_ACLK_LITE_D 54
+#define CLK_ACLK_LITE_B 55
+#define CLK_ACLK_LITE_A 56
+#define CLK_ACLK_AHBSYNCDN 57
+#define CLK_ACLK_AXIUS_LITE_D 58
+#define CLK_ACLK_AXIUS_LITE_B 59
+#define CLK_ACLK_AXIUS_LITE_A 60
+#define CLK_ACLK_ASYNCAPBM_3AA1 61
+#define CLK_ACLK_ASYNCAPBS_3AA1 62
+#define CLK_ACLK_ASYNCAPBM_3AA0 63
+#define CLK_ACLK_ASYNCAPBS_3AA0 64
+#define CLK_ACLK_ASYNCAPBM_LITE_D 65
+#define CLK_ACLK_ASYNCAPBS_LITE_D 66
+#define CLK_ACLK_ASYNCAPBM_LITE_B 67
+#define CLK_ACLK_ASYNCAPBS_LITE_B 68
+#define CLK_ACLK_ASYNCAPBM_LITE_A 69
+#define CLK_ACLK_ASYNCAPBS_LITE_A 70
+#define CLK_ACLK_ASYNCAXIM_ISP0P 71
+#define CLK_ACLK_ASYNCAXIM_3AA1 72
+#define CLK_ACLK_ASYNCAXIS_3AA1 73
+#define CLK_ACLK_ASYNCAXIM_3AA0 74
+#define CLK_ACLK_ASYNCAXIS_3AA0 75
+#define CLK_ACLK_ASYNCAXIM_LITE_D 76
+#define CLK_ACLK_ASYNCAXIS_LITE_D 77
+#define CLK_ACLK_ASYNCAXIM_LITE_B 78
+#define CLK_ACLK_ASYNCAXIS_LITE_B 79
+#define CLK_ACLK_ASYNCAXIM_LITE_A 80
+#define CLK_ACLK_ASYNCAXIS_LITE_A 81
+#define CLK_ACLK_AHB2APB_ISPSFRP 82
+#define CLK_ACLK_AXI2APB_ISP0P 83
+#define CLK_ACLK_AXI2AHB_ISP0P 84
+#define CLK_ACLK_XIU_IS0X 85
+#define CLK_ACLK_XIU_ISP0EX 86
+#define CLK_ACLK_CAM0NP_276 87
+#define CLK_ACLK_CAM0ND_400 88
+#define CLK_ACLK_SMMU_3AA1 89
+#define CLK_ACLK_SMMU_3AA0 90
+#define CLK_ACLK_SMMU_LITE_D 91
+#define CLK_ACLK_SMMU_LITE_B 92
+#define CLK_ACLK_SMMU_LITE_A 93
+#define CLK_ACLK_BTS_3AA1 94
+#define CLK_ACLK_BTS_3AA0 95
+#define CLK_ACLK_BTS_LITE_D 96
+#define CLK_ACLK_BTS_LITE_B 97
+#define CLK_ACLK_BTS_LITE_A 98
+#define CLK_PCLK_SMMU_3AA1 99
+#define CLK_PCLK_SMMU_3AA0 100
+#define CLK_PCLK_SMMU_LITE_D 101
+#define CLK_PCLK_SMMU_LITE_B 102
+#define CLK_PCLK_SMMU_LITE_A 103
+#define CLK_PCLK_BTS_3AA1 104
+#define CLK_PCLK_BTS_3AA0 105
+#define CLK_PCLK_BTS_LITE_D 106
+#define CLK_PCLK_BTS_LITE_B 107
+#define CLK_PCLK_BTS_LITE_A 108
+#define CLK_PCLK_ASYNCAXI_CAM1 109
+#define CLK_PCLK_ASYNCAXI_3AA1 110
+#define CLK_PCLK_ASYNCAXI_3AA0 111
+#define CLK_PCLK_ASYNCAXI_LITE_D 112
+#define CLK_PCLK_ASYNCAXI_LITE_B 113
+#define CLK_PCLK_ASYNCAXI_LITE_A 114
+#define CLK_PCLK_PMU_CAM0 115
+#define CLK_PCLK_SYSREG_CAM0 116
+#define CLK_PCLK_CMU_CAM0_LOCAL 117
+#define CLK_PCLK_CSIS1 118
+#define CLK_PCLK_CSIS0 119
+#define CLK_PCLK_3AA1 120
+#define CLK_PCLK_3AA0 121
+#define CLK_PCLK_LITE_D 122
+#define CLK_PCLK_LITE_B 123
+#define CLK_PCLK_LITE_A 124
+#define CLK_PHYCLK_RXBYTECLKHS0_S4 125
+#define CLK_PHYCLK_RXBYTECLKHS0_S2A 126
+#define CLK_SCLK_LITE_FREECNT 127
+#define CLK_SCLK_PIXELASYNCM_3AA1 128
+#define CLK_SCLK_PIXELASYNCM_3AA0 129
+#define CLK_SCLK_PIXELASYNCS_3AA0 130
+#define CLK_SCLK_PIXELASYNCM_LITE_C 131
+#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132
+#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133
+
+#define CAM0_NR_CLK 134
+
+/* CMU_CAM1 */
+#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1
+
+#define CLK_MOUT_SCLK_ISP_UART_USER 2
+#define CLK_MOUT_SCLK_ISP_SPI1_USER 3
+#define CLK_MOUT_SCLK_ISP_SPI0_USER 4
+#define CLK_MOUT_ACLK_CAM1_333_USER 5
+#define CLK_MOUT_ACLK_CAM1_400_USER 6
+#define CLK_MOUT_ACLK_CAM1_552_USER 7
+#define CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2B_USER 8
+#define CLK_MOUT_ACLK_CSIS2_B 9
+#define CLK_MOUT_ACLK_CSIS2_A 10
+#define CLK_MOUT_ACLK_FD_B 11
+#define CLK_MOUT_ACLK_FD_A 12
+#define CLK_MOUT_ACLK_LITE_C_B 13
+#define CLK_MOUT_ACLK_LITE_C_A 14
+
+#define CLK_DIV_SCLK_ISP_WPWM 15
+#define CLK_DIV_PCLK_CAM1_83 16
+#define CLK_DIV_PCLK_CAM1_166 17
+#define CLK_DIV_PCLK_DBG_CAM1 18
+#define CLK_DIV_ATCLK_CAM1 19
+#define CLK_DIV_ACLK_CSIS2 20
+#define CLK_DIV_PCLK_FD 21
+#define CLK_DIV_ACLK_FD 22
+#define CLK_DIV_PCLK_LITE_C 23
+#define CLK_DIV_ACLK_LITE_C 24
+
+#define CLK_ACLK_ISP_GIC 25
+#define CLK_ACLK_FD 26
+#define CLK_ACLK_LITE_C 27
+#define CLK_ACLK_CSIS2 28
+#define CLK_ACLK_ASYNCAPBM_FD 29
+#define CLK_ACLK_ASYNCAPBS_FD 30
+#define CLK_ACLK_ASYNCAPBM_LITE_C 31
+#define CLK_ACLK_ASYNCAPBS_LITE_C 32
+#define CLK_ACLK_ASYNCAHBS_SFRISP2H2 33
+#define CLK_ACLK_ASYNCAHBS_SFRISP2H1 34
+#define CLK_ACLK_ASYNCAXIM_CA5 35
+#define CLK_ACLK_ASYNCAXIS_CA5 36
+#define CLK_ACLK_ASYNCAXIS_ISPX2 37
+#define CLK_ACLK_ASYNCAXIS_ISPX1 38
+#define CLK_ACLK_ASYNCAXIS_ISPX0 39
+#define CLK_ACLK_ASYNCAXIM_ISPEX 40
+#define CLK_ACLK_ASYNCAXIM_ISP3P 41
+#define CLK_ACLK_ASYNCAXIS_ISP3P 42
+#define CLK_ACLK_ASYNCAXIM_FD 43
+#define CLK_ACLK_ASYNCAXIS_FD 44
+#define CLK_ACLK_ASYNCAXIM_LITE_C 45
+#define CLK_ACLK_ASYNCAXIS_LITE_C 46
+#define CLK_ACLK_AHB2APB_ISP5P 47
+#define CLK_ACLK_AHB2APB_ISP3P 48
+#define CLK_ACLK_AXI2APB_ISP3P 49
+#define CLK_ACLK_AHB_SFRISP2H 50
+#define CLK_ACLK_AXI_ISP_HX_R 51
+#define CLK_ACLK_AXI_ISP_CX_R 52
+#define CLK_ACLK_AXI_ISP_HX 53
+#define CLK_ACLK_AXI_ISP_CX 54
+#define CLK_ACLK_XIU_ISPX 55
+#define CLK_ACLK_XIU_ISPEX 56
+#define CLK_ACLK_CAM1NP_333 57
+#define CLK_ACLK_CAM1ND_400 58
+#define CLK_ACLK_SMMU_ISPCPU 59
+#define CLK_ACLK_SMMU_FD 60
+#define CLK_ACLK_SMMU_LITE_C 61
+#define CLK_ACLK_BTS_ISP3P 62
+#define CLK_ACLK_BTS_FD 63
+#define CLK_ACLK_BTS_LITE_C 64
+#define CLK_ACLK_AHBDN_SFRISP2H 65
+#define CLK_ACLK_AHBDN_ISP5P 66
+#define CLK_ACLK_AXIUS_ISP3P 67
+#define CLK_ACLK_AXIUS_FD 68
+#define CLK_ACLK_AXIUS_LITE_C 69
+#define CLK_PCLK_SMMU_ISPCPU 70
+#define CLK_PCLK_SMMU_FD 71
+#define CLK_PCLK_SMMU_LITE_C 72
+#define CLK_PCLK_BTS_ISP3P 73
+#define CLK_PCLK_BTS_FD 74
+#define CLK_PCLK_BTS_LITE_C 75
+#define CLK_PCLK_ASYNCAXIM_CA5 76
+#define CLK_PCLK_ASYNCAXIM_ISPEX 77
+#define CLK_PCLK_ASYNCAXIM_ISP3P 78
+#define CLK_PCLK_ASYNCAXIM_FD 79
+#define CLK_PCLK_ASYNCAXIM_LITE_C 80
+#define CLK_PCLK_PMU_CAM1 81
+#define CLK_PCLK_SYSREG_CAM1 82
+#define CLK_PCLK_CMU_CAM1_LOCAL 83
+#define CLK_PCLK_ISP_MCTADC 84
+#define CLK_PCLK_ISP_WDT 85
+#define CLK_PCLK_ISP_PWM 86
+#define CLK_PCLK_ISP_UART 87
+#define CLK_PCLK_ISP_MCUCTL 88
+#define CLK_PCLK_ISP_SPI1 89
+#define CLK_PCLK_ISP_SPI0 90
+#define CLK_PCLK_ISP_I2C2 91
+#define CLK_PCLK_ISP_I2C1 92
+#define CLK_PCLK_ISP_I2C0 93
+#define CLK_PCLK_ISP_MPWM 94
+#define CLK_PCLK_FD 95
+#define CLK_PCLK_LITE_C 96
+#define CLK_PCLK_CSIS2 97
+#define CLK_SCLK_ISP_I2C2 98
+#define CLK_SCLK_ISP_I2C1 99
+#define CLK_SCLK_ISP_I2C0 100
+#define CLK_SCLK_ISP_PWM 101
+#define CLK_PHYCLK_RXBYTECLKHS0_S2B 102
+#define CLK_SCLK_LITE_C_FREECNT 103
+#define CLK_SCLK_PIXELASYNCM_FD 104
+#define CLK_SCLK_ISP_MCTADC 105
+#define CLK_SCLK_ISP_UART 106
+#define CLK_SCLK_ISP_SPI1 107
+#define CLK_SCLK_ISP_SPI0 108
+#define CLK_SCLK_ISP_MPWM 109
+#define CLK_PCLK_DBG_ISP 110
+#define CLK_ATCLK_ISP 111
+#define CLK_SCLK_ISP_CA5 112
+
+#define CAM1_NR_CLK 113
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
index 8e4681b..e33c75a 100644
--- a/include/dt-bindings/clock/exynos7-clk.h
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -17,7 +17,11 @@
#define DOUT_SCLK_CC_PLL 4
#define DOUT_SCLK_MFC_PLL 5
#define DOUT_ACLK_CCORE_133 6
-#define TOPC_NR_CLK 7
+#define DOUT_ACLK_MSCL_532 7
+#define ACLK_MSCL_532 8
+#define DOUT_SCLK_AUD_PLL 9
+#define FOUT_AUD_PLL 10
+#define TOPC_NR_CLK 11
/* TOP0 */
#define DOUT_ACLK_PERIC1 1
@@ -26,7 +30,15 @@
#define CLK_SCLK_UART1 4
#define CLK_SCLK_UART2 5
#define CLK_SCLK_UART3 6
-#define TOP0_NR_CLK 7
+#define CLK_SCLK_SPI0 7
+#define CLK_SCLK_SPI1 8
+#define CLK_SCLK_SPI2 9
+#define CLK_SCLK_SPI3 10
+#define CLK_SCLK_SPI4 11
+#define CLK_SCLK_SPDIF 12
+#define CLK_SCLK_PCM1 13
+#define CLK_SCLK_I2S1 14
+#define TOP0_NR_CLK 15
/* TOP1 */
#define DOUT_ACLK_FSYS1_200 1
@@ -70,7 +82,23 @@
#define PCLK_HSI2C6 9
#define PCLK_HSI2C7 10
#define PCLK_HSI2C8 11
-#define PERIC1_NR_CLK 12
+#define PCLK_SPI0 12
+#define PCLK_SPI1 13
+#define PCLK_SPI2 14
+#define PCLK_SPI3 15
+#define PCLK_SPI4 16
+#define SCLK_SPI0 17
+#define SCLK_SPI1 18
+#define SCLK_SPI2 19
+#define SCLK_SPI3 20
+#define SCLK_SPI4 21
+#define PCLK_I2S1 22
+#define PCLK_PCM1 23
+#define PCLK_SPDIF 24
+#define SCLK_I2S1 25
+#define SCLK_PCM1 26
+#define SCLK_SPDIF 27
+#define PERIC1_NR_CLK 28
/* PERIS */
#define PCLK_CHIPID 1
@@ -82,11 +110,63 @@
/* FSYS0 */
#define ACLK_MMC2 1
-#define FSYS0_NR_CLK 2
+#define ACLK_AXIUS_USBDRD30X_FSYS0X 2
+#define ACLK_USBDRD300 3
+#define SCLK_USBDRD300_SUSPENDCLK 4
+#define SCLK_USBDRD300_REFCLK 5
+#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER 6
+#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER 7
+#define OSCCLK_PHY_CLKOUT_USB30_PHY 8
+#define ACLK_PDMA0 9
+#define ACLK_PDMA1 10
+#define FSYS0_NR_CLK 11
/* FSYS1 */
#define ACLK_MMC1 1
#define ACLK_MMC0 2
#define FSYS1_NR_CLK 3
+/* MSCL */
+#define USERMUX_ACLK_MSCL_532 1
+#define DOUT_PCLK_MSCL 2
+#define ACLK_MSCL_0 3
+#define ACLK_MSCL_1 4
+#define ACLK_JPEG 5
+#define ACLK_G2D 6
+#define ACLK_LH_ASYNC_SI_MSCL_0 7
+#define ACLK_LH_ASYNC_SI_MSCL_1 8
+#define ACLK_AXI2ACEL_BRIDGE 9
+#define ACLK_XIU_MSCLX_0 10
+#define ACLK_XIU_MSCLX_1 11
+#define ACLK_QE_MSCL_0 12
+#define ACLK_QE_MSCL_1 13
+#define ACLK_QE_JPEG 14
+#define ACLK_QE_G2D 15
+#define ACLK_PPMU_MSCL_0 16
+#define ACLK_PPMU_MSCL_1 17
+#define ACLK_MSCLNP_133 18
+#define ACLK_AHB2APB_MSCL0P 19
+#define ACLK_AHB2APB_MSCL1P 20
+
+#define PCLK_MSCL_0 21
+#define PCLK_MSCL_1 22
+#define PCLK_JPEG 23
+#define PCLK_G2D 24
+#define PCLK_QE_MSCL_0 25
+#define PCLK_QE_MSCL_1 26
+#define PCLK_QE_JPEG 27
+#define PCLK_QE_G2D 28
+#define PCLK_PPMU_MSCL_0 29
+#define PCLK_PPMU_MSCL_1 30
+#define PCLK_AXI2ACEL_BRIDGE 31
+#define PCLK_PMU_MSCL 32
+#define MSCL_NR_CLK 33
+
+/* AUD */
+#define SCLK_I2S 1
+#define SCLK_PCM 2
+#define PCLK_I2S 3
+#define PCLK_PCM 4
+#define ACLK_ADMA 5
+#define AUD_NR_CLK 6
#endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h
new file mode 100644
index 0000000..70ee383
--- /dev/null
+++ b/include/dt-bindings/clock/hi6220-clock.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * Author: Bintian Wang <bintian.wang@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_HI6220_H
+#define __DT_BINDINGS_CLOCK_HI6220_H
+
+/* clk in Hi6220 AO (always on) controller */
+#define HI6220_NONE_CLOCK 0
+
+/* fixed rate clocks */
+#define HI6220_REF32K 1
+#define HI6220_CLK_TCXO 2
+#define HI6220_MMC1_PAD 3
+#define HI6220_MMC2_PAD 4
+#define HI6220_MMC0_PAD 5
+#define HI6220_PLL_BBP 6
+#define HI6220_PLL_GPU 7
+#define HI6220_PLL1_DDR 8
+#define HI6220_PLL_SYS 9
+#define HI6220_PLL_SYS_MEDIA 10
+#define HI6220_DDR_SRC 11
+#define HI6220_PLL_MEDIA 12
+#define HI6220_PLL_DDR 13
+
+/* fixed factor clocks */
+#define HI6220_300M 14
+#define HI6220_150M 15
+#define HI6220_PICOPHY_SRC 16
+#define HI6220_MMC0_SRC_SEL 17
+#define HI6220_MMC1_SRC_SEL 18
+#define HI6220_MMC2_SRC_SEL 19
+#define HI6220_VPU_CODEC 20
+#define HI6220_MMC0_SMP 21
+#define HI6220_MMC1_SMP 22
+#define HI6220_MMC2_SMP 23
+
+/* gate clocks */
+#define HI6220_WDT0_PCLK 24
+#define HI6220_WDT1_PCLK 25
+#define HI6220_WDT2_PCLK 26
+#define HI6220_TIMER0_PCLK 27
+#define HI6220_TIMER1_PCLK 28
+#define HI6220_TIMER2_PCLK 29
+#define HI6220_TIMER3_PCLK 30
+#define HI6220_TIMER4_PCLK 31
+#define HI6220_TIMER5_PCLK 32
+#define HI6220_TIMER6_PCLK 33
+#define HI6220_TIMER7_PCLK 34
+#define HI6220_TIMER8_PCLK 35
+#define HI6220_UART0_PCLK 36
+
+#define HI6220_AO_NR_CLKS 37
+
+/* clk in Hi6220 sysctrl */
+/* gate clock */
+#define HI6220_MMC0_CLK 1
+#define HI6220_MMC0_CIUCLK 2
+#define HI6220_MMC1_CLK 3
+#define HI6220_MMC1_CIUCLK 4
+#define HI6220_MMC2_CLK 5
+#define HI6220_MMC2_CIUCLK 6
+#define HI6220_USBOTG_HCLK 7
+#define HI6220_CLK_PICOPHY 8
+#define HI6220_HIFI 9
+#define HI6220_DACODEC_PCLK 10
+#define HI6220_EDMAC_ACLK 11
+#define HI6220_CS_ATB 12
+#define HI6220_I2C0_CLK 13
+#define HI6220_I2C1_CLK 14
+#define HI6220_I2C2_CLK 15
+#define HI6220_I2C3_CLK 16
+#define HI6220_UART1_PCLK 17
+#define HI6220_UART2_PCLK 18
+#define HI6220_UART3_PCLK 19
+#define HI6220_UART4_PCLK 20
+#define HI6220_SPI_CLK 21
+#define HI6220_TSENSOR_CLK 22
+#define HI6220_MMU_CLK 23
+#define HI6220_HIFI_SEL 24
+#define HI6220_MMC0_SYSPLL 25
+#define HI6220_MMC1_SYSPLL 26
+#define HI6220_MMC2_SYSPLL 27
+#define HI6220_MMC0_SEL 28
+#define HI6220_MMC1_SEL 29
+#define HI6220_BBPPLL_SEL 30
+#define HI6220_MEDIA_PLL_SRC 31
+#define HI6220_MMC2_SEL 32
+#define HI6220_CS_ATB_SYSPLL 33
+
+/* mux clocks */
+#define HI6220_MMC0_SRC 34
+#define HI6220_MMC0_SMP_IN 35
+#define HI6220_MMC1_SRC 36
+#define HI6220_MMC1_SMP_IN 37
+#define HI6220_MMC2_SRC 38
+#define HI6220_MMC2_SMP_IN 39
+#define HI6220_HIFI_SRC 40
+#define HI6220_UART1_SRC 41
+#define HI6220_UART2_SRC 42
+#define HI6220_UART3_SRC 43
+#define HI6220_UART4_SRC 44
+#define HI6220_MMC0_MUX0 45
+#define HI6220_MMC1_MUX0 46
+#define HI6220_MMC2_MUX0 47
+#define HI6220_MMC0_MUX1 48
+#define HI6220_MMC1_MUX1 49
+#define HI6220_MMC2_MUX1 50
+
+/* divider clocks */
+#define HI6220_CLK_BUS 51
+#define HI6220_MMC0_DIV 52
+#define HI6220_MMC1_DIV 53
+#define HI6220_MMC2_DIV 54
+#define HI6220_HIFI_DIV 55
+#define HI6220_BBPPLL0_DIV 56
+#define HI6220_CS_DAPB 57
+#define HI6220_CS_ATB_DIV 58
+
+#define HI6220_SYS_NR_CLKS 59
+
+/* clk in Hi6220 media controller */
+/* gate clocks */
+#define HI6220_DSI_PCLK 1
+#define HI6220_G3D_PCLK 2
+#define HI6220_ACLK_CODEC_VPU 3
+#define HI6220_ISP_SCLK 4
+#define HI6220_ADE_CORE 5
+#define HI6220_MED_MMU 6
+#define HI6220_CFG_CSI4PHY 7
+#define HI6220_CFG_CSI2PHY 8
+#define HI6220_ISP_SCLK_GATE 9
+#define HI6220_ISP_SCLK_GATE1 10
+#define HI6220_ADE_CORE_GATE 11
+#define HI6220_CODEC_VPU_GATE 12
+#define HI6220_MED_SYSPLL 13
+
+/* mux clocks */
+#define HI6220_1440_1200 14
+#define HI6220_1000_1200 15
+#define HI6220_1000_1440 16
+
+/* divider clocks */
+#define HI6220_CODEC_JPEG 17
+#define HI6220_ISP_SCLK_SRC 18
+#define HI6220_ISP_SCLK1 19
+#define HI6220_ADE_CORE_SRC 20
+#define HI6220_ADE_PIX_SRC 21
+#define HI6220_G3D_CLK 22
+#define HI6220_CODEC_VPU_SRC 23
+
+#define HI6220_MEDIA_NR_CLKS 24
+
+/* clk in Hi6220 power controller */
+/* gate clocks */
+#define HI6220_PLL_GPU_GATE 1
+#define HI6220_PLL1_DDR_GATE 2
+#define HI6220_PLL_DDR_GATE 3
+#define HI6220_PLL_MEDIA_GATE 4
+#define HI6220_PLL0_BBP_GATE 5
+
+/* divider clocks */
+#define HI6220_DDRC_SRC 6
+#define HI6220_DDRC_AXI1 7
+
+#define HI6220_POWER_NR_CLKS 8
+#endif
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index b690cdb..8780868 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -248,6 +248,9 @@
#define IMX6QDL_PLL6_BYPASS 235
#define IMX6QDL_PLL7_BYPASS 236
#define IMX6QDL_CLK_GPT_3M 237
-#define IMX6QDL_CLK_END 238
+#define IMX6QDL_CLK_VIDEO_27M 238
+#define IMX6QDL_CLK_MIPI_CORE_CFG 239
+#define IMX6QDL_CLK_MIPI_IPG 240
+#define IMX6QDL_CLK_END 241
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
new file mode 100644
index 0000000..728df28
--- /dev/null
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX7D_H
+#define __DT_BINDINGS_CLOCK_IMX7D_H
+
+#define IMX7D_OSC_24M_CLK 0
+#define IMX7D_PLL_ARM_MAIN 1
+#define IMX7D_PLL_ARM_MAIN_CLK 2
+#define IMX7D_PLL_ARM_MAIN_SRC 3
+#define IMX7D_PLL_ARM_MAIN_BYPASS 4
+#define IMX7D_PLL_SYS_MAIN 5
+#define IMX7D_PLL_SYS_MAIN_CLK 6
+#define IMX7D_PLL_SYS_MAIN_SRC 7
+#define IMX7D_PLL_SYS_MAIN_BYPASS 8
+#define IMX7D_PLL_SYS_MAIN_480M 9
+#define IMX7D_PLL_SYS_MAIN_240M 10
+#define IMX7D_PLL_SYS_MAIN_120M 11
+#define IMX7D_PLL_SYS_MAIN_480M_CLK 12
+#define IMX7D_PLL_SYS_MAIN_240M_CLK 13
+#define IMX7D_PLL_SYS_MAIN_120M_CLK 14
+#define IMX7D_PLL_SYS_PFD0_392M_CLK 15
+#define IMX7D_PLL_SYS_PFD0_196M 16
+#define IMX7D_PLL_SYS_PFD0_196M_CLK 17
+#define IMX7D_PLL_SYS_PFD1_332M_CLK 18
+#define IMX7D_PLL_SYS_PFD1_166M 19
+#define IMX7D_PLL_SYS_PFD1_166M_CLK 20
+#define IMX7D_PLL_SYS_PFD2_270M_CLK 21
+#define IMX7D_PLL_SYS_PFD2_135M 22
+#define IMX7D_PLL_SYS_PFD2_135M_CLK 23
+#define IMX7D_PLL_SYS_PFD3_CLK 24
+#define IMX7D_PLL_SYS_PFD4_CLK 25
+#define IMX7D_PLL_SYS_PFD5_CLK 26
+#define IMX7D_PLL_SYS_PFD6_CLK 27
+#define IMX7D_PLL_SYS_PFD7_CLK 28
+#define IMX7D_PLL_ENET_MAIN 29
+#define IMX7D_PLL_ENET_MAIN_CLK 30
+#define IMX7D_PLL_ENET_MAIN_SRC 31
+#define IMX7D_PLL_ENET_MAIN_BYPASS 32
+#define IMX7D_PLL_ENET_MAIN_500M 33
+#define IMX7D_PLL_ENET_MAIN_250M 34
+#define IMX7D_PLL_ENET_MAIN_125M 35
+#define IMX7D_PLL_ENET_MAIN_100M 36
+#define IMX7D_PLL_ENET_MAIN_50M 37
+#define IMX7D_PLL_ENET_MAIN_40M 38
+#define IMX7D_PLL_ENET_MAIN_25M 39
+#define IMX7D_PLL_ENET_MAIN_500M_CLK 40
+#define IMX7D_PLL_ENET_MAIN_250M_CLK 41
+#define IMX7D_PLL_ENET_MAIN_125M_CLK 42
+#define IMX7D_PLL_ENET_MAIN_100M_CLK 43
+#define IMX7D_PLL_ENET_MAIN_50M_CLK 44
+#define IMX7D_PLL_ENET_MAIN_40M_CLK 45
+#define IMX7D_PLL_ENET_MAIN_25M_CLK 46
+#define IMX7D_PLL_DRAM_MAIN 47
+#define IMX7D_PLL_DRAM_MAIN_CLK 48
+#define IMX7D_PLL_DRAM_MAIN_SRC 49
+#define IMX7D_PLL_DRAM_MAIN_BYPASS 50
+#define IMX7D_PLL_DRAM_MAIN_533M 51
+#define IMX7D_PLL_DRAM_MAIN_533M_CLK 52
+#define IMX7D_PLL_AUDIO_MAIN 53
+#define IMX7D_PLL_AUDIO_MAIN_CLK 54
+#define IMX7D_PLL_AUDIO_MAIN_SRC 55
+#define IMX7D_PLL_AUDIO_MAIN_BYPASS 56
+#define IMX7D_PLL_VIDEO_MAIN_CLK 57
+#define IMX7D_PLL_VIDEO_MAIN 58
+#define IMX7D_PLL_VIDEO_MAIN_SRC 59
+#define IMX7D_PLL_VIDEO_MAIN_BYPASS 60
+#define IMX7D_USB_MAIN_480M_CLK 61
+#define IMX7D_ARM_A7_ROOT_CLK 62
+#define IMX7D_ARM_A7_ROOT_SRC 63
+#define IMX7D_ARM_A7_ROOT_CG 64
+#define IMX7D_ARM_A7_ROOT_DIV 65
+#define IMX7D_ARM_M4_ROOT_CLK 66
+#define IMX7D_ARM_M4_ROOT_SRC 67
+#define IMX7D_ARM_M4_ROOT_CG 68
+#define IMX7D_ARM_M4_ROOT_DIV 69
+#define IMX7D_ARM_M0_ROOT_CLK 70
+#define IMX7D_ARM_M0_ROOT_SRC 71
+#define IMX7D_ARM_M0_ROOT_CG 72
+#define IMX7D_ARM_M0_ROOT_DIV 73
+#define IMX7D_MAIN_AXI_ROOT_CLK 74
+#define IMX7D_MAIN_AXI_ROOT_SRC 75
+#define IMX7D_MAIN_AXI_ROOT_CG 76
+#define IMX7D_MAIN_AXI_ROOT_DIV 77
+#define IMX7D_DISP_AXI_ROOT_CLK 78
+#define IMX7D_DISP_AXI_ROOT_SRC 79
+#define IMX7D_DISP_AXI_ROOT_CG 80
+#define IMX7D_DISP_AXI_ROOT_DIV 81
+#define IMX7D_ENET_AXI_ROOT_CLK 82
+#define IMX7D_ENET_AXI_ROOT_SRC 83
+#define IMX7D_ENET_AXI_ROOT_CG 84
+#define IMX7D_ENET_AXI_ROOT_DIV 85
+#define IMX7D_NAND_USDHC_BUS_ROOT_CLK 86
+#define IMX7D_NAND_USDHC_BUS_ROOT_SRC 87
+#define IMX7D_NAND_USDHC_BUS_ROOT_CG 88
+#define IMX7D_NAND_USDHC_BUS_ROOT_DIV 89
+#define IMX7D_AHB_CHANNEL_ROOT_CLK 90
+#define IMX7D_AHB_CHANNEL_ROOT_SRC 91
+#define IMX7D_AHB_CHANNEL_ROOT_CG 92
+#define IMX7D_AHB_CHANNEL_ROOT_DIV 93
+#define IMX7D_DRAM_PHYM_ROOT_CLK 94
+#define IMX7D_DRAM_PHYM_ROOT_SRC 95
+#define IMX7D_DRAM_PHYM_ROOT_CG 96
+#define IMX7D_DRAM_PHYM_ROOT_DIV 97
+#define IMX7D_DRAM_ROOT_CLK 98
+#define IMX7D_DRAM_ROOT_SRC 99
+#define IMX7D_DRAM_ROOT_CG 100
+#define IMX7D_DRAM_ROOT_DIV 101
+#define IMX7D_DRAM_PHYM_ALT_ROOT_CLK 102
+#define IMX7D_DRAM_PHYM_ALT_ROOT_SRC 103
+#define IMX7D_DRAM_PHYM_ALT_ROOT_CG 104
+#define IMX7D_DRAM_PHYM_ALT_ROOT_DIV 105
+#define IMX7D_DRAM_ALT_ROOT_CLK 106
+#define IMX7D_DRAM_ALT_ROOT_SRC 107
+#define IMX7D_DRAM_ALT_ROOT_CG 108
+#define IMX7D_DRAM_ALT_ROOT_DIV 109
+#define IMX7D_USB_HSIC_ROOT_CLK 110
+#define IMX7D_USB_HSIC_ROOT_SRC 111
+#define IMX7D_USB_HSIC_ROOT_CG 112
+#define IMX7D_USB_HSIC_ROOT_DIV 113
+#define IMX7D_PCIE_CTRL_ROOT_CLK 114
+#define IMX7D_PCIE_CTRL_ROOT_SRC 115
+#define IMX7D_PCIE_CTRL_ROOT_CG 116
+#define IMX7D_PCIE_CTRL_ROOT_DIV 117
+#define IMX7D_PCIE_PHY_ROOT_CLK 118
+#define IMX7D_PCIE_PHY_ROOT_SRC 119
+#define IMX7D_PCIE_PHY_ROOT_CG 120
+#define IMX7D_PCIE_PHY_ROOT_DIV 121
+#define IMX7D_EPDC_PIXEL_ROOT_CLK 122
+#define IMX7D_EPDC_PIXEL_ROOT_SRC 123
+#define IMX7D_EPDC_PIXEL_ROOT_CG 124
+#define IMX7D_EPDC_PIXEL_ROOT_DIV 125
+#define IMX7D_LCDIF_PIXEL_ROOT_CLK 126
+#define IMX7D_LCDIF_PIXEL_ROOT_SRC 127
+#define IMX7D_LCDIF_PIXEL_ROOT_CG 128
+#define IMX7D_LCDIF_PIXEL_ROOT_DIV 129
+#define IMX7D_MIPI_DSI_ROOT_CLK 130
+#define IMX7D_MIPI_DSI_ROOT_SRC 131
+#define IMX7D_MIPI_DSI_ROOT_CG 132
+#define IMX7D_MIPI_DSI_ROOT_DIV 133
+#define IMX7D_MIPI_CSI_ROOT_CLK 134
+#define IMX7D_MIPI_CSI_ROOT_SRC 135
+#define IMX7D_MIPI_CSI_ROOT_CG 136
+#define IMX7D_MIPI_CSI_ROOT_DIV 137
+#define IMX7D_MIPI_DPHY_ROOT_CLK 138
+#define IMX7D_MIPI_DPHY_ROOT_SRC 139
+#define IMX7D_MIPI_DPHY_ROOT_CG 140
+#define IMX7D_MIPI_DPHY_ROOT_DIV 141
+#define IMX7D_SAI1_ROOT_CLK 142
+#define IMX7D_SAI1_ROOT_SRC 143
+#define IMX7D_SAI1_ROOT_CG 144
+#define IMX7D_SAI1_ROOT_DIV 145
+#define IMX7D_SAI2_ROOT_CLK 146
+#define IMX7D_SAI2_ROOT_SRC 147
+#define IMX7D_SAI2_ROOT_CG 148
+#define IMX7D_SAI2_ROOT_DIV 149
+#define IMX7D_SAI3_ROOT_CLK 150
+#define IMX7D_SAI3_ROOT_SRC 151
+#define IMX7D_SAI3_ROOT_CG 152
+#define IMX7D_SAI3_ROOT_DIV 153
+#define IMX7D_SPDIF_ROOT_CLK 154
+#define IMX7D_SPDIF_ROOT_SRC 155
+#define IMX7D_SPDIF_ROOT_CG 156
+#define IMX7D_SPDIF_ROOT_DIV 157
+#define IMX7D_ENET1_REF_ROOT_CLK 158
+#define IMX7D_ENET1_REF_ROOT_SRC 159
+#define IMX7D_ENET1_REF_ROOT_CG 160
+#define IMX7D_ENET1_REF_ROOT_DIV 161
+#define IMX7D_ENET1_TIME_ROOT_CLK 162
+#define IMX7D_ENET1_TIME_ROOT_SRC 163
+#define IMX7D_ENET1_TIME_ROOT_CG 164
+#define IMX7D_ENET1_TIME_ROOT_DIV 165
+#define IMX7D_ENET2_REF_ROOT_CLK 166
+#define IMX7D_ENET2_REF_ROOT_SRC 167
+#define IMX7D_ENET2_REF_ROOT_CG 168
+#define IMX7D_ENET2_REF_ROOT_DIV 169
+#define IMX7D_ENET2_TIME_ROOT_CLK 170
+#define IMX7D_ENET2_TIME_ROOT_SRC 171
+#define IMX7D_ENET2_TIME_ROOT_CG 172
+#define IMX7D_ENET2_TIME_ROOT_DIV 173
+#define IMX7D_ENET_PHY_REF_ROOT_CLK 174
+#define IMX7D_ENET_PHY_REF_ROOT_SRC 175
+#define IMX7D_ENET_PHY_REF_ROOT_CG 176
+#define IMX7D_ENET_PHY_REF_ROOT_DIV 177
+#define IMX7D_EIM_ROOT_CLK 178
+#define IMX7D_EIM_ROOT_SRC 179
+#define IMX7D_EIM_ROOT_CG 180
+#define IMX7D_EIM_ROOT_DIV 181
+#define IMX7D_NAND_ROOT_CLK 182
+#define IMX7D_NAND_ROOT_SRC 183
+#define IMX7D_NAND_ROOT_CG 184
+#define IMX7D_NAND_ROOT_DIV 185
+#define IMX7D_QSPI_ROOT_CLK 186
+#define IMX7D_QSPI_ROOT_SRC 187
+#define IMX7D_QSPI_ROOT_CG 188
+#define IMX7D_QSPI_ROOT_DIV 189
+#define IMX7D_USDHC1_ROOT_CLK 190
+#define IMX7D_USDHC1_ROOT_SRC 191
+#define IMX7D_USDHC1_ROOT_CG 192
+#define IMX7D_USDHC1_ROOT_DIV 193
+#define IMX7D_USDHC2_ROOT_CLK 194
+#define IMX7D_USDHC2_ROOT_SRC 195
+#define IMX7D_USDHC2_ROOT_CG 196
+#define IMX7D_USDHC2_ROOT_DIV 197
+#define IMX7D_USDHC3_ROOT_CLK 198
+#define IMX7D_USDHC3_ROOT_SRC 199
+#define IMX7D_USDHC3_ROOT_CG 200
+#define IMX7D_USDHC3_ROOT_DIV 201
+#define IMX7D_CAN1_ROOT_CLK 202
+#define IMX7D_CAN1_ROOT_SRC 203
+#define IMX7D_CAN1_ROOT_CG 204
+#define IMX7D_CAN1_ROOT_DIV 205
+#define IMX7D_CAN2_ROOT_CLK 206
+#define IMX7D_CAN2_ROOT_SRC 207
+#define IMX7D_CAN2_ROOT_CG 208
+#define IMX7D_CAN2_ROOT_DIV 209
+#define IMX7D_I2C1_ROOT_CLK 210
+#define IMX7D_I2C1_ROOT_SRC 211
+#define IMX7D_I2C1_ROOT_CG 212
+#define IMX7D_I2C1_ROOT_DIV 213
+#define IMX7D_I2C2_ROOT_CLK 214
+#define IMX7D_I2C2_ROOT_SRC 215
+#define IMX7D_I2C2_ROOT_CG 216
+#define IMX7D_I2C2_ROOT_DIV 217
+#define IMX7D_I2C3_ROOT_CLK 218
+#define IMX7D_I2C3_ROOT_SRC 219
+#define IMX7D_I2C3_ROOT_CG 220
+#define IMX7D_I2C3_ROOT_DIV 221
+#define IMX7D_I2C4_ROOT_CLK 222
+#define IMX7D_I2C4_ROOT_SRC 223
+#define IMX7D_I2C4_ROOT_CG 224
+#define IMX7D_I2C4_ROOT_DIV 225
+#define IMX7D_UART1_ROOT_CLK 226
+#define IMX7D_UART1_ROOT_SRC 227
+#define IMX7D_UART1_ROOT_CG 228
+#define IMX7D_UART1_ROOT_DIV 229
+#define IMX7D_UART2_ROOT_CLK 230
+#define IMX7D_UART2_ROOT_SRC 231
+#define IMX7D_UART2_ROOT_CG 232
+#define IMX7D_UART2_ROOT_DIV 233
+#define IMX7D_UART3_ROOT_CLK 234
+#define IMX7D_UART3_ROOT_SRC 235
+#define IMX7D_UART3_ROOT_CG 236
+#define IMX7D_UART3_ROOT_DIV 237
+#define IMX7D_UART4_ROOT_CLK 238
+#define IMX7D_UART4_ROOT_SRC 239
+#define IMX7D_UART4_ROOT_CG 240
+#define IMX7D_UART4_ROOT_DIV 241
+#define IMX7D_UART5_ROOT_CLK 242
+#define IMX7D_UART5_ROOT_SRC 243
+#define IMX7D_UART5_ROOT_CG 244
+#define IMX7D_UART5_ROOT_DIV 245
+#define IMX7D_UART6_ROOT_CLK 246
+#define IMX7D_UART6_ROOT_SRC 247
+#define IMX7D_UART6_ROOT_CG 248
+#define IMX7D_UART6_ROOT_DIV 249
+#define IMX7D_UART7_ROOT_CLK 250
+#define IMX7D_UART7_ROOT_SRC 251
+#define IMX7D_UART7_ROOT_CG 252
+#define IMX7D_UART7_ROOT_DIV 253
+#define IMX7D_ECSPI1_ROOT_CLK 254
+#define IMX7D_ECSPI1_ROOT_SRC 255
+#define IMX7D_ECSPI1_ROOT_CG 256
+#define IMX7D_ECSPI1_ROOT_DIV 257
+#define IMX7D_ECSPI2_ROOT_CLK 258
+#define IMX7D_ECSPI2_ROOT_SRC 259
+#define IMX7D_ECSPI2_ROOT_CG 260
+#define IMX7D_ECSPI2_ROOT_DIV 261
+#define IMX7D_ECSPI3_ROOT_CLK 262
+#define IMX7D_ECSPI3_ROOT_SRC 263
+#define IMX7D_ECSPI3_ROOT_CG 264
+#define IMX7D_ECSPI3_ROOT_DIV 265
+#define IMX7D_ECSPI4_ROOT_CLK 266
+#define IMX7D_ECSPI4_ROOT_SRC 267
+#define IMX7D_ECSPI4_ROOT_CG 268
+#define IMX7D_ECSPI4_ROOT_DIV 269
+#define IMX7D_PWM1_ROOT_CLK 270
+#define IMX7D_PWM1_ROOT_SRC 271
+#define IMX7D_PWM1_ROOT_CG 272
+#define IMX7D_PWM1_ROOT_DIV 273
+#define IMX7D_PWM2_ROOT_CLK 274
+#define IMX7D_PWM2_ROOT_SRC 275
+#define IMX7D_PWM2_ROOT_CG 276
+#define IMX7D_PWM2_ROOT_DIV 277
+#define IMX7D_PWM3_ROOT_CLK 278
+#define IMX7D_PWM3_ROOT_SRC 279
+#define IMX7D_PWM3_ROOT_CG 280
+#define IMX7D_PWM3_ROOT_DIV 281
+#define IMX7D_PWM4_ROOT_CLK 282
+#define IMX7D_PWM4_ROOT_SRC 283
+#define IMX7D_PWM4_ROOT_CG 284
+#define IMX7D_PWM4_ROOT_DIV 285
+#define IMX7D_FLEXTIMER1_ROOT_CLK 286
+#define IMX7D_FLEXTIMER1_ROOT_SRC 287
+#define IMX7D_FLEXTIMER1_ROOT_CG 288
+#define IMX7D_FLEXTIMER1_ROOT_DIV 289
+#define IMX7D_FLEXTIMER2_ROOT_CLK 290
+#define IMX7D_FLEXTIMER2_ROOT_SRC 291
+#define IMX7D_FLEXTIMER2_ROOT_CG 292
+#define IMX7D_FLEXTIMER2_ROOT_DIV 293
+#define IMX7D_SIM1_ROOT_CLK 294
+#define IMX7D_SIM1_ROOT_SRC 295
+#define IMX7D_SIM1_ROOT_CG 296
+#define IMX7D_SIM1_ROOT_DIV 297
+#define IMX7D_SIM2_ROOT_CLK 298
+#define IMX7D_SIM2_ROOT_SRC 299
+#define IMX7D_SIM2_ROOT_CG 300
+#define IMX7D_SIM2_ROOT_DIV 301
+#define IMX7D_GPT1_ROOT_CLK 302
+#define IMX7D_GPT1_ROOT_SRC 303
+#define IMX7D_GPT1_ROOT_CG 304
+#define IMX7D_GPT1_ROOT_DIV 305
+#define IMX7D_GPT2_ROOT_CLK 306
+#define IMX7D_GPT2_ROOT_SRC 307
+#define IMX7D_GPT2_ROOT_CG 308
+#define IMX7D_GPT2_ROOT_DIV 309
+#define IMX7D_GPT3_ROOT_CLK 310
+#define IMX7D_GPT3_ROOT_SRC 311
+#define IMX7D_GPT3_ROOT_CG 312
+#define IMX7D_GPT3_ROOT_DIV 313
+#define IMX7D_GPT4_ROOT_CLK 314
+#define IMX7D_GPT4_ROOT_SRC 315
+#define IMX7D_GPT4_ROOT_CG 316
+#define IMX7D_GPT4_ROOT_DIV 317
+#define IMX7D_TRACE_ROOT_CLK 318
+#define IMX7D_TRACE_ROOT_SRC 319
+#define IMX7D_TRACE_ROOT_CG 320
+#define IMX7D_TRACE_ROOT_DIV 321
+#define IMX7D_WDOG1_ROOT_CLK 322
+#define IMX7D_WDOG_ROOT_SRC 323
+#define IMX7D_WDOG_ROOT_CG 324
+#define IMX7D_WDOG_ROOT_DIV 325
+#define IMX7D_CSI_MCLK_ROOT_CLK 326
+#define IMX7D_CSI_MCLK_ROOT_SRC 327
+#define IMX7D_CSI_MCLK_ROOT_CG 328
+#define IMX7D_CSI_MCLK_ROOT_DIV 329
+#define IMX7D_AUDIO_MCLK_ROOT_CLK 330
+#define IMX7D_AUDIO_MCLK_ROOT_SRC 331
+#define IMX7D_AUDIO_MCLK_ROOT_CG 332
+#define IMX7D_AUDIO_MCLK_ROOT_DIV 333
+#define IMX7D_WRCLK_ROOT_CLK 334
+#define IMX7D_WRCLK_ROOT_SRC 335
+#define IMX7D_WRCLK_ROOT_CG 336
+#define IMX7D_WRCLK_ROOT_DIV 337
+#define IMX7D_CLKO1_ROOT_SRC 338
+#define IMX7D_CLKO1_ROOT_CG 339
+#define IMX7D_CLKO1_ROOT_DIV 340
+#define IMX7D_CLKO2_ROOT_SRC 341
+#define IMX7D_CLKO2_ROOT_CG 342
+#define IMX7D_CLKO2_ROOT_DIV 343
+#define IMX7D_MAIN_AXI_ROOT_PRE_DIV 344
+#define IMX7D_DISP_AXI_ROOT_PRE_DIV 345
+#define IMX7D_ENET_AXI_ROOT_PRE_DIV 346
+#define IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV 347
+#define IMX7D_AHB_CHANNEL_ROOT_PRE_DIV 348
+#define IMX7D_USB_HSIC_ROOT_PRE_DIV 349
+#define IMX7D_PCIE_CTRL_ROOT_PRE_DIV 350
+#define IMX7D_PCIE_PHY_ROOT_PRE_DIV 351
+#define IMX7D_EPDC_PIXEL_ROOT_PRE_DIV 352
+#define IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV 353
+#define IMX7D_MIPI_DSI_ROOT_PRE_DIV 354
+#define IMX7D_MIPI_CSI_ROOT_PRE_DIV 355
+#define IMX7D_MIPI_DPHY_ROOT_PRE_DIV 356
+#define IMX7D_SAI1_ROOT_PRE_DIV 357
+#define IMX7D_SAI2_ROOT_PRE_DIV 358
+#define IMX7D_SAI3_ROOT_PRE_DIV 359
+#define IMX7D_SPDIF_ROOT_PRE_DIV 360
+#define IMX7D_ENET1_REF_ROOT_PRE_DIV 361
+#define IMX7D_ENET1_TIME_ROOT_PRE_DIV 362
+#define IMX7D_ENET2_REF_ROOT_PRE_DIV 363
+#define IMX7D_ENET2_TIME_ROOT_PRE_DIV 364
+#define IMX7D_ENET_PHY_REF_ROOT_PRE_DIV 365
+#define IMX7D_EIM_ROOT_PRE_DIV 366
+#define IMX7D_NAND_ROOT_PRE_DIV 367
+#define IMX7D_QSPI_ROOT_PRE_DIV 368
+#define IMX7D_USDHC1_ROOT_PRE_DIV 369
+#define IMX7D_USDHC2_ROOT_PRE_DIV 370
+#define IMX7D_USDHC3_ROOT_PRE_DIV 371
+#define IMX7D_CAN1_ROOT_PRE_DIV 372
+#define IMX7D_CAN2_ROOT_PRE_DIV 373
+#define IMX7D_I2C1_ROOT_PRE_DIV 374
+#define IMX7D_I2C2_ROOT_PRE_DIV 375
+#define IMX7D_I2C3_ROOT_PRE_DIV 376
+#define IMX7D_I2C4_ROOT_PRE_DIV 377
+#define IMX7D_UART1_ROOT_PRE_DIV 378
+#define IMX7D_UART2_ROOT_PRE_DIV 379
+#define IMX7D_UART3_ROOT_PRE_DIV 380
+#define IMX7D_UART4_ROOT_PRE_DIV 381
+#define IMX7D_UART5_ROOT_PRE_DIV 382
+#define IMX7D_UART6_ROOT_PRE_DIV 383
+#define IMX7D_UART7_ROOT_PRE_DIV 384
+#define IMX7D_ECSPI1_ROOT_PRE_DIV 385
+#define IMX7D_ECSPI2_ROOT_PRE_DIV 386
+#define IMX7D_ECSPI3_ROOT_PRE_DIV 387
+#define IMX7D_ECSPI4_ROOT_PRE_DIV 388
+#define IMX7D_PWM1_ROOT_PRE_DIV 389
+#define IMX7D_PWM2_ROOT_PRE_DIV 390
+#define IMX7D_PWM3_ROOT_PRE_DIV 391
+#define IMX7D_PWM4_ROOT_PRE_DIV 392
+#define IMX7D_FLEXTIMER1_ROOT_PRE_DIV 393
+#define IMX7D_FLEXTIMER2_ROOT_PRE_DIV 394
+#define IMX7D_SIM1_ROOT_PRE_DIV 395
+#define IMX7D_SIM2_ROOT_PRE_DIV 396
+#define IMX7D_GPT1_ROOT_PRE_DIV 397
+#define IMX7D_GPT2_ROOT_PRE_DIV 398
+#define IMX7D_GPT3_ROOT_PRE_DIV 399
+#define IMX7D_GPT4_ROOT_PRE_DIV 400
+#define IMX7D_TRACE_ROOT_PRE_DIV 401
+#define IMX7D_WDOG_ROOT_PRE_DIV 402
+#define IMX7D_CSI_MCLK_ROOT_PRE_DIV 403
+#define IMX7D_AUDIO_MCLK_ROOT_PRE_DIV 404
+#define IMX7D_WRCLK_ROOT_PRE_DIV 405
+#define IMX7D_CLKO1_ROOT_PRE_DIV 406
+#define IMX7D_CLKO2_ROOT_PRE_DIV 407
+#define IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV 408
+#define IMX7D_DRAM_ALT_ROOT_PRE_DIV 409
+#define IMX7D_LVDS1_IN_CLK 410
+#define IMX7D_LVDS1_OUT_SEL 411
+#define IMX7D_LVDS1_OUT_CLK 412
+#define IMX7D_CLK_DUMMY 413
+#define IMX7D_GPT_3M_CLK 414
+#define IMX7D_OCRAM_CLK 415
+#define IMX7D_OCRAM_S_CLK 416
+#define IMX7D_WDOG2_ROOT_CLK 417
+#define IMX7D_WDOG3_ROOT_CLK 418
+#define IMX7D_WDOG4_ROOT_CLK 419
+#define IMX7D_SDMA_CORE_CLK 420
+#define IMX7D_USB1_MAIN_480M_CLK 421
+#define IMX7D_USB_CTRL_CLK 422
+#define IMX7D_USB_PHY1_CLK 423
+#define IMX7D_USB_PHY2_CLK 424
+#define IMX7D_IPG_ROOT_CLK 425
+#define IMX7D_SAI1_IPG_CLK 426
+#define IMX7D_SAI2_IPG_CLK 427
+#define IMX7D_SAI3_IPG_CLK 428
+#define IMX7D_PLL_AUDIO_TEST_DIV 429
+#define IMX7D_PLL_AUDIO_POST_DIV 430
+#define IMX7D_PLL_VIDEO_TEST_DIV 431
+#define IMX7D_PLL_VIDEO_POST_DIV 432
+#define IMX7D_MU_ROOT_CLK 433
+#define IMX7D_SEMA4_HS_ROOT_CLK 434
+#define IMX7D_PLL_DRAM_TEST_DIV 435
+#define IMX7D_CLK_END 436
+#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/jz4740-cgu.h
new file mode 100644
index 0000000..43153d3
--- /dev/null
+++ b/include/dt-bindings/clock/jz4740-cgu.h
@@ -0,0 +1,37 @@
+/*
+ * This header provides clock numbers for the ingenic,jz4740-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the jz4740 programmer's manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4740_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4740_CGU_H__
+
+#define JZ4740_CLK_EXT 0
+#define JZ4740_CLK_RTC 1
+#define JZ4740_CLK_PLL 2
+#define JZ4740_CLK_PLL_HALF 3
+#define JZ4740_CLK_CCLK 4
+#define JZ4740_CLK_HCLK 5
+#define JZ4740_CLK_PCLK 6
+#define JZ4740_CLK_MCLK 7
+#define JZ4740_CLK_LCD 8
+#define JZ4740_CLK_LCD_PCLK 9
+#define JZ4740_CLK_I2S 10
+#define JZ4740_CLK_SPI 11
+#define JZ4740_CLK_MMC 12
+#define JZ4740_CLK_UHC 13
+#define JZ4740_CLK_UDC 14
+#define JZ4740_CLK_UART0 15
+#define JZ4740_CLK_UART1 16
+#define JZ4740_CLK_DMA 17
+#define JZ4740_CLK_IPU 18
+#define JZ4740_CLK_ADC 19
+#define JZ4740_CLK_I2C 20
+#define JZ4740_CLK_AIC 21
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ */
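
The jz4740-cgu header above is meant to be included from device tree sources (and driver code); a minimal consumer sketch follows, illustrative only and not part of the patch. The phandle label, node name, unit address and clock-names are assumptions.

	#include <dt-bindings/clock/jz4740-cgu.h>

	uart0: serial@10030000 {
		/* select the UART0 gate clock from the CGU by its binding index */
		clocks = <&cgu JZ4740_CLK_UART0>;
		clock-names = "baud";
	};
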
diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h
new file mode 100644
index 0000000..467165e
--- /dev/null
+++ b/include/dt-bindings/clock/jz4780-cgu.h
@@ -0,0 +1,88 @@
+/*
+ * This header provides clock numbers for the ingenic,jz4780-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the jz4780 programmer's manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
+
+#define JZ4780_CLK_EXCLK 0
+#define JZ4780_CLK_RTCLK 1
+#define JZ4780_CLK_APLL 2
+#define JZ4780_CLK_MPLL 3
+#define JZ4780_CLK_EPLL 4
+#define JZ4780_CLK_VPLL 5
+#define JZ4780_CLK_OTGPHY 6
+#define JZ4780_CLK_SCLKA 7
+#define JZ4780_CLK_CPUMUX 8
+#define JZ4780_CLK_CPU 9
+#define JZ4780_CLK_L2CACHE 10
+#define JZ4780_CLK_AHB0 11
+#define JZ4780_CLK_AHB2PMUX 12
+#define JZ4780_CLK_AHB2 13
+#define JZ4780_CLK_PCLK 14
+#define JZ4780_CLK_DDR 15
+#define JZ4780_CLK_VPU 16
+#define JZ4780_CLK_I2SPLL 17
+#define JZ4780_CLK_I2S 18
+#define JZ4780_CLK_LCD0PIXCLK 19
+#define JZ4780_CLK_LCD1PIXCLK 20
+#define JZ4780_CLK_MSCMUX 21
+#define JZ4780_CLK_MSC0 22
+#define JZ4780_CLK_MSC1 23
+#define JZ4780_CLK_MSC2 24
+#define JZ4780_CLK_UHC 25
+#define JZ4780_CLK_SSIPLL 26
+#define JZ4780_CLK_SSI 27
+#define JZ4780_CLK_CIMMCLK 28
+#define JZ4780_CLK_PCMPLL 29
+#define JZ4780_CLK_PCM 30
+#define JZ4780_CLK_GPU 31
+#define JZ4780_CLK_HDMI 32
+#define JZ4780_CLK_BCH 33
+#define JZ4780_CLK_NEMC 34
+#define JZ4780_CLK_OTG0 35
+#define JZ4780_CLK_SSI0 36
+#define JZ4780_CLK_SMB0 37
+#define JZ4780_CLK_SMB1 38
+#define JZ4780_CLK_SCC 39
+#define JZ4780_CLK_AIC 40
+#define JZ4780_CLK_TSSI0 41
+#define JZ4780_CLK_OWI 42
+#define JZ4780_CLK_KBC 43
+#define JZ4780_CLK_SADC 44
+#define JZ4780_CLK_UART0 45
+#define JZ4780_CLK_UART1 46
+#define JZ4780_CLK_UART2 47
+#define JZ4780_CLK_UART3 48
+#define JZ4780_CLK_SSI1 49
+#define JZ4780_CLK_SSI2 50
+#define JZ4780_CLK_PDMA 51
+#define JZ4780_CLK_GPS 52
+#define JZ4780_CLK_MAC 53
+#define JZ4780_CLK_SMB2 54
+#define JZ4780_CLK_CIM 55
+#define JZ4780_CLK_LCD 56
+#define JZ4780_CLK_TVE 57
+#define JZ4780_CLK_IPU 58
+#define JZ4780_CLK_DDR0 59
+#define JZ4780_CLK_DDR1 60
+#define JZ4780_CLK_SMB3 61
+#define JZ4780_CLK_TSSI1 62
+#define JZ4780_CLK_COMPRESS 63
+#define JZ4780_CLK_AIC1 64
+#define JZ4780_CLK_GPVLC 65
+#define JZ4780_CLK_OTG1 66
+#define JZ4780_CLK_UART4 67
+#define JZ4780_CLK_AHBMON 68
+#define JZ4780_CLK_SMB4 69
+#define JZ4780_CLK_DES 70
+#define JZ4780_CLK_X2D 71
+#define JZ4780_CLK_CORE1 72
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */
diff --git a/include/dt-bindings/clock/lpc18xx-ccu.h b/include/dt-bindings/clock/lpc18xx-ccu.h
new file mode 100644
index 0000000..bbfe00b
--- /dev/null
+++ b/include/dt-bindings/clock/lpc18xx-ccu.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ *
+ */
+
+/* Clock Control Unit 1 (CCU1) clock offsets */
+#define CLK_APB3_BUS 0x100
+#define CLK_APB3_I2C1 0x108
+#define CLK_APB3_DAC 0x110
+#define CLK_APB3_ADC0 0x118
+#define CLK_APB3_ADC1 0x120
+#define CLK_APB3_CAN0 0x128
+#define CLK_APB1_BUS 0x200
+#define CLK_APB1_MOTOCON_PWM 0x208
+#define CLK_APB1_I2C0 0x210
+#define CLK_APB1_I2S 0x218
+#define CLK_APB1_CAN1 0x220
+#define CLK_SPIFI 0x300
+#define CLK_CPU_BUS 0x400
+#define CLK_CPU_SPIFI 0x408
+#define CLK_CPU_GPIO 0x410
+#define CLK_CPU_LCD 0x418
+#define CLK_CPU_ETHERNET 0x420
+#define CLK_CPU_USB0 0x428
+#define CLK_CPU_EMC 0x430
+#define CLK_CPU_SDIO 0x438
+#define CLK_CPU_DMA 0x440
+#define CLK_CPU_CORE 0x448
+#define CLK_CPU_SCT 0x468
+#define CLK_CPU_USB1 0x470
+#define CLK_CPU_EMCDIV 0x478
+#define CLK_CPU_FLASHA 0x480
+#define CLK_CPU_FLASHB 0x488
+#define CLK_CPU_M0APP 0x490
+#define CLK_CPU_ADCHS 0x498
+#define CLK_CPU_EEPROM 0x4a0
+#define CLK_CPU_WWDT 0x500
+#define CLK_CPU_UART0 0x508
+#define CLK_CPU_UART1 0x510
+#define CLK_CPU_SSP0 0x518
+#define CLK_CPU_TIMER0 0x520
+#define CLK_CPU_TIMER1 0x528
+#define CLK_CPU_SCU 0x530
+#define CLK_CPU_CREG 0x538
+#define CLK_CPU_RITIMER 0x600
+#define CLK_CPU_UART2 0x608
+#define CLK_CPU_UART3 0x610
+#define CLK_CPU_TIMER2 0x618
+#define CLK_CPU_TIMER3 0x620
+#define CLK_CPU_SSP1 0x628
+#define CLK_CPU_QEI 0x630
+#define CLK_PERIPH_BUS 0x700
+#define CLK_PERIPH_CORE 0x710
+#define CLK_PERIPH_SGPIO 0x718
+#define CLK_USB0 0x800
+#define CLK_USB1 0x900
+#define CLK_SPI 0xA00
+#define CLK_ADCHS 0xB00
+
+/* Clock Control Unit 2 (CCU2) clock offsets */
+#define CLK_AUDIO 0x100
+#define CLK_APB2_UART3 0x200
+#define CLK_APB2_UART2 0x300
+#define CLK_APB0_UART1 0x400
+#define CLK_APB0_UART0 0x500
+#define CLK_APB2_SSP1 0x600
+#define CLK_APB0_SSP0 0x700
+#define CLK_SDIO 0x800
diff --git a/include/dt-bindings/clock/lpc18xx-cgu.h b/include/dt-bindings/clock/lpc18xx-cgu.h
new file mode 100644
index 0000000..6e57c6d
--- /dev/null
+++ b/include/dt-bindings/clock/lpc18xx-cgu.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ *
+ */
+
+/* LPC18xx/43xx base clock ids */
+#define BASE_SAFE_CLK 0
+#define BASE_USB0_CLK 1
+#define BASE_PERIPH_CLK 2
+#define BASE_USB1_CLK 3
+#define BASE_CPU_CLK 4
+#define BASE_SPIFI_CLK 5
+#define BASE_SPI_CLK 6
+#define BASE_PHY_RX_CLK 7
+#define BASE_PHY_TX_CLK 8
+#define BASE_APB1_CLK 9
+#define BASE_APB3_CLK 10
+#define BASE_LCD_CLK 11
+#define BASE_ADCHS_CLK 12
+#define BASE_SDIO_CLK 13
+#define BASE_SSP0_CLK 14
+#define BASE_SSP1_CLK 15
+#define BASE_UART0_CLK 16
+#define BASE_UART1_CLK 17
+#define BASE_UART2_CLK 18
+#define BASE_UART3_CLK 19
+#define BASE_OUT_CLK 20
+#define BASE_RES1_CLK 21
+#define BASE_RES2_CLK 22
+#define BASE_RES3_CLK 23
+#define BASE_RES4_CLK 24
+#define BASE_AUDIO_CLK 25
+#define BASE_CGU_OUT0_CLK 26
+#define BASE_CGU_OUT1_CLK 27
+#define BASE_CLK_MAX (BASE_CGU_OUT1_CLK + 1)
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 591f7fb..7a51038 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -48,6 +48,7 @@
#define MMP2_CLK_SSP1 78
#define MMP2_CLK_SSP2 79
#define MMP2_CLK_SSP3 80
+#define MMP2_CLK_TIMER 81
/* axi peripherals */
#define MMP2_CLK_SDH0 101
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
index 79630b9..3e45bdf 100644
--- a/include/dt-bindings/clock/marvell,pxa168.h
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -18,7 +18,9 @@
#define PXA168_CLK_PLL1_13_1_5 18
#define PXA168_CLK_PLL1_2_1_5 19
#define PXA168_CLK_PLL1_3_16 20
+#define PXA168_CLK_PLL1_192 21
#define PXA168_CLK_UART_PLL 27
+#define PXA168_CLK_USB_PLL 28
/* apb peripherals */
#define PXA168_CLK_TWSI0 60
@@ -40,6 +42,7 @@
#define PXA168_CLK_SSP2 76
#define PXA168_CLK_SSP3 77
#define PXA168_CLK_SSP4 78
+#define PXA168_CLK_TIMER 79
/* axi peripherals */
#define PXA168_CLK_DFC 100
diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h
new file mode 100644
index 0000000..d4f2e18
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa1928.h
@@ -0,0 +1,57 @@
+#ifndef __DTS_MARVELL_PXA1928_CLOCK_H
+#define __DTS_MARVELL_PXA1928_CLOCK_H
+
+/*
+ * Clock ID values here correspond to the control register offset/4.
+ */
+
+/* apb peripherals */
+#define PXA1928_CLK_RTC 0x00
+#define PXA1928_CLK_TWSI0 0x01
+#define PXA1928_CLK_TWSI1 0x02
+#define PXA1928_CLK_TWSI2 0x03
+#define PXA1928_CLK_TWSI3 0x04
+#define PXA1928_CLK_OWIRE 0x05
+#define PXA1928_CLK_KPC 0x06
+#define PXA1928_CLK_TB_ROTARY 0x07
+#define PXA1928_CLK_SW_JTAG 0x08
+#define PXA1928_CLK_TIMER1 0x09
+#define PXA1928_CLK_UART0 0x0b
+#define PXA1928_CLK_UART1 0x0c
+#define PXA1928_CLK_UART2 0x0d
+#define PXA1928_CLK_GPIO 0x0e
+#define PXA1928_CLK_PWM0 0x0f
+#define PXA1928_CLK_PWM1 0x10
+#define PXA1928_CLK_PWM2 0x11
+#define PXA1928_CLK_PWM3 0x12
+#define PXA1928_CLK_SSP0 0x13
+#define PXA1928_CLK_SSP1 0x14
+#define PXA1928_CLK_SSP2 0x15
+
+#define PXA1928_CLK_TWSI4 0x1f
+#define PXA1928_CLK_TWSI5 0x20
+#define PXA1928_CLK_UART3 0x22
+#define PXA1928_CLK_THSENS_GLOB 0x24
+#define PXA1928_CLK_THSENS_CPU 0x26
+#define PXA1928_CLK_THSENS_VPU 0x27
+#define PXA1928_CLK_THSENS_GC 0x28
+#define PXA1928_APBC_NR_CLKS 0x30
+
+
+/* axi peripherals */
+#define PXA1928_CLK_SDH0 0x15
+#define PXA1928_CLK_SDH1 0x16
+#define PXA1928_CLK_USB 0x17
+#define PXA1928_CLK_NAND 0x18
+#define PXA1928_CLK_DMA 0x19
+
+#define PXA1928_CLK_SDH2 0x3a
+#define PXA1928_CLK_SDH3 0x3b
+#define PXA1928_CLK_HSIC 0x3e
+#define PXA1928_CLK_SDH4 0x57
+#define PXA1928_CLK_GC3D 0x5d
+#define PXA1928_CLK_GC2D 0x5f
+
+#define PXA1928_APMU_NR_CLKS 0x60
+
+#endif
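
A worked example of the offset/4 rule stated in the pxa1928 comment above (derived from that comment alone, not checked against the SoC manual): each clock ID times 4 gives the offset of its control register, so

	PXA1928_CLK_UART0 = 0x0b  ->  APBC register offset 0x0b * 4 = 0x2c
	PXA1928_CLK_SDH0  = 0x15  ->  APMU register offset 0x15 * 4 = 0x54

which is also why the apb and axi ID ranges can overlap (0x15 appears in both lists): they index two separate register blocks, APBC and APMU, as the *_NR_CLKS names suggest.
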
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
index 719cffb..135082a 100644
--- a/include/dt-bindings/clock/marvell,pxa910.h
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -18,7 +18,9 @@
#define PXA910_CLK_PLL1_13_1_5 18
#define PXA910_CLK_PLL1_2_1_5 19
#define PXA910_CLK_PLL1_3_16 20
+#define PXA910_CLK_PLL1_192 21
#define PXA910_CLK_UART_PLL 27
+#define PXA910_CLK_USB_PLL 28
/* apb peripherals */
#define PXA910_CLK_TWSI0 60
@@ -37,6 +39,8 @@
#define PXA910_CLK_UART2 73
#define PXA910_CLK_SSP0 74
#define PXA910_CLK_SSP1 75
+#define PXA910_CLK_TIMER0 76
+#define PXA910_CLK_TIMER1 77
/* axi peripherals */
#define PXA910_CLK_DFC 100
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
new file mode 100644
index 0000000..bd2720d
--- /dev/null
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -0,0 +1,25 @@
+/*
+ * Meson8b clock tree IDs
+ */
+
+#ifndef __MESON8B_CLKC_H
+#define __MESON8B_CLKC_H
+
+#define CLKID_UNUSED 0
+#define CLKID_XTAL 1
+#define CLKID_PLL_FIXED 2
+#define CLKID_PLL_VID 3
+#define CLKID_PLL_SYS 4
+#define CLKID_FCLK_DIV2 5
+#define CLKID_FCLK_DIV3 6
+#define CLKID_FCLK_DIV4 7
+#define CLKID_FCLK_DIV5 8
+#define CLKID_FCLK_DIV7 9
+#define CLKID_CLK81 10
+#define CLKID_MALI 11
+#define CLKID_CPUCLK 12
+#define CLKID_ZERO 13
+
+#define CLK_NR_CLKS (CLKID_ZERO + 1)
+
+#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt8135-clk.h b/include/dt-bindings/clock/mt8135-clk.h
new file mode 100644
index 0000000..6dac6c0
--- /dev/null
+++ b/include/dt-bindings/clock/mt8135-clk.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8135_H
+#define _DT_BINDINGS_CLK_MT8135_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_DSI0_LNTC_DSICLK 1
+#define CLK_TOP_HDMITX_CLKDIG_CTS 2
+#define CLK_TOP_CLKPH_MCK 3
+#define CLK_TOP_CPUM_TCK_IN 4
+#define CLK_TOP_MAINPLL_806M 5
+#define CLK_TOP_MAINPLL_537P3M 6
+#define CLK_TOP_MAINPLL_322P4M 7
+#define CLK_TOP_MAINPLL_230P3M 8
+#define CLK_TOP_UNIVPLL_624M 9
+#define CLK_TOP_UNIVPLL_416M 10
+#define CLK_TOP_UNIVPLL_249P6M 11
+#define CLK_TOP_UNIVPLL_178P3M 12
+#define CLK_TOP_UNIVPLL_48M 13
+#define CLK_TOP_MMPLL_D2 14
+#define CLK_TOP_MMPLL_D3 15
+#define CLK_TOP_MMPLL_D5 16
+#define CLK_TOP_MMPLL_D7 17
+#define CLK_TOP_MMPLL_D4 18
+#define CLK_TOP_MMPLL_D6 19
+#define CLK_TOP_SYSPLL_D2 20
+#define CLK_TOP_SYSPLL_D4 21
+#define CLK_TOP_SYSPLL_D6 22
+#define CLK_TOP_SYSPLL_D8 23
+#define CLK_TOP_SYSPLL_D10 24
+#define CLK_TOP_SYSPLL_D12 25
+#define CLK_TOP_SYSPLL_D16 26
+#define CLK_TOP_SYSPLL_D24 27
+#define CLK_TOP_SYSPLL_D3 28
+#define CLK_TOP_SYSPLL_D2P5 29
+#define CLK_TOP_SYSPLL_D5 30
+#define CLK_TOP_SYSPLL_D3P5 31
+#define CLK_TOP_UNIVPLL1_D2 32
+#define CLK_TOP_UNIVPLL1_D4 33
+#define CLK_TOP_UNIVPLL1_D6 34
+#define CLK_TOP_UNIVPLL1_D8 35
+#define CLK_TOP_UNIVPLL1_D10 36
+#define CLK_TOP_UNIVPLL2_D2 37
+#define CLK_TOP_UNIVPLL2_D4 38
+#define CLK_TOP_UNIVPLL2_D6 39
+#define CLK_TOP_UNIVPLL2_D8 40
+#define CLK_TOP_UNIVPLL_D3 41
+#define CLK_TOP_UNIVPLL_D5 42
+#define CLK_TOP_UNIVPLL_D7 43
+#define CLK_TOP_UNIVPLL_D10 44
+#define CLK_TOP_UNIVPLL_D26 45
+#define CLK_TOP_APLL 46
+#define CLK_TOP_APLL_D4 47
+#define CLK_TOP_APLL_D8 48
+#define CLK_TOP_APLL_D16 49
+#define CLK_TOP_APLL_D24 50
+#define CLK_TOP_LVDSPLL_D2 51
+#define CLK_TOP_LVDSPLL_D4 52
+#define CLK_TOP_LVDSPLL_D8 53
+#define CLK_TOP_LVDSTX_CLKDIG_CT 54
+#define CLK_TOP_VPLL_DPIX 55
+#define CLK_TOP_TVHDMI_H 56
+#define CLK_TOP_HDMITX_CLKDIG_D2 57
+#define CLK_TOP_HDMITX_CLKDIG_D3 58
+#define CLK_TOP_TVHDMI_D2 59
+#define CLK_TOP_TVHDMI_D4 60
+#define CLK_TOP_MEMPLL_MCK_D4 61
+#define CLK_TOP_AXI_SEL 62
+#define CLK_TOP_SMI_SEL 63
+#define CLK_TOP_MFG_SEL 64
+#define CLK_TOP_IRDA_SEL 65
+#define CLK_TOP_CAM_SEL 66
+#define CLK_TOP_AUD_INTBUS_SEL 67
+#define CLK_TOP_JPG_SEL 68
+#define CLK_TOP_DISP_SEL 69
+#define CLK_TOP_MSDC30_1_SEL 70
+#define CLK_TOP_MSDC30_2_SEL 71
+#define CLK_TOP_MSDC30_3_SEL 72
+#define CLK_TOP_MSDC30_4_SEL 73
+#define CLK_TOP_USB20_SEL 74
+#define CLK_TOP_VENC_SEL 75
+#define CLK_TOP_SPI_SEL 76
+#define CLK_TOP_UART_SEL 77
+#define CLK_TOP_MEM_SEL 78
+#define CLK_TOP_CAMTG_SEL 79
+#define CLK_TOP_AUDIO_SEL 80
+#define CLK_TOP_FIX_SEL 81
+#define CLK_TOP_VDEC_SEL 82
+#define CLK_TOP_DDRPHYCFG_SEL 83
+#define CLK_TOP_DPILVDS_SEL 84
+#define CLK_TOP_PMICSPI_SEL 85
+#define CLK_TOP_MSDC30_0_SEL 86
+#define CLK_TOP_SMI_MFG_AS_SEL 87
+#define CLK_TOP_GCPU_SEL 88
+#define CLK_TOP_DPI1_SEL 89
+#define CLK_TOP_CCI_SEL 90
+#define CLK_TOP_APLL_SEL 91
+#define CLK_TOP_HDMIPLL_SEL 92
+#define CLK_TOP_NR_CLK 93
+
+/* APMIXED_SYS */
+
+#define CLK_APMIXED_ARMPLL1 1
+#define CLK_APMIXED_ARMPLL2 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIVPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_MSDCPLL 6
+#define CLK_APMIXED_TVDPLL 7
+#define CLK_APMIXED_LVDSPLL 8
+#define CLK_APMIXED_AUDPLL 9
+#define CLK_APMIXED_VDECPLL 10
+#define CLK_APMIXED_NR_CLK 11
+
+/* INFRA_SYS */
+
+#define CLK_INFRA_PMIC_WRAP 1
+#define CLK_INFRA_PMICSPI 2
+#define CLK_INFRA_CCIF1_AP_CTRL 3
+#define CLK_INFRA_CCIF0_AP_CTRL 4
+#define CLK_INFRA_KP 5
+#define CLK_INFRA_CPUM 6
+#define CLK_INFRA_M4U 7
+#define CLK_INFRA_MFGAXI 8
+#define CLK_INFRA_DEVAPC 9
+#define CLK_INFRA_AUDIO 10
+#define CLK_INFRA_MFG_BUS 11
+#define CLK_INFRA_SMI 12
+#define CLK_INFRA_DBGCLK 13
+#define CLK_INFRA_NR_CLK 14
+
+/* PERI_SYS */
+
+#define CLK_PERI_I2C5 1
+#define CLK_PERI_I2C4 2
+#define CLK_PERI_I2C3 3
+#define CLK_PERI_I2C2 4
+#define CLK_PERI_I2C1 5
+#define CLK_PERI_I2C0 6
+#define CLK_PERI_UART3 7
+#define CLK_PERI_UART2 8
+#define CLK_PERI_UART1 9
+#define CLK_PERI_UART0 10
+#define CLK_PERI_IRDA 11
+#define CLK_PERI_NLI 12
+#define CLK_PERI_MD_HIF 13
+#define CLK_PERI_AP_HIF 14
+#define CLK_PERI_MSDC30_3 15
+#define CLK_PERI_MSDC30_2 16
+#define CLK_PERI_MSDC30_1 17
+#define CLK_PERI_MSDC20_2 18
+#define CLK_PERI_MSDC20_1 19
+#define CLK_PERI_AP_DMA 20
+#define CLK_PERI_USB1 21
+#define CLK_PERI_USB0 22
+#define CLK_PERI_PWM 23
+#define CLK_PERI_PWM7 24
+#define CLK_PERI_PWM6 25
+#define CLK_PERI_PWM5 26
+#define CLK_PERI_PWM4 27
+#define CLK_PERI_PWM3 28
+#define CLK_PERI_PWM2 29
+#define CLK_PERI_PWM1 30
+#define CLK_PERI_THERM 31
+#define CLK_PERI_NFI 32
+#define CLK_PERI_USBSLV 33
+#define CLK_PERI_USB1_MCU 34
+#define CLK_PERI_USB0_MCU 35
+#define CLK_PERI_GCPU 36
+#define CLK_PERI_FHCTL 37
+#define CLK_PERI_SPI1 38
+#define CLK_PERI_AUXADC 39
+#define CLK_PERI_PERI_PWRAP 40
+#define CLK_PERI_I2C6 41
+#define CLK_PERI_UART0_SEL 42
+#define CLK_PERI_UART1_SEL 43
+#define CLK_PERI_UART2_SEL 44
+#define CLK_PERI_UART3_SEL 45
+#define CLK_PERI_NR_CLK 46
+
+#endif /* _DT_BINDINGS_CLK_MT8135_H */
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
new file mode 100644
index 0000000..4ad76ed
--- /dev/null
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8173_H
+#define _DT_BINDINGS_CLK_MT8173_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_CLKPH_MCK_O 1
+#define CLK_TOP_DPI 2
+#define CLK_TOP_USB_SYSPLL_125M 3
+#define CLK_TOP_HDMITX_DIG_CTS 4
+#define CLK_TOP_ARMCA7PLL_754M 5
+#define CLK_TOP_ARMCA7PLL_502M 6
+#define CLK_TOP_MAIN_H546M 7
+#define CLK_TOP_MAIN_H364M 8
+#define CLK_TOP_MAIN_H218P4M 9
+#define CLK_TOP_MAIN_H156M 10
+#define CLK_TOP_TVDPLL_445P5M 11
+#define CLK_TOP_TVDPLL_594M 12
+#define CLK_TOP_UNIV_624M 13
+#define CLK_TOP_UNIV_416M 14
+#define CLK_TOP_UNIV_249P6M 15
+#define CLK_TOP_UNIV_178P3M 16
+#define CLK_TOP_UNIV_48M 17
+#define CLK_TOP_CLKRTC_EXT 18
+#define CLK_TOP_CLKRTC_INT 19
+#define CLK_TOP_FPC 20
+#define CLK_TOP_HDMITXPLL_D2 21
+#define CLK_TOP_HDMITXPLL_D3 22
+#define CLK_TOP_ARMCA7PLL_D2 23
+#define CLK_TOP_ARMCA7PLL_D3 24
+#define CLK_TOP_APLL1 25
+#define CLK_TOP_APLL2 26
+#define CLK_TOP_DMPLL 27
+#define CLK_TOP_DMPLL_D2 28
+#define CLK_TOP_DMPLL_D4 29
+#define CLK_TOP_DMPLL_D8 30
+#define CLK_TOP_DMPLL_D16 31
+#define CLK_TOP_LVDSPLL_D2 32
+#define CLK_TOP_LVDSPLL_D4 33
+#define CLK_TOP_LVDSPLL_D8 34
+#define CLK_TOP_MMPLL 35
+#define CLK_TOP_MMPLL_D2 36
+#define CLK_TOP_MSDCPLL 37
+#define CLK_TOP_MSDCPLL_D2 38
+#define CLK_TOP_MSDCPLL_D4 39
+#define CLK_TOP_MSDCPLL2 40
+#define CLK_TOP_MSDCPLL2_D2 41
+#define CLK_TOP_MSDCPLL2_D4 42
+#define CLK_TOP_SYSPLL_D2 43
+#define CLK_TOP_SYSPLL1_D2 44
+#define CLK_TOP_SYSPLL1_D4 45
+#define CLK_TOP_SYSPLL1_D8 46
+#define CLK_TOP_SYSPLL1_D16 47
+#define CLK_TOP_SYSPLL_D3 48
+#define CLK_TOP_SYSPLL2_D2 49
+#define CLK_TOP_SYSPLL2_D4 50
+#define CLK_TOP_SYSPLL_D5 51
+#define CLK_TOP_SYSPLL3_D2 52
+#define CLK_TOP_SYSPLL3_D4 53
+#define CLK_TOP_SYSPLL_D7 54
+#define CLK_TOP_SYSPLL4_D2 55
+#define CLK_TOP_SYSPLL4_D4 56
+#define CLK_TOP_TVDPLL 57
+#define CLK_TOP_TVDPLL_D2 58
+#define CLK_TOP_TVDPLL_D4 59
+#define CLK_TOP_TVDPLL_D8 60
+#define CLK_TOP_TVDPLL_D16 61
+#define CLK_TOP_UNIVPLL_D2 62
+#define CLK_TOP_UNIVPLL1_D2 63
+#define CLK_TOP_UNIVPLL1_D4 64
+#define CLK_TOP_UNIVPLL1_D8 65
+#define CLK_TOP_UNIVPLL_D3 66
+#define CLK_TOP_UNIVPLL2_D2 67
+#define CLK_TOP_UNIVPLL2_D4 68
+#define CLK_TOP_UNIVPLL2_D8 69
+#define CLK_TOP_UNIVPLL_D5 70
+#define CLK_TOP_UNIVPLL3_D2 71
+#define CLK_TOP_UNIVPLL3_D4 72
+#define CLK_TOP_UNIVPLL3_D8 73
+#define CLK_TOP_UNIVPLL_D7 74
+#define CLK_TOP_UNIVPLL_D26 75
+#define CLK_TOP_UNIVPLL_D52 76
+#define CLK_TOP_VCODECPLL 77
+#define CLK_TOP_VCODECPLL_370P5 78
+#define CLK_TOP_VENCPLL 79
+#define CLK_TOP_VENCPLL_D2 80
+#define CLK_TOP_VENCPLL_D4 81
+#define CLK_TOP_AXI_SEL 82
+#define CLK_TOP_MEM_SEL 83
+#define CLK_TOP_DDRPHYCFG_SEL 84
+#define CLK_TOP_MM_SEL 85
+#define CLK_TOP_PWM_SEL 86
+#define CLK_TOP_VDEC_SEL 87
+#define CLK_TOP_VENC_SEL 88
+#define CLK_TOP_MFG_SEL 89
+#define CLK_TOP_CAMTG_SEL 90
+#define CLK_TOP_UART_SEL 91
+#define CLK_TOP_SPI_SEL 92
+#define CLK_TOP_USB20_SEL 93
+#define CLK_TOP_USB30_SEL 94
+#define CLK_TOP_MSDC50_0_H_SEL 95
+#define CLK_TOP_MSDC50_0_SEL 96
+#define CLK_TOP_MSDC30_1_SEL 97
+#define CLK_TOP_MSDC30_2_SEL 98
+#define CLK_TOP_MSDC30_3_SEL 99
+#define CLK_TOP_AUDIO_SEL 100
+#define CLK_TOP_AUD_INTBUS_SEL 101
+#define CLK_TOP_PMICSPI_SEL 102
+#define CLK_TOP_SCP_SEL 103
+#define CLK_TOP_ATB_SEL 104
+#define CLK_TOP_VENC_LT_SEL 105
+#define CLK_TOP_DPI0_SEL 106
+#define CLK_TOP_IRDA_SEL 107
+#define CLK_TOP_CCI400_SEL 108
+#define CLK_TOP_AUD_1_SEL 109
+#define CLK_TOP_AUD_2_SEL 110
+#define CLK_TOP_MEM_MFG_IN_SEL 111
+#define CLK_TOP_AXI_MFG_IN_SEL 112
+#define CLK_TOP_SCAM_SEL 113
+#define CLK_TOP_SPINFI_IFR_SEL 114
+#define CLK_TOP_HDMI_SEL 115
+#define CLK_TOP_DPILVDS_SEL 116
+#define CLK_TOP_MSDC50_2_H_SEL 117
+#define CLK_TOP_HDCP_SEL 118
+#define CLK_TOP_HDCP_24M_SEL 119
+#define CLK_TOP_RTC_SEL 120
+#define CLK_TOP_APLL1_DIV0 121
+#define CLK_TOP_APLL1_DIV1 122
+#define CLK_TOP_APLL1_DIV2 123
+#define CLK_TOP_APLL1_DIV3 124
+#define CLK_TOP_APLL1_DIV4 125
+#define CLK_TOP_APLL1_DIV5 126
+#define CLK_TOP_APLL2_DIV0 127
+#define CLK_TOP_APLL2_DIV1 128
+#define CLK_TOP_APLL2_DIV2 129
+#define CLK_TOP_APLL2_DIV3 130
+#define CLK_TOP_APLL2_DIV4 131
+#define CLK_TOP_APLL2_DIV5 132
+#define CLK_TOP_I2S0_M_SEL 133
+#define CLK_TOP_I2S1_M_SEL 134
+#define CLK_TOP_I2S2_M_SEL 135
+#define CLK_TOP_I2S3_M_SEL 136
+#define CLK_TOP_I2S3_B_SEL 137
+#define CLK_TOP_NR_CLK 138
+
+/* APMIXED_SYS */
+
+#define CLK_APMIXED_ARMCA15PLL 1
+#define CLK_APMIXED_ARMCA7PLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIVPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_MSDCPLL 6
+#define CLK_APMIXED_VENCPLL 7
+#define CLK_APMIXED_TVDPLL 8
+#define CLK_APMIXED_MPLL 9
+#define CLK_APMIXED_VCODECPLL 10
+#define CLK_APMIXED_APLL1 11
+#define CLK_APMIXED_APLL2 12
+#define CLK_APMIXED_LVDSPLL 13
+#define CLK_APMIXED_MSDCPLL2 14
+#define CLK_APMIXED_NR_CLK 15
+
+/* INFRA_SYS */
+
+#define CLK_INFRA_DBGCLK 1
+#define CLK_INFRA_SMI 2
+#define CLK_INFRA_AUDIO 3
+#define CLK_INFRA_GCE 4
+#define CLK_INFRA_L2C_SRAM 5
+#define CLK_INFRA_M4U 6
+#define CLK_INFRA_CPUM 7
+#define CLK_INFRA_KP 8
+#define CLK_INFRA_CEC 9
+#define CLK_INFRA_PMICSPI 10
+#define CLK_INFRA_PMICWRAP 11
+#define CLK_INFRA_NR_CLK 12
+
+/* PERI_SYS */
+
+#define CLK_PERI_NFI 1
+#define CLK_PERI_THERM 2
+#define CLK_PERI_PWM1 3
+#define CLK_PERI_PWM2 4
+#define CLK_PERI_PWM3 5
+#define CLK_PERI_PWM4 6
+#define CLK_PERI_PWM5 7
+#define CLK_PERI_PWM6 8
+#define CLK_PERI_PWM7 9
+#define CLK_PERI_PWM 10
+#define CLK_PERI_USB0 11
+#define CLK_PERI_USB1 12
+#define CLK_PERI_AP_DMA 13
+#define CLK_PERI_MSDC30_0 14
+#define CLK_PERI_MSDC30_1 15
+#define CLK_PERI_MSDC30_2 16
+#define CLK_PERI_MSDC30_3 17
+#define CLK_PERI_NLI_ARB 18
+#define CLK_PERI_IRDA 19
+#define CLK_PERI_UART0 20
+#define CLK_PERI_UART1 21
+#define CLK_PERI_UART2 22
+#define CLK_PERI_UART3 23
+#define CLK_PERI_I2C0 24
+#define CLK_PERI_I2C1 25
+#define CLK_PERI_I2C2 26
+#define CLK_PERI_I2C3 27
+#define CLK_PERI_I2C4 28
+#define CLK_PERI_AUXADC 29
+#define CLK_PERI_SPI0 30
+#define CLK_PERI_I2C5 31
+#define CLK_PERI_NFIECC 32
+#define CLK_PERI_SPI 33
+#define CLK_PERI_IRRX 34
+#define CLK_PERI_I2C6 35
+#define CLK_PERI_UART0_SEL 36
+#define CLK_PERI_UART1_SEL 37
+#define CLK_PERI_UART2_SEL 38
+#define CLK_PERI_UART3_SEL 39
+#define CLK_PERI_NR_CLK 40
+
+#endif /* _DT_BINDINGS_CLK_MT8173_H */
diff --git a/include/dt-bindings/clock/pistachio-clk.h b/include/dt-bindings/clock/pistachio-clk.h
new file mode 100644
index 0000000..039f83f
--- /dev/null
+++ b/include/dt-bindings/clock/pistachio-clk.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_PISTACHIO_H
+#define _DT_BINDINGS_CLOCK_PISTACHIO_H
+
+/* PLLs */
+#define CLK_MIPS_PLL 0
+#define CLK_AUDIO_PLL 1
+#define CLK_RPU_V_PLL 2
+#define CLK_RPU_L_PLL 3
+#define CLK_SYS_PLL 4
+#define CLK_WIFI_PLL 5
+#define CLK_BT_PLL 6
+
+/* Fixed-factor clocks */
+#define CLK_WIFI_DIV4 16
+#define CLK_WIFI_DIV8 17
+
+/* Gate clocks */
+#define CLK_MIPS 32
+#define CLK_AUDIO_IN 33
+#define CLK_AUDIO 34
+#define CLK_I2S 35
+#define CLK_SPDIF 36
+#define CLK_AUDIO_DAC 37
+#define CLK_RPU_V 38
+#define CLK_RPU_L 39
+#define CLK_RPU_SLEEP 40
+#define CLK_WIFI_PLL_GATE 41
+#define CLK_RPU_CORE 42
+#define CLK_WIFI_ADC 43
+#define CLK_WIFI_DAC 44
+#define CLK_USB_PHY 45
+#define CLK_ENET_IN 46
+#define CLK_ENET 47
+#define CLK_UART0 48
+#define CLK_UART1 49
+#define CLK_PERIPH_SYS 50
+#define CLK_SPI0 51
+#define CLK_SPI1 52
+#define CLK_EVENT_TIMER 53
+#define CLK_AUX_ADC_INTERNAL 54
+#define CLK_AUX_ADC 55
+#define CLK_SD_HOST 56
+#define CLK_BT 57
+#define CLK_BT_DIV4 58
+#define CLK_BT_DIV8 59
+#define CLK_BT_1MHZ 60
+
+/* Divider clocks */
+#define CLK_MIPS_INTERNAL_DIV 64
+#define CLK_MIPS_DIV 65
+#define CLK_AUDIO_DIV 66
+#define CLK_I2S_DIV 67
+#define CLK_SPDIF_DIV 68
+#define CLK_AUDIO_DAC_DIV 69
+#define CLK_RPU_V_DIV 70
+#define CLK_RPU_L_DIV 71
+#define CLK_RPU_SLEEP_DIV 72
+#define CLK_RPU_CORE_DIV 73
+#define CLK_USB_PHY_DIV 74
+#define CLK_ENET_DIV 75
+#define CLK_UART0_INTERNAL_DIV 76
+#define CLK_UART0_DIV 77
+#define CLK_UART1_INTERNAL_DIV 78
+#define CLK_UART1_DIV 79
+#define CLK_SYS_INTERNAL_DIV 80
+#define CLK_SPI0_INTERNAL_DIV 81
+#define CLK_SPI0_DIV 82
+#define CLK_SPI1_INTERNAL_DIV 83
+#define CLK_SPI1_DIV 84
+#define CLK_EVENT_TIMER_INTERNAL_DIV 85
+#define CLK_EVENT_TIMER_DIV 86
+#define CLK_AUX_ADC_INTERNAL_DIV 87
+#define CLK_AUX_ADC_DIV 88
+#define CLK_SD_HOST_DIV 89
+#define CLK_BT_DIV 90
+#define CLK_BT_DIV4_DIV 91
+#define CLK_BT_DIV8_DIV 92
+#define CLK_BT_1MHZ_INTERNAL_DIV 93
+#define CLK_BT_1MHZ_DIV 94
+
+/* Mux clocks */
+#define CLK_AUDIO_REF_MUX 96
+#define CLK_MIPS_PLL_MUX 97
+#define CLK_AUDIO_PLL_MUX 98
+#define CLK_AUDIO_MUX 99
+#define CLK_RPU_V_PLL_MUX 100
+#define CLK_RPU_L_PLL_MUX 101
+#define CLK_RPU_L_MUX 102
+#define CLK_WIFI_PLL_MUX 103
+#define CLK_WIFI_DIV4_MUX 104
+#define CLK_WIFI_DIV8_MUX 105
+#define CLK_RPU_CORE_MUX 106
+#define CLK_SYS_PLL_MUX 107
+#define CLK_ENET_MUX 108
+#define CLK_EVENT_TIMER_MUX 109
+#define CLK_SD_HOST_MUX 110
+#define CLK_BT_PLL_MUX 111
+#define CLK_DEBUG_MUX 112
+
+#define CLK_NR_CLKS 113
+
+/* Peripheral gate clocks */
+#define PERIPH_CLK_SYS 0
+#define PERIPH_CLK_SYS_BUS 1
+#define PERIPH_CLK_DDR 2
+#define PERIPH_CLK_ROM 3
+#define PERIPH_CLK_COUNTER_FAST 4
+#define PERIPH_CLK_COUNTER_SLOW 5
+#define PERIPH_CLK_IR 6
+#define PERIPH_CLK_WD 7
+#define PERIPH_CLK_PDM 8
+#define PERIPH_CLK_PWM 9
+#define PERIPH_CLK_I2C0 10
+#define PERIPH_CLK_I2C1 11
+#define PERIPH_CLK_I2C2 12
+#define PERIPH_CLK_I2C3 13
+
+/* Peripheral divider clocks */
+#define PERIPH_CLK_ROM_DIV 32
+#define PERIPH_CLK_COUNTER_FAST_DIV 33
+#define PERIPH_CLK_COUNTER_SLOW_PRE_DIV 34
+#define PERIPH_CLK_COUNTER_SLOW_DIV 35
+#define PERIPH_CLK_IR_PRE_DIV 36
+#define PERIPH_CLK_IR_DIV 37
+#define PERIPH_CLK_WD_PRE_DIV 38
+#define PERIPH_CLK_WD_DIV 39
+#define PERIPH_CLK_PDM_PRE_DIV 40
+#define PERIPH_CLK_PDM_DIV 41
+#define PERIPH_CLK_PWM_PRE_DIV 42
+#define PERIPH_CLK_PWM_DIV 43
+#define PERIPH_CLK_I2C0_PRE_DIV 44
+#define PERIPH_CLK_I2C0_DIV 45
+#define PERIPH_CLK_I2C1_PRE_DIV 46
+#define PERIPH_CLK_I2C1_DIV 47
+#define PERIPH_CLK_I2C2_PRE_DIV 48
+#define PERIPH_CLK_I2C2_DIV 49
+#define PERIPH_CLK_I2C3_PRE_DIV 50
+#define PERIPH_CLK_I2C3_DIV 51
+
+#define PERIPH_CLK_NR_CLKS 52
+
+/* System gate clocks */
+#define SYS_CLK_I2C0 0
+#define SYS_CLK_I2C1 1
+#define SYS_CLK_I2C2 2
+#define SYS_CLK_I2C3 3
+#define SYS_CLK_I2S_IN 4
+#define SYS_CLK_PAUD_OUT 5
+#define SYS_CLK_SPDIF_OUT 6
+#define SYS_CLK_SPI0_MASTER 7
+#define SYS_CLK_SPI0_SLAVE 8
+#define SYS_CLK_PWM 9
+#define SYS_CLK_UART0 10
+#define SYS_CLK_UART1 11
+#define SYS_CLK_SPI1 12
+#define SYS_CLK_MDC 13
+#define SYS_CLK_SD_HOST 14
+#define SYS_CLK_ENET 15
+#define SYS_CLK_IR 16
+#define SYS_CLK_WD 17
+#define SYS_CLK_TIMER 18
+#define SYS_CLK_I2S_OUT 24
+#define SYS_CLK_SPDIF_IN 25
+#define SYS_CLK_EVENT_TIMER 26
+#define SYS_CLK_HASH 27
+
+#define SYS_CLK_NR_CLKS 28
+
+/* Gates for external input clocks */
+#define EXT_CLK_AUDIO_IN 0
+#define EXT_CLK_ENET_IN 1
+
+#define EXT_CLK_NR_CLKS 2
+
+#endif /* _DT_BINDINGS_CLOCK_PISTACHIO_H */
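
The IDs above are grouped by clock type into fixed ranges (PLLs from 0, fixed-factor clocks from 16, gates from 32, dividers from 64, muxes from 96), each block closed by a *_NR_CLKS total. As a hedged, provider-side sketch of how such a contiguous ID space is usually consumed, the fragment below uses the generic one-cell clock lookup from the common clock framework; the function and array names, and the dt-bindings/clock/pistachio-clk.h path, are illustrative assumptions rather than the actual driver.

    /* Sketch only: a DT cell value such as CLK_UART0 indexes straight into
     * the provider's clock table, which is what CLK_NR_CLKS sizes. */
    #include <linux/clk-provider.h>
    #include <linux/init.h>
    #include <linux/of.h>
    #include <dt-bindings/clock/pistachio-clk.h>    /* assumed header name */

    static struct clk *pistachio_clks[CLK_NR_CLKS];

    static struct clk_onecell_data pistachio_clk_data = {
        .clks    = pistachio_clks,
        .clk_num = CLK_NR_CLKS,
    };

    static void __init pistachio_clk_init(struct device_node *np)
    {
        /* PLLs, gates, dividers and muxes get registered into
         * pistachio_clks[] at the indices defined above, then: */
        of_clk_add_provider(np, of_clk_src_onecell_get, &pistachio_clk_data);
    }
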
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index b857cad..dc4254b 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -238,7 +238,6 @@
#define PLL0_VOTE 221
#define PLL3 222
#define PLL3_VOTE 223
-#define PLL4 224
#define PLL4_VOTE 225
#define PLL8 226
#define PLL8_VOTE 227
@@ -289,5 +288,8 @@
#define UBI32_CORE2_CLK_SRC 278
#define UBI32_CORE1_CLK 279
#define UBI32_CORE2_CLK 280
+#define EBI2_AON_CLK 281
+#define NSSTCM_CLK_SRC 282
+#define NSSTCM_CLK 283
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h
new file mode 100644
index 0000000..e430f64
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8916_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8916_H
+
+#define GPLL0 0
+#define GPLL0_VOTE 1
+#define BIMC_PLL 2
+#define BIMC_PLL_VOTE 3
+#define GPLL1 4
+#define GPLL1_VOTE 5
+#define GPLL2 6
+#define GPLL2_VOTE 7
+#define PCNOC_BFDCD_CLK_SRC 8
+#define SYSTEM_NOC_BFDCD_CLK_SRC 9
+#define CAMSS_AHB_CLK_SRC 10
+#define APSS_AHB_CLK_SRC 11
+#define CSI0_CLK_SRC 12
+#define CSI1_CLK_SRC 13
+#define GFX3D_CLK_SRC 14
+#define VFE0_CLK_SRC 15
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27
+#define BLSP1_UART1_APPS_CLK_SRC 28
+#define BLSP1_UART2_APPS_CLK_SRC 29
+#define CCI_CLK_SRC 30
+#define CAMSS_GP0_CLK_SRC 31
+#define CAMSS_GP1_CLK_SRC 32
+#define JPEG0_CLK_SRC 33
+#define MCLK0_CLK_SRC 34
+#define MCLK1_CLK_SRC 35
+#define CSI0PHYTIMER_CLK_SRC 36
+#define CSI1PHYTIMER_CLK_SRC 37
+#define CPP_CLK_SRC 38
+#define CRYPTO_CLK_SRC 39
+#define GP1_CLK_SRC 40
+#define GP2_CLK_SRC 41
+#define GP3_CLK_SRC 42
+#define BYTE0_CLK_SRC 43
+#define ESC0_CLK_SRC 44
+#define MDP_CLK_SRC 45
+#define PCLK0_CLK_SRC 46
+#define VSYNC_CLK_SRC 47
+#define PDM2_CLK_SRC 48
+#define SDCC1_APPS_CLK_SRC 49
+#define SDCC2_APPS_CLK_SRC 50
+#define APSS_TCU_CLK_SRC 51
+#define USB_HS_SYSTEM_CLK_SRC 52
+#define VCODEC0_CLK_SRC 53
+#define GCC_BLSP1_AHB_CLK 54
+#define GCC_BLSP1_SLEEP_CLK 55
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67
+#define GCC_BLSP1_UART1_APPS_CLK 68
+#define GCC_BLSP1_UART2_APPS_CLK 69
+#define GCC_BOOT_ROM_AHB_CLK 70
+#define GCC_CAMSS_CCI_AHB_CLK 71
+#define GCC_CAMSS_CCI_CLK 72
+#define GCC_CAMSS_CSI0_AHB_CLK 73
+#define GCC_CAMSS_CSI0_CLK 74
+#define GCC_CAMSS_CSI0PHY_CLK 75
+#define GCC_CAMSS_CSI0PIX_CLK 76
+#define GCC_CAMSS_CSI0RDI_CLK 77
+#define GCC_CAMSS_CSI1_AHB_CLK 78
+#define GCC_CAMSS_CSI1_CLK 79
+#define GCC_CAMSS_CSI1PHY_CLK 80
+#define GCC_CAMSS_CSI1PIX_CLK 81
+#define GCC_CAMSS_CSI1RDI_CLK 82
+#define GCC_CAMSS_CSI_VFE0_CLK 83
+#define GCC_CAMSS_GP0_CLK 84
+#define GCC_CAMSS_GP1_CLK 85
+#define GCC_CAMSS_ISPIF_AHB_CLK 86
+#define GCC_CAMSS_JPEG0_CLK 87
+#define GCC_CAMSS_JPEG_AHB_CLK 88
+#define GCC_CAMSS_JPEG_AXI_CLK 89
+#define GCC_CAMSS_MCLK0_CLK 90
+#define GCC_CAMSS_MCLK1_CLK 91
+#define GCC_CAMSS_MICRO_AHB_CLK 92
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 93
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 94
+#define GCC_CAMSS_AHB_CLK 95
+#define GCC_CAMSS_TOP_AHB_CLK 96
+#define GCC_CAMSS_CPP_AHB_CLK 97
+#define GCC_CAMSS_CPP_CLK 98
+#define GCC_CAMSS_VFE0_CLK 99
+#define GCC_CAMSS_VFE_AHB_CLK 100
+#define GCC_CAMSS_VFE_AXI_CLK 101
+#define GCC_CRYPTO_AHB_CLK 102
+#define GCC_CRYPTO_AXI_CLK 103
+#define GCC_CRYPTO_CLK 104
+#define GCC_OXILI_GMEM_CLK 105
+#define GCC_GP1_CLK 106
+#define GCC_GP2_CLK 107
+#define GCC_GP3_CLK 108
+#define GCC_MDSS_AHB_CLK 109
+#define GCC_MDSS_AXI_CLK 110
+#define GCC_MDSS_BYTE0_CLK 111
+#define GCC_MDSS_ESC0_CLK 112
+#define GCC_MDSS_MDP_CLK 113
+#define GCC_MDSS_PCLK0_CLK 114
+#define GCC_MDSS_VSYNC_CLK 115
+#define GCC_MSS_CFG_AHB_CLK 116
+#define GCC_OXILI_AHB_CLK 117
+#define GCC_OXILI_GFX3D_CLK 118
+#define GCC_PDM2_CLK 119
+#define GCC_PDM_AHB_CLK 120
+#define GCC_PRNG_AHB_CLK 121
+#define GCC_SDCC1_AHB_CLK 122
+#define GCC_SDCC1_APPS_CLK 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_GTCU_AHB_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_SMMU_CFG_CLK 129
+#define GCC_VENUS_TBU_CLK 130
+#define GCC_VFE_TBU_CLK 131
+#define GCC_USB2A_PHY_SLEEP_CLK 132
+#define GCC_USB_HS_AHB_CLK 133
+#define GCC_USB_HS_SYSTEM_CLK 134
+#define GCC_VENUS0_AHB_CLK 135
+#define GCC_VENUS0_AXI_CLK 136
+#define GCC_VENUS0_VCODEC0_CLK 137
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
new file mode 100644
index 0000000..4e944b8
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H
+#define _DT_BINDINGS_CLK_LCC_IPQ806X_H
+
+#define PLL4 0
+#define MI2S_OSR_SRC 1
+#define MI2S_OSR_CLK 2
+#define MI2S_DIV_CLK 3
+#define MI2S_BIT_DIV_CLK 4
+#define MI2S_BIT_CLK 5
+#define PCM_SRC 6
+#define PCM_CLK_OUT 7
+#define PCM_CLK 8
+#define SPDIF_SRC 9
+#define SPDIF_CLK 10
+#define AHBIX_CLK 11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h
new file mode 100644
index 0000000..4fb2aa6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-msm8960.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H
+#define _DT_BINDINGS_CLK_LCC_MSM8960_H
+
+#define PLL4 0
+#define MI2S_OSR_SRC 1
+#define MI2S_OSR_CLK 2
+#define MI2S_DIV_CLK 3
+#define MI2S_BIT_DIV_CLK 4
+#define MI2S_BIT_CLK 5
+#define PCM_SRC 6
+#define PCM_CLK_OUT 7
+#define PCM_CLK 8
+#define SLIMBUS_SRC 9
+#define AUDIO_SLIMBUS_CLK 10
+#define SPS_SLIMBUS_CLK 11
+#define CODEC_I2S_MIC_OSR_SRC 12
+#define CODEC_I2S_MIC_OSR_CLK 13
+#define CODEC_I2S_MIC_DIV_CLK 14
+#define CODEC_I2S_MIC_BIT_DIV_CLK 15
+#define CODEC_I2S_MIC_BIT_CLK 16
+#define SPARE_I2S_MIC_OSR_SRC 17
+#define SPARE_I2S_MIC_OSR_CLK 18
+#define SPARE_I2S_MIC_DIV_CLK 19
+#define SPARE_I2S_MIC_BIT_DIV_CLK 20
+#define SPARE_I2S_MIC_BIT_CLK 21
+#define CODEC_I2S_SPKR_OSR_SRC 22
+#define CODEC_I2S_SPKR_OSR_CLK 23
+#define CODEC_I2S_SPKR_DIV_CLK 24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
+#define CODEC_I2S_SPKR_BIT_CLK 26
+#define SPARE_I2S_SPKR_OSR_SRC 27
+#define SPARE_I2S_SPKR_OSR_CLK 28
+#define SPARE_I2S_SPKR_DIV_CLK 29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
+#define SPARE_I2S_SPKR_BIT_CLK 31
+
+#endif
diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h
new file mode 100644
index 0000000..dd11ecd
--- /dev/null
+++ b/include/dt-bindings/clock/r8a73a4-clock.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A73A4_H__
+#define __DT_BINDINGS_CLOCK_R8A73A4_H__
+
+/* CPG */
+#define R8A73A4_CLK_MAIN 0
+#define R8A73A4_CLK_PLL0 1
+#define R8A73A4_CLK_PLL1 2
+#define R8A73A4_CLK_PLL2 3
+#define R8A73A4_CLK_PLL2S 4
+#define R8A73A4_CLK_PLL2H 5
+#define R8A73A4_CLK_Z 6
+#define R8A73A4_CLK_Z2 7
+#define R8A73A4_CLK_I 8
+#define R8A73A4_CLK_M3 9
+#define R8A73A4_CLK_B 10
+#define R8A73A4_CLK_M1 11
+#define R8A73A4_CLK_M2 12
+#define R8A73A4_CLK_ZX 13
+#define R8A73A4_CLK_ZS 14
+#define R8A73A4_CLK_HP 15
+
+/* MSTP2 */
+#define R8A73A4_CLK_DMAC 18
+#define R8A73A4_CLK_SCIFB3 17
+#define R8A73A4_CLK_SCIFB2 16
+#define R8A73A4_CLK_SCIFB1 7
+#define R8A73A4_CLK_SCIFB0 6
+#define R8A73A4_CLK_SCIFA0 4
+#define R8A73A4_CLK_SCIFA1 3
+
+/* MSTP3 */
+#define R8A73A4_CLK_CMT1 29
+#define R8A73A4_CLK_IIC1 23
+#define R8A73A4_CLK_IIC0 18
+#define R8A73A4_CLK_IIC7 17
+#define R8A73A4_CLK_IIC6 16
+#define R8A73A4_CLK_MMCIF0 15
+#define R8A73A4_CLK_SDHI0 14
+#define R8A73A4_CLK_SDHI1 13
+#define R8A73A4_CLK_SDHI2 12
+#define R8A73A4_CLK_MMCIF1 5
+#define R8A73A4_CLK_IIC2 0
+
+/* MSTP4 */
+#define R8A73A4_CLK_IIC3 11
+#define R8A73A4_CLK_IIC4 10
+#define R8A73A4_CLK_IIC5 9
+#define R8A73A4_CLK_IRQC 7
+
+/* MSTP5 */
+#define R8A73A4_CLK_THERMAL 22
+#define R8A73A4_CLK_IIC8 15
+
+#endif /* __DT_BINDINGS_CLOCK_R8A73A4_H__ */
diff --git a/include/dt-bindings/clock/r8a7778-clock.h b/include/dt-bindings/clock/r8a7778-clock.h
new file mode 100644
index 0000000..f6b07c5
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7778-clock.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7778_H__
+#define __DT_BINDINGS_CLOCK_R8A7778_H__
+
+/* CPG */
+#define R8A7778_CLK_PLLA 0
+#define R8A7778_CLK_PLLB 1
+#define R8A7778_CLK_B 2
+#define R8A7778_CLK_OUT 3
+#define R8A7778_CLK_P 4
+#define R8A7778_CLK_S 5
+#define R8A7778_CLK_S1 6
+
+/* MSTP0 */
+#define R8A7778_CLK_I2C0 30
+#define R8A7778_CLK_I2C1 29
+#define R8A7778_CLK_I2C2 28
+#define R8A7778_CLK_I2C3 27
+#define R8A7778_CLK_SCIF0 26
+#define R8A7778_CLK_SCIF1 25
+#define R8A7778_CLK_SCIF2 24
+#define R8A7778_CLK_SCIF3 23
+#define R8A7778_CLK_SCIF4 22
+#define R8A7778_CLK_SCIF5 21
+#define R8A7778_CLK_TMU0 16
+#define R8A7778_CLK_TMU1 15
+#define R8A7778_CLK_TMU2 14
+#define R8A7778_CLK_SSI0 12
+#define R8A7778_CLK_SSI1 11
+#define R8A7778_CLK_SSI2 10
+#define R8A7778_CLK_SSI3 9
+#define R8A7778_CLK_SRU 8
+#define R8A7778_CLK_HSPI 7
+
+/* MSTP1 */
+#define R8A7778_CLK_ETHER 14
+#define R8A7778_CLK_VIN0 10
+#define R8A7778_CLK_VIN1 9
+#define R8A7778_CLK_USB 0
+
+/* MSTP3 */
+#define R8A7778_CLK_MMC 31
+#define R8A7778_CLK_SDHI0 23
+#define R8A7778_CLK_SDHI1 22
+#define R8A7778_CLK_SDHI2 21
+#define R8A7778_CLK_SSI4 11
+#define R8A7778_CLK_SSI5 10
+#define R8A7778_CLK_SSI6 9
+#define R8A7778_CLK_SSI7 8
+#define R8A7778_CLK_SSI8 7
+
+/* MSTP5 */
+#define R8A7778_CLK_SRU_SRC0 31
+#define R8A7778_CLK_SRU_SRC1 30
+#define R8A7778_CLK_SRU_SRC2 29
+#define R8A7778_CLK_SRU_SRC3 28
+#define R8A7778_CLK_SRU_SRC4 27
+#define R8A7778_CLK_SRU_SRC5 26
+#define R8A7778_CLK_SRU_SRC6 25
+#define R8A7778_CLK_SRU_SRC7 24
+#define R8A7778_CLK_SRU_SRC8 23
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7778_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index c27b3b5..ff7ca35 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -21,6 +21,8 @@
#define R8A7790_CLK_SD0 7
#define R8A7790_CLK_SD1 8
#define R8A7790_CLK_Z 9
+#define R8A7790_CLK_RCAN 10
+#define R8A7790_CLK_ADSP 11
/* MSTP0 */
#define R8A7790_CLK_MSIOF0 0
@@ -77,9 +79,13 @@
#define R8A7790_CLK_USBDMAC0 30
#define R8A7790_CLK_USBDMAC1 31
+/* MSTP4 */
+#define R8A7790_CLK_IRQC 7
+
/* MSTP5 */
#define R8A7790_CLK_AUDIO_DMAC1 1
#define R8A7790_CLK_AUDIO_DMAC0 2
+#define R8A7790_CLK_ADSP_MOD 6
#define R8A7790_CLK_THERMAL 22
#define R8A7790_CLK_PWM 23
@@ -97,6 +103,7 @@
#define R8A7790_CLK_LVDS0 26
/* MSTP8 */
+#define R8A7790_CLK_MLB 2
#define R8A7790_CLK_VIN3 8
#define R8A7790_CLK_VIN2 9
#define R8A7790_CLK_VIN1 10
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index 3ea2bbc..4022683 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -20,6 +20,8 @@
#define R8A7791_CLK_SDH 6
#define R8A7791_CLK_SD0 7
#define R8A7791_CLK_Z 8
+#define R8A7791_CLK_RCAN 9
+#define R8A7791_CLK_ADSP 10
/* MSTP0 */
#define R8A7791_CLK_MSIOF0 0
@@ -68,9 +70,13 @@
#define R8A7791_CLK_USBDMAC0 30
#define R8A7791_CLK_USBDMAC1 31
+/* MSTP4 */
+#define R8A7791_CLK_IRQC 7
+
/* MSTP5 */
#define R8A7791_CLK_AUDIO_DMAC1 1
#define R8A7791_CLK_AUDIO_DMAC0 2
+#define R8A7791_CLK_ADSP_MOD 6
#define R8A7791_CLK_THERMAL 22
#define R8A7791_CLK_PWM 23
@@ -91,6 +97,8 @@
#define R8A7791_CLK_LVDS0 26
/* MSTP8 */
+#define R8A7791_CLK_IPMMU_SGX 0
+#define R8A7791_CLK_MLB 2
#define R8A7791_CLK_VIN2 9
#define R8A7791_CLK_VIN1 10
#define R8A7791_CLK_VIN0 11
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index aa9c286e..09da38a 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -48,15 +48,28 @@
#define R8A7794_CLK_SCIFB1 7
#define R8A7794_CLK_MSIOF1 8
#define R8A7794_CLK_SCIFB2 16
+#define R8A7794_CLK_SYS_DMAC1 18
+#define R8A7794_CLK_SYS_DMAC0 19
/* MSTP3 */
+#define R8A7794_CLK_SDHI2 11
+#define R8A7794_CLK_SDHI1 12
+#define R8A7794_CLK_SDHI0 14
+#define R8A7794_CLK_MMCIF0 15
#define R8A7794_CLK_CMT1 29
+#define R8A7794_CLK_USBDMAC0 30
+#define R8A7794_CLK_USBDMAC1 31
+
+/* MSTP4 */
+#define R8A7794_CLK_IRQC 7
/* MSTP5 */
#define R8A7794_CLK_THERMAL 22
#define R8A7794_CLK_PWM 23
/* MSTP7 */
+#define R8A7794_CLK_EHCI 3
+#define R8A7794_CLK_HSUSB 4
#define R8A7794_CLK_HSCIF2 13
#define R8A7794_CLK_SCIF5 14
#define R8A7794_CLK_SCIF4 15
@@ -80,6 +93,13 @@
#define R8A7794_CLK_GPIO2 10
#define R8A7794_CLK_GPIO1 11
#define R8A7794_CLK_GPIO0 12
+#define R8A7794_CLK_QSPI_MOD 17
+#define R8A7794_CLK_I2C5 25
+#define R8A7794_CLK_I2C4 27
+#define R8A7794_CLK_I2C3 28
+#define R8A7794_CLK_I2C2 29
+#define R8A7794_CLK_I2C1 30
+#define R8A7794_CLK_I2C0 31
/* MSTP11 */
#define R8A7794_CLK_SCIFA3 6
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index f60ce72..dea4197 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -80,6 +80,12 @@
#define SCLK_SDIO0_SAMPLE 119
#define SCLK_SDIO1_SAMPLE 120
#define SCLK_EMMC_SAMPLE 121
+#define SCLK_USBPHY480M_SRC 122
+#define SCLK_PVTM_CORE 123
+#define SCLK_PVTM_GPU 124
+
+#define SCLK_MAC 151
+#define SCLK_MACREF_OUT 152
#define DCLK_VOP0 190
#define DCLK_VOP1 191
@@ -154,6 +160,7 @@
#define PCLK_PUBL0 365
#define PCLK_DDRUPCTL1 366
#define PCLK_PUBL1 367
+#define PCLK_WDT 368
/* hclk gates */
#define HCLK_GPS 448
diff --git a/include/dt-bindings/clock/samsung,s2mps11.h b/include/dt-bindings/clock/samsung,s2mps11.h
new file mode 100644
index 0000000..b903d7d
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,s2mps11.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015 Markus Reichl
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding constants for the clocks of the Samsung S2MPS11 PMIC.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H
+#define _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H
+
+/* Fixed rate clocks. */
+
+#define S2MPS11_CLK_AP 0
+#define S2MPS11_CLK_CP 1
+#define S2MPS11_CLK_BT 2
+
+/* Total number of clocks. */
+#define S2MPS11_CLKS_NUM (S2MPS11_CLK_BT + 1)
+
+#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H */
diff --git a/include/dt-bindings/clock/sh73a0-clock.h b/include/dt-bindings/clock/sh73a0-clock.h
new file mode 100644
index 0000000..5336956
--- /dev/null
+++ b/include/dt-bindings/clock/sh73a0-clock.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2014 Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SH73A0_H__
+#define __DT_BINDINGS_CLOCK_SH73A0_H__
+
+/* CPG */
+#define SH73A0_CLK_MAIN 0
+#define SH73A0_CLK_PLL0 1
+#define SH73A0_CLK_PLL1 2
+#define SH73A0_CLK_PLL2 3
+#define SH73A0_CLK_PLL3 4
+#define SH73A0_CLK_DSI0PHY 5
+#define SH73A0_CLK_DSI1PHY 6
+#define SH73A0_CLK_ZG 7
+#define SH73A0_CLK_M3 8
+#define SH73A0_CLK_B 9
+#define SH73A0_CLK_M1 10
+#define SH73A0_CLK_M2 11
+#define SH73A0_CLK_Z 12
+#define SH73A0_CLK_ZX 13
+#define SH73A0_CLK_HP 14
+
+/* MSTP0 */
+#define SH73A0_CLK_IIC2 1
+
+/* MSTP1 */
+#define SH73A0_CLK_CEU1 29
+#define SH73A0_CLK_CSI2_RX1 28
+#define SH73A0_CLK_CEU0 27
+#define SH73A0_CLK_CSI2_RX0 26
+#define SH73A0_CLK_TMU0 25
+#define SH73A0_CLK_DSITX0 18
+#define SH73A0_CLK_IIC0 16
+#define SH73A0_CLK_SGX 12
+#define SH73A0_CLK_LCDC0 0
+
+/* MSTP2 */
+#define SH73A0_CLK_SCIFA7 19
+#define SH73A0_CLK_SY_DMAC 18
+#define SH73A0_CLK_MP_DMAC 17
+#define SH73A0_CLK_SCIFA5 7
+#define SH73A0_CLK_SCIFB 6
+#define SH73A0_CLK_SCIFA0 4
+#define SH73A0_CLK_SCIFA1 3
+#define SH73A0_CLK_SCIFA2 2
+#define SH73A0_CLK_SCIFA3 1
+#define SH73A0_CLK_SCIFA4 0
+
+/* MSTP3 */
+#define SH73A0_CLK_SCIFA6 31
+#define SH73A0_CLK_CMT1 29
+#define SH73A0_CLK_FSI 28
+#define SH73A0_CLK_IRDA 25
+#define SH73A0_CLK_IIC1 23
+#define SH73A0_CLK_USB 22
+#define SH73A0_CLK_FLCTL 15
+#define SH73A0_CLK_SDHI0 14
+#define SH73A0_CLK_SDHI1 13
+#define SH73A0_CLK_MMCIF0 12
+#define SH73A0_CLK_SDHI2 11
+#define SH73A0_CLK_TPU0 4
+#define SH73A0_CLK_TPU1 3
+#define SH73A0_CLK_TPU2 2
+#define SH73A0_CLK_TPU3 1
+#define SH73A0_CLK_TPU4 0
+
+/* MSTP4 */
+#define SH73A0_CLK_IIC3 11
+#define SH73A0_CLK_IIC4 10
+#define SH73A0_CLK_KEYSC 3
+
+/* MSTP5 */
+#define SH73A0_CLK_INTCA0 8
+
+#endif
diff --git a/include/dt-bindings/clock/stih418-clks.h b/include/dt-bindings/clock/stih418-clks.h
new file mode 100644
index 0000000..b62aa0b
--- /dev/null
+++ b/include/dt-bindings/clock/stih418-clks.h
@@ -0,0 +1,34 @@
+/*
+ * This header provides constants for the clock indices of the
+ * STMicroelectronics STiH418 SoC.
+ */
+#ifndef _DT_BINDINGS_CLK_STIH418
+#define _DT_BINDINGS_CLK_STIH418
+
+#include "stih410-clks.h"
+
+/* STiH418 introduces new clock outputs compared to STiH410 */
+
+/* CLOCKGEN C0 */
+#define CLK_PROC_BDISP_0 14
+#define CLK_PROC_BDISP_1 15
+#define CLK_TX_ICN_1 23
+#define CLK_ETH_PHYREF 27
+#define CLK_PP_HEVC 35
+#define CLK_CLUST_HEVC 36
+#define CLK_HWPE_HEVC 37
+#define CLK_FC_HEVC 38
+#define CLK_PROC_MIXER 39
+#define CLK_PROC_SC 40
+#define CLK_AVSP_HEVC 41
+
+/* CLOCKGEN D2 */
+#undef CLK_PIX_PIP
+#undef CLK_PIX_GDP1
+#undef CLK_PIX_GDP2
+#undef CLK_PIX_GDP3
+#undef CLK_PIX_GDP4
+
+#define CLK_TMDS_HDMI_DIV2 5
+#define CLK_VP9 47
+#endif
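
Since this header includes stih410-clks.h and then hides the outputs that no longer exist, a consumer only ever includes the STiH418 header. A purely illustrative fragment (the hevc_clk_ids table is hypothetical) showing the new indices resolving through that single include, while the #undef'd GDP pixel clocks stay invisible:

    /* Hypothetical consumer fragment. */
    #include <dt-bindings/clock/stih418-clks.h>   /* also pulls in stih410-clks.h */

    static const unsigned int hevc_clk_ids[] = {
        CLK_PP_HEVC,
        CLK_CLUST_HEVC,
        CLK_HWPE_HEVC,
        CLK_FC_HEVC,
    };
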
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
new file mode 100644
index 0000000..a215609
--- /dev/null
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -0,0 +1,345 @@
+/*
+ * This header provides constants for binding nvidia,tegra124-car or
+ * nvidia,tegra132-car.
+ *
+ * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
+ * registers. These IDs often match those in the CAR's RST_DEVICES registers,
+ * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
+ * this case, those clocks are assigned IDs above 185 in order to highlight
+ * this issue. Implementations that interpret these clock IDs as bit values
+ * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
+ * explicitly handle these special cases.
+ *
+ * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
+ * above.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+
+/* 0 */
+/* 1 */
+/* 2 */
+#define TEGRA124_CLK_ISPB 3
+#define TEGRA124_CLK_RTC 4
+#define TEGRA124_CLK_TIMER 5
+#define TEGRA124_CLK_UARTA 6
+/* 7 (register bit affects uartb and vfir) */
+/* 8 */
+#define TEGRA124_CLK_SDMMC2 9
+/* 10 (register bit affects spdif_in and spdif_out) */
+#define TEGRA124_CLK_I2S1 11
+#define TEGRA124_CLK_I2C1 12
+/* 13 */
+#define TEGRA124_CLK_SDMMC1 14
+#define TEGRA124_CLK_SDMMC4 15
+/* 16 */
+#define TEGRA124_CLK_PWM 17
+#define TEGRA124_CLK_I2S2 18
+/* 20 (register bit affects vi and vi_sensor) */
+/* 21 */
+#define TEGRA124_CLK_USBD 22
+#define TEGRA124_CLK_ISP 23
+/* 24 */
+/* 25 */
+#define TEGRA124_CLK_DISP2 26
+#define TEGRA124_CLK_DISP1 27
+#define TEGRA124_CLK_HOST1X 28
+#define TEGRA124_CLK_VCP 29
+#define TEGRA124_CLK_I2S0 30
+/* 31 */
+
+#define TEGRA124_CLK_MC 32
+/* 33 */
+#define TEGRA124_CLK_APBDMA 34
+/* 35 */
+#define TEGRA124_CLK_KBC 36
+/* 37 */
+/* 38 */
+/* 39 (register bit affects fuse and fuse_burn) */
+#define TEGRA124_CLK_KFUSE 40
+#define TEGRA124_CLK_SBC1 41
+#define TEGRA124_CLK_NOR 42
+/* 43 */
+#define TEGRA124_CLK_SBC2 44
+/* 45 */
+#define TEGRA124_CLK_SBC3 46
+#define TEGRA124_CLK_I2C5 47
+#define TEGRA124_CLK_DSIA 48
+/* 49 */
+#define TEGRA124_CLK_MIPI 50
+#define TEGRA124_CLK_HDMI 51
+#define TEGRA124_CLK_CSI 52
+/* 53 */
+#define TEGRA124_CLK_I2C2 54
+#define TEGRA124_CLK_UARTC 55
+#define TEGRA124_CLK_MIPI_CAL 56
+#define TEGRA124_CLK_EMC 57
+#define TEGRA124_CLK_USB2 58
+#define TEGRA124_CLK_USB3 59
+/* 60 */
+#define TEGRA124_CLK_VDE 61
+#define TEGRA124_CLK_BSEA 62
+#define TEGRA124_CLK_BSEV 63
+
+/* 64 */
+#define TEGRA124_CLK_UARTD 65
+/* 66 */
+#define TEGRA124_CLK_I2C3 67
+#define TEGRA124_CLK_SBC4 68
+#define TEGRA124_CLK_SDMMC3 69
+#define TEGRA124_CLK_PCIE 70
+#define TEGRA124_CLK_OWR 71
+#define TEGRA124_CLK_AFI 72
+#define TEGRA124_CLK_CSITE 73
+/* 74 */
+/* 75 */
+#define TEGRA124_CLK_LA 76
+#define TEGRA124_CLK_TRACE 77
+#define TEGRA124_CLK_SOC_THERM 78
+#define TEGRA124_CLK_DTV 79
+/* 80 */
+#define TEGRA124_CLK_I2CSLOW 81
+#define TEGRA124_CLK_DSIB 82
+#define TEGRA124_CLK_TSEC 83
+/* 84 */
+/* 85 */
+/* 86 */
+/* 87 */
+/* 88 */
+#define TEGRA124_CLK_XUSB_HOST 89
+/* 90 */
+#define TEGRA124_CLK_MSENC 91
+#define TEGRA124_CLK_CSUS 92
+/* 93 */
+/* 94 */
+/* 95 (bit affects xusb_dev and xusb_dev_src) */
+
+/* 96 */
+/* 97 */
+/* 98 */
+#define TEGRA124_CLK_MSELECT 99
+#define TEGRA124_CLK_TSENSOR 100
+#define TEGRA124_CLK_I2S3 101
+#define TEGRA124_CLK_I2S4 102
+#define TEGRA124_CLK_I2C4 103
+#define TEGRA124_CLK_SBC5 104
+#define TEGRA124_CLK_SBC6 105
+#define TEGRA124_CLK_D_AUDIO 106
+#define TEGRA124_CLK_APBIF 107
+#define TEGRA124_CLK_DAM0 108
+#define TEGRA124_CLK_DAM1 109
+#define TEGRA124_CLK_DAM2 110
+#define TEGRA124_CLK_HDA2CODEC_2X 111
+/* 112 */
+#define TEGRA124_CLK_AUDIO0_2X 113
+#define TEGRA124_CLK_AUDIO1_2X 114
+#define TEGRA124_CLK_AUDIO2_2X 115
+#define TEGRA124_CLK_AUDIO3_2X 116
+#define TEGRA124_CLK_AUDIO4_2X 117
+#define TEGRA124_CLK_SPDIF_2X 118
+#define TEGRA124_CLK_ACTMON 119
+#define TEGRA124_CLK_EXTERN1 120
+#define TEGRA124_CLK_EXTERN2 121
+#define TEGRA124_CLK_EXTERN3 122
+#define TEGRA124_CLK_SATA_OOB 123
+#define TEGRA124_CLK_SATA 124
+#define TEGRA124_CLK_HDA 125
+/* 126 */
+#define TEGRA124_CLK_SE 127
+
+#define TEGRA124_CLK_HDA2HDMI 128
+#define TEGRA124_CLK_SATA_COLD 129
+/* 130 */
+/* 131 */
+/* 132 */
+/* 133 */
+/* 134 */
+/* 135 */
+/* 136 */
+/* 137 */
+/* 138 */
+/* 139 */
+/* 140 */
+/* 141 */
+/* 142 */
+/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
+/* xusb_host_src and xusb_ss_src) */
+#define TEGRA124_CLK_CILAB 144
+#define TEGRA124_CLK_CILCD 145
+#define TEGRA124_CLK_CILE 146
+#define TEGRA124_CLK_DSIALP 147
+#define TEGRA124_CLK_DSIBLP 148
+#define TEGRA124_CLK_ENTROPY 149
+#define TEGRA124_CLK_DDS 150
+/* 151 */
+#define TEGRA124_CLK_DP2 152
+#define TEGRA124_CLK_AMX 153
+#define TEGRA124_CLK_ADX 154
+/* 155 (bit affects dfll_ref and dfll_soc) */
+#define TEGRA124_CLK_XUSB_SS 156
+/* 157 */
+/* 158 */
+/* 159 */
+
+/* 160 */
+/* 161 */
+/* 162 */
+/* 163 */
+/* 164 */
+/* 165 */
+#define TEGRA124_CLK_I2C6 166
+/* 167 */
+/* 168 */
+/* 169 */
+/* 170 */
+#define TEGRA124_CLK_VIM2_CLK 171
+/* 172 */
+/* 173 */
+/* 174 */
+/* 175 */
+#define TEGRA124_CLK_HDMI_AUDIO 176
+#define TEGRA124_CLK_CLK72MHZ 177
+#define TEGRA124_CLK_VIC03 178
+/* 179 */
+#define TEGRA124_CLK_ADX1 180
+#define TEGRA124_CLK_DPAUX 181
+#define TEGRA124_CLK_SOR0 182
+/* 183 */
+#define TEGRA124_CLK_GPU 184
+#define TEGRA124_CLK_AMX1 185
+/* 186 */
+/* 187 */
+/* 188 */
+/* 189 */
+/* 190 */
+/* 191 */
+#define TEGRA124_CLK_UARTB 192
+#define TEGRA124_CLK_VFIR 193
+#define TEGRA124_CLK_SPDIF_IN 194
+#define TEGRA124_CLK_SPDIF_OUT 195
+#define TEGRA124_CLK_VI 196
+#define TEGRA124_CLK_VI_SENSOR 197
+#define TEGRA124_CLK_FUSE 198
+#define TEGRA124_CLK_FUSE_BURN 199
+#define TEGRA124_CLK_CLK_32K 200
+#define TEGRA124_CLK_CLK_M 201
+#define TEGRA124_CLK_CLK_M_DIV2 202
+#define TEGRA124_CLK_CLK_M_DIV4 203
+#define TEGRA124_CLK_PLL_REF 204
+#define TEGRA124_CLK_PLL_C 205
+#define TEGRA124_CLK_PLL_C_OUT1 206
+#define TEGRA124_CLK_PLL_C2 207
+#define TEGRA124_CLK_PLL_C3 208
+#define TEGRA124_CLK_PLL_M 209
+#define TEGRA124_CLK_PLL_M_OUT1 210
+#define TEGRA124_CLK_PLL_P 211
+#define TEGRA124_CLK_PLL_P_OUT1 212
+#define TEGRA124_CLK_PLL_P_OUT2 213
+#define TEGRA124_CLK_PLL_P_OUT3 214
+#define TEGRA124_CLK_PLL_P_OUT4 215
+#define TEGRA124_CLK_PLL_A 216
+#define TEGRA124_CLK_PLL_A_OUT0 217
+#define TEGRA124_CLK_PLL_D 218
+#define TEGRA124_CLK_PLL_D_OUT0 219
+#define TEGRA124_CLK_PLL_D2 220
+#define TEGRA124_CLK_PLL_D2_OUT0 221
+#define TEGRA124_CLK_PLL_U 222
+#define TEGRA124_CLK_PLL_U_480M 223
+
+#define TEGRA124_CLK_PLL_U_60M 224
+#define TEGRA124_CLK_PLL_U_48M 225
+#define TEGRA124_CLK_PLL_U_12M 226
+/* 227 */
+/* 228 */
+#define TEGRA124_CLK_PLL_RE_VCO 229
+#define TEGRA124_CLK_PLL_RE_OUT 230
+#define TEGRA124_CLK_PLL_E 231
+#define TEGRA124_CLK_SPDIF_IN_SYNC 232
+#define TEGRA124_CLK_I2S0_SYNC 233
+#define TEGRA124_CLK_I2S1_SYNC 234
+#define TEGRA124_CLK_I2S2_SYNC 235
+#define TEGRA124_CLK_I2S3_SYNC 236
+#define TEGRA124_CLK_I2S4_SYNC 237
+#define TEGRA124_CLK_VIMCLK_SYNC 238
+#define TEGRA124_CLK_AUDIO0 239
+#define TEGRA124_CLK_AUDIO1 240
+#define TEGRA124_CLK_AUDIO2 241
+#define TEGRA124_CLK_AUDIO3 242
+#define TEGRA124_CLK_AUDIO4 243
+#define TEGRA124_CLK_SPDIF 244
+#define TEGRA124_CLK_CLK_OUT_1 245
+#define TEGRA124_CLK_CLK_OUT_2 246
+#define TEGRA124_CLK_CLK_OUT_3 247
+#define TEGRA124_CLK_BLINK 248
+/* 249 */
+/* 250 */
+/* 251 */
+#define TEGRA124_CLK_XUSB_HOST_SRC 252
+#define TEGRA124_CLK_XUSB_FALCON_SRC 253
+#define TEGRA124_CLK_XUSB_FS_SRC 254
+#define TEGRA124_CLK_XUSB_SS_SRC 255
+
+#define TEGRA124_CLK_XUSB_DEV_SRC 256
+#define TEGRA124_CLK_XUSB_DEV 257
+#define TEGRA124_CLK_XUSB_HS_SRC 258
+#define TEGRA124_CLK_SCLK 259
+#define TEGRA124_CLK_HCLK 260
+#define TEGRA124_CLK_PCLK 261
+/* 262 */
+/* 263 */
+#define TEGRA124_CLK_DFLL_REF 264
+#define TEGRA124_CLK_DFLL_SOC 265
+#define TEGRA124_CLK_VI_SENSOR2 266
+#define TEGRA124_CLK_PLL_P_OUT5 267
+#define TEGRA124_CLK_CML0 268
+#define TEGRA124_CLK_CML1 269
+#define TEGRA124_CLK_PLL_C4 270
+#define TEGRA124_CLK_PLL_DP 271
+#define TEGRA124_CLK_PLL_E_MUX 272
+#define TEGRA124_CLK_PLL_D_DSI_OUT 273
+/* 274 */
+/* 275 */
+/* 276 */
+/* 277 */
+/* 278 */
+/* 279 */
+/* 280 */
+/* 281 */
+/* 282 */
+/* 283 */
+/* 284 */
+/* 285 */
+/* 286 */
+/* 287 */
+
+/* 288 */
+/* 289 */
+/* 290 */
+/* 291 */
+/* 292 */
+/* 293 */
+/* 294 */
+/* 295 */
+/* 296 */
+/* 297 */
+/* 298 */
+/* 299 */
+#define TEGRA124_CLK_AUDIO0_MUX 300
+#define TEGRA124_CLK_AUDIO1_MUX 301
+#define TEGRA124_CLK_AUDIO2_MUX 302
+#define TEGRA124_CLK_AUDIO3_MUX 303
+#define TEGRA124_CLK_AUDIO4_MUX 304
+#define TEGRA124_CLK_SPDIF_MUX 305
+#define TEGRA124_CLK_CLK_OUT_1_MUX 306
+#define TEGRA124_CLK_CLK_OUT_2_MUX 307
+#define TEGRA124_CLK_CLK_OUT_3_MUX 308
+/* 309 */
+/* 310 */
+#define TEGRA124_CLK_SOR0_LVDS 311
+#define TEGRA124_CLK_XUSB_SS_DIV2 312
+
+#define TEGRA124_CLK_PLL_M_UD 313
+#define TEGRA124_CLK_PLL_C_UD 314
+
+#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */
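
The comment at the top of this header says IDs below 192 mirror bit positions in the CAR's CLK_OUT_ENB registers, with clocks that share a register bit numbered higher (TEGRA124_CLK_UARTB at 192, for instance). A hedged sketch of that relationship follows; the helper names are illustrative and the actual CLK_OUT_ENB_* register offsets are deliberately left out.

    /* Illustration of the numbering scheme only; no real register layout. */
    #include <dt-bindings/clock/tegra124-car-common.h>

    static inline int clk_out_enb_word(unsigned int clk_id)
    {
        return clk_id < 192 ? (int)(clk_id / 32) : -1;  /* which 32-bit ENB word */
    }

    static inline int clk_out_enb_bit(unsigned int clk_id)
    {
        return clk_id < 192 ? (int)(clk_id % 32) : -1;  /* bit inside that word */
    }

    /* TEGRA124_CLK_UARTA (6) maps to word 0, bit 6.  TEGRA124_CLK_UARTB (192)
     * is one of the special cases the comment warns about: bit 7 of word 0
     * gates both uartb and vfir, so UARTB sits outside the bit-mapped range
     * and has to be handled explicitly. */
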
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h
index af9bc9a..2860737 100644
--- a/include/dt-bindings/clock/tegra124-car.h
+++ b/include/dt-bindings/clock/tegra124-car.h
@@ -1,346 +1,19 @@
/*
- * This header provides constants for binding nvidia,tegra124-car.
- *
- * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
- * registers. These IDs often match those in the CAR's RST_DEVICES registers,
- * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
- * this case, those clocks are assigned IDs above 185 in order to highlight
- * this issue. Implementations that interpret these clock IDs as bit values
- * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
- * explicitly handle these special cases.
- *
- * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
- * above.
+ * This header provides Tegra124-specific constants for binding
+ * nvidia,tegra124-car.
*/
+#include <dt-bindings/clock/tegra124-car-common.h>
+
#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
-/* 0 */
-/* 1 */
-/* 2 */
-#define TEGRA124_CLK_ISPB 3
-#define TEGRA124_CLK_RTC 4
-#define TEGRA124_CLK_TIMER 5
-#define TEGRA124_CLK_UARTA 6
-/* 7 (register bit affects uartb and vfir) */
-/* 8 */
-#define TEGRA124_CLK_SDMMC2 9
-/* 10 (register bit affects spdif_in and spdif_out) */
-#define TEGRA124_CLK_I2S1 11
-#define TEGRA124_CLK_I2C1 12
-/* 13 */
-#define TEGRA124_CLK_SDMMC1 14
-#define TEGRA124_CLK_SDMMC4 15
-/* 16 */
-#define TEGRA124_CLK_PWM 17
-#define TEGRA124_CLK_I2S2 18
-/* 20 (register bit affects vi and vi_sensor) */
-/* 21 */
-#define TEGRA124_CLK_USBD 22
-#define TEGRA124_CLK_ISP 23
-/* 26 */
-/* 25 */
-#define TEGRA124_CLK_DISP2 26
-#define TEGRA124_CLK_DISP1 27
-#define TEGRA124_CLK_HOST1X 28
-#define TEGRA124_CLK_VCP 29
-#define TEGRA124_CLK_I2S0 30
-/* 31 */
-
-#define TEGRA124_CLK_MC 32
-/* 33 */
-#define TEGRA124_CLK_APBDMA 34
-/* 35 */
-#define TEGRA124_CLK_KBC 36
-/* 37 */
-/* 38 */
-/* 39 (register bit affects fuse and fuse_burn) */
-#define TEGRA124_CLK_KFUSE 40
-#define TEGRA124_CLK_SBC1 41
-#define TEGRA124_CLK_NOR 42
-/* 43 */
-#define TEGRA124_CLK_SBC2 44
-/* 45 */
-#define TEGRA124_CLK_SBC3 46
-#define TEGRA124_CLK_I2C5 47
-#define TEGRA124_CLK_DSIA 48
-/* 49 */
-#define TEGRA124_CLK_MIPI 50
-#define TEGRA124_CLK_HDMI 51
-#define TEGRA124_CLK_CSI 52
-/* 53 */
-#define TEGRA124_CLK_I2C2 54
-#define TEGRA124_CLK_UARTC 55
-#define TEGRA124_CLK_MIPI_CAL 56
-#define TEGRA124_CLK_EMC 57
-#define TEGRA124_CLK_USB2 58
-#define TEGRA124_CLK_USB3 59
-/* 60 */
-#define TEGRA124_CLK_VDE 61
-#define TEGRA124_CLK_BSEA 62
-#define TEGRA124_CLK_BSEV 63
-
-/* 64 */
-#define TEGRA124_CLK_UARTD 65
-/* 66 */
-#define TEGRA124_CLK_I2C3 67
-#define TEGRA124_CLK_SBC4 68
-#define TEGRA124_CLK_SDMMC3 69
-#define TEGRA124_CLK_PCIE 70
-#define TEGRA124_CLK_OWR 71
-#define TEGRA124_CLK_AFI 72
-#define TEGRA124_CLK_CSITE 73
-/* 74 */
-/* 75 */
-#define TEGRA124_CLK_LA 76
-#define TEGRA124_CLK_TRACE 77
-#define TEGRA124_CLK_SOC_THERM 78
-#define TEGRA124_CLK_DTV 79
-/* 80 */
-#define TEGRA124_CLK_I2CSLOW 81
-#define TEGRA124_CLK_DSIB 82
-#define TEGRA124_CLK_TSEC 83
-/* 84 */
-/* 85 */
-/* 86 */
-/* 87 */
-/* 88 */
-#define TEGRA124_CLK_XUSB_HOST 89
-/* 90 */
-#define TEGRA124_CLK_MSENC 91
-#define TEGRA124_CLK_CSUS 92
-/* 93 */
-/* 94 */
-/* 95 (bit affects xusb_dev and xusb_dev_src) */
-
-/* 96 */
-/* 97 */
-/* 98 */
-#define TEGRA124_CLK_MSELECT 99
-#define TEGRA124_CLK_TSENSOR 100
-#define TEGRA124_CLK_I2S3 101
-#define TEGRA124_CLK_I2S4 102
-#define TEGRA124_CLK_I2C4 103
-#define TEGRA124_CLK_SBC5 104
-#define TEGRA124_CLK_SBC6 105
-#define TEGRA124_CLK_D_AUDIO 106
-#define TEGRA124_CLK_APBIF 107
-#define TEGRA124_CLK_DAM0 108
-#define TEGRA124_CLK_DAM1 109
-#define TEGRA124_CLK_DAM2 110
-#define TEGRA124_CLK_HDA2CODEC_2X 111
-/* 112 */
-#define TEGRA124_CLK_AUDIO0_2X 113
-#define TEGRA124_CLK_AUDIO1_2X 114
-#define TEGRA124_CLK_AUDIO2_2X 115
-#define TEGRA124_CLK_AUDIO3_2X 116
-#define TEGRA124_CLK_AUDIO4_2X 117
-#define TEGRA124_CLK_SPDIF_2X 118
-#define TEGRA124_CLK_ACTMON 119
-#define TEGRA124_CLK_EXTERN1 120
-#define TEGRA124_CLK_EXTERN2 121
-#define TEGRA124_CLK_EXTERN3 122
-#define TEGRA124_CLK_SATA_OOB 123
-#define TEGRA124_CLK_SATA 124
-#define TEGRA124_CLK_HDA 125
-/* 126 */
-#define TEGRA124_CLK_SE 127
-
-#define TEGRA124_CLK_HDA2HDMI 128
-#define TEGRA124_CLK_SATA_COLD 129
-/* 130 */
-/* 131 */
-/* 132 */
-/* 133 */
-/* 134 */
-/* 135 */
-/* 136 */
-/* 137 */
-/* 138 */
-/* 139 */
-/* 140 */
-/* 141 */
-/* 142 */
-/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
-/* xusb_host_src and xusb_ss_src) */
-#define TEGRA124_CLK_CILAB 144
-#define TEGRA124_CLK_CILCD 145
-#define TEGRA124_CLK_CILE 146
-#define TEGRA124_CLK_DSIALP 147
-#define TEGRA124_CLK_DSIBLP 148
-#define TEGRA124_CLK_ENTROPY 149
-#define TEGRA124_CLK_DDS 150
-/* 151 */
-#define TEGRA124_CLK_DP2 152
-#define TEGRA124_CLK_AMX 153
-#define TEGRA124_CLK_ADX 154
-/* 155 (bit affects dfll_ref and dfll_soc) */
-#define TEGRA124_CLK_XUSB_SS 156
-/* 157 */
-/* 158 */
-/* 159 */
-
-/* 160 */
-/* 161 */
-/* 162 */
-/* 163 */
-/* 164 */
-/* 165 */
-#define TEGRA124_CLK_I2C6 166
-/* 167 */
-/* 168 */
-/* 169 */
-/* 170 */
-#define TEGRA124_CLK_VIM2_CLK 171
-/* 172 */
-/* 173 */
-/* 174 */
-/* 175 */
-#define TEGRA124_CLK_HDMI_AUDIO 176
-#define TEGRA124_CLK_CLK72MHZ 177
-#define TEGRA124_CLK_VIC03 178
-/* 179 */
-#define TEGRA124_CLK_ADX1 180
-#define TEGRA124_CLK_DPAUX 181
-#define TEGRA124_CLK_SOR0 182
-/* 183 */
-#define TEGRA124_CLK_GPU 184
-#define TEGRA124_CLK_AMX1 185
-/* 186 */
-/* 187 */
-/* 188 */
-/* 189 */
-/* 190 */
-/* 191 */
-#define TEGRA124_CLK_UARTB 192
-#define TEGRA124_CLK_VFIR 193
-#define TEGRA124_CLK_SPDIF_IN 194
-#define TEGRA124_CLK_SPDIF_OUT 195
-#define TEGRA124_CLK_VI 196
-#define TEGRA124_CLK_VI_SENSOR 197
-#define TEGRA124_CLK_FUSE 198
-#define TEGRA124_CLK_FUSE_BURN 199
-#define TEGRA124_CLK_CLK_32K 200
-#define TEGRA124_CLK_CLK_M 201
-#define TEGRA124_CLK_CLK_M_DIV2 202
-#define TEGRA124_CLK_CLK_M_DIV4 203
-#define TEGRA124_CLK_PLL_REF 204
-#define TEGRA124_CLK_PLL_C 205
-#define TEGRA124_CLK_PLL_C_OUT1 206
-#define TEGRA124_CLK_PLL_C2 207
-#define TEGRA124_CLK_PLL_C3 208
-#define TEGRA124_CLK_PLL_M 209
-#define TEGRA124_CLK_PLL_M_OUT1 210
-#define TEGRA124_CLK_PLL_P 211
-#define TEGRA124_CLK_PLL_P_OUT1 212
-#define TEGRA124_CLK_PLL_P_OUT2 213
-#define TEGRA124_CLK_PLL_P_OUT3 214
-#define TEGRA124_CLK_PLL_P_OUT4 215
-#define TEGRA124_CLK_PLL_A 216
-#define TEGRA124_CLK_PLL_A_OUT0 217
-#define TEGRA124_CLK_PLL_D 218
-#define TEGRA124_CLK_PLL_D_OUT0 219
-#define TEGRA124_CLK_PLL_D2 220
-#define TEGRA124_CLK_PLL_D2_OUT0 221
-#define TEGRA124_CLK_PLL_U 222
-#define TEGRA124_CLK_PLL_U_480M 223
-
-#define TEGRA124_CLK_PLL_U_60M 224
-#define TEGRA124_CLK_PLL_U_48M 225
-#define TEGRA124_CLK_PLL_U_12M 226
-#define TEGRA124_CLK_PLL_X 227
-#define TEGRA124_CLK_PLL_X_OUT0 228
-#define TEGRA124_CLK_PLL_RE_VCO 229
-#define TEGRA124_CLK_PLL_RE_OUT 230
-#define TEGRA124_CLK_PLL_E 231
-#define TEGRA124_CLK_SPDIF_IN_SYNC 232
-#define TEGRA124_CLK_I2S0_SYNC 233
-#define TEGRA124_CLK_I2S1_SYNC 234
-#define TEGRA124_CLK_I2S2_SYNC 235
-#define TEGRA124_CLK_I2S3_SYNC 236
-#define TEGRA124_CLK_I2S4_SYNC 237
-#define TEGRA124_CLK_VIMCLK_SYNC 238
-#define TEGRA124_CLK_AUDIO0 239
-#define TEGRA124_CLK_AUDIO1 240
-#define TEGRA124_CLK_AUDIO2 241
-#define TEGRA124_CLK_AUDIO3 242
-#define TEGRA124_CLK_AUDIO4 243
-#define TEGRA124_CLK_SPDIF 244
-#define TEGRA124_CLK_CLK_OUT_1 245
-#define TEGRA124_CLK_CLK_OUT_2 246
-#define TEGRA124_CLK_CLK_OUT_3 247
-#define TEGRA124_CLK_BLINK 248
-/* 249 */
-/* 250 */
-/* 251 */
-#define TEGRA124_CLK_XUSB_HOST_SRC 252
-#define TEGRA124_CLK_XUSB_FALCON_SRC 253
-#define TEGRA124_CLK_XUSB_FS_SRC 254
-#define TEGRA124_CLK_XUSB_SS_SRC 255
-
-#define TEGRA124_CLK_XUSB_DEV_SRC 256
-#define TEGRA124_CLK_XUSB_DEV 257
-#define TEGRA124_CLK_XUSB_HS_SRC 258
-#define TEGRA124_CLK_SCLK 259
-#define TEGRA124_CLK_HCLK 260
-#define TEGRA124_CLK_PCLK 261
-#define TEGRA124_CLK_CCLK_G 262
-#define TEGRA124_CLK_CCLK_LP 263
-#define TEGRA124_CLK_DFLL_REF 264
-#define TEGRA124_CLK_DFLL_SOC 265
-#define TEGRA124_CLK_VI_SENSOR2 266
-#define TEGRA124_CLK_PLL_P_OUT5 267
-#define TEGRA124_CLK_CML0 268
-#define TEGRA124_CLK_CML1 269
-#define TEGRA124_CLK_PLL_C4 270
-#define TEGRA124_CLK_PLL_DP 271
-#define TEGRA124_CLK_PLL_E_MUX 272
-/* 273 */
-/* 274 */
-/* 275 */
-/* 276 */
-/* 277 */
-/* 278 */
-/* 279 */
-/* 280 */
-/* 281 */
-/* 282 */
-/* 283 */
-/* 284 */
-/* 285 */
-/* 286 */
-/* 287 */
-
-/* 288 */
-/* 289 */
-/* 290 */
-/* 291 */
-/* 292 */
-/* 293 */
-/* 294 */
-/* 295 */
-/* 296 */
-/* 297 */
-/* 298 */
-/* 299 */
-#define TEGRA124_CLK_AUDIO0_MUX 300
-#define TEGRA124_CLK_AUDIO1_MUX 301
-#define TEGRA124_CLK_AUDIO2_MUX 302
-#define TEGRA124_CLK_AUDIO3_MUX 303
-#define TEGRA124_CLK_AUDIO4_MUX 304
-#define TEGRA124_CLK_SPDIF_MUX 305
-#define TEGRA124_CLK_CLK_OUT_1_MUX 306
-#define TEGRA124_CLK_CLK_OUT_2_MUX 307
-#define TEGRA124_CLK_CLK_OUT_3_MUX 308
-#define TEGRA124_CLK_DSIA_MUX 309
-#define TEGRA124_CLK_DSIB_MUX 310
-#define TEGRA124_CLK_SOR0_LVDS 311
-#define TEGRA124_CLK_XUSB_SS_DIV2 312
+#define TEGRA124_CLK_PLL_X 227
+#define TEGRA124_CLK_PLL_X_OUT0 228
-#define TEGRA124_CLK_PLL_M_UD 313
-#define TEGRA124_CLK_PLL_C_UD 314
+#define TEGRA124_CLK_CCLK_G 262
+#define TEGRA124_CLK_CCLK_LP 263
-#define TEGRA124_CLK_CLK_MAX 315
+#define TEGRA124_CLK_CLK_MAX 315
#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
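
tegra124-car.h now carries only the Tegra124-specific IDs (PLL_X, PLL_X_OUT0, CCLK_G, CCLK_LP) and picks up everything shared with Tegra132 through tegra124-car-common.h, so a consumer that includes it still sees the complete set. A small compile-time illustration, with the values taken from the two headers above:

    #include <dt-bindings/clock/tegra124-car.h>

    /* Both the SoC-specific and the shared IDs resolve via the single
     * include above. */
    #if TEGRA124_CLK_PLL_X != 227 || TEGRA124_CLK_PLL_P != 211
    #error "unexpected Tegra124 clock IDs"
    #endif
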
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 801c0ac..d197634 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -192,6 +192,8 @@
#define VF610_PLL5_BYPASS 179
#define VF610_PLL6_BYPASS 180
#define VF610_PLL7_BYPASS 181
-#define VF610_CLK_END 182
+#define VF610_CLK_SNVS 182
+#define VF610_CLK_DAP 183
+#define VF610_CLK_END 184
#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h
new file mode 100644
index 0000000..e683dbb
--- /dev/null
+++ b/include/dt-bindings/clock/zx296702-clock.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2014 Linaro Ltd.
+ * Copyright (C) 2014 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_ZX296702_H
+#define __DT_BINDINGS_CLOCK_ZX296702_H
+
+#define ZX296702_OSC 0
+#define ZX296702_PLL_A9 1
+#define ZX296702_PLL_A9_350M 2
+#define ZX296702_PLL_MAC_1000M 3
+#define ZX296702_PLL_MAC_333M 4
+#define ZX296702_PLL_MM0_1188M 5
+#define ZX296702_PLL_MM0_396M 6
+#define ZX296702_PLL_MM0_198M 7
+#define ZX296702_PLL_MM1_108M 8
+#define ZX296702_PLL_MM1_72M 9
+#define ZX296702_PLL_MM1_54M 10
+#define ZX296702_PLL_LSP_104M 11
+#define ZX296702_PLL_LSP_26M 12
+#define ZX296702_PLL_AUDIO_294M912 13
+#define ZX296702_PLL_DDR_266M 14
+#define ZX296702_CLK_148M5 15
+#define ZX296702_MATRIX_ACLK 16
+#define ZX296702_MAIN_HCLK 17
+#define ZX296702_MAIN_PCLK 18
+#define ZX296702_CLK_500 19
+#define ZX296702_CLK_250 20
+#define ZX296702_CLK_125 21
+#define ZX296702_CLK_74M25 22
+#define ZX296702_A9_WCLK 23
+#define ZX296702_A9_AS1_ACLK_MUX 24
+#define ZX296702_A9_TRACE_CLKIN_MUX 25
+#define ZX296702_A9_AS1_ACLK_DIV 26
+#define ZX296702_CLK_2 27
+#define ZX296702_CLK_27 28
+#define ZX296702_DECPPU_ACLK_MUX 29
+#define ZX296702_PPU_ACLK_MUX 30
+#define ZX296702_MALI400_ACLK_MUX 31
+#define ZX296702_VOU_ACLK_MUX 32
+#define ZX296702_VOU_MAIN_WCLK_MUX 33
+#define ZX296702_VOU_AUX_WCLK_MUX 34
+#define ZX296702_VOU_SCALER_WCLK_MUX 35
+#define ZX296702_R2D_ACLK_MUX 36
+#define ZX296702_R2D_WCLK_MUX 37
+#define ZX296702_CLK_50 38
+#define ZX296702_CLK_25 39
+#define ZX296702_CLK_12 40
+#define ZX296702_CLK_16M384 41
+#define ZX296702_CLK_32K768 42
+#define ZX296702_SEC_WCLK_DIV 43
+#define ZX296702_DDR_WCLK_MUX 44
+#define ZX296702_NAND_WCLK_MUX 45
+#define ZX296702_LSP_26_WCLK_MUX 46
+#define ZX296702_A9_AS0_ACLK 47
+#define ZX296702_A9_AS1_ACLK 48
+#define ZX296702_A9_TRACE_CLKIN 49
+#define ZX296702_DECPPU_AXI_M_ACLK 50
+#define ZX296702_DECPPU_AHB_S_HCLK 51
+#define ZX296702_PPU_AXI_M_ACLK 52
+#define ZX296702_PPU_AHB_S_HCLK 53
+#define ZX296702_VOU_AXI_M_ACLK 54
+#define ZX296702_VOU_APB_PCLK 55
+#define ZX296702_VOU_MAIN_CHANNEL_WCLK 56
+#define ZX296702_VOU_AUX_CHANNEL_WCLK 57
+#define ZX296702_VOU_HDMI_OSCLK_CEC 58
+#define ZX296702_VOU_SCALER_WCLK 59
+#define ZX296702_MALI400_AXI_M_ACLK 60
+#define ZX296702_MALI400_APB_PCLK 61
+#define ZX296702_R2D_WCLK 62
+#define ZX296702_R2D_AXI_M_ACLK 63
+#define ZX296702_R2D_AHB_HCLK 64
+#define ZX296702_DDR3_AXI_S0_ACLK 65
+#define ZX296702_DDR3_APB_PCLK 66
+#define ZX296702_DDR3_WCLK 67
+#define ZX296702_USB20_0_AHB_HCLK 68
+#define ZX296702_USB20_0_EXTREFCLK 69
+#define ZX296702_USB20_1_AHB_HCLK 70
+#define ZX296702_USB20_1_EXTREFCLK 71
+#define ZX296702_USB20_2_AHB_HCLK 72
+#define ZX296702_USB20_2_EXTREFCLK 73
+#define ZX296702_GMAC_AXI_M_ACLK 74
+#define ZX296702_GMAC_APB_PCLK 75
+#define ZX296702_GMAC_125_CLKIN 76
+#define ZX296702_GMAC_RMII_CLKIN 77
+#define ZX296702_GMAC_25M_CLK 78
+#define ZX296702_NANDFLASH_AHB_HCLK 79
+#define ZX296702_NANDFLASH_WCLK 80
+#define ZX296702_LSP0_APB_PCLK 81
+#define ZX296702_LSP0_AHB_HCLK 82
+#define ZX296702_LSP0_26M_WCLK 83
+#define ZX296702_LSP0_104M_WCLK 84
+#define ZX296702_LSP0_16M384_WCLK 85
+#define ZX296702_LSP1_APB_PCLK 86
+#define ZX296702_LSP1_26M_WCLK 87
+#define ZX296702_LSP1_104M_WCLK 88
+#define ZX296702_LSP1_32K_CLK 89
+#define ZX296702_AON_HCLK 90
+#define ZX296702_SYS_CTRL_PCLK 91
+#define ZX296702_DMA_PCLK 92
+#define ZX296702_DMA_ACLK 93
+#define ZX296702_SEC_HCLK 94
+#define ZX296702_AES_WCLK 95
+#define ZX296702_DES_WCLK 96
+#define ZX296702_IRAM_ACLK 97
+#define ZX296702_IROM_ACLK 98
+#define ZX296702_BOOT_CTRL_HCLK 99
+#define ZX296702_EFUSE_CLK_30 100
+#define ZX296702_VOU_MAIN_CHANNEL_DIV 101
+#define ZX296702_VOU_AUX_CHANNEL_DIV 102
+#define ZX296702_VOU_TV_ENC_HD_DIV 103
+#define ZX296702_VOU_TV_ENC_SD_DIV 104
+#define ZX296702_VL0_MUX 105
+#define ZX296702_VL1_MUX 106
+#define ZX296702_VL2_MUX 107
+#define ZX296702_GL0_MUX 108
+#define ZX296702_GL1_MUX 109
+#define ZX296702_GL2_MUX 110
+#define ZX296702_WB_MUX 111
+#define ZX296702_HDMI_MUX 112
+#define ZX296702_VOU_TV_ENC_HD_MUX 113
+#define ZX296702_VOU_TV_ENC_SD_MUX 114
+#define ZX296702_VL0_CLK 115
+#define ZX296702_VL1_CLK 116
+#define ZX296702_VL2_CLK 117
+#define ZX296702_GL0_CLK 118
+#define ZX296702_GL1_CLK 119
+#define ZX296702_GL2_CLK 120
+#define ZX296702_WB_CLK 121
+#define ZX296702_CL_CLK 122
+#define ZX296702_MAIN_MIX_CLK 123
+#define ZX296702_AUX_MIX_CLK 124
+#define ZX296702_HDMI_CLK 125
+#define ZX296702_VOU_TV_ENC_HD_DAC_CLK 126
+#define ZX296702_VOU_TV_ENC_SD_DAC_CLK 127
+#define ZX296702_A9_PERIPHCLK 128
+#define ZX296702_TOPCLK_END 129
+
+#define ZX296702_SDMMC1_WCLK_MUX 0
+#define ZX296702_SDMMC1_WCLK_DIV 1
+#define ZX296702_SDMMC1_WCLK 2
+#define ZX296702_SDMMC1_PCLK 3
+#define ZX296702_SPDIF0_WCLK_MUX 4
+#define ZX296702_SPDIF0_WCLK 5
+#define ZX296702_SPDIF0_PCLK 6
+#define ZX296702_SPDIF0_DIV 7
+#define ZX296702_I2S0_WCLK_MUX 8
+#define ZX296702_I2S0_WCLK 9
+#define ZX296702_I2S0_PCLK 10
+#define ZX296702_I2S0_DIV 11
+#define ZX296702_LSP0CLK_END 12
+
+#define ZX296702_UART0_WCLK_MUX 0
+#define ZX296702_UART0_WCLK 1
+#define ZX296702_UART0_PCLK 2
+#define ZX296702_UART1_WCLK_MUX 3
+#define ZX296702_UART1_WCLK 4
+#define ZX296702_UART1_PCLK 5
+#define ZX296702_SDMMC0_WCLK_MUX 6
+#define ZX296702_SDMMC0_WCLK_DIV 7
+#define ZX296702_SDMMC0_WCLK 8
+#define ZX296702_SDMMC0_PCLK 9
+#define ZX296702_LSP1CLK_END 10
+
+#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
new file mode 100644
index 0000000..df017fd
--- /dev/null
+++ b/include/dt-bindings/dma/jz4780-dma.h
@@ -0,0 +1,49 @@
+#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
+#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
+
+/*
+ * Request type numbers for the JZ4780 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define JZ4780_DMA_I2S1_TX 0x4
+#define JZ4780_DMA_I2S1_RX 0x5
+#define JZ4780_DMA_I2S0_TX 0x6
+#define JZ4780_DMA_I2S0_RX 0x7
+#define JZ4780_DMA_AUTO 0x8
+#define JZ4780_DMA_SADC_RX 0x9
+#define JZ4780_DMA_UART4_TX 0xc
+#define JZ4780_DMA_UART4_RX 0xd
+#define JZ4780_DMA_UART3_TX 0xe
+#define JZ4780_DMA_UART3_RX 0xf
+#define JZ4780_DMA_UART2_TX 0x10
+#define JZ4780_DMA_UART2_RX 0x11
+#define JZ4780_DMA_UART1_TX 0x12
+#define JZ4780_DMA_UART1_RX 0x13
+#define JZ4780_DMA_UART0_TX 0x14
+#define JZ4780_DMA_UART0_RX 0x15
+#define JZ4780_DMA_SSI0_TX 0x16
+#define JZ4780_DMA_SSI0_RX 0x17
+#define JZ4780_DMA_SSI1_TX 0x18
+#define JZ4780_DMA_SSI1_RX 0x19
+#define JZ4780_DMA_MSC0_TX 0x1a
+#define JZ4780_DMA_MSC0_RX 0x1b
+#define JZ4780_DMA_MSC1_TX 0x1c
+#define JZ4780_DMA_MSC1_RX 0x1d
+#define JZ4780_DMA_MSC2_TX 0x1e
+#define JZ4780_DMA_MSC2_RX 0x1f
+#define JZ4780_DMA_PCM0_TX 0x20
+#define JZ4780_DMA_PCM0_RX 0x21
+#define JZ4780_DMA_SMB0_TX 0x24
+#define JZ4780_DMA_SMB0_RX 0x25
+#define JZ4780_DMA_SMB1_TX 0x26
+#define JZ4780_DMA_SMB1_RX 0x27
+#define JZ4780_DMA_SMB2_TX 0x28
+#define JZ4780_DMA_SMB2_RX 0x29
+#define JZ4780_DMA_SMB3_TX 0x2a
+#define JZ4780_DMA_SMB3_RX 0x2b
+#define JZ4780_DMA_SMB4_TX 0x2c
+#define JZ4780_DMA_SMB4_RX 0x2d
+#define JZ4780_DMA_DES_TX 0x2e
+#define JZ4780_DMA_DES_RX 0x2f
+
+#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
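
Per the comment at the top of this header, these values are request type numbers written to a channel's DRTn register. The sketch below shows that use in the abstract; the JZ_DMA_DRT() offset macro and the helper function are assumptions made for illustration, not the real controller layout or driver code.

    #include <linux/io.h>
    #include <linux/types.h>
    #include <dt-bindings/dma/jz4780-dma.h>

    #define JZ_DMA_DRT(chan)    (0x0c + (chan) * 0x20)  /* assumed register layout */

    static void jz4780_dma_set_request(void __iomem *base, unsigned int chan,
                                       u32 request_type)
    {
        /* e.g. request_type == JZ4780_DMA_UART0_TX for UART0 transmit */
        writel(request_type, base + JZ_DMA_DRT(chan));
    }
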
diff --git a/include/dt-bindings/dma/sun4i-a10.h b/include/dt-bindings/dma/sun4i-a10.h
new file mode 100644
index 0000000..8caba9e
--- /dev/null
+++ b/include/dt-bindings/dma/sun4i-a10.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2014 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_DMA_SUN4I_A10_H_
+#define __DT_BINDINGS_DMA_SUN4I_A10_H_
+
+#define SUN4I_DMA_NORMAL 0
+#define SUN4I_DMA_DEDICATED 1
+
+#endif /* __DT_BINDINGS_DMA_SUN4I_A10_H_ */
diff --git a/include/dt-bindings/gpio/meson8-gpio.h b/include/dt-bindings/gpio/meson8-gpio.h
new file mode 100644
index 0000000..fdaeb5c
--- /dev/null
+++ b/include/dt-bindings/gpio/meson8-gpio.h
@@ -0,0 +1,157 @@
+/*
+ * GPIO definitions for Amlogic Meson8 SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON8_GPIO_H
+#define _DT_BINDINGS_MESON8_GPIO_H
+
+/* First GPIO chip */
+#define GPIOX_0 0
+#define GPIOX_1 1
+#define GPIOX_2 2
+#define GPIOX_3 3
+#define GPIOX_4 4
+#define GPIOX_5 5
+#define GPIOX_6 6
+#define GPIOX_7 7
+#define GPIOX_8 8
+#define GPIOX_9 9
+#define GPIOX_10 10
+#define GPIOX_11 11
+#define GPIOX_12 12
+#define GPIOX_13 13
+#define GPIOX_14 14
+#define GPIOX_15 15
+#define GPIOX_16 16
+#define GPIOX_17 17
+#define GPIOX_18 18
+#define GPIOX_19 19
+#define GPIOX_20 20
+#define GPIOX_21 21
+#define GPIOY_0 22
+#define GPIOY_1 23
+#define GPIOY_2 24
+#define GPIOY_3 25
+#define GPIOY_4 26
+#define GPIOY_5 27
+#define GPIOY_6 28
+#define GPIOY_7 29
+#define GPIOY_8 30
+#define GPIOY_9 31
+#define GPIOY_10 32
+#define GPIOY_11 33
+#define GPIOY_12 34
+#define GPIOY_13 35
+#define GPIOY_14 36
+#define GPIOY_15 37
+#define GPIOY_16 38
+#define GPIODV_0 39
+#define GPIODV_1 40
+#define GPIODV_2 41
+#define GPIODV_3 42
+#define GPIODV_4 43
+#define GPIODV_5 44
+#define GPIODV_6 45
+#define GPIODV_7 46
+#define GPIODV_8 47
+#define GPIODV_9 48
+#define GPIODV_10 49
+#define GPIODV_11 50
+#define GPIODV_12 51
+#define GPIODV_13 52
+#define GPIODV_14 53
+#define GPIODV_15 54
+#define GPIODV_16 55
+#define GPIODV_17 56
+#define GPIODV_18 57
+#define GPIODV_19 58
+#define GPIODV_20 59
+#define GPIODV_21 60
+#define GPIODV_22 61
+#define GPIODV_23 62
+#define GPIODV_24 63
+#define GPIODV_25 64
+#define GPIODV_26 65
+#define GPIODV_27 66
+#define GPIODV_28 67
+#define GPIODV_29 68
+#define GPIOH_0 69
+#define GPIOH_1 70
+#define GPIOH_2 71
+#define GPIOH_3 72
+#define GPIOH_4 73
+#define GPIOH_5 74
+#define GPIOH_6 75
+#define GPIOH_7 76
+#define GPIOH_8 77
+#define GPIOH_9 78
+#define GPIOZ_0 79
+#define GPIOZ_1 80
+#define GPIOZ_2 81
+#define GPIOZ_3 82
+#define GPIOZ_4 83
+#define GPIOZ_5 84
+#define GPIOZ_6 85
+#define GPIOZ_7 86
+#define GPIOZ_8 87
+#define GPIOZ_9 88
+#define GPIOZ_10 89
+#define GPIOZ_11 90
+#define GPIOZ_12 91
+#define GPIOZ_13 92
+#define GPIOZ_14 93
+#define CARD_0 94
+#define CARD_1 95
+#define CARD_2 96
+#define CARD_3 97
+#define CARD_4 98
+#define CARD_5 99
+#define CARD_6 100
+#define BOOT_0 101
+#define BOOT_1 102
+#define BOOT_2 103
+#define BOOT_3 104
+#define BOOT_4 105
+#define BOOT_5 106
+#define BOOT_6 107
+#define BOOT_7 108
+#define BOOT_8 109
+#define BOOT_9 110
+#define BOOT_10 111
+#define BOOT_11 112
+#define BOOT_12 113
+#define BOOT_13 114
+#define BOOT_14 115
+#define BOOT_15 116
+#define BOOT_16 117
+#define BOOT_17 118
+#define BOOT_18 119
+
+/* Second GPIO chip */
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOAO_12 12
+#define GPIOAO_13 13
+#define GPIO_BSD_EN 14
+#define GPIO_TEST_N 15
+
+#endif /* _DT_BINDINGS_MESON8_GPIO_H */
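
The two comment blocks above split the pins between two GPIO controllers, which is why the numbering restarts at 0 for the GPIOAO bank. A purely illustrative table (the struct and array names are hypothetical) that makes the two independent number spaces explicit:

    /* Hypothetical bank table; names and struct are illustrative only. */
    #include <dt-bindings/gpio/meson8-gpio.h>

    struct meson8_bank_range {
        const char *name;
        unsigned int first, last;
    };

    static const struct meson8_bank_range meson8_banks[] = {
        { "GPIOX", GPIOX_0, GPIOX_21 },    /* first chip: numbering starts at 0 */
        { "BOOT",  BOOT_0,  BOOT_18  },    /* ...and runs up to 119             */
    };

    static const struct meson8_bank_range meson8_ao_banks[] = {
        { "GPIOAO", GPIOAO_0, GPIOAO_13 }, /* second chip: numbering restarts at 0 */
    };
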
diff --git a/include/dt-bindings/gpio/meson8b-gpio.h b/include/dt-bindings/gpio/meson8b-gpio.h
new file mode 100644
index 0000000..c38cb20
--- /dev/null
+++ b/include/dt-bindings/gpio/meson8b-gpio.h
@@ -0,0 +1,32 @@
+/*
+ * GPIO definitions for Amlogic Meson8b SoCs
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON8B_GPIO_H
+#define _DT_BINDINGS_MESON8B_GPIO_H
+
+#include <dt-bindings/gpio/meson8-gpio.h>
+
+/* GPIO Bank DIF */
+#define DIF_0_P 120
+#define DIF_0_N 121
+#define DIF_1_P 122
+#define DIF_1_N 123
+#define DIF_2_P 124
+#define DIF_2_N 125
+#define DIF_3_P 126
+#define DIF_3_N 127
+#define DIF_4_P 128
+#define DIF_4_N 129
+
+#endif /* _DT_BINDINGS_MESON8B_GPIO_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
new file mode 100644
index 0000000..42121fa
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_H
+
+/* Voltage ADC channels */
+#define VADC_USBIN 0x00
+#define VADC_DCIN 0x01
+#define VADC_VCHG_SNS 0x02
+#define VADC_SPARE1_03 0x03
+#define VADC_USB_ID_MV 0x04
+#define VADC_VCOIN 0x05
+#define VADC_VBAT_SNS 0x06
+#define VADC_VSYS 0x07
+#define VADC_DIE_TEMP 0x08
+#define VADC_REF_625MV 0x09
+#define VADC_REF_1250MV 0x0a
+#define VADC_CHG_TEMP 0x0b
+#define VADC_SPARE1 0x0c
+#define VADC_SPARE2 0x0d
+#define VADC_GND_REF 0x0e
+#define VADC_VDD_VADC 0x0f
+
+#define VADC_P_MUX1_1_1 0x10
+#define VADC_P_MUX2_1_1 0x11
+#define VADC_P_MUX3_1_1 0x12
+#define VADC_P_MUX4_1_1 0x13
+#define VADC_P_MUX5_1_1 0x14
+#define VADC_P_MUX6_1_1 0x15
+#define VADC_P_MUX7_1_1 0x16
+#define VADC_P_MUX8_1_1 0x17
+#define VADC_P_MUX9_1_1 0x18
+#define VADC_P_MUX10_1_1 0x19
+#define VADC_P_MUX11_1_1 0x1a
+#define VADC_P_MUX12_1_1 0x1b
+#define VADC_P_MUX13_1_1 0x1c
+#define VADC_P_MUX14_1_1 0x1d
+#define VADC_P_MUX15_1_1 0x1e
+#define VADC_P_MUX16_1_1 0x1f
+
+#define VADC_P_MUX1_1_3 0x20
+#define VADC_P_MUX2_1_3 0x21
+#define VADC_P_MUX3_1_3 0x22
+#define VADC_P_MUX4_1_3 0x23
+#define VADC_P_MUX5_1_3 0x24
+#define VADC_P_MUX6_1_3 0x25
+#define VADC_P_MUX7_1_3 0x26
+#define VADC_P_MUX8_1_3 0x27
+#define VADC_P_MUX9_1_3 0x28
+#define VADC_P_MUX10_1_3 0x29
+#define VADC_P_MUX11_1_3 0x2a
+#define VADC_P_MUX12_1_3 0x2b
+#define VADC_P_MUX13_1_3 0x2c
+#define VADC_P_MUX14_1_3 0x2d
+#define VADC_P_MUX15_1_3 0x2e
+#define VADC_P_MUX16_1_3 0x2f
+
+#define VADC_LR_MUX1_BAT_THERM 0x30
+#define VADC_LR_MUX2_BAT_ID 0x31
+#define VADC_LR_MUX3_XO_THERM 0x32
+#define VADC_LR_MUX4_AMUX_THM1 0x33
+#define VADC_LR_MUX5_AMUX_THM2 0x34
+#define VADC_LR_MUX6_AMUX_THM3 0x35
+#define VADC_LR_MUX7_HW_ID 0x36
+#define VADC_LR_MUX8_AMUX_THM4 0x37
+#define VADC_LR_MUX9_AMUX_THM5 0x38
+#define VADC_LR_MUX10_USB_ID 0x39
+#define VADC_AMUX_PU1 0x3a
+#define VADC_AMUX_PU2 0x3b
+#define VADC_LR_MUX3_BUF_XO_THERM 0x3c
+
+#define VADC_LR_MUX1_PU1_BAT_THERM 0x70
+#define VADC_LR_MUX2_PU1_BAT_ID 0x71
+#define VADC_LR_MUX3_PU1_XO_THERM 0x72
+#define VADC_LR_MUX4_PU1_AMUX_THM1 0x73
+#define VADC_LR_MUX5_PU1_AMUX_THM2 0x74
+#define VADC_LR_MUX6_PU1_AMUX_THM3 0x75
+#define VADC_LR_MUX7_PU1_AMUX_HW_ID 0x76
+#define VADC_LR_MUX8_PU1_AMUX_THM4 0x77
+#define VADC_LR_MUX9_PU1_AMUX_THM5 0x78
+#define VADC_LR_MUX10_PU1_AMUX_USB_ID 0x79
+#define VADC_LR_MUX3_BUF_PU1_XO_THERM 0x7c
+
+#define VADC_LR_MUX1_PU2_BAT_THERM 0xb0
+#define VADC_LR_MUX2_PU2_BAT_ID 0xb1
+#define VADC_LR_MUX3_PU2_XO_THERM 0xb2
+#define VADC_LR_MUX4_PU2_AMUX_THM1 0xb3
+#define VADC_LR_MUX5_PU2_AMUX_THM2 0xb4
+#define VADC_LR_MUX6_PU2_AMUX_THM3 0xb5
+#define VADC_LR_MUX7_PU2_AMUX_HW_ID 0xb6
+#define VADC_LR_MUX8_PU2_AMUX_THM4 0xb7
+#define VADC_LR_MUX9_PU2_AMUX_THM5 0xb8
+#define VADC_LR_MUX10_PU2_AMUX_USB_ID 0xb9
+#define VADC_LR_MUX3_BUF_PU2_XO_THERM 0xbc
+
+#define VADC_LR_MUX1_PU1_PU2_BAT_THERM 0xf0
+#define VADC_LR_MUX2_PU1_PU2_BAT_ID 0xf1
+#define VADC_LR_MUX3_PU1_PU2_XO_THERM 0xf2
+#define VADC_LR_MUX4_PU1_PU2_AMUX_THM1 0xf3
+#define VADC_LR_MUX5_PU1_PU2_AMUX_THM2 0xf4
+#define VADC_LR_MUX6_PU1_PU2_AMUX_THM3 0xf5
+#define VADC_LR_MUX7_PU1_PU2_AMUX_HW_ID 0xf6
+#define VADC_LR_MUX8_PU1_PU2_AMUX_THM4 0xf7
+#define VADC_LR_MUX9_PU1_PU2_AMUX_THM5 0xf8
+#define VADC_LR_MUX10_PU1_PU2_AMUX_USB_ID 0xf9
+#define VADC_LR_MUX3_BUF_PU1_PU2_XO_THERM 0xfc
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */
diff --git a/include/dt-bindings/interrupt-controller/irq-st.h b/include/dt-bindings/interrupt-controller/irq-st.h
new file mode 100644
index 0000000..4c59ace
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/irq-st.h
@@ -0,0 +1,30 @@
+/*
+ * include/linux/irqchip/irq-st.h
+ *
+ * Copyright (C) 2014 STMicroelectronics – All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ST_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ST_H
+
+#define ST_IRQ_SYSCFG_EXT_0 0
+#define ST_IRQ_SYSCFG_EXT_1 1
+#define ST_IRQ_SYSCFG_EXT_2 2
+#define ST_IRQ_SYSCFG_CTI_0 3
+#define ST_IRQ_SYSCFG_CTI_1 4
+#define ST_IRQ_SYSCFG_PMU_0 5
+#define ST_IRQ_SYSCFG_PMU_1 6
+#define ST_IRQ_SYSCFG_pl310_L2 7
+#define ST_IRQ_SYSCFG_DISABLED 0xFFFFFFFF
+
+#define ST_IRQ_SYSCFG_EXT_1_INV 0x1
+#define ST_IRQ_SYSCFG_EXT_2_INV 0x2
+#define ST_IRQ_SYSCFG_EXT_3_INV 0x4
+
+#endif
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
new file mode 100644
index 0000000..79fcef7
--- /dev/null
+++ b/include/dt-bindings/leds/common.h
@@ -0,0 +1,21 @@
+/*
+ * This header provides macros for the common LEDs device tree bindings.
+ *
+ * Copyright (C) 2015, Samsung Electronics Co., Ltd.
+ *
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_LEDS_H
+#define __DT_BINDINGS_LEDS_H
+
+/* External trigger type */
+#define LEDS_TRIG_TYPE_EDGE 0
+#define LEDS_TRIG_TYPE_LEVEL 1
+
+/* Boost modes */
+#define LEDS_BOOST_OFF 0
+#define LEDS_BOOST_ADAPTIVE 1
+#define LEDS_BOOST_FIXED 2
+
+#endif /* __DT_BINDINGS_LEDS_H */
diff --git a/include/dt-bindings/media/omap3-isp.h b/include/dt-bindings/media/omap3-isp.h
new file mode 100644
index 0000000..b18c60e
--- /dev/null
+++ b/include/dt-bindings/media/omap3-isp.h
@@ -0,0 +1,22 @@
+/*
+ * include/dt-bindings/media/omap3-isp.h
+ *
+ * Copyright (C) 2015 Sakari Ailus
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __DT_BINDINGS_OMAP3_ISP_H__
+#define __DT_BINDINGS_OMAP3_ISP_H__
+
+#define OMAP3ISP_PHY_TYPE_COMPLEX_IO 0
+#define OMAP3ISP_PHY_TYPE_CSIPHY 1
+
+#endif /* __DT_BINDINGS_OMAP3_ISP_H__ */
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
new file mode 100644
index 0000000..6298fec
--- /dev/null
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx Video IP Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__
+#define __DT_BINDINGS_MEDIA_XILINX_VIP_H__
+
+/*
+ * Video format codes as defined in "AXI4-Stream Video IP and System Design
+ * Guide".
+ */
+#define XVIP_VF_YUV_422 0
+#define XVIP_VF_YUV_444 1
+#define XVIP_VF_RBG 2
+#define XVIP_VF_YUV_420 3
+#define XVIP_VF_YUVA_422 4
+#define XVIP_VF_YUVA_444 5
+#define XVIP_VF_RGBA 6
+#define XVIP_VF_YUVA_420 7
+#define XVIP_VF_YUVD_422 8
+#define XVIP_VF_YUVD_444 9
+#define XVIP_VF_RGBD 10
+#define XVIP_VF_YUVD_420 11
+#define XVIP_VF_MONO_SENSOR 12
+#define XVIP_VF_CUSTOM2 13
+#define XVIP_VF_CUSTOM3 14
+#define XVIP_VF_CUSTOM4 15
+
+#endif /* __DT_BINDINGS_MEDIA_XILINX_VIP_H__ */
diff --git a/include/linux/mfd/arizona/gpio.h b/include/dt-bindings/mfd/arizona.h
index d2146bb..7b2000c 100644
--- a/include/linux/mfd/arizona/gpio.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -1,7 +1,7 @@
/*
- * GPIO configuration for Arizona devices
+ * Device Tree defines for Arizona devices
*
- * Copyright 2013 Wolfson Microelectronics. PLC.
+ * Copyright 2015 Cirrus Logic Inc.
*
* Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
*
@@ -10,9 +10,10 @@
* published by the Free Software Foundation.
*/
-#ifndef _ARIZONA_GPIO_H
-#define _ARIZONA_GPIO_H
+#ifndef _DT_BINDINGS_MFD_ARIZONA_H
+#define _DT_BINDINGS_MFD_ARIZONA_H
+/* GPIO Function Definitions */
#define ARIZONA_GP_FN_TXLRCLK 0x00
#define ARIZONA_GP_FN_GPIO 0x01
#define ARIZONA_GP_FN_IRQ1 0x02
@@ -61,36 +62,50 @@
#define ARIZONA_GP_FN_SYSCLK_ENA_STATUS 0x4B
#define ARIZONA_GP_FN_ASYNCCLK_ENA_STATUS 0x4C
-#define ARIZONA_GPN_DIR 0x8000 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
-#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
-#define ARIZONA_GPN_PU 0x4000 /* GPN_PU */
-#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
-#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
-#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
-#define ARIZONA_GPN_PD 0x2000 /* GPN_PD */
-#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
-#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
-#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
-#define ARIZONA_GPN_LVL 0x0800 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
-#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
-#define ARIZONA_GPN_POL 0x0400 /* GPN_POL */
-#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
-#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
-#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
-#define ARIZONA_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
-#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
-#define ARIZONA_GPN_DB 0x0100 /* GPN_DB */
-#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
-#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
-#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
-#define ARIZONA_GPN_FN_MASK 0x007F /* GPN_DB */
-#define ARIZONA_GPN_FN_SHIFT 0 /* GPN_DB */
-#define ARIZONA_GPN_FN_WIDTH 7 /* GPN_DB */
+/* GPIO Configuration Bits */
+#define ARIZONA_GPN_DIR 0x8000
+#define ARIZONA_GPN_PU 0x4000
+#define ARIZONA_GPN_PD 0x2000
+#define ARIZONA_GPN_LVL 0x0800
+#define ARIZONA_GPN_POL 0x0400
+#define ARIZONA_GPN_OP_CFG 0x0200
+#define ARIZONA_GPN_DB 0x0100
+
+/* Provide some defines for the most common configs */
+#define ARIZONA_GP_DEFAULT 0xffffffff
+#define ARIZONA_GP_OUTPUT (ARIZONA_GP_FN_GPIO)
+#define ARIZONA_GP_INPUT (ARIZONA_GP_FN_GPIO | \
+ ARIZONA_GPN_DIR)
+
+#define ARIZONA_32KZ_MCLK1 1
+#define ARIZONA_32KZ_MCLK2 2
+#define ARIZONA_32KZ_NONE 3
+
+#define ARIZONA_DMIC_MICVDD 0
+#define ARIZONA_DMIC_MICBIAS1 1
+#define ARIZONA_DMIC_MICBIAS2 2
+#define ARIZONA_DMIC_MICBIAS3 3
+
+#define ARIZONA_INMODE_DIFF 0
+#define ARIZONA_INMODE_SE 1
+#define ARIZONA_INMODE_DMIC 2
+
+#define ARIZONA_MICD_TIME_CONTINUOUS 0
+#define ARIZONA_MICD_TIME_250US 1
+#define ARIZONA_MICD_TIME_500US 2
+#define ARIZONA_MICD_TIME_1MS 3
+#define ARIZONA_MICD_TIME_2MS 4
+#define ARIZONA_MICD_TIME_4MS 5
+#define ARIZONA_MICD_TIME_8MS 6
+#define ARIZONA_MICD_TIME_16MS 7
+#define ARIZONA_MICD_TIME_32MS 8
+#define ARIZONA_MICD_TIME_64MS 9
+#define ARIZONA_MICD_TIME_128MS 10
+#define ARIZONA_MICD_TIME_256MS 11
+#define ARIZONA_MICD_TIME_512MS 12
+
+#define ARIZONA_ACCDET_MODE_MIC 0
+#define ARIZONA_ACCDET_MODE_HPL 1
+#define ARIZONA_ACCDET_MODE_HPR 2
#endif
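With the move to dt-bindings, the per-field _MASK/_SHIFT/_WIDTH defines are dropped and a complete Arizona GPIO configuration becomes a single value: one GP_FN_* function code OR'd with the GPN_* flag bits. A hedged, self-contained C sketch follows; the constant values are copied from the header above, while the two composed configurations are illustrative, not taken from the patch:

    /* Sketch only: local copies of a few Arizona defines from the header above. */
    #include <stdio.h>

    #define ARIZONA_GP_FN_GPIO 0x01
    #define ARIZONA_GP_FN_IRQ1 0x02
    #define ARIZONA_GPN_DIR    0x8000  /* set = input, per the ARIZONA_GP_INPUT composite */
    #define ARIZONA_GPN_PU     0x4000  /* pull-up */
    #define ARIZONA_GPN_DB     0x0100  /* debounce */

    #define ARIZONA_GP_INPUT   (ARIZONA_GP_FN_GPIO | ARIZONA_GPN_DIR)

    int main(void)
    {
        /* IRQ1 driven out on the pin, with the internal pull-up enabled */
        unsigned int irq_out = ARIZONA_GP_FN_IRQ1 | ARIZONA_GPN_PU;

        /* Plain GPIO input with debounce, built on the composite from the header */
        unsigned int gpio_in = ARIZONA_GP_INPUT | ARIZONA_GPN_DB;

        printf("irq_out = 0x%04x, gpio_in = 0x%04x\n", irq_out, gpio_in);
        return 0;
    }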
diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h
new file mode 100644
index 0000000..13a9d4b
--- /dev/null
+++ b/include/dt-bindings/mfd/qcom-rpm.h
@@ -0,0 +1,160 @@
+/*
+ * This header provides constants for the Qualcomm RPM bindings.
+ */
+
+#ifndef _DT_BINDINGS_MFD_QCOM_RPM_H
+#define _DT_BINDINGS_MFD_QCOM_RPM_H
+
+/*
+ * Constants used to identify individual resources in the RPM.
+ */
+#define QCOM_RPM_APPS_FABRIC_ARB 1
+#define QCOM_RPM_APPS_FABRIC_CLK 2
+#define QCOM_RPM_APPS_FABRIC_HALT 3
+#define QCOM_RPM_APPS_FABRIC_IOCTL 4
+#define QCOM_RPM_APPS_FABRIC_MODE 5
+#define QCOM_RPM_APPS_L2_CACHE_CTL 6
+#define QCOM_RPM_CFPB_CLK 7
+#define QCOM_RPM_CXO_BUFFERS 8
+#define QCOM_RPM_CXO_CLK 9
+#define QCOM_RPM_DAYTONA_FABRIC_CLK 10
+#define QCOM_RPM_DDR_DMM 11
+#define QCOM_RPM_EBI1_CLK 12
+#define QCOM_RPM_HDMI_SWITCH 13
+#define QCOM_RPM_MMFPB_CLK 14
+#define QCOM_RPM_MM_FABRIC_ARB 15
+#define QCOM_RPM_MM_FABRIC_CLK 16
+#define QCOM_RPM_MM_FABRIC_HALT 17
+#define QCOM_RPM_MM_FABRIC_IOCTL 18
+#define QCOM_RPM_MM_FABRIC_MODE 19
+#define QCOM_RPM_PLL_4 20
+#define QCOM_RPM_PM8058_LDO0 21
+#define QCOM_RPM_PM8058_LDO1 22
+#define QCOM_RPM_PM8058_LDO2 23
+#define QCOM_RPM_PM8058_LDO3 24
+#define QCOM_RPM_PM8058_LDO4 25
+#define QCOM_RPM_PM8058_LDO5 26
+#define QCOM_RPM_PM8058_LDO6 27
+#define QCOM_RPM_PM8058_LDO7 28
+#define QCOM_RPM_PM8058_LDO8 29
+#define QCOM_RPM_PM8058_LDO9 30
+#define QCOM_RPM_PM8058_LDO10 31
+#define QCOM_RPM_PM8058_LDO11 32
+#define QCOM_RPM_PM8058_LDO12 33
+#define QCOM_RPM_PM8058_LDO13 34
+#define QCOM_RPM_PM8058_LDO14 35
+#define QCOM_RPM_PM8058_LDO15 36
+#define QCOM_RPM_PM8058_LDO16 37
+#define QCOM_RPM_PM8058_LDO17 38
+#define QCOM_RPM_PM8058_LDO18 39
+#define QCOM_RPM_PM8058_LDO19 40
+#define QCOM_RPM_PM8058_LDO20 41
+#define QCOM_RPM_PM8058_LDO21 42
+#define QCOM_RPM_PM8058_LDO22 43
+#define QCOM_RPM_PM8058_LDO23 44
+#define QCOM_RPM_PM8058_LDO24 45
+#define QCOM_RPM_PM8058_LDO25 46
+#define QCOM_RPM_PM8058_LVS0 47
+#define QCOM_RPM_PM8058_LVS1 48
+#define QCOM_RPM_PM8058_NCP 49
+#define QCOM_RPM_PM8058_SMPS0 50
+#define QCOM_RPM_PM8058_SMPS1 51
+#define QCOM_RPM_PM8058_SMPS2 52
+#define QCOM_RPM_PM8058_SMPS3 53
+#define QCOM_RPM_PM8058_SMPS4 54
+#define QCOM_RPM_PM8821_LDO1 55
+#define QCOM_RPM_PM8821_SMPS1 56
+#define QCOM_RPM_PM8821_SMPS2 57
+#define QCOM_RPM_PM8901_LDO0 58
+#define QCOM_RPM_PM8901_LDO1 59
+#define QCOM_RPM_PM8901_LDO2 60
+#define QCOM_RPM_PM8901_LDO3 61
+#define QCOM_RPM_PM8901_LDO4 62
+#define QCOM_RPM_PM8901_LDO5 63
+#define QCOM_RPM_PM8901_LDO6 64
+#define QCOM_RPM_PM8901_LVS0 65
+#define QCOM_RPM_PM8901_LVS1 66
+#define QCOM_RPM_PM8901_LVS2 67
+#define QCOM_RPM_PM8901_LVS3 68
+#define QCOM_RPM_PM8901_MVS 69
+#define QCOM_RPM_PM8901_SMPS0 70
+#define QCOM_RPM_PM8901_SMPS1 71
+#define QCOM_RPM_PM8901_SMPS2 72
+#define QCOM_RPM_PM8901_SMPS3 73
+#define QCOM_RPM_PM8901_SMPS4 74
+#define QCOM_RPM_PM8921_CLK1 75
+#define QCOM_RPM_PM8921_CLK2 76
+#define QCOM_RPM_PM8921_LDO1 77
+#define QCOM_RPM_PM8921_LDO2 78
+#define QCOM_RPM_PM8921_LDO3 79
+#define QCOM_RPM_PM8921_LDO4 80
+#define QCOM_RPM_PM8921_LDO5 81
+#define QCOM_RPM_PM8921_LDO6 82
+#define QCOM_RPM_PM8921_LDO7 83
+#define QCOM_RPM_PM8921_LDO8 84
+#define QCOM_RPM_PM8921_LDO9 85
+#define QCOM_RPM_PM8921_LDO10 86
+#define QCOM_RPM_PM8921_LDO11 87
+#define QCOM_RPM_PM8921_LDO12 88
+#define QCOM_RPM_PM8921_LDO13 89
+#define QCOM_RPM_PM8921_LDO14 90
+#define QCOM_RPM_PM8921_LDO15 91
+#define QCOM_RPM_PM8921_LDO16 92
+#define QCOM_RPM_PM8921_LDO17 93
+#define QCOM_RPM_PM8921_LDO18 94
+#define QCOM_RPM_PM8921_LDO19 95
+#define QCOM_RPM_PM8921_LDO20 96
+#define QCOM_RPM_PM8921_LDO21 97
+#define QCOM_RPM_PM8921_LDO22 98
+#define QCOM_RPM_PM8921_LDO23 99
+#define QCOM_RPM_PM8921_LDO24 100
+#define QCOM_RPM_PM8921_LDO25 101
+#define QCOM_RPM_PM8921_LDO26 102
+#define QCOM_RPM_PM8921_LDO27 103
+#define QCOM_RPM_PM8921_LDO28 104
+#define QCOM_RPM_PM8921_LDO29 105
+#define QCOM_RPM_PM8921_LVS1 106
+#define QCOM_RPM_PM8921_LVS2 107
+#define QCOM_RPM_PM8921_LVS3 108
+#define QCOM_RPM_PM8921_LVS4 109
+#define QCOM_RPM_PM8921_LVS5 110
+#define QCOM_RPM_PM8921_LVS6 111
+#define QCOM_RPM_PM8921_LVS7 112
+#define QCOM_RPM_PM8921_MVS 113
+#define QCOM_RPM_PM8921_NCP 114
+#define QCOM_RPM_PM8921_SMPS1 115
+#define QCOM_RPM_PM8921_SMPS2 116
+#define QCOM_RPM_PM8921_SMPS3 117
+#define QCOM_RPM_PM8921_SMPS4 118
+#define QCOM_RPM_PM8921_SMPS5 119
+#define QCOM_RPM_PM8921_SMPS6 120
+#define QCOM_RPM_PM8921_SMPS7 121
+#define QCOM_RPM_PM8921_SMPS8 122
+#define QCOM_RPM_PXO_CLK 123
+#define QCOM_RPM_QDSS_CLK 124
+#define QCOM_RPM_SFPB_CLK 125
+#define QCOM_RPM_SMI_CLK 126
+#define QCOM_RPM_SYS_FABRIC_ARB 127
+#define QCOM_RPM_SYS_FABRIC_CLK 128
+#define QCOM_RPM_SYS_FABRIC_HALT 129
+#define QCOM_RPM_SYS_FABRIC_IOCTL 130
+#define QCOM_RPM_SYS_FABRIC_MODE 131
+#define QCOM_RPM_USB_OTG_SWITCH 132
+#define QCOM_RPM_VDDMIN_GPIO 133
+#define QCOM_RPM_NSS_FABRIC_0_CLK 134
+#define QCOM_RPM_NSS_FABRIC_1_CLK 135
+#define QCOM_RPM_SMB208_S1a 136
+#define QCOM_RPM_SMB208_S1b 137
+#define QCOM_RPM_SMB208_S2a 138
+#define QCOM_RPM_SMB208_S2b 139
+
+/*
+ * Constants used to select force mode for regulators.
+ */
+#define QCOM_RPM_FORCE_MODE_NONE 0
+#define QCOM_RPM_FORCE_MODE_LPM 1
+#define QCOM_RPM_FORCE_MODE_HPM 2
+#define QCOM_RPM_FORCE_MODE_AUTO 3
+#define QCOM_RPM_FORCE_MODE_BYPASS 4
+
+#endif
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
new file mode 100644
index 0000000..e3e6c75
--- /dev/null
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -0,0 +1,15 @@
+/*
+ * This header provides shared DT/Driver defines for ST's LPC device
+ *
+ * Copyright (C) 2014 STMicroelectronics -- All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for STMicroelectronics
+ */
+
+#ifndef __DT_BINDINGS_ST_LPC_H__
+#define __DT_BINDINGS_ST_LPC_H__
+
+#define ST_LPC_MODE_RTC 0
+#define ST_LPC_MODE_WDT 1
+
+#endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
new file mode 100644
index 0000000..172744a
--- /dev/null
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -0,0 +1,45 @@
+/*
+ * Device Tree constants for the Texas Instruments DP83867 PHY
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2015 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_TI_DP83867_H
+#define _DT_BINDINGS_TI_DP83867_H
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB 0x00
+#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB 0x01
+#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB 0x02
+#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB 0x03
+
+/* RGMIIDCTL internal delay for rx and tx */
+#define DP83867_RGMIIDCTL_250_PS 0x0
+#define DP83867_RGMIIDCTL_500_PS 0x1
+#define DP83867_RGMIIDCTL_750_PS 0x2
+#define DP83867_RGMIIDCTL_1_NS 0x3
+#define DP83867_RGMIIDCTL_1_25_NS 0x4
+#define DP83867_RGMIIDCTL_1_50_NS 0x5
+#define DP83867_RGMIIDCTL_1_75_NS 0x6
+#define DP83867_RGMIIDCTL_2_00_NS 0x7
+#define DP83867_RGMIIDCTL_2_25_NS 0x8
+#define DP83867_RGMIIDCTL_2_50_NS 0x9
+#define DP83867_RGMIIDCTL_2_75_NS 0xa
+#define DP83867_RGMIIDCTL_3_00_NS 0xb
+#define DP83867_RGMIIDCTL_3_25_NS 0xc
+#define DP83867_RGMIIDCTL_3_50_NS 0xd
+#define DP83867_RGMIIDCTL_3_75_NS 0xe
+#define DP83867_RGMIIDCTL_4_00_NS 0xf
+
+#endif
diff --git a/include/dt-bindings/phy/phy-miphy365x.h b/include/dt-bindings/phy/phy-miphy365x.h
deleted file mode 100644
index 8ef8aba..0000000
--- a/include/dt-bindings/phy/phy-miphy365x.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * This header provides constants for the phy framework
- * based on the STMicroelectronics MiPHY365x.
- *
- * Author: Lee Jones <lee.jones@linaro.org>
- */
-#ifndef _DT_BINDINGS_PHY_MIPHY
-#define _DT_BINDINGS_PHY_MIPHY
-
-#define MIPHY_TYPE_SATA 1
-#define MIPHY_TYPE_PCIE 2
-#define MIPHY_TYPE_USB 3
-
-#endif /* _DT_BINDINGS_PHY_MIPHY */
diff --git a/include/dt-bindings/phy/phy-pistachio-usb.h b/include/dt-bindings/phy/phy-pistachio-usb.h
new file mode 100644
index 0000000..d1877aa
--- /dev/null
+++ b/include/dt-bindings/phy/phy-pistachio-usb.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_PHY_PISTACHIO
+#define _DT_BINDINGS_PHY_PISTACHIO
+
+#define REFCLK_XO_CRYSTAL 0x0
+#define REFCLK_X0_EXT_CLK 0x1
+#define REFCLK_CLK_CORE 0x2
+
+#endif /* _DT_BINDINGS_PHY_PISTACHIO */
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h
index 2fbc804..226f772 100644
--- a/include/dt-bindings/pinctrl/am33xx.h
+++ b/include/dt-bindings/pinctrl/am33xx.h
@@ -13,7 +13,8 @@
#define PULL_DISABLE (1 << 3)
#define INPUT_EN (1 << 5)
-#define SLEWCTRL_FAST (1 << 6)
+#define SLEWCTRL_SLOW (1 << 6)
+#define SLEWCTRL_FAST 0
/* update macro depending on INPUT_EN and PULL_ENA */
#undef PIN_OUTPUT
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index 9c2e4f8..b00bbc9 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -18,8 +18,10 @@
#define PULL_DISABLE (1 << 16)
#define PULL_UP (1 << 17)
#define INPUT_EN (1 << 18)
-#define SLEWCTRL_FAST (1 << 19)
+#define SLEWCTRL_SLOW (1 << 19)
+#define SLEWCTRL_FAST 0
#define DS0_PULL_UP_DOWN_EN (1 << 27)
+#define WAKEUP_ENABLE (1 << 29)
#define PIN_OUTPUT (PULL_DISABLE)
#define PIN_OUTPUT_PULLUP (PULL_UP)
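Both the am33xx and am43xx headers now name the slew-control bit for what it does: SLEWCTRL_SLOW sets the bit and SLEWCTRL_FAST becomes 0, so existing users that OR in SLEWCTRL_FAST still compile and simply get the fast (default) setting. A small illustrative C sketch, with the flag values copied from the am43xx hunk above; the composed configurations are hypothetical:

    /* Sketch only: local copies of the am43xx padconf flags from the hunk above. */
    #include <stdio.h>

    #define PULL_DISABLE    (1 << 16)
    #define PULL_UP         (1 << 17)
    #define INPUT_EN        (1 << 18)
    #define SLEWCTRL_SLOW   (1 << 19)
    #define SLEWCTRL_FAST   0          /* no-op, kept for compatibility */
    #define WAKEUP_ENABLE   (1 << 29)

    #define PIN_OUTPUT         (PULL_DISABLE)
    #define PIN_OUTPUT_PULLUP  (PULL_UP)

    int main(void)
    {
        unsigned int fast = PIN_OUTPUT_PULLUP | SLEWCTRL_FAST;  /* same as PIN_OUTPUT_PULLUP */
        unsigned int slow = PIN_OUTPUT_PULLUP | SLEWCTRL_SLOW;  /* bit 19 set */

        printf("fast = 0x%08x, slow = 0x%08x\n", fast, slow);
        return 0;
    }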
diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h
new file mode 100644
index 0000000..6f0bc37
--- /dev/null
+++ b/include/dt-bindings/pinctrl/bcm2835.h
@@ -0,0 +1,27 @@
+/*
+ * Header providing constants for bcm2835 pinctrl bindings.
+ *
+ * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_BCM2835_H__
+#define __DT_BINDINGS_PINCTRL_BCM2835_H__
+
+/* brcm,function property */
+#define BCM2835_FSEL_GPIO_IN 0
+#define BCM2835_FSEL_GPIO_OUT 1
+#define BCM2835_FSEL_ALT5 2
+#define BCM2835_FSEL_ALT4 3
+#define BCM2835_FSEL_ALT0 4
+#define BCM2835_FSEL_ALT1 5
+#define BCM2835_FSEL_ALT2 6
+#define BCM2835_FSEL_ALT3 7
+
+#endif /* __DT_BINDINGS_PINCTRL_BCM2835_H__ */
diff --git a/include/dt-bindings/pinctrl/mt6397-pinfunc.h b/include/dt-bindings/pinctrl/mt6397-pinfunc.h
new file mode 100644
index 0000000..85739b3
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6397-pinfunc.h
@@ -0,0 +1,256 @@
+#ifndef __DTS_MT6397_PINFUNC_H
+#define __DTS_MT6397_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT6397_PIN_0_INT__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT6397_PIN_0_INT__FUNC_INT (MTK_PIN_NO(0) | 1)
+
+#define MT6397_PIN_1_SRCVOLTEN__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT6397_PIN_1_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(1) | 1)
+#define MT6397_PIN_1_SRCVOLTEN__FUNC_TEST_CK1 (MTK_PIN_NO(1) | 6)
+
+#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_SRCLKEN_PERI (MTK_PIN_NO(2) | 1)
+#define MT6397_PIN_2_SRCLKEN_PERI__FUNC_TEST_CK2 (MTK_PIN_NO(2) | 6)
+
+#define MT6397_PIN_3_RTC_32K1V8__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT6397_PIN_3_RTC_32K1V8__FUNC_RTC_32K1V8 (MTK_PIN_NO(3) | 1)
+#define MT6397_PIN_3_RTC_32K1V8__FUNC_TEST_CK3 (MTK_PIN_NO(3) | 6)
+
+#define MT6397_PIN_4_WRAP_EVENT__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT6397_PIN_4_WRAP_EVENT__FUNC_WRAP_EVENT (MTK_PIN_NO(4) | 1)
+
+#define MT6397_PIN_5_SPI_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT6397_PIN_5_SPI_CLK__FUNC_SPI_CLK (MTK_PIN_NO(5) | 1)
+
+#define MT6397_PIN_6_SPI_CSN__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT6397_PIN_6_SPI_CSN__FUNC_SPI_CSN (MTK_PIN_NO(6) | 1)
+
+#define MT6397_PIN_7_SPI_MOSI__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT6397_PIN_7_SPI_MOSI__FUNC_SPI_MOSI (MTK_PIN_NO(7) | 1)
+
+#define MT6397_PIN_8_SPI_MISO__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT6397_PIN_8_SPI_MISO__FUNC_SPI_MISO (MTK_PIN_NO(8) | 1)
+
+#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(9) | 1)
+#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_IN0 (MTK_PIN_NO(9) | 6)
+#define MT6397_PIN_9_AUD_CLK_MOSI__FUNC_TEST_OUT0 (MTK_PIN_NO(9) | 7)
+
+#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_AUD_MISO (MTK_PIN_NO(10) | 1)
+#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_IN1 (MTK_PIN_NO(10) | 6)
+#define MT6397_PIN_10_AUD_DAT_MISO__FUNC_TEST_OUT1 (MTK_PIN_NO(10) | 7)
+
+#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_AUD_MOSI (MTK_PIN_NO(11) | 1)
+#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_IN2 (MTK_PIN_NO(11) | 6)
+#define MT6397_PIN_11_AUD_DAT_MOSI__FUNC_TEST_OUT2 (MTK_PIN_NO(11) | 7)
+
+#define MT6397_PIN_12_COL0__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT6397_PIN_12_COL0__FUNC_COL0_USBDL (MTK_PIN_NO(12) | 1)
+#define MT6397_PIN_12_COL0__FUNC_EINT10_1X (MTK_PIN_NO(12) | 2)
+#define MT6397_PIN_12_COL0__FUNC_PWM1_3X (MTK_PIN_NO(12) | 3)
+#define MT6397_PIN_12_COL0__FUNC_TEST_IN3 (MTK_PIN_NO(12) | 6)
+#define MT6397_PIN_12_COL0__FUNC_TEST_OUT3 (MTK_PIN_NO(12) | 7)
+
+#define MT6397_PIN_13_COL1__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT6397_PIN_13_COL1__FUNC_COL1 (MTK_PIN_NO(13) | 1)
+#define MT6397_PIN_13_COL1__FUNC_EINT11_1X (MTK_PIN_NO(13) | 2)
+#define MT6397_PIN_13_COL1__FUNC_SCL0_2X (MTK_PIN_NO(13) | 3)
+#define MT6397_PIN_13_COL1__FUNC_TEST_IN4 (MTK_PIN_NO(13) | 6)
+#define MT6397_PIN_13_COL1__FUNC_TEST_OUT4 (MTK_PIN_NO(13) | 7)
+
+#define MT6397_PIN_14_COL2__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT6397_PIN_14_COL2__FUNC_COL2 (MTK_PIN_NO(14) | 1)
+#define MT6397_PIN_14_COL2__FUNC_EINT12_1X (MTK_PIN_NO(14) | 2)
+#define MT6397_PIN_14_COL2__FUNC_SDA0_2X (MTK_PIN_NO(14) | 3)
+#define MT6397_PIN_14_COL2__FUNC_TEST_IN5 (MTK_PIN_NO(14) | 6)
+#define MT6397_PIN_14_COL2__FUNC_TEST_OUT5 (MTK_PIN_NO(14) | 7)
+
+#define MT6397_PIN_15_COL3__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT6397_PIN_15_COL3__FUNC_COL3 (MTK_PIN_NO(15) | 1)
+#define MT6397_PIN_15_COL3__FUNC_EINT13_1X (MTK_PIN_NO(15) | 2)
+#define MT6397_PIN_15_COL3__FUNC_SCL1_2X (MTK_PIN_NO(15) | 3)
+#define MT6397_PIN_15_COL3__FUNC_TEST_IN6 (MTK_PIN_NO(15) | 6)
+#define MT6397_PIN_15_COL3__FUNC_TEST_OUT6 (MTK_PIN_NO(15) | 7)
+
+#define MT6397_PIN_16_COL4__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT6397_PIN_16_COL4__FUNC_COL4 (MTK_PIN_NO(16) | 1)
+#define MT6397_PIN_16_COL4__FUNC_EINT14_1X (MTK_PIN_NO(16) | 2)
+#define MT6397_PIN_16_COL4__FUNC_SDA1_2X (MTK_PIN_NO(16) | 3)
+#define MT6397_PIN_16_COL4__FUNC_TEST_IN7 (MTK_PIN_NO(16) | 6)
+#define MT6397_PIN_16_COL4__FUNC_TEST_OUT7 (MTK_PIN_NO(16) | 7)
+
+#define MT6397_PIN_17_COL5__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT6397_PIN_17_COL5__FUNC_COL5 (MTK_PIN_NO(17) | 1)
+#define MT6397_PIN_17_COL5__FUNC_EINT15_1X (MTK_PIN_NO(17) | 2)
+#define MT6397_PIN_17_COL5__FUNC_SCL2_2X (MTK_PIN_NO(17) | 3)
+#define MT6397_PIN_17_COL5__FUNC_TEST_IN8 (MTK_PIN_NO(17) | 6)
+#define MT6397_PIN_17_COL5__FUNC_TEST_OUT8 (MTK_PIN_NO(17) | 7)
+
+#define MT6397_PIN_18_COL6__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT6397_PIN_18_COL6__FUNC_COL6 (MTK_PIN_NO(18) | 1)
+#define MT6397_PIN_18_COL6__FUNC_EINT16_1X (MTK_PIN_NO(18) | 2)
+#define MT6397_PIN_18_COL6__FUNC_SDA2_2X (MTK_PIN_NO(18) | 3)
+#define MT6397_PIN_18_COL6__FUNC_GPIO32K_0 (MTK_PIN_NO(18) | 4)
+#define MT6397_PIN_18_COL6__FUNC_GPIO26M_0 (MTK_PIN_NO(18) | 5)
+#define MT6397_PIN_18_COL6__FUNC_TEST_IN9 (MTK_PIN_NO(18) | 6)
+#define MT6397_PIN_18_COL6__FUNC_TEST_OUT9 (MTK_PIN_NO(18) | 7)
+
+#define MT6397_PIN_19_COL7__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT6397_PIN_19_COL7__FUNC_COL7 (MTK_PIN_NO(19) | 1)
+#define MT6397_PIN_19_COL7__FUNC_EINT17_1X (MTK_PIN_NO(19) | 2)
+#define MT6397_PIN_19_COL7__FUNC_PWM2_3X (MTK_PIN_NO(19) | 3)
+#define MT6397_PIN_19_COL7__FUNC_GPIO32K_1 (MTK_PIN_NO(19) | 4)
+#define MT6397_PIN_19_COL7__FUNC_GPIO26M_1 (MTK_PIN_NO(19) | 5)
+#define MT6397_PIN_19_COL7__FUNC_TEST_IN10 (MTK_PIN_NO(19) | 6)
+#define MT6397_PIN_19_COL7__FUNC_TEST_OUT10 (MTK_PIN_NO(19) | 7)
+
+#define MT6397_PIN_20_ROW0__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT6397_PIN_20_ROW0__FUNC_ROW0 (MTK_PIN_NO(20) | 1)
+#define MT6397_PIN_20_ROW0__FUNC_EINT18_1X (MTK_PIN_NO(20) | 2)
+#define MT6397_PIN_20_ROW0__FUNC_SCL0_3X (MTK_PIN_NO(20) | 3)
+#define MT6397_PIN_20_ROW0__FUNC_TEST_IN11 (MTK_PIN_NO(20) | 6)
+#define MT6397_PIN_20_ROW0__FUNC_TEST_OUT11 (MTK_PIN_NO(20) | 7)
+
+#define MT6397_PIN_21_ROW1__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT6397_PIN_21_ROW1__FUNC_ROW1 (MTK_PIN_NO(21) | 1)
+#define MT6397_PIN_21_ROW1__FUNC_EINT19_1X (MTK_PIN_NO(21) | 2)
+#define MT6397_PIN_21_ROW1__FUNC_SDA0_3X (MTK_PIN_NO(21) | 3)
+#define MT6397_PIN_21_ROW1__FUNC_AUD_TSTCK (MTK_PIN_NO(21) | 4)
+#define MT6397_PIN_21_ROW1__FUNC_TEST_IN12 (MTK_PIN_NO(21) | 6)
+#define MT6397_PIN_21_ROW1__FUNC_TEST_OUT12 (MTK_PIN_NO(21) | 7)
+
+#define MT6397_PIN_22_ROW2__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT6397_PIN_22_ROW2__FUNC_ROW2 (MTK_PIN_NO(22) | 1)
+#define MT6397_PIN_22_ROW2__FUNC_EINT20_1X (MTK_PIN_NO(22) | 2)
+#define MT6397_PIN_22_ROW2__FUNC_SCL1_3X (MTK_PIN_NO(22) | 3)
+#define MT6397_PIN_22_ROW2__FUNC_TEST_IN13 (MTK_PIN_NO(22) | 6)
+#define MT6397_PIN_22_ROW2__FUNC_TEST_OUT13 (MTK_PIN_NO(22) | 7)
+
+#define MT6397_PIN_23_ROW3__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT6397_PIN_23_ROW3__FUNC_ROW3 (MTK_PIN_NO(23) | 1)
+#define MT6397_PIN_23_ROW3__FUNC_EINT21_1X (MTK_PIN_NO(23) | 2)
+#define MT6397_PIN_23_ROW3__FUNC_SDA1_3X (MTK_PIN_NO(23) | 3)
+#define MT6397_PIN_23_ROW3__FUNC_TEST_IN14 (MTK_PIN_NO(23) | 6)
+#define MT6397_PIN_23_ROW3__FUNC_TEST_OUT14 (MTK_PIN_NO(23) | 7)
+
+#define MT6397_PIN_24_ROW4__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT6397_PIN_24_ROW4__FUNC_ROW4 (MTK_PIN_NO(24) | 1)
+#define MT6397_PIN_24_ROW4__FUNC_EINT22_1X (MTK_PIN_NO(24) | 2)
+#define MT6397_PIN_24_ROW4__FUNC_SCL2_3X (MTK_PIN_NO(24) | 3)
+#define MT6397_PIN_24_ROW4__FUNC_TEST_IN15 (MTK_PIN_NO(24) | 6)
+#define MT6397_PIN_24_ROW4__FUNC_TEST_OUT15 (MTK_PIN_NO(24) | 7)
+
+#define MT6397_PIN_25_ROW5__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT6397_PIN_25_ROW5__FUNC_ROW5 (MTK_PIN_NO(25) | 1)
+#define MT6397_PIN_25_ROW5__FUNC_EINT23_1X (MTK_PIN_NO(25) | 2)
+#define MT6397_PIN_25_ROW5__FUNC_SDA2_3X (MTK_PIN_NO(25) | 3)
+#define MT6397_PIN_25_ROW5__FUNC_TEST_IN16 (MTK_PIN_NO(25) | 6)
+#define MT6397_PIN_25_ROW5__FUNC_TEST_OUT16 (MTK_PIN_NO(25) | 7)
+
+#define MT6397_PIN_26_ROW6__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT6397_PIN_26_ROW6__FUNC_ROW6 (MTK_PIN_NO(26) | 1)
+#define MT6397_PIN_26_ROW6__FUNC_EINT24_1X (MTK_PIN_NO(26) | 2)
+#define MT6397_PIN_26_ROW6__FUNC_PWM3_3X (MTK_PIN_NO(26) | 3)
+#define MT6397_PIN_26_ROW6__FUNC_GPIO32K_2 (MTK_PIN_NO(26) | 4)
+#define MT6397_PIN_26_ROW6__FUNC_GPIO26M_2 (MTK_PIN_NO(26) | 5)
+#define MT6397_PIN_26_ROW6__FUNC_TEST_IN17 (MTK_PIN_NO(26) | 6)
+#define MT6397_PIN_26_ROW6__FUNC_TEST_OUT17 (MTK_PIN_NO(26) | 7)
+
+#define MT6397_PIN_27_ROW7__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT6397_PIN_27_ROW7__FUNC_ROW7 (MTK_PIN_NO(27) | 1)
+#define MT6397_PIN_27_ROW7__FUNC_EINT3_1X (MTK_PIN_NO(27) | 2)
+#define MT6397_PIN_27_ROW7__FUNC_CBUS (MTK_PIN_NO(27) | 3)
+#define MT6397_PIN_27_ROW7__FUNC_GPIO32K_3 (MTK_PIN_NO(27) | 4)
+#define MT6397_PIN_27_ROW7__FUNC_GPIO26M_3 (MTK_PIN_NO(27) | 5)
+#define MT6397_PIN_27_ROW7__FUNC_TEST_IN18 (MTK_PIN_NO(27) | 6)
+#define MT6397_PIN_27_ROW7__FUNC_TEST_OUT18 (MTK_PIN_NO(27) | 7)
+
+#define MT6397_PIN_28_PWM1__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT6397_PIN_28_PWM1__FUNC_PWM1 (MTK_PIN_NO(28) | 1)
+#define MT6397_PIN_28_PWM1__FUNC_EINT4_1X (MTK_PIN_NO(28) | 2)
+#define MT6397_PIN_28_PWM1__FUNC_GPIO32K_4 (MTK_PIN_NO(28) | 4)
+#define MT6397_PIN_28_PWM1__FUNC_GPIO26M_4 (MTK_PIN_NO(28) | 5)
+#define MT6397_PIN_28_PWM1__FUNC_TEST_IN19 (MTK_PIN_NO(28) | 6)
+#define MT6397_PIN_28_PWM1__FUNC_TEST_OUT19 (MTK_PIN_NO(28) | 7)
+
+#define MT6397_PIN_29_PWM2__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT6397_PIN_29_PWM2__FUNC_PWM2 (MTK_PIN_NO(29) | 1)
+#define MT6397_PIN_29_PWM2__FUNC_EINT5_1X (MTK_PIN_NO(29) | 2)
+#define MT6397_PIN_29_PWM2__FUNC_GPIO32K_5 (MTK_PIN_NO(29) | 4)
+#define MT6397_PIN_29_PWM2__FUNC_GPIO26M_5 (MTK_PIN_NO(29) | 5)
+#define MT6397_PIN_29_PWM2__FUNC_TEST_IN20 (MTK_PIN_NO(29) | 6)
+#define MT6397_PIN_29_PWM2__FUNC_TEST_OUT20 (MTK_PIN_NO(29) | 7)
+
+#define MT6397_PIN_30_PWM3__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT6397_PIN_30_PWM3__FUNC_PWM3 (MTK_PIN_NO(30) | 1)
+#define MT6397_PIN_30_PWM3__FUNC_EINT6_1X (MTK_PIN_NO(30) | 2)
+#define MT6397_PIN_30_PWM3__FUNC_COL0 (MTK_PIN_NO(30) | 3)
+#define MT6397_PIN_30_PWM3__FUNC_GPIO32K_6 (MTK_PIN_NO(30) | 4)
+#define MT6397_PIN_30_PWM3__FUNC_GPIO26M_6 (MTK_PIN_NO(30) | 5)
+#define MT6397_PIN_30_PWM3__FUNC_TEST_IN21 (MTK_PIN_NO(30) | 6)
+#define MT6397_PIN_30_PWM3__FUNC_TEST_OUT21 (MTK_PIN_NO(30) | 7)
+
+#define MT6397_PIN_31_SCL0__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT6397_PIN_31_SCL0__FUNC_SCL0 (MTK_PIN_NO(31) | 1)
+#define MT6397_PIN_31_SCL0__FUNC_EINT7_1X (MTK_PIN_NO(31) | 2)
+#define MT6397_PIN_31_SCL0__FUNC_PWM1_2X (MTK_PIN_NO(31) | 3)
+#define MT6397_PIN_31_SCL0__FUNC_TEST_IN22 (MTK_PIN_NO(31) | 6)
+#define MT6397_PIN_31_SCL0__FUNC_TEST_OUT22 (MTK_PIN_NO(31) | 7)
+
+#define MT6397_PIN_32_SDA0__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT6397_PIN_32_SDA0__FUNC_SDA0 (MTK_PIN_NO(32) | 1)
+#define MT6397_PIN_32_SDA0__FUNC_EINT8_1X (MTK_PIN_NO(32) | 2)
+#define MT6397_PIN_32_SDA0__FUNC_TEST_IN23 (MTK_PIN_NO(32) | 6)
+#define MT6397_PIN_32_SDA0__FUNC_TEST_OUT23 (MTK_PIN_NO(32) | 7)
+
+#define MT6397_PIN_33_SCL1__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT6397_PIN_33_SCL1__FUNC_SCL1 (MTK_PIN_NO(33) | 1)
+#define MT6397_PIN_33_SCL1__FUNC_EINT9_1X (MTK_PIN_NO(33) | 2)
+#define MT6397_PIN_33_SCL1__FUNC_PWM2_2X (MTK_PIN_NO(33) | 3)
+#define MT6397_PIN_33_SCL1__FUNC_TEST_IN24 (MTK_PIN_NO(33) | 6)
+#define MT6397_PIN_33_SCL1__FUNC_TEST_OUT24 (MTK_PIN_NO(33) | 7)
+
+#define MT6397_PIN_34_SDA1__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT6397_PIN_34_SDA1__FUNC_SDA1 (MTK_PIN_NO(34) | 1)
+#define MT6397_PIN_34_SDA1__FUNC_EINT0_1X (MTK_PIN_NO(34) | 2)
+#define MT6397_PIN_34_SDA1__FUNC_TEST_IN25 (MTK_PIN_NO(34) | 6)
+#define MT6397_PIN_34_SDA1__FUNC_TEST_OUT25 (MTK_PIN_NO(34) | 7)
+
+#define MT6397_PIN_35_SCL2__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT6397_PIN_35_SCL2__FUNC_SCL2 (MTK_PIN_NO(35) | 1)
+#define MT6397_PIN_35_SCL2__FUNC_EINT1_1X (MTK_PIN_NO(35) | 2)
+#define MT6397_PIN_35_SCL2__FUNC_PWM3_2X (MTK_PIN_NO(35) | 3)
+#define MT6397_PIN_35_SCL2__FUNC_TEST_IN26 (MTK_PIN_NO(35) | 6)
+#define MT6397_PIN_35_SCL2__FUNC_TEST_OUT26 (MTK_PIN_NO(35) | 7)
+
+#define MT6397_PIN_36_SDA2__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT6397_PIN_36_SDA2__FUNC_SDA2 (MTK_PIN_NO(36) | 1)
+#define MT6397_PIN_36_SDA2__FUNC_EINT2_1X (MTK_PIN_NO(36) | 2)
+#define MT6397_PIN_36_SDA2__FUNC_TEST_IN27 (MTK_PIN_NO(36) | 6)
+#define MT6397_PIN_36_SDA2__FUNC_TEST_OUT27 (MTK_PIN_NO(36) | 7)
+
+#define MT6397_PIN_37_HDMISD__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT6397_PIN_37_HDMISD__FUNC_HDMISD (MTK_PIN_NO(37) | 1)
+#define MT6397_PIN_37_HDMISD__FUNC_TEST_IN28 (MTK_PIN_NO(37) | 6)
+#define MT6397_PIN_37_HDMISD__FUNC_TEST_OUT28 (MTK_PIN_NO(37) | 7)
+
+#define MT6397_PIN_38_HDMISCK__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT6397_PIN_38_HDMISCK__FUNC_HDMISCK (MTK_PIN_NO(38) | 1)
+#define MT6397_PIN_38_HDMISCK__FUNC_TEST_IN29 (MTK_PIN_NO(38) | 6)
+#define MT6397_PIN_38_HDMISCK__FUNC_TEST_OUT29 (MTK_PIN_NO(38) | 7)
+
+#define MT6397_PIN_39_HTPLG__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT6397_PIN_39_HTPLG__FUNC_HTPLG (MTK_PIN_NO(39) | 1)
+#define MT6397_PIN_39_HTPLG__FUNC_TEST_IN30 (MTK_PIN_NO(39) | 6)
+#define MT6397_PIN_39_HTPLG__FUNC_TEST_OUT30 (MTK_PIN_NO(39) | 7)
+
+#define MT6397_PIN_40_CEC__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT6397_PIN_40_CEC__FUNC_CEC (MTK_PIN_NO(40) | 1)
+#define MT6397_PIN_40_CEC__FUNC_TEST_IN31 (MTK_PIN_NO(40) | 6)
+#define MT6397_PIN_40_CEC__FUNC_TEST_OUT31 (MTK_PIN_NO(40) | 7)
+
+#endif /* __DTS_MT6397_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h
new file mode 100644
index 0000000..1198f45
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt65xx.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_MT65XX_H
+#define _DT_BINDINGS_PINCTRL_MT65XX_H
+
+#define MTK_PIN_NO(x) ((x) << 8)
+#define MTK_GET_PIN_NO(x) ((x) >> 8)
+#define MTK_GET_PIN_FUNC(x) ((x) & 0xf)
+
+#define MTK_PUPD_SET_R1R0_00 100
+#define MTK_PUPD_SET_R1R0_01 101
+#define MTK_PUPD_SET_R1R0_10 102
+#define MTK_PUPD_SET_R1R0_11 103
+
+#define MTK_DRIVE_2mA 2
+#define MTK_DRIVE_4mA 4
+#define MTK_DRIVE_6mA 6
+#define MTK_DRIVE_8mA 8
+#define MTK_DRIVE_10mA 10
+#define MTK_DRIVE_12mA 12
+#define MTK_DRIVE_14mA 14
+#define MTK_DRIVE_16mA 16
+#define MTK_DRIVE_20mA 20
+#define MTK_DRIVE_24mA 24
+#define MTK_DRIVE_28mA 28
+#define MTK_DRIVE_32mA 32
+
+#endif /* _DT_BINDINGS_PINCTRL_MT65XX_H */
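The MediaTek helpers pack the pin number into bits 8 and up and keep the low nibble for the mux function, which is why every MT6397_PIN_* value above is MTK_PIN_NO(pin) | function. A self-contained C sketch of the round trip; the macros and the sample define are copied from the two headers above, and the decode itself is only an illustration:

    /* Sketch only: local copies of the MTK_* helpers and one MT6397 define. */
    #include <stdio.h>

    #define MTK_PIN_NO(x)        ((x) << 8)
    #define MTK_GET_PIN_NO(x)    ((x) >> 8)
    #define MTK_GET_PIN_FUNC(x)  ((x) & 0xf)

    /* From mt6397-pinfunc.h: pin 13 muxed as SCL0 (function 3) */
    #define MT6397_PIN_13_COL1__FUNC_SCL0_2X (MTK_PIN_NO(13) | 3)

    int main(void)
    {
        unsigned int cell = MT6397_PIN_13_COL1__FUNC_SCL0_2X;

        printf("pin = %u, func = %u\n",
               MTK_GET_PIN_NO(cell),     /* -> 13 */
               MTK_GET_PIN_FUNC(cell));  /* -> 3  */
        return 0;
    }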
diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h
index 1c75b8c..1394925 100644
--- a/include/dt-bindings/pinctrl/omap.h
+++ b/include/dt-bindings/pinctrl/omap.h
@@ -61,6 +61,7 @@
#define OMAP3430_CORE2_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x25d8) (val)
#define OMAP3630_CORE2_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x25a0) (val)
#define OMAP3_WKUP_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x2a00) (val)
+#define DM816X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
#define AM4372_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
#define DRA7XX_CORE_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x3400) (val)
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index fa74d7c..aafa76c 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -48,6 +48,14 @@
#define PM8058_GPIO_L5 6
#define PM8058_GPIO_L2 7
+/*
+ * Note: PM8916 GPIO1 and GPIO2 support only the
+ * L2 (1.15V) and L5 (1.8V) options.
+ */
+#define PM8916_GPIO_VPH 0
+#define PM8916_GPIO_L2 2
+#define PM8916_GPIO_L5 3
+
#define PM8917_GPIO_VPH 0
#define PM8917_GPIO_S4 2
#define PM8917_GPIO_L15 3
@@ -115,6 +123,13 @@
#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1
#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO1_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO1_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+#define PM8916_GPIO2_DIV_CLK PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO2_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2
+#define PM8916_GPIO3_KEYP_DRV PMIC_GPIO_FUNC_FUNC1
+#define PM8916_GPIO4_KEYP_DRV PMIC_GPIO_FUNC_FUNC2
+
#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1
#define PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1
#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index d2c7dab..c102054 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -10,6 +10,10 @@
#define PM8841_MPP_VPH 0
#define PM8841_MPP_S3 2
+#define PM8916_MPP_VPH 0
+#define PM8916_MPP_L2 2
+#define PM8916_MPP_L5 3
+
#define PM8941_MPP_VPH 0
#define PM8941_MPP_L1 1
#define PM8941_MPP_S3 2
diff --git a/include/dt-bindings/pinctrl/sun4i-a10.h b/include/dt-bindings/pinctrl/sun4i-a10.h
new file mode 100644
index 0000000..f7553c1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/sun4i-a10.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_SUN4I_A10_H_
+#define __DT_BINDINGS_PINCTRL_SUN4I_A10_H_
+
+#define SUN4I_PINCTRL_10_MA 0
+#define SUN4I_PINCTRL_20_MA 1
+#define SUN4I_PINCTRL_30_MA 2
+#define SUN4I_PINCTRL_40_MA 3
+
+#define SUN4I_PINCTRL_NO_PULL 0
+#define SUN4I_PINCTRL_PULL_UP 1
+#define SUN4I_PINCTRL_PULL_DOWN 2
+
+#endif /* __DT_BINDINGS_PINCTRL_SUN4I_A10_H_ */
diff --git a/include/dt-bindings/reset-controller/mt8135-resets.h b/include/dt-bindings/reset-controller/mt8135-resets.h
new file mode 100644
index 0000000..1fb6295
--- /dev/null
+++ b/include/dt-bindings/reset-controller/mt8135-resets.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8135
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8135
+
+/* INFRACFG resets */
+#define MT8135_INFRA_EMI_REG_RST 0
+#define MT8135_INFRA_DRAMC0_A0_RST 1
+#define MT8135_INFRA_CCIF0_RST 2
+#define MT8135_INFRA_APCIRQ_EINT_RST 3
+#define MT8135_INFRA_APXGPT_RST 4
+#define MT8135_INFRA_SCPSYS_RST 5
+#define MT8135_INFRA_CCIF1_RST 6
+#define MT8135_INFRA_PMIC_WRAP_RST 7
+#define MT8135_INFRA_KP_RST 8
+#define MT8135_INFRA_EMI_RST 32
+#define MT8135_INFRA_DRAMC0_RST 34
+#define MT8135_INFRA_SMI_RST 35
+#define MT8135_INFRA_M4U_RST 36
+
+/* PERICFG resets */
+#define MT8135_PERI_UART0_SW_RST 0
+#define MT8135_PERI_UART1_SW_RST 1
+#define MT8135_PERI_UART2_SW_RST 2
+#define MT8135_PERI_UART3_SW_RST 3
+#define MT8135_PERI_IRDA_SW_RST 4
+#define MT8135_PERI_PTP_SW_RST 5
+#define MT8135_PERI_AP_HIF_SW_RST 6
+#define MT8135_PERI_GPCU_SW_RST 7
+#define MT8135_PERI_MD_HIF_SW_RST 8
+#define MT8135_PERI_NLI_SW_RST 9
+#define MT8135_PERI_AUXADC_SW_RST 10
+#define MT8135_PERI_DMA_SW_RST 11
+#define MT8135_PERI_NFI_SW_RST 14
+#define MT8135_PERI_PWM_SW_RST 15
+#define MT8135_PERI_THERM_SW_RST 16
+#define MT8135_PERI_MSDC0_SW_RST 17
+#define MT8135_PERI_MSDC1_SW_RST 18
+#define MT8135_PERI_MSDC2_SW_RST 19
+#define MT8135_PERI_MSDC3_SW_RST 20
+#define MT8135_PERI_I2C0_SW_RST 22
+#define MT8135_PERI_I2C1_SW_RST 23
+#define MT8135_PERI_I2C2_SW_RST 24
+#define MT8135_PERI_I2C3_SW_RST 25
+#define MT8135_PERI_I2C4_SW_RST 26
+#define MT8135_PERI_I2C5_SW_RST 27
+#define MT8135_PERI_I2C6_SW_RST 28
+#define MT8135_PERI_USB_SW_RST 29
+#define MT8135_PERI_SPI1_SW_RST 33
+#define MT8135_PERI_PWRAP_BRIDGE_SW_RST 34
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8135 */
diff --git a/include/dt-bindings/reset-controller/mt8173-resets.h b/include/dt-bindings/reset-controller/mt8173-resets.h
new file mode 100644
index 0000000..9464b37
--- /dev/null
+++ b/include/dt-bindings/reset-controller/mt8173-resets.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8173
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8173
+
+/* INFRACFG resets */
+#define MT8173_INFRA_EMI_REG_RST 0
+#define MT8173_INFRA_DRAMC0_A0_RST 1
+#define MT8173_INFRA_APCIRQ_EINT_RST 3
+#define MT8173_INFRA_APXGPT_RST 4
+#define MT8173_INFRA_SCPSYS_RST 5
+#define MT8173_INFRA_KP_RST 6
+#define MT8173_INFRA_PMIC_WRAP_RST 7
+#define MT8173_INFRA_MPIP_RST 8
+#define MT8173_INFRA_CEC_RST 9
+#define MT8173_INFRA_EMI_RST 32
+#define MT8173_INFRA_DRAMC0_RST 34
+#define MT8173_INFRA_APMIXEDSYS_RST 35
+#define MT8173_INFRA_MIPI_DSI_RST 36
+#define MT8173_INFRA_TRNG_RST 37
+#define MT8173_INFRA_SYSIRQ_RST 38
+#define MT8173_INFRA_MIPI_CSI_RST 39
+#define MT8173_INFRA_GCE_FAXI_RST 40
+#define MT8173_INFRA_MMIOMMURST 47
+
+
+/* PERICFG resets */
+#define MT8173_PERI_UART0_SW_RST 0
+#define MT8173_PERI_UART1_SW_RST 1
+#define MT8173_PERI_UART2_SW_RST 2
+#define MT8173_PERI_UART3_SW_RST 3
+#define MT8173_PERI_IRRX_SW_RST 4
+#define MT8173_PERI_PWM_SW_RST 8
+#define MT8173_PERI_AUXADC_SW_RST 10
+#define MT8173_PERI_DMA_SW_RST 11
+#define MT8173_PERI_I2C6_SW_RST 13
+#define MT8173_PERI_NFI_SW_RST 14
+#define MT8173_PERI_THERM_SW_RST 16
+#define MT8173_PERI_MSDC2_SW_RST 17
+#define MT8173_PERI_MSDC3_SW_RST 18
+#define MT8173_PERI_MSDC0_SW_RST 19
+#define MT8173_PERI_MSDC1_SW_RST 20
+#define MT8173_PERI_I2C0_SW_RST 22
+#define MT8173_PERI_I2C1_SW_RST 23
+#define MT8173_PERI_I2C2_SW_RST 24
+#define MT8173_PERI_I2C3_SW_RST 25
+#define MT8173_PERI_I2C4_SW_RST 26
+#define MT8173_PERI_HDMI_SW_RST 29
+#define MT8173_PERI_SPI0_SW_RST 33
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8173 */
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
index 0ad5ef9..de9c814 100644
--- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -129,4 +129,47 @@
#define USB30_1_PHY_RESET 112
#define NSSFB0_RESET 113
#define NSSFB1_RESET 114
+#define UBI32_CORE1_CLKRST_CLAMP_RESET 115
+#define UBI32_CORE1_CLAMP_RESET 116
+#define UBI32_CORE1_AHB_RESET 117
+#define UBI32_CORE1_AXI_RESET 118
+#define UBI32_CORE2_CLKRST_CLAMP_RESET 119
+#define UBI32_CORE2_CLAMP_RESET 120
+#define UBI32_CORE2_AHB_RESET 121
+#define UBI32_CORE2_AXI_RESET 122
+#define GMAC_CORE1_RESET 123
+#define GMAC_CORE2_RESET 124
+#define GMAC_CORE3_RESET 125
+#define GMAC_CORE4_RESET 126
+#define GMAC_AHB_RESET 127
+#define NSS_CH0_RST_RX_CLK_N_RESET 128
+#define NSS_CH0_RST_TX_CLK_N_RESET 129
+#define NSS_CH0_RST_RX_125M_N_RESET 130
+#define NSS_CH0_HW_RST_RX_125M_N_RESET 131
+#define NSS_CH0_RST_TX_125M_N_RESET 132
+#define NSS_CH1_RST_RX_CLK_N_RESET 133
+#define NSS_CH1_RST_TX_CLK_N_RESET 134
+#define NSS_CH1_RST_RX_125M_N_RESET 135
+#define NSS_CH1_HW_RST_RX_125M_N_RESET 136
+#define NSS_CH1_RST_TX_125M_N_RESET 137
+#define NSS_CH2_RST_RX_CLK_N_RESET 138
+#define NSS_CH2_RST_TX_CLK_N_RESET 139
+#define NSS_CH2_RST_RX_125M_N_RESET 140
+#define NSS_CH2_HW_RST_RX_125M_N_RESET 141
+#define NSS_CH2_RST_TX_125M_N_RESET 142
+#define NSS_CH3_RST_RX_CLK_N_RESET 143
+#define NSS_CH3_RST_TX_CLK_N_RESET 144
+#define NSS_CH3_RST_RX_125M_N_RESET 145
+#define NSS_CH3_HW_RST_RX_125M_N_RESET 146
+#define NSS_CH3_RST_TX_125M_N_RESET 147
+#define NSS_RST_RX_250M_125M_N_RESET 148
+#define NSS_RST_TX_250M_125M_N_RESET 149
+#define NSS_QSGMII_TXPI_RST_N_RESET 150
+#define NSS_QSGMII_CDR_RST_N_RESET 151
+#define NSS_SGMII2_CDR_RST_N_RESET 152
+#define NSS_SGMII3_CDR_RST_N_RESET 153
+#define NSS_CAL_PRBS_RST_N_RESET 154
+#define NSS_LCKDT_RST_N_RESET 155
+#define NSS_SRDS_N_RESET 156
+
#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8916.h b/include/dt-bindings/reset/qcom,gcc-msm8916.h
new file mode 100644
index 0000000..3d90410
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8916.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8916_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8916_H
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_IMEM_BCR 9
+#define GCC_SMMU_BCR 10
+#define GCC_APSS_TCU_BCR 11
+#define GCC_SMMU_XPU_BCR 12
+#define GCC_PCNOC_TBU_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_CRYPTO_BCR 16
+#define GCC_SEC_CTRL_BCR 17
+#define GCC_AUDIO_CORE_BCR 18
+#define GCC_ULT_AUDIO_BCR 19
+#define GCC_DEHR_BCR 20
+#define GCC_SYSTEM_NOC_BCR 21
+#define GCC_PCNOC_BCR 22
+#define GCC_TCSR_BCR 23
+#define GCC_QDSS_BCR 24
+#define GCC_DCD_BCR 25
+#define GCC_MSG_RAM_BCR 26
+#define GCC_MPM_BCR 27
+#define GCC_SPMI_BCR 28
+#define GCC_SPDM_BCR 29
+#define GCC_MM_SPDM_BCR 30
+#define GCC_BIMC_BCR 31
+#define GCC_RBCPR_BCR 32
+#define GCC_TLMM_BCR 33
+#define GCC_USB_HS_BCR 34
+#define GCC_USB2A_PHY_BCR 35
+#define GCC_SDCC1_BCR 36
+#define GCC_SDCC2_BCR 37
+#define GCC_PDM_BCR 38
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 39
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49
+#define GCC_MMSS_BCR 50
+#define GCC_VENUS0_BCR 51
+#define GCC_MDSS_BCR 52
+#define GCC_CAMSS_PHY0_BCR 53
+#define GCC_CAMSS_CSI0_BCR 54
+#define GCC_CAMSS_CSI0PHY_BCR 55
+#define GCC_CAMSS_CSI0RDI_BCR 56
+#define GCC_CAMSS_CSI0PIX_BCR 57
+#define GCC_CAMSS_PHY1_BCR 58
+#define GCC_CAMSS_CSI1_BCR 59
+#define GCC_CAMSS_CSI1PHY_BCR 60
+#define GCC_CAMSS_CSI1RDI_BCR 61
+#define GCC_CAMSS_CSI1PIX_BCR 62
+#define GCC_CAMSS_ISPIF_BCR 63
+#define GCC_CAMSS_CCI_BCR 64
+#define GCC_CAMSS_MCLK0_BCR 65
+#define GCC_CAMSS_MCLK1_BCR 66
+#define GCC_CAMSS_GP0_BCR 67
+#define GCC_CAMSS_GP1_BCR 68
+#define GCC_CAMSS_TOP_BCR 69
+#define GCC_CAMSS_MICRO_BCR 70
+#define GCC_CAMSS_JPEG_BCR 71
+#define GCC_CAMSS_VFE_BCR 72
+#define GCC_CAMSS_CSI_VFE0_BCR 73
+#define GCC_OXILI_BCR 74
+#define GCC_GMEM_BCR 75
+#define GCC_CAMSS_AHB_BCR 76
+#define GCC_MDP_TBU_BCR 77
+#define GCC_GFX_TBU_BCR 78
+#define GCC_GFX_TCU_BCR 79
+#define GCC_MSS_TBU_AXI_BCR 80
+#define GCC_MSS_TBU_GSS_AXI_BCR 81
+#define GCC_MSS_TBU_Q6_AXI_BCR 82
+#define GCC_GTCU_AHB_BCR 83
+#define GCC_SMMU_CFG_BCR 84
+#define GCC_VFE_TBU_BCR 85
+#define GCC_VENUS_TBU_BCR 86
+#define GCC_JPEG_TBU_BCR 87
+#define GCC_PRONTO_TBU_BCR 88
+#define GCC_SMMU_CATS_BCR 89
+
+#endif
diff --git a/include/dt-bindings/sound/apq8016-lpass.h b/include/dt-bindings/sound/apq8016-lpass.h
new file mode 100644
index 0000000..499076e
--- /dev/null
+++ b/include/dt-bindings/sound/apq8016-lpass.h
@@ -0,0 +1,9 @@
+#ifndef __DT_APQ8016_LPASS_H
+#define __DT_APQ8016_LPASS_H
+
+#define MI2S_PRIMARY 0
+#define MI2S_SECONDARY 1
+#define MI2S_TERTIARY 2
+#define MI2S_QUATERNARY 3
+
+#endif /* __DT_APQ8016_LPASS_H */
diff --git a/include/dt-bindings/sound/audio-jack-events.h b/include/dt-bindings/sound/audio-jack-events.h
new file mode 100644
index 0000000..378349f
--- /dev/null
+++ b/include/dt-bindings/sound/audio-jack-events.h
@@ -0,0 +1,9 @@
+#ifndef __AUDIO_JACK_EVENTS_H
+#define __AUDIO_JACK_EVENTS_H
+
+#define JACK_HEADPHONE 1
+#define JACK_MICROPHONE 2
+#define JACK_LINEOUT 3
+#define JACK_LINEIN 4
+
+#endif /* __AUDIO_JACK_EVENTS_H */
diff --git a/include/dt-bindings/sound/samsung-i2s.h b/include/dt-bindings/sound/samsung-i2s.h
new file mode 100644
index 0000000..0c69818
--- /dev/null
+++ b/include/dt-bindings/sound/samsung-i2s.h
@@ -0,0 +1,8 @@
+#ifndef _DT_BINDINGS_SAMSUNG_I2S_H
+#define _DT_BINDINGS_SAMSUNG_I2S_H
+
+#define CLK_I2S_CDCLK 0
+#define CLK_I2S_RCLK_SRC 1
+#define CLK_I2S_RCLK_PSR 2
+
+#endif /* _DT_BINDINGS_SAMSUNG_I2S_H */
diff --git a/include/dt-bindings/sound/tas2552.h b/include/dt-bindings/sound/tas2552.h
new file mode 100644
index 0000000..a4e1a07
--- /dev/null
+++ b/include/dt-bindings/sound/tas2552.h
@@ -0,0 +1,18 @@
+#ifndef __DT_TAS2552_H
+#define __DT_TAS2552_H
+
+#define TAS2552_PLL_CLKIN (0)
+#define TAS2552_PDM_CLK (1)
+#define TAS2552_CLK_TARGET_MASK (1)
+
+#define TAS2552_PLL_CLKIN_MCLK ((0 << 1) | TAS2552_PLL_CLKIN)
+#define TAS2552_PLL_CLKIN_BCLK ((1 << 1) | TAS2552_PLL_CLKIN)
+#define TAS2552_PLL_CLKIN_IVCLKIN ((2 << 1) | TAS2552_PLL_CLKIN)
+#define TAS2552_PLL_CLKIN_1_8_FIXED ((3 << 1) | TAS2552_PLL_CLKIN)
+
+#define TAS2552_PDM_CLK_PLL ((0 << 1) | TAS2552_PDM_CLK)
+#define TAS2552_PDM_CLK_IVCLKIN ((1 << 1) | TAS2552_PDM_CLK)
+#define TAS2552_PDM_CLK_BCLK ((2 << 1) | TAS2552_PDM_CLK)
+#define TAS2552_PDM_CLK_MCLK ((3 << 1) | TAS2552_PDM_CLK)
+
+#endif /* __DT_TAS2552_H */
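In the TAS2552 defines, bit 0 tags whether an identifier targets the PLL clock input or the PDM clock (that is what TAS2552_CLK_TARGET_MASK selects), and the higher bits carry the chosen source. A hedged C sketch using only values copied from the header above; the describe() helper is illustrative and not a driver API:

    /* Sketch only: local copies of a few TAS2552 clock defines from above. */
    #include <stdio.h>

    #define TAS2552_PLL_CLKIN        (0)
    #define TAS2552_PDM_CLK          (1)
    #define TAS2552_CLK_TARGET_MASK  (1)

    #define TAS2552_PLL_CLKIN_BCLK   ((1 << 1) | TAS2552_PLL_CLKIN)
    #define TAS2552_PDM_CLK_MCLK     ((3 << 1) | TAS2552_PDM_CLK)

    static void describe(unsigned int clk_id)
    {
        unsigned int target = clk_id & TAS2552_CLK_TARGET_MASK;
        unsigned int source = clk_id >> 1;

        printf("target=%s source=%u\n",
               target == TAS2552_PLL_CLKIN ? "PLL clkin" : "PDM clk", source);
    }

    int main(void)
    {
        describe(TAS2552_PLL_CLKIN_BCLK);  /* target=PLL clkin, source=1 */
        describe(TAS2552_PDM_CLK_MCLK);    /* target=PDM clk,  source=3 */
        return 0;
    }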
diff --git a/include/linux/clk/sunxi.h b/include/dt-bindings/thermal/thermal_exynos.h
index aed28c4..0646500 100644
--- a/include/linux/clk/sunxi.h
+++ b/include/dt-bindings/thermal/thermal_exynos.h
@@ -1,5 +1,8 @@
/*
- * Copyright 2013 - Hans de Goede <hdegoede@redhat.com>
+ * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Lukasz Majewski <l.majewski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -10,13 +13,16 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
*/
-#ifndef __LINUX_CLK_SUNXI_H_
-#define __LINUX_CLK_SUNXI_H_
-
-#include <linux/clk.h>
+#ifndef _EXYNOS_THERMAL_TMU_DT_H
+#define _EXYNOS_THERMAL_TMU_DT_H
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output);
+#define TYPE_ONE_POINT_TRIMMING 0
+#define TYPE_ONE_POINT_TRIMMING_25 1
+#define TYPE_ONE_POINT_TRIMMING_85 2
+#define TYPE_TWO_POINT_TRIMMING 3
+#define TYPE_NONE 4
-#endif
+#endif /* _EXYNOS_THERMAL_TMU_DT_H */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b3f45a5..e596675 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -24,17 +24,14 @@
#include <linux/workqueue.h>
struct arch_timer_kvm {
-#ifdef CONFIG_KVM_ARM_TIMER
/* Is the timer enabled */
bool enabled;
/* Virtual offset */
cycle_t cntvoff;
-#endif
};
struct arch_timer_cpu {
-#ifdef CONFIG_KVM_ARM_TIMER
/* Registers: control register, timer value */
u32 cntv_ctl; /* Saved/restored */
cycle_t cntv_cval; /* Saved/restored */
@@ -55,10 +52,8 @@ struct arch_timer_cpu {
/* Timer IRQ */
const struct kvm_irq_level *irq;
-#endif
};
-#ifdef CONFIG_KVM_ARM_TIMER
int kvm_timer_hyp_init(void);
void kvm_timer_enable(struct kvm *kvm);
void kvm_timer_init(struct kvm *kvm);
@@ -72,30 +67,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
-#else
-static inline int kvm_timer_hyp_init(void)
-{
- return 0;
-};
-
-static inline void kvm_timer_enable(struct kvm *kvm) {}
-static inline void kvm_timer_init(struct kvm *kvm) {}
-static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *irq) {}
-static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
-
-static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
-{
- return 0;
-}
-
-static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
-{
- return 0;
-}
-#endif
+bool kvm_timer_should_fire(struct kvm_vcpu *vcpu);
#endif
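The !CONFIG_KVM_ARM_TIMER stubs are gone and kvm_timer_should_fire() is exported. A minimal sketch of a caller, assuming the generic kvm_vcpu_kick() helper; the function and the pairing below are illustrative, not the in-tree logic:

	static void example_check_vtimer(struct kvm_vcpu *vcpu)
	{
		if (kvm_timer_should_fire(vcpu))	/* virtual timer has expired for this VCPU */
			kvm_vcpu_kick(vcpu);
	}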
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index ac4888d..133ea00 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -24,6 +24,7 @@
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <kvm/iodev.h>
#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
@@ -33,10 +34,11 @@
#define VGIC_V2_MAX_LRS (1 << 6)
#define VGIC_V3_MAX_LRS 16
#define VGIC_MAX_IRQS 1024
+#define VGIC_V2_MAX_CPUS 8
/* Sanity checks... */
-#if (KVM_MAX_VCPUS > 8)
-#error Invalid number of CPU interfaces
+#if (KVM_MAX_VCPUS > 255)
+#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
#endif
#if (VGIC_NR_IRQS_LEGACY & 31)
@@ -113,6 +115,7 @@ struct vgic_ops {
void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
+ void (*clear_eisr)(struct kvm_vcpu *vcpu);
u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
void (*enable_underflow)(struct kvm_vcpu *vcpu);
void (*disable_underflow)(struct kvm_vcpu *vcpu);
@@ -132,14 +135,34 @@ struct vgic_params {
unsigned int maint_irq;
/* Virtual control interface base address */
void __iomem *vctrl_base;
+ int max_gic_vcpus;
+ /* Only needed for the legacy KVM_CREATE_IRQCHIP */
+ bool can_emulate_gicv2;
+};
+
+struct vgic_vm_ops {
+ bool (*queue_sgi)(struct kvm_vcpu *, int irq);
+ void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
+ int (*init_model)(struct kvm *);
+ int (*map_resources)(struct kvm *, const struct vgic_params *);
+};
+
+struct vgic_io_device {
+ gpa_t addr;
+ int len;
+ const struct vgic_io_range *reg_ranges;
+ struct kvm_vcpu *redist_vcpu;
+ struct kvm_io_device dev;
};
struct vgic_dist {
-#ifdef CONFIG_KVM_ARM_VGIC
spinlock_t lock;
bool in_kernel;
bool ready;
+ /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
+ u32 vgic_model;
+
int nr_cpus;
int nr_irqs;
@@ -148,7 +171,11 @@ struct vgic_dist {
/* Distributor and vcpu interface mapping in the guest */
phys_addr_t vgic_dist_base;
- phys_addr_t vgic_cpu_base;
+ /* GICv2 and GICv3 use different mapped register blocks */
+ union {
+ phys_addr_t vgic_cpu_base;
+ phys_addr_t vgic_redist_base;
+ };
/* Distributor enabled */
u32 enabled;
@@ -176,6 +203,9 @@ struct vgic_dist {
/* Level-triggered interrupt queued on VCPU interface */
struct vgic_bitmap irq_queued;
+ /* Interrupt was active when unqueued from the VCPU interface */
+ struct vgic_bitmap irq_active;
+
/* Interrupt priority. Not used yet. */
struct vgic_bytemap irq_priority;
@@ -210,9 +240,18 @@ struct vgic_dist {
*/
struct vgic_bitmap *irq_spi_target;
+ /* Target MPIDR for each IRQ (needed only for GICv3 IROUTERn) */
+ u32 *irq_spi_mpidr;
+
/* Bitmap indicating which CPU has something pending */
unsigned long *irq_pending_on_cpu;
-#endif
+
+ /* Bitmap indicating which CPU has active IRQs */
+ unsigned long *irq_active_on_cpu;
+
+ struct vgic_vm_ops vm_ops;
+ struct vgic_io_device dist_iodev;
+ struct vgic_io_device *redist_iodevs;
};
struct vgic_v2_cpu_if {
@@ -229,6 +268,7 @@ struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
u32 vgic_hcr;
u32 vgic_vmcr;
+ u32 vgic_sre; /* Restored only, change ignored */
u32 vgic_misr; /* Saved only */
u32 vgic_eisr; /* Saved only */
u32 vgic_elrsr; /* Saved only */
@@ -239,13 +279,18 @@ struct vgic_v3_cpu_if {
};
struct vgic_cpu {
-#ifdef CONFIG_KVM_ARM_VGIC
/* per IRQ to LR mapping */
u8 *vgic_irq_lr_map;
- /* Pending interrupts on this VCPU */
+ /* Pending/active/both interrupts on this VCPU */
DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP( active_percpu, VGIC_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP( pend_act_percpu, VGIC_NR_PRIVATE_IRQS);
+
+ /* Pending/active/both shared interrupts, dynamically sized */
unsigned long *pending_shared;
+ unsigned long *active_shared;
+ unsigned long *pend_act_shared;
/* Bitmap of used/free list registers */
DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
@@ -258,7 +303,6 @@ struct vgic_cpu {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
};
-#endif
};
#define LR_EMPTY 0xff
@@ -268,23 +312,21 @@ struct vgic_cpu {
struct kvm;
struct kvm_vcpu;
-struct kvm_run;
-struct kvm_exit_mmio;
-#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
-int kvm_vgic_create(struct kvm *kvm);
+int kvm_vgic_get_max_vcpus(void);
+int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio);
+int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
@@ -306,79 +348,4 @@ static inline int vgic_v3_probe(struct device_node *vgic_node,
}
#endif
-#else
-static inline int kvm_vgic_hyp_init(void)
-{
- return 0;
-}
-
-static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
-{
- return 0;
-}
-
-static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
-{
- return -ENXIO;
-}
-
-static inline int kvm_vgic_map_resources(struct kvm *kvm)
-{
- return 0;
-}
-
-static inline int kvm_vgic_create(struct kvm *kvm)
-{
- return 0;
-}
-
-static inline void kvm_vgic_destroy(struct kvm *kvm)
-{
-}
-
-static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
-
-static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
- unsigned int irq_num, bool level)
-{
- return 0;
-}
-
-static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- return false;
-}
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
- return 0;
-}
-
-static inline bool vgic_initialized(struct kvm *kvm)
-{
- return true;
-}
-
-static inline bool vgic_ready(struct kvm *kvm)
-{
- return true;
-}
-#endif
-
#endif
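kvm_vgic_create() now takes the requested device type, so the emulated GIC model is fixed at creation time. A sketch under the assumption that KVM_DEV_TYPE_ARM_VGIC_V2 from the KVM UAPI is used; the surrounding function and the VCPU-count check are invented:

	static int example_create_gicv2(struct kvm *kvm)
	{
		if (atomic_read(&kvm->online_vcpus) > kvm_vgic_get_max_vcpus())
			return -E2BIG;		/* illustrative limit check only */
		return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
	}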
diff --git a/include/kvm/iodev.h b/include/kvm/iodev.h
new file mode 100644
index 0000000..a6d208b
--- /dev/null
+++ b/include/kvm/iodev.h
@@ -0,0 +1,76 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __KVM_IODEV_H__
+#define __KVM_IODEV_H__
+
+#include <linux/kvm_types.h>
+#include <linux/errno.h>
+
+struct kvm_io_device;
+struct kvm_vcpu;
+
+/**
+ * kvm_io_device_ops are called under kvm slots_lock.
+ * read and write handlers return 0 if the transaction has been handled,
+ * or non-zero to have it passed to the next device.
+ **/
+struct kvm_io_device_ops {
+ int (*read)(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr,
+ int len,
+ void *val);
+ int (*write)(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr,
+ int len,
+ const void *val);
+ void (*destructor)(struct kvm_io_device *this);
+};
+
+
+struct kvm_io_device {
+ const struct kvm_io_device_ops *ops;
+};
+
+static inline void kvm_iodevice_init(struct kvm_io_device *dev,
+ const struct kvm_io_device_ops *ops)
+{
+ dev->ops = ops;
+}
+
+static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev, gpa_t addr,
+ int l, void *v)
+{
+ return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v)
+ : -EOPNOTSUPP;
+}
+
+static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev, gpa_t addr,
+ int l, const void *v)
+{
+ return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v)
+ : -EOPNOTSUPP;
+}
+
+static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+ if (dev->ops->destructor)
+ dev->ops->destructor(dev);
+}
+
+#endif /* __KVM_IODEV_H__ */
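A hypothetical read-only MMIO device built on this interface, to show how the ops table and kvm_iodevice_init() fit together; every example_* name is made up, and headers plus registration on a KVM bus are omitted:

	static int example_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
				gpa_t addr, int len, void *val)
	{
		memset(val, 0, len);	/* the fake register always reads as zero */
		return 0;		/* 0 means the access was handled */
	}

	static const struct kvm_io_device_ops example_ops = {
		.read	= example_read,
		/* .write left NULL: kvm_iodevice_write() then returns -EOPNOTSUPP */
	};

	static struct kvm_io_device example_dev;

	static void example_setup(void)
	{
		kvm_iodevice_init(&example_dev, &example_ops);
	}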
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
index 220f143..ee88416 100644
--- a/include/linux/a.out.h
+++ b/include/linux/a.out.h
@@ -4,44 +4,6 @@
#include <uapi/linux/a.out.h>
#ifndef __ASSEMBLY__
-#if defined (M_OLDSUN2)
-#else
-#endif
-#if defined (M_68010)
-#else
-#endif
-#if defined (M_68020)
-#else
-#endif
-#if defined (M_SPARC)
-#else
-#endif
-#if !defined (N_MAGIC)
-#endif
-#if !defined (N_BADMAG)
-#endif
-#if !defined (N_TXTOFF)
-#endif
-#if !defined (N_DATOFF)
-#endif
-#if !defined (N_TRELOFF)
-#endif
-#if !defined (N_DRELOFF)
-#endif
-#if !defined (N_SYMOFF)
-#endif
-#if !defined (N_STROFF)
-#endif
-#if !defined (N_TXTADDR)
-#endif
-#if defined(vax) || defined(hp300) || defined(pyr)
-#endif
-#ifdef sony
-#endif /* Sony. */
-#ifdef is68k
-#endif
-#if defined(m68k) && defined(PORTAR)
-#endif
#ifdef linux
#include <asm/page.h>
#if defined(__i386__) || defined(__mc68000__)
@@ -51,34 +13,5 @@
#endif
#endif
#endif
-#ifndef N_DATADDR
-#endif
-#if !defined (N_BSSADDR)
-#endif
-#if !defined (N_NLIST_DECLARED)
-#endif /* no N_NLIST_DECLARED. */
-#if !defined (N_UNDF)
-#endif
-#if !defined (N_ABS)
-#endif
-#if !defined (N_TEXT)
-#endif
-#if !defined (N_DATA)
-#endif
-#if !defined (N_BSS)
-#endif
-#if !defined (N_FN)
-#endif
-#if !defined (N_EXT)
-#endif
-#if !defined (N_TYPE)
-#endif
-#if !defined (N_STAB)
-#endif
-#if !defined (N_RELOCATION_INFO_DECLARED)
-#ifdef NS32K
-#else
-#endif
-#endif /* no N_RELOCATION_INFO_DECLARED. */
#endif /*__ASSEMBLY__ */
#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d459cd1..d2445fa 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
+#include <linux/resource_ext.h>
#include <linux/device.h>
#include <linux/property.h>
@@ -52,10 +53,29 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
return adev ? adev->handle : NULL;
}
-#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion)
-#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev)
+#define ACPI_COMPANION(dev) to_acpi_node((dev)->fwnode)
+#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
+ acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields will be left
+ * initialized with the default value.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
+
+static inline bool has_acpi_companion(struct device *dev)
+{
+ return is_acpi_node(dev->fwnode);
+}
+
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
@@ -72,6 +92,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOAPIC,
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
+ ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_COUNT
};
@@ -145,12 +166,31 @@ void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
+#ifndef PHYS_CPUID_INVALID
+typedef u32 phys_cpuid_t;
+#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
+#endif
+
+static inline bool invalid_logical_cpuid(u32 cpuid)
+{
+ return (int)cpuid < 0;
+}
+
+static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
+{
+ return phys_id == PHYS_CPUID_INVALID;
+}
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
+#endif
+
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
@@ -226,54 +266,21 @@ extern bool wmi_has_guid(const char *guid);
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
-#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-
-extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
-extern void acpi_video_dmi_promote_vendor(void);
-extern void acpi_video_dmi_demote_vendor(void);
-extern int acpi_video_backlight_support(void);
-extern int acpi_video_display_switch_support(void);
-
-#else
-
-static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
-{
- return 0;
-}
-
-static inline long acpi_is_video_device(acpi_handle handle)
-{
- return 0;
-}
-
-static inline void acpi_video_dmi_promote_vendor(void)
-{
-}
-
-static inline void acpi_video_dmi_demote_vendor(void)
-{
-}
-
-static inline int acpi_video_backlight_support(void)
-{
- return 0;
-}
-
-static inline int acpi_video_display_switch_support(void)
-{
- return 0;
-}
-
-#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
-
extern int acpi_blacklisted(void);
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
extern void acpi_osi_setup(char *str);
+extern bool acpi_osi_is_win8(void);
#ifdef CONFIG_ACPI_NUMA
+int acpi_map_pxm_to_online_node(int pxm);
int acpi_get_node(acpi_handle handle);
#else
+static inline int acpi_map_pxm_to_online_node(int pxm)
+{
+ return 0;
+}
static inline int acpi_get_node(acpi_handle handle)
{
return 0;
@@ -288,22 +295,25 @@ extern int pnpacpi_disabled;
bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res);
bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res);
bool acpi_dev_resource_address_space(struct acpi_resource *ares,
- struct resource *res);
+ struct resource_win *win);
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
- struct resource *res);
+ struct resource_win *win);
unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
-struct resource_list_entry {
- struct list_head node;
- struct resource res;
-};
-
void acpi_dev_free_resource_list(struct list_head *list);
int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data);
+int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+ unsigned long types);
+
+static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
+ void *arg)
+{
+ return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
+}
int acpi_check_resource_conflict(const struct resource *res);
@@ -420,6 +430,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
extern void acpi_early_init(void);
+extern void acpi_subsystem_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
@@ -445,6 +456,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
#define ACPI_COMPANION(dev) (NULL)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
struct fwnode_handle;
@@ -453,7 +465,7 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode)
return false;
}
-static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -463,12 +475,18 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
return NULL;
}
+static inline bool has_acpi_companion(struct device *dev)
+{
+ return false;
+}
+
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
return NULL;
}
static inline void acpi_early_init(void) { }
+static inline void acpi_subsystem_init(void) { }
static inline int early_acpi_boot_init(void)
{
@@ -544,6 +562,11 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+ return false;
+}
+
#define ACPI_PTR(_ptr) (NULL)
#endif /* !CONFIG_ACPI */
@@ -696,6 +719,8 @@ static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
if (adev)
adev->driver_gpios = NULL;
}
+
+int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
#else
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
@@ -703,6 +728,11 @@ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
return -ENXIO;
}
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+
+static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+{
+ return -ENXIO;
+}
#endif
/* Device properties */
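ACPI_DEVICE_CLASS() fills the new .cls/.cls_msk members of struct acpi_device_id, so a driver can match on a PCI-defined class code instead of a HID. A hedged sketch; the table and the "EXMP0001" HID are invented, while PCI_CLASS_SERIAL_USB_XHCI comes from <linux/pci_ids.h>:

	static const struct acpi_device_id example_acpi_ids[] = {
		{ "EXMP0001" },							/* match by HID, as before */
		{ ACPI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, 0xffffff) },	/* match by class code */
		{ }
	};

has_acpi_companion(dev), likewise added above, gives a direct test for whether a device has an ACPI companion.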
diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h
new file mode 100644
index 0000000..f10c872
--- /dev/null
+++ b/include/linux/acpi_irq.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_ACPI_IRQ_H
+#define _LINUX_ACPI_IRQ_H
+
+#include <linux/irq.h>
+
+#ifndef acpi_irq_init
+static inline void acpi_irq_init(void) { }
+#endif
+
+#endif /* _LINUX_ACPI_IRQ_H */
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 642d6ae..a270f25e 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -21,16 +21,20 @@ struct device;
struct ata_port_info;
struct ahci_host_priv;
struct platform_device;
+struct scsi_host_template;
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
struct ahci_host_priv *ahci_platform_get_resources(
struct platform_device *pdev);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
- const struct ata_port_info *pi_template);
+ const struct ata_port_info *pi_template,
+ struct scsi_host_template *sht);
int ahci_platform_suspend_host(struct device *dev);
int ahci_platform_resume_host(struct device *dev);
diff --git a/include/linux/aio.h b/include/linux/aio.h
index d9c92da..9eb42db 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -1,86 +1,23 @@
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H
-#include <linux/list.h>
-#include <linux/workqueue.h>
#include <linux/aio_abi.h>
-#include <linux/uio.h>
-#include <linux/rcupdate.h>
-
-#include <linux/atomic.h>
struct kioctx;
struct kiocb;
+struct mm_struct;
#define KIOCB_KEY 0
-/*
- * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
- * cancelled or completed (this makes a certain amount of sense because
- * successful cancellation - io_cancel() - does deliver the completion to
- * userspace).
- *
- * And since most things don't implement kiocb cancellation and we'd really like
- * kiocb completion to be lockless when possible, we use ki_cancel to
- * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
- * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
- */
-#define KIOCB_CANCELLED ((void *) (~0ULL))
-
typedef int (kiocb_cancel_fn)(struct kiocb *);
-struct kiocb {
- struct file *ki_filp;
- struct kioctx *ki_ctx; /* NULL for sync ops */
- kiocb_cancel_fn *ki_cancel;
- void *private;
-
- union {
- void __user *user;
- struct task_struct *tsk;
- } ki_obj;
-
- __u64 ki_user_data; /* user's data for completion */
- loff_t ki_pos;
- size_t ki_nbytes; /* copy of iocb->aio_nbytes */
-
- struct list_head ki_list; /* the aio core uses this
- * for cancellation */
-
- /*
- * If the aio_resfd field of the userspace iocb is not zero,
- * this is the underlying eventfd context to deliver events to.
- */
- struct eventfd_ctx *ki_eventfd;
-};
-
-static inline bool is_sync_kiocb(struct kiocb *kiocb)
-{
- return kiocb->ki_ctx == NULL;
-}
-
-static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
-{
- *kiocb = (struct kiocb) {
- .ki_ctx = NULL,
- .ki_filp = filp,
- .ki_obj.tsk = current,
- };
-}
-
/* prototypes */
#ifdef CONFIG_AIO
-extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern void aio_complete(struct kiocb *iocb, long res, long res2);
-struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
-static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
-struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user * __user *iocbpp,
@@ -89,11 +26,6 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-static inline struct kiocb *list_kiocb(struct list_head *h)
-{
- return list_entry(h, struct kiocb, ki_list);
-}
-
/* for sysctl: */
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index a899402..52f3b7d 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -43,8 +43,8 @@ struct alarm {
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
-int alarm_start(struct alarm *alarm, ktime_t start);
-int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
int alarm_try_to_cancel(struct alarm *alarm);
int alarm_cancel(struct alarm *alarm);
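alarm_start() and alarm_start_relative() no longer return a value, so arming an alarm is fire-and-forget. A small sketch; the callback and the 500 ms timeout are invented:

	static enum alarmtimer_restart example_fired(struct alarm *a, ktime_t now)
	{
		return ALARMTIMER_NORESTART;
	}

	static void example_arm(struct alarm *a)
	{
		alarm_init(a, ALARM_REALTIME, example_fired);
		alarm_start_relative(a, ms_to_ktime(500));	/* no return value to check anymore */
	}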
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 2afc618..50fc668 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -33,6 +33,7 @@ struct amba_device {
struct clk *pclk;
unsigned int periphid;
unsigned int irq[AMBA_NR_IRQS];
+ char *driver_override;
};
struct amba_driver {
@@ -92,11 +93,15 @@ struct amba_device *amba_find_device(const char *, struct device *, unsigned int
int amba_request_regions(struct amba_device *, const char *);
void amba_release_regions(struct amba_device *);
-#define amba_pclk_enable(d) \
- (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk))
+static inline int amba_pclk_enable(struct amba_device *dev)
+{
+ return clk_enable(dev->pclk);
+}
-#define amba_pclk_disable(d) \
- do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
+static inline void amba_pclk_disable(struct amba_device *dev)
+{
+ clk_disable(dev->pclk);
+}
static inline int amba_pclk_prepare(struct amba_device *dev)
{
diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h
index c7df89f..58fe9e8 100644
--- a/include/linux/amba/sp810.h
+++ b/include/linux/amba/sp810.h
@@ -2,7 +2,7 @@
* ARM PrimeXsys System Controller SP810 header file
*
* Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index 79d6edf..521ec1f 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -24,16 +24,22 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <asm/arm-cci.h>
+
struct device_node;
#ifdef CONFIG_ARM_CCI
extern bool cci_probed(void);
+#else
+static inline bool cci_probed(void) { return false; }
+#endif
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
extern int cci_ace_get_port(struct device_node *dn);
extern int cci_disable_port_by_cpu(u64 mpidr);
extern int __cci_control_port_by_device(struct device_node *dn, bool enable);
extern int __cci_control_port_by_index(u32 port, bool enable);
#else
-static inline bool cci_probed(void) { return false; }
static inline int cci_ace_get_port(struct device_node *dn)
{
return -ENODEV;
@@ -49,6 +55,7 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
return -ENODEV;
}
#endif
+
#define cci_disable_port_by_device(dev) \
__cci_control_port_by_device(dev, false)
#define cci_enable_port_by_device(dev) \
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 179b38f..388574e 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -60,12 +60,15 @@ struct dma_chan_ref {
* dependency chain
* @ASYNC_TX_FENCE: specify that the next operation in the dependency
* chain uses this operation's result as an input
+ * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
+ * input data. Required for rmw case.
*/
enum async_tx_flags {
ASYNC_TX_XOR_ZERO_DST = (1 << 0),
ASYNC_TX_XOR_DROP_DST = (1 << 1),
ASYNC_TX_ACK = (1 << 2),
ASYNC_TX_FENCE = (1 << 3),
+ ASYNC_TX_PQ_XOR_DST = (1 << 4),
};
/**
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f2f4d8d..fed3641 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -94,6 +94,8 @@ enum {
ATA_ID_SECTOR_SIZE = 106,
ATA_ID_WWN = 108,
ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */
+ ATA_ID_COMMAND_SET_3 = 119,
+ ATA_ID_COMMAND_SET_4 = 120,
ATA_ID_LAST_LUN = 126,
ATA_ID_DLF = 128,
ATA_ID_CSFO = 129,
@@ -177,7 +179,7 @@ enum {
ATA_DSC = (1 << 4), /* drive seek complete */
ATA_DRQ = (1 << 3), /* data request i/o */
ATA_CORR = (1 << 2), /* corrected data error */
- ATA_IDX = (1 << 1), /* index */
+ ATA_SENSE = (1 << 1), /* sense code available */
ATA_ERR = (1 << 0), /* have an error */
ATA_SRST = (1 << 2), /* software reset */
ATA_ICRC = (1 << 7), /* interface CRC error */
@@ -382,6 +384,8 @@ enum {
SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */
+ SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01,
@@ -503,7 +507,7 @@ struct ata_bmdma_prd {
#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8))
#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
-#define ata_id_removeable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
+#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
#define ata_id_has_atapi_AN(id) \
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
@@ -525,6 +529,8 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
+#define ata_id_has_ncq_autosense(id) \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -696,6 +702,37 @@ static inline bool ata_id_wcache_enabled(const u16 *id)
return id[ATA_ID_CFS_ENABLE_1] & (1 << 5);
}
+static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
+{
+ /* Word 86 must have bit 15 set */
+ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ return false;
+
+ /* READ LOG DMA EXT support can be signaled either from word 119
+ * or from word 120. The format is the same for both words: Bit
+ * 15 must be cleared, bit 14 set and bit 3 set.
+ */
+ if ((id[ATA_ID_COMMAND_SET_3] & 0xC008) == 0x4008 ||
+ (id[ATA_ID_COMMAND_SET_4] & 0xC008) == 0x4008)
+ return true;
+
+ return false;
+}
+
+static inline bool ata_id_has_sense_reporting(const u16 *id)
+{
+ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ return false;
+ return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+}
+
+static inline bool ata_id_sense_reporting_enabled(const u16 *id)
+{
+ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ return false;
+ return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+}
+
/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data
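The new IDENTIFY-data helpers work together: sense reporting support is advertised in word 119 and its enabled state in word 120. A hedged sketch of how a driver might combine them; the function is invented, and only the ata_id_* helpers and SETFEATURE_SENSE_DATA come from this header:

	/* id points at the 256-word IDENTIFY DEVICE buffer */
	static bool example_should_enable_sense(const u16 *id)
	{
		/* if true, the driver could issue SET FEATURES with SETFEATURE_SENSE_DATA */
		return ata_id_has_sense_reporting(id) &&
		       !ata_id_sense_reporting_enabled(id);
	}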
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 5c618a0..619d9e7 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -10,12 +10,15 @@ struct pata_platform_info {
unsigned int ioport_shift;
};
+struct scsi_host_template;
+
extern int __pata_platform_probe(struct device *dev,
struct resource *io_res,
struct resource *ctl_res,
struct resource *irq_res,
unsigned int ioport_shift,
- int __pio_mask);
+ int __pio_mask,
+ struct scsi_host_template *sht);
/*
* Marvell SATA private data
diff --git a/include/linux/audit.h b/include/linux/audit.h
index af84234..c2e7e3a 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -46,7 +46,6 @@ struct audit_tree;
struct sk_buff;
struct audit_krule {
- int vers_ops;
u32 pflags;
u32 flags;
u32 listnr;
@@ -128,7 +127,6 @@ extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
extern void __audit_syscall_exit(int ret_success, long ret_value);
extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
-extern void audit_putname(struct filename *name);
#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */
#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
@@ -353,8 +351,6 @@ static inline struct filename *audit_reusename(const __user char *name)
}
static inline void audit_getname(struct filename *name)
{ }
-static inline void audit_putname(struct filename *name)
-{ }
static inline void __audit_inode(struct filename *name,
const struct dentry *dentry,
unsigned int flags)
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
new file mode 100644
index 0000000..a23209b
--- /dev/null
+++ b/include/linux/backing-dev-defs.h
@@ -0,0 +1,256 @@
+#ifndef __LINUX_BACKING_DEV_DEFS_H
+#define __LINUX_BACKING_DEV_DEFS_H
+
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
+#include <linux/flex_proportions.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+struct page;
+struct device;
+struct dentry;
+
+/*
+ * Bits in bdi_writeback.state
+ */
+enum wb_state {
+ WB_registered, /* bdi_register() was done */
+ WB_writeback_running, /* Writeback is in progress */
+ WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
+};
+
+enum wb_congested_state {
+ WB_async_congested, /* The async (write) queue is getting full */
+ WB_sync_congested, /* The sync queue is getting full */
+};
+
+typedef int (congested_fn)(void *, int);
+
+enum wb_stat_item {
+ WB_RECLAIMABLE,
+ WB_WRITEBACK,
+ WB_DIRTIED,
+ WB_WRITTEN,
+ NR_WB_STAT_ITEMS
+};
+
+#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+
+/*
+ * For cgroup writeback, multiple wb's may map to the same blkcg. Those
+ * wb's can operate mostly independently but should share the congested
+ * state. To facilitate such sharing, the congested state is tracked using
+ * the following struct which is created on demand, indexed by blkcg ID on
+ * its bdi, and refcounted.
+ */
+struct bdi_writeback_congested {
+ unsigned long state; /* WB_[a]sync_congested flags */
+ atomic_t refcnt; /* nr of attached wb's and blkg */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct backing_dev_info *bdi; /* the associated bdi */
+ int blkcg_id; /* ID of the associated blkcg */
+ struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
+#endif
+};
+
+/*
+ * Each wb (bdi_writeback) can perform writeback operations, is measured
+ * and throttled, independently. Without cgroup writeback, each bdi
+ * (bdi_writeback) is served by its embedded bdi->wb.
+ *
+ * On the default hierarchy, blkcg implicitly enables memcg. This allows
+ * using memcg's page ownership for attributing writeback IOs, and every
+ * memcg - blkcg combination can be served by its own wb by assigning a
+ * dedicated wb to each memcg, which enables isolation across different
+ * cgroups and propagation of IO back pressure down from the IO layer up to
+ * the tasks which are generating the dirty pages to be written back.
+ *
+ * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
+ * refcounted with the number of inodes attached to it, and pins the memcg
+ * and the corresponding blkcg. As the corresponding blkcg for a memcg may
+ * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
+ * is tested for blkcg after lookup and removed from index on mismatch so
+ * that a new wb for the combination can be created.
+ */
+struct bdi_writeback {
+ struct backing_dev_info *bdi; /* our parent bdi */
+
+ unsigned long state; /* Always use atomic bitops on this */
+ unsigned long last_old_flush; /* last old data flush */
+
+ struct list_head b_dirty; /* dirty inodes */
+ struct list_head b_io; /* parked for writeback */
+ struct list_head b_more_io; /* parked for more writeback */
+ struct list_head b_dirty_time; /* time stamps are dirty */
+ spinlock_t list_lock; /* protects the b_* lists */
+
+ struct percpu_counter stat[NR_WB_STAT_ITEMS];
+
+ struct bdi_writeback_congested *congested;
+
+ unsigned long bw_time_stamp; /* last time write bw is updated */
+ unsigned long dirtied_stamp;
+ unsigned long written_stamp; /* pages written at bw_time_stamp */
+ unsigned long write_bandwidth; /* the estimated write bandwidth */
+ unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */
+
+ /*
+ * The base dirty throttle rate, re-calculated on every 200ms.
+ * All the bdi tasks' dirty rate will be curbed under it.
+ * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
+ * in small steps and is much more smooth/stable than the latter.
+ */
+ unsigned long dirty_ratelimit;
+ unsigned long balanced_dirty_ratelimit;
+
+ struct fprop_local_percpu completions;
+ int dirty_exceeded;
+
+ spinlock_t work_lock; /* protects work_list & dwork scheduling */
+ struct list_head work_list;
+ struct delayed_work dwork; /* work item used for writeback */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct percpu_ref refcnt; /* used only for !root wb's */
+ struct fprop_local_percpu memcg_completions;
+ struct cgroup_subsys_state *memcg_css; /* the associated memcg */
+ struct cgroup_subsys_state *blkcg_css; /* and blkcg */
+ struct list_head memcg_node; /* anchored at memcg->cgwb_list */
+ struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+
+ union {
+ struct work_struct release_work;
+ struct rcu_head rcu;
+ };
+#endif
+};
+
+struct backing_dev_info {
+ struct list_head bdi_list;
+ unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+ unsigned int capabilities; /* Device capabilities */
+ congested_fn *congested_fn; /* Function pointer if device is md/dm */
+ void *congested_data; /* Pointer to aux data for congested func */
+
+ char *name;
+
+ unsigned int min_ratio;
+ unsigned int max_ratio, max_prop_frac;
+
+ /*
+ * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are
+ * any dirty wbs, which is depended upon by bdi_has_dirty_io().
+ */
+ atomic_long_t tot_write_bandwidth;
+
+ struct bdi_writeback wb; /* the root writeback info for this bdi */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
+ struct rb_root cgwb_congested_tree; /* their congested states */
+ atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
+#else
+ struct bdi_writeback_congested *wb_congested;
+#endif
+ wait_queue_head_t wb_waitq;
+
+ struct device *dev;
+
+ struct timer_list laptop_mode_wb_timer;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_dir;
+ struct dentry *debug_stats;
+#endif
+};
+
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
+void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
+void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
+
+static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
+{
+ clear_wb_congested(bdi->wb.congested, sync);
+}
+
+static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
+{
+ set_wb_congested(bdi->wb.congested, sync);
+}
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * wb_tryget - try to increment a wb's refcount
+ * @wb: bdi_writeback to get
+ */
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ return percpu_ref_tryget(&wb->refcnt);
+ return true;
+}
+
+/**
+ * wb_get - increment a wb's refcount
+ * @wb: bdi_writeback to get
+ */
+static inline void wb_get(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ percpu_ref_get(&wb->refcnt);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ if (wb != &wb->bdi->wb)
+ percpu_ref_put(&wb->refcnt);
+}
+
+/**
+ * wb_dying - is a wb dying?
+ * @wb: bdi_writeback of interest
+ *
+ * Returns whether @wb is unlinked and being drained.
+ */
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+ return percpu_ref_is_dying(&wb->refcnt);
+}
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+ return true;
+}
+
+static inline void wb_get(struct bdi_writeback *wb)
+{
+}
+
+static inline void wb_put(struct bdi_writeback *wb)
+{
+}
+
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+ return false;
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
+#endif /* __LINUX_BACKING_DEV_DEFS_H */
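Non-root (cgroup) wbs are reference counted through the percpu ref above, while the same helpers become no-ops for the embedded root wb. A minimal usage sketch; example_poke_wb() is invented, and wb_has_dirty_io()/wb_wakeup_delayed() come from linux/backing-dev.h further down:

	static void example_poke_wb(struct bdi_writeback *wb)
	{
		if (!wb_tryget(wb))		/* fails only for a dying cgroup wb */
			return;
		if (!wb_dying(wb) && wb_has_dirty_io(wb))
			wb_wakeup_delayed(wb);	/* schedule delayed writeback on this wb */
		wb_put(wb);
	}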
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5da6012..0fe9df9 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -8,103 +8,14 @@
#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H
-#include <linux/percpu_counter.h>
-#include <linux/log2.h>
-#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/timer.h>
+#include <linux/blkdev.h>
#include <linux/writeback.h>
-#include <linux/atomic.h>
-#include <linux/sysctl.h>
-#include <linux/workqueue.h>
-
-struct page;
-struct device;
-struct dentry;
-
-/*
- * Bits in backing_dev_info.state
- */
-enum bdi_state {
- BDI_async_congested, /* The async (write) queue is getting full */
- BDI_sync_congested, /* The sync queue is getting full */
- BDI_registered, /* bdi_register() was done */
- BDI_writeback_running, /* Writeback is in progress */
-};
-
-typedef int (congested_fn)(void *, int);
-
-enum bdi_stat_item {
- BDI_RECLAIMABLE,
- BDI_WRITEBACK,
- BDI_DIRTIED,
- BDI_WRITTEN,
- NR_BDI_STAT_ITEMS
-};
-
-#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
-
-struct bdi_writeback {
- struct backing_dev_info *bdi; /* our parent bdi */
-
- unsigned long last_old_flush; /* last old data flush */
-
- struct delayed_work dwork; /* work item used for writeback */
- struct list_head b_dirty; /* dirty inodes */
- struct list_head b_io; /* parked for writeback */
- struct list_head b_more_io; /* parked for more writeback */
- spinlock_t list_lock; /* protects the b_* lists */
-};
-
-struct backing_dev_info {
- struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
- unsigned long state; /* Always use atomic bitops on this */
- unsigned int capabilities; /* Device capabilities */
- congested_fn *congested_fn; /* Function pointer if device is md/dm */
- void *congested_data; /* Pointer to aux data for congested func */
-
- char *name;
-
- struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
-
- unsigned long bw_time_stamp; /* last time write bw is updated */
- unsigned long dirtied_stamp;
- unsigned long written_stamp; /* pages written at bw_time_stamp */
- unsigned long write_bandwidth; /* the estimated write bandwidth */
- unsigned long avg_write_bandwidth; /* further smoothed write bw */
-
- /*
- * The base dirty throttle rate, re-calculated on every 200ms.
- * All the bdi tasks' dirty rate will be curbed under it.
- * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
- * in small steps and is much more smooth/stable than the latter.
- */
- unsigned long dirty_ratelimit;
- unsigned long balanced_dirty_ratelimit;
-
- struct fprop_local_percpu completions;
- int dirty_exceeded;
-
- unsigned int min_ratio;
- unsigned int max_ratio, max_prop_frac;
-
- struct bdi_writeback wb; /* default writeback info for this bdi */
- spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
-
- struct list_head work_list;
-
- struct device *dev;
-
- struct timer_list laptop_mode_wb_timer;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debug_dir;
- struct dentry *debug_stats;
-#endif
-};
+#include <linux/blk-cgroup.h>
+#include <linux/backing-dev-defs.h>
+#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);
@@ -113,99 +24,100 @@ __printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
-void bdi_unregister(struct backing_dev_info *bdi);
-int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
- enum wb_reason reason);
-void bdi_start_background_writeback(struct backing_dev_info *bdi);
-void bdi_writeback_workfn(struct work_struct *work);
-int bdi_has_dirty_io(struct backing_dev_info *bdi);
-void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
+int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+ bool range_cyclic, enum wb_reason reason);
+void wb_start_background_writeback(struct bdi_writeback *wb);
+void wb_workfn(struct work_struct *work);
+void wb_wakeup_delayed(struct bdi_writeback *wb);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
-static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
+{
+ return test_bit(WB_has_dirty_io, &wb->state);
+}
+
+static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
- return !list_empty(&wb->b_dirty) ||
- !list_empty(&wb->b_io) ||
- !list_empty(&wb->b_more_io);
+ /*
+ * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
+ * any dirty wbs. See wb_update_write_bandwidth().
+ */
+ return atomic_long_read(&bdi->tot_write_bandwidth);
}
-static inline void __add_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item, s64 amount)
+static inline void __add_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item, s64 amount)
{
- __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
+ __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void __inc_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- __add_bdi_stat(bdi, item, 1);
+ __add_wb_stat(wb, item, 1);
}
-static inline void inc_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
- __inc_bdi_stat(bdi, item);
+ __inc_wb_stat(wb, item);
local_irq_restore(flags);
}
-static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void __dec_wb_stat(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- __add_bdi_stat(bdi, item, -1);
+ __add_wb_stat(wb, item, -1);
}
-static inline void dec_bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
- __dec_bdi_stat(bdi, item);
+ __dec_wb_stat(wb, item);
local_irq_restore(flags);
}
-static inline s64 bdi_stat(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- return percpu_counter_read_positive(&bdi->bdi_stat[item]);
+ return percpu_counter_read_positive(&wb->stat[item]);
}
-static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
+ enum wb_stat_item item)
{
- return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
+ return percpu_counter_sum_positive(&wb->stat[item]);
}
-static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
- enum bdi_stat_item item)
+static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
s64 sum;
unsigned long flags;
local_irq_save(flags);
- sum = __bdi_stat_sum(bdi, item);
+ sum = __wb_stat_sum(wb, item);
local_irq_restore(flags);
return sum;
}
-extern void bdi_writeout_inc(struct backing_dev_info *bdi);
+extern void wb_writeout_inc(struct bdi_writeback *wb);
/*
* maximal error of a stat counter.
*/
-static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
+static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
- return nr_cpu_ids * BDI_STAT_BATCH;
+ return nr_cpu_ids * WB_STAT_BATCH;
#else
return 1;
#endif
@@ -228,80 +140,58 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
* BDI_CAP_NO_WRITEBACK: Don't write pages back
* BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
- *
- * These flags let !MMU mmap() govern direct device mapping vs immediate
- * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
- *
- * BDI_CAP_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
- * BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
- * BDI_CAP_READ_MAP: Can be mapped for reading
- * BDI_CAP_WRITE_MAP: Can be mapped for writing
- * BDI_CAP_EXEC_MAP: Can be mapped for execution
- *
- * BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed.
- *
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
+ *
+ * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
-#define BDI_CAP_MAP_COPY 0x00000004
-#define BDI_CAP_MAP_DIRECT 0x00000008
-#define BDI_CAP_READ_MAP 0x00000010
-#define BDI_CAP_WRITE_MAP 0x00000020
-#define BDI_CAP_EXEC_MAP 0x00000040
-#define BDI_CAP_NO_ACCT_WB 0x00000080
-#define BDI_CAP_SWAP_BACKED 0x00000100
-#define BDI_CAP_STABLE_WRITES 0x00000200
-#define BDI_CAP_STRICTLIMIT 0x00000400
-
-#define BDI_CAP_VMFLAGS \
- (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
+#define BDI_CAP_NO_ACCT_WB 0x00000004
+#define BDI_CAP_STABLE_WRITES 0x00000008
+#define BDI_CAP_STRICTLIMIT 0x00000010
+#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
-#if defined(VM_MAYREAD) && \
- (BDI_CAP_READ_MAP != VM_MAYREAD || \
- BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
- BDI_CAP_EXEC_MAP != VM_MAYEXEC)
-#error please change backing_dev_info::capabilities flags
-#endif
-
-extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
-int writeback_in_progress(struct backing_dev_info *bdi);
-
-static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
+/**
+ * writeback_in_progress - determine whether there is writeback in progress
+ * @wb: bdi_writeback of interest
+ *
+ * Determine whether there is writeback waiting to be handled against a
+ * bdi_writeback.
+ */
+static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
- if (bdi->congested_fn)
- return bdi->congested_fn(bdi->congested_data, bdi_bits);
- return (bdi->state & bdi_bits);
+ return test_bit(WB_writeback_running, &wb->state);
}
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
- return bdi_congested(bdi, 1 << BDI_sync_congested);
-}
+ struct super_block *sb;
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << BDI_async_congested);
+ if (!inode)
+ return &noop_backing_dev_info;
+
+ sb = inode->i_sb;
+#ifdef CONFIG_BLOCK
+ if (sb_is_blkdev_sb(sb))
+ return blk_get_backing_dev_info(I_BDEV(inode));
+#endif
+ return sb->s_bdi;
}
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
- return bdi_congested(bdi, (1 << BDI_sync_congested) |
- (1 << BDI_async_congested));
-}
+ struct backing_dev_info *bdi = wb->bdi;
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
-};
+ if (bdi->congested_fn)
+ return bdi->congested_fn(bdi->congested_data, cong_bits);
+ return wb->congested->state & cong_bits;
+}
-void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
-void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
@@ -329,30 +219,352 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
BDI_CAP_NO_WRITEBACK));
}
-static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
+static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
- return bdi->capabilities & BDI_CAP_SWAP_BACKED;
+ return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}
-static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
+static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
- return bdi_cap_writeback_dirty(mapping->backing_dev_info);
+ return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}
-static inline bool mapping_cap_account_dirty(struct address_space *mapping)
+static inline int bdi_sched_wait(void *word)
{
- return bdi_cap_account_dirty(mapping->backing_dev_info);
+ schedule();
+ return 0;
}
-static inline bool mapping_cap_swap_backed(struct address_space *mapping)
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+struct bdi_writeback_congested *
+wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
+void wb_congested_put(struct bdi_writeback_congested *congested);
+struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
+ struct cgroup_subsys_state *memcg_css,
+ gfp_t gfp);
+void wb_memcg_offline(struct mem_cgroup *memcg);
+void wb_blkcg_offline(struct blkcg *blkcg);
+int inode_congested(struct inode *inode, int cong_bits);
+
+/**
+ * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
+ * @inode: inode of interest
+ *
+ * cgroup writeback requires support from both the bdi and filesystem.
+ * Test whether @inode has both.
+ */
+static inline bool inode_cgwb_enabled(struct inode *inode)
{
- return bdi_cap_swap_backed(mapping->backing_dev_info);
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ return bdi_cap_account_dirty(bdi) &&
+ (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
+ (inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
-static inline int bdi_sched_wait(void *word)
+/**
+ * wb_find_current - find wb for %current on a bdi
+ * @bdi: bdi of interest
+ *
+ * Find the wb of @bdi which matches both the memcg and blkcg of %current.
+ * Must be called under rcu_read_lock() which protects the returned wb.
+ * NULL if not found.
+ */
+static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
- schedule();
- return 0;
+ struct cgroup_subsys_state *memcg_css;
+ struct bdi_writeback *wb;
+
+ memcg_css = task_css(current, memory_cgrp_id);
+ if (!memcg_css->parent)
+ return &bdi->wb;
+
+ wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
+
+ /*
+ * %current's blkcg equals the effective blkcg of its memcg. No
+ * need to use the relatively expensive cgroup_get_e_css().
+ */
+ if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
+ return wb;
+ return NULL;
+}
+
+/**
+ * wb_get_create_current - get or create wb for %current on a bdi
+ * @bdi: bdi of interest
+ * @gfp: allocation mask
+ *
+ * Equivalent to wb_get_create() on %current's memcg. This function is
+ * called from a relatively hot path and optimizes the common cases using
+ * wb_find_current().
+ */
+static inline struct bdi_writeback *
+wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
+{
+ struct bdi_writeback *wb;
+
+ rcu_read_lock();
+ wb = wb_find_current(bdi);
+ if (wb && unlikely(!wb_tryget(wb)))
+ wb = NULL;
+ rcu_read_unlock();
+
+ if (unlikely(!wb)) {
+ struct cgroup_subsys_state *memcg_css;
+
+ memcg_css = task_get_css(current, memory_cgrp_id);
+ wb = wb_get_create(bdi, memcg_css, gfp);
+ css_put(memcg_css);
+ }
+ return wb;
+}
+
+/**
+ * inode_to_wb_is_valid - test whether an inode has a wb associated
+ * @inode: inode of interest
+ *
+ * Returns %true if @inode has a wb associated. May be called without any
+ * locking.
+ */
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+ return inode->i_wb;
+}
+
+/**
+ * inode_to_wb - determine the wb of an inode
+ * @inode: inode of interest
+ *
+ * Returns the wb @inode is currently associated with. The caller must be
+ * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
+ * associated wb's list_lock.
+ */
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(debug_locks &&
+ (!lockdep_is_held(&inode->i_lock) &&
+ !lockdep_is_held(&inode->i_mapping->tree_lock) &&
+ !lockdep_is_held(&inode->i_wb->list_lock)));
+#endif
+ return inode->i_wb;
+}
+
+/**
+ * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
+ * @inode: target inode
+ * @lockedp: temp bool output param, to be passed to the end function
+ *
+ * The caller wants to access the wb associated with @inode but isn't
+ * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
+ * function determines the wb associated with @inode and ensures that the
+ * association doesn't change until the transaction is finished with
+ * unlocked_inode_to_wb_end().
+ *
+ * The caller must call unlocked_inode_to_wb_end() with *@lockedp
+ * afterwards and can't sleep during the transaction. IRQs may or may not
+ * be disabled on return.
+ */
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+{
+ rcu_read_lock();
+
+ /*
+ * Paired with store_release in inode_switch_wb_work_fn() and
+ * ensures that we see the new wb if we see cleared I_WB_SWITCH.
+ */
+ *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+
+ if (unlikely(*lockedp))
+ spin_lock_irq(&inode->i_mapping->tree_lock);
+
+ /*
+	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
+	 * inode_to_wb()'s lockdep check could fire here, so dereference
+	 * i_wb directly.
+ */
+ return inode->i_wb;
+}
+
+/**
+ * unlocked_inode_to_wb_end - end inode wb access transaction
+ * @inode: target inode
+ * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ */
+static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+{
+ if (unlikely(locked))
+ spin_unlock_irq(&inode->i_mapping->tree_lock);
+
+ rcu_read_unlock();
+}
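A hedged sketch of the begin/end transaction pattern described above; example_peek_wb() is illustrative only.

static void example_peek_wb(struct inode *inode)
{
	struct bdi_writeback *wb;
	bool locked;

	wb = unlocked_inode_to_wb_begin(inode, &locked);
	/* the inode->wb association is stable here; no sleeping allowed */
	/* ... e.g. sample per-wb writeback statistics ... */
	unlocked_inode_to_wb_end(inode, locked);
}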
+
+struct wb_iter {
+ int start_blkcg_id;
+ struct radix_tree_iter tree_iter;
+ void **slot;
+};
+
+static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
+ struct backing_dev_info *bdi)
+{
+ struct radix_tree_iter *titer = &iter->tree_iter;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (iter->start_blkcg_id >= 0) {
+ iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
+ iter->start_blkcg_id = -1;
+ } else {
+ iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
+ }
+
+ if (!iter->slot)
+ iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
+ if (iter->slot)
+ return *iter->slot;
+ return NULL;
+}
+
+static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
+ struct backing_dev_info *bdi,
+ int start_blkcg_id)
+{
+ iter->start_blkcg_id = start_blkcg_id;
+
+ if (start_blkcg_id)
+ return __wb_iter_next(iter, bdi);
+ else
+ return &bdi->wb;
+}
+
+/**
+ * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
+ * @wb_cur: cursor struct bdi_writeback pointer
+ * @bdi: bdi to walk wb's of
+ * @iter: pointer to struct wb_iter to be used as iteration buffer
+ * @start_blkcg_id: blkcg ID to start iteration from
+ *
+ * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
+ * blkcg ID order starting from @start_blkcg_id. @iter is a struct wb_iter
+ * used as temporary storage during the iteration. rcu_read_lock() must be
+ * held throughout the iteration.
+ */
+#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
+ for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \
+ (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
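For illustration, walking every wb of a bdi might look like the hypothetical sketch below; note the RCU critical section spanning the whole loop.

static void example_walk_wbs(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;
	struct wb_iter iter;

	rcu_read_lock();
	bdi_for_each_wb(wb, bdi, &iter, 0) {	/* 0 starts at the root wb */
		/* ... e.g. inspect or kick each wb ... */
	}
	rcu_read_unlock();
}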
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool inode_cgwb_enabled(struct inode *inode)
+{
+ return false;
+}
+
+static inline struct bdi_writeback_congested *
+wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
+{
+ atomic_inc(&bdi->wb_congested->refcnt);
+ return bdi->wb_congested;
+}
+
+static inline void wb_congested_put(struct bdi_writeback_congested *congested)
+{
+ if (atomic_dec_and_test(&congested->refcnt))
+ kfree(congested);
+}
+
+static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
+{
+ return &bdi->wb;
+}
+
+static inline struct bdi_writeback *
+wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
+{
+ return &bdi->wb;
+}
+
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+ return true;
+}
+
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+{
+ return &inode_to_bdi(inode)->wb;
+}
+
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+{
+ return inode_to_wb(inode);
+}
+
+static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+{
+}
+
+static inline void wb_memcg_offline(struct mem_cgroup *memcg)
+{
+}
+
+static inline void wb_blkcg_offline(struct blkcg *blkcg)
+{
+}
+
+struct wb_iter {
+ int next_id;
+};
+
+#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \
+ for ((iter)->next_id = (start_blkcg_id); \
+ ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
+
+static inline int inode_congested(struct inode *inode, int cong_bits)
+{
+ return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
+static inline int inode_read_congested(struct inode *inode)
+{
+ return inode_congested(inode, 1 << WB_sync_congested);
+}
+
+static inline int inode_write_congested(struct inode *inode)
+{
+ return inode_congested(inode, 1 << WB_async_congested);
+}
+
+static inline int inode_rw_congested(struct inode *inode)
+{
+ return inode_congested(inode, (1 << WB_sync_congested) |
+ (1 << WB_async_congested));
+}
+
+static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
+{
+ return wb_congested(&bdi->wb, cong_bits);
+}
+
+static inline int bdi_read_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, 1 << WB_sync_congested);
+}
+
+static inline int bdi_write_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, 1 << WB_async_congested);
+}
+
+static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, (1 << WB_sync_congested) |
+ (1 << WB_async_congested));
}
-#endif /* _LINUX_BACKING_DEV_H */
+#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index adb14a8..1e7a69a 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -117,12 +117,16 @@ struct backlight_device {
int use_count;
};
-static inline void backlight_update_status(struct backlight_device *bd)
+static inline int backlight_update_status(struct backlight_device *bd)
{
+ int ret = -ENOENT;
+
mutex_lock(&bd->update_lock);
if (bd->ops && bd->ops->update_status)
- bd->ops->update_status(bd);
+ ret = bd->ops->update_status(bd);
mutex_unlock(&bd->update_lock);
+
+ return ret;
}
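With the new int return, callers can propagate driver errors; example_set_brightness() below is a hypothetical sketch.

static int example_set_brightness(struct backlight_device *bd, int level)
{
	bd->props.brightness = level;
	return backlight_update_status(bd);	/* -ENOENT if no update_status op */
}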
extern struct backlight_device *backlight_device_register(const char *name,
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 0e97856..14eea94 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -74,5 +74,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
+#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
new file mode 100644
index 0000000..2793652
--- /dev/null
+++ b/include/linux/bcm47xx_nvram.h
@@ -0,0 +1,49 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BCM47XX_NVRAM_H
+#define __BCM47XX_NVRAM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_BCM47XX_NVRAM
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
+int bcm47xx_nvram_gpio_pin(const char *name);
+char *bcm47xx_nvram_get_contents(size_t *val_len);
+static inline void bcm47xx_nvram_release_contents(char *nvram)
+{
+ vfree(nvram);
+};
+#else
+static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
+{
+ return -ENOTSUPP;
+};
+static inline int bcm47xx_nvram_getenv(const char *name, char *val,
+ size_t val_len)
+{
+ return -ENOTSUPP;
+};
+static inline int bcm47xx_nvram_gpio_pin(const char *name)
+{
+ return -ENOTSUPP;
+};
+
+static inline char *bcm47xx_nvram_get_contents(size_t *val_len)
+{
+ return NULL;
+};
+
+static inline void bcm47xx_nvram_release_contents(char *nvram)
+{
+};
+#endif
+
+#endif /* __BCM47XX_NVRAM_H */
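A hypothetical consumer of this API is sketched below; the "boardtype" key is only illustrative, and a non-negative return from bcm47xx_nvram_getenv() is assumed to indicate success.

static void example_nvram_query(void)
{
	char buf[32];
	size_t len;
	char *nvram;

	if (bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
		pr_info("boardtype=%s\n", buf);

	nvram = bcm47xx_nvram_get_contents(&len);
	if (nvram) {
		/* ... parse up to len bytes of raw NVRAM ... */
		bcm47xx_nvram_release_contents(nvram);
	}
}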
diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h
index b708786..5582c21 100644
--- a/include/linux/bcm47xx_wdt.h
+++ b/include/linux/bcm47xx_wdt.h
@@ -16,6 +16,7 @@ struct bcm47xx_wdt {
struct watchdog_device wdd;
struct notifier_block notifier;
+ struct notifier_block restart_handler;
struct timer_list soft_timer;
atomic_t soft_ticks;
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index eb1c6a4..2ff4a99 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
extern void bcma_driver_unregister(struct bcma_driver *drv);
+/* module_bcma_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_bcma_driver(__bcma_driver) \
+ module_driver(__bcma_driver, bcma_driver_register, \
+ bcma_driver_unregister)
+
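A sketch of the intended use; example_bcma_tbl, example_probe and example_remove are hypothetical driver symbols.

static struct bcma_driver example_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= example_bcma_tbl,	/* hypothetical bcma_device_id table */
	.probe		= example_probe,
	.remove		= example_remove,
};
module_bcma_driver(example_bcma_driver);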
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
extern int bcma_arch_register_fallback_sprom(
@@ -318,6 +327,7 @@ struct bcma_bus {
const struct bcma_host_ops *ops;
enum bcma_hosttype hosttype;
+ bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
union {
/* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
struct pci_dev *host_pci;
@@ -433,6 +443,27 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
return bcma_find_core_unit(bus, coreid, 0);
}
+#ifdef CONFIG_BCMA_HOST_PCI
+extern void bcma_host_pci_up(struct bcma_bus *bus);
+extern void bcma_host_pci_down(struct bcma_bus *bus);
+extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+ struct bcma_device *core, bool enable);
+#else
+static inline void bcma_host_pci_up(struct bcma_bus *bus)
+{
+}
+static inline void bcma_host_pci_down(struct bcma_bus *bus)
+{
+}
+static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+ struct bcma_device *core, bool enable)
+{
+ if (bus->hosttype == BCMA_HOSTTYPE_PCI)
+ return -ENOTSUPP;
+ return 0;
+}
+#endif
+
extern bool bcma_core_is_enabled(struct bcma_device *core);
extern void bcma_core_disable(struct bcma_device *core, u32 flags);
extern int bcma_core_enable(struct bcma_device *core, u32 flags);
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index db6fa21..6cceedf 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -663,14 +663,6 @@ struct bcma_drv_cc_b {
#define bcma_cc_maskset32(cc, offset, mask, set) \
bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
-extern void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
-extern void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
-
-extern void bcma_chipco_suspend(struct bcma_drv_cc *cc);
-extern void bcma_chipco_resume(struct bcma_drv_cc *cc);
-
-void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
-
extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
@@ -690,9 +682,6 @@ u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
/* PMU support */
-extern void bcma_pmu_init(struct bcma_drv_cc *cc);
-extern void bcma_pmu_early_init(struct bcma_drv_cc *cc);
-
extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
u32 value);
extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset,
diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h
index 4dd1f33..4354d4e 100644
--- a/include/linux/bcma/bcma_driver_gmac_cmn.h
+++ b/include/linux/bcma/bcma_driver_gmac_cmn.h
@@ -91,10 +91,4 @@ struct bcma_drv_gmac_cmn {
#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val)
#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val)
-#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
-extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
-#else
-static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
-#endif
-
#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index 0b3b32a..8eea7f9 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -39,21 +39,6 @@ struct bcma_drv_mips {
u8 early_setup_done:1;
};
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
-extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
-
-extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
-#else
-static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
-static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
-
-static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
-{
- return 0;
-}
-#endif
-
extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 0333e60..9657f11 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -223,6 +223,7 @@ struct bcma_drv_pci_host {
struct bcma_drv_pci {
struct bcma_device *core;
+ u8 early_setup_done:1;
u8 setup_done:1;
u8 hostmode:1;
@@ -237,14 +238,26 @@ struct bcma_drv_pci {
#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
-extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
-extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
- struct bcma_device *core, bool enable);
-extern void bcma_core_pci_up(struct bcma_bus *bus);
-extern void bcma_core_pci_down(struct bcma_bus *bus);
+#ifdef CONFIG_BCMA_DRIVER_PCI
extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
+#else
+static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+}
+#endif
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
+#else
+static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
+{
+ return -ENOTSUPP;
+}
+static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* LINUX_BCMA_DRIVER_PCI_H_ */
diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h
index 5988b05..31e6d17 100644
--- a/include/linux/bcma/bcma_driver_pcie2.h
+++ b/include/linux/bcma/bcma_driver_pcie2.h
@@ -143,6 +143,8 @@
struct bcma_drv_pcie2 {
struct bcma_device *core;
+
+ u16 reqsize;
};
#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset)
@@ -153,6 +155,4 @@ struct bcma_drv_pcie2 {
#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set)
#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask)
-void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
-
#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index e64ae7b..ebd5c1f 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -64,6 +64,8 @@
#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */
#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */
+#define BCMA_PCIE2_BAR0_WIN2 0x70
+
/* SiliconBackplane Address Map.
* All regions may not exist on all chips.
*/
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index f24d245..1b5fc0c 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -5,8 +5,6 @@
struct bcma_soc {
struct bcma_bus bus;
- struct bcma_device core_cc;
- struct bcma_device core_mips;
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index efead0b..5e963a6 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -290,7 +290,21 @@ static inline unsigned bio_segments(struct bio *bio)
 * returns, and then the bio may already have been freed by the time the
 * if (bio->bi_flags ...) check runs
*/
-#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+static inline void bio_get(struct bio *bio)
+{
+ bio->bi_flags |= (1 << BIO_REFFED);
+ smp_mb__before_atomic();
+ atomic_inc(&bio->__bi_cnt);
+}
+
+static inline void bio_cnt_set(struct bio *bio, unsigned int count)
+{
+ if (count != 1) {
+ bio->bi_flags |= (1 << BIO_REFFED);
+ smp_mb__before_atomic();
+ }
+ atomic_set(&bio->__bi_cnt, count);
+}
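A hedged sketch of the reference pattern bio_get() supports; example_submit_and_inspect() is illustrative.

static void example_submit_and_inspect(struct bio *bio)
{
	bio_get(bio);		/* marks BIO_REFFED and takes an extra ref */
	submit_bio(READ, bio);
	/* bio cannot be freed by the completion path while we look at it */
	bio_put(bio);
}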
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
@@ -413,7 +427,6 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
}
extern void bio_endio(struct bio *, int);
-extern void bio_endio_nodec(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -428,13 +441,9 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
-extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
- unsigned long, unsigned int, int, gfp_t);
-struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
- struct block_device *,
- const struct sg_iovec *, int, int, gfp_t);
+ const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
gfp_t);
@@ -462,12 +471,10 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
-extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
- unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
struct rq_map_data *,
- const struct sg_iovec *,
- int, int, gfp_t);
+ const struct iov_iter *,
+ gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
@@ -475,9 +482,12 @@ extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
#ifdef CONFIG_BLK_CGROUP
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
+static inline int bio_associate_blkcg(struct bio *bio,
+ struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif /* CONFIG_BLK_CGROUP */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 202e403..ea17cca 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -52,16 +52,13 @@
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
- * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
- * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
- * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex
*/
/*
@@ -96,10 +93,10 @@ extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits);
-extern void __bitmap_shift_right(unsigned long *dst,
- const unsigned long *src, int shift, int bits);
-extern void __bitmap_shift_left(unsigned long *dst,
- const unsigned long *src, int shift, int bits);
+extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
@@ -147,71 +144,67 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
-extern int bitmap_scnprintf(char *buf, unsigned int len,
- const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
-extern int bitmap_scnlistprintf(char *buf, unsigned int len,
- const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
- const unsigned long *old, const unsigned long *new, int bits);
+ const unsigned long *old, const unsigned long *new, unsigned int nbits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
- const unsigned long *relmap, int bits);
+ const unsigned long *relmap, unsigned int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
- int sz, int bits);
+ unsigned int sz, unsigned int nbits);
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
-extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
+#ifdef __BIG_ENDIAN
+extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
+#else
+#define bitmap_copy_le bitmap_copy
+#endif
+extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-#define BITMAP_LAST_WORD_MASK(nbits) \
-( \
- ((nbits) % BITS_PER_LONG) ? \
- (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
-)
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
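For reference, the new branch-free form yields the same masks as the old conditional, worked through here assuming BITS_PER_LONG == 64.

/*
 *   BITMAP_LAST_WORD_MASK(5)  == ~0UL >> (-5  & 63) == ~0UL >> 59 == 0x1f
 *   BITMAP_LAST_WORD_MASK(64) == ~0UL >> (-64 & 63) == ~0UL >> 0  == ~0UL
 * i.e. identical to the old ((1UL << (nbits % 64)) - 1) / ~0UL special-casing,
 * but without a branch.
 */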
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
-static inline void bitmap_zero(unsigned long *dst, int nbits)
+static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
-static inline void bitmap_fill(unsigned long *dst, int nbits)
+static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- size_t nlongs = BITS_TO_LONGS(nbits);
+ unsigned int nlongs = BITS_TO_LONGS(nbits);
if (!small_const_nbits(nbits)) {
- int len = (nlongs - 1) * sizeof(unsigned long);
+ unsigned int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- int nbits)
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
- int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
@@ -290,16 +283,16 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_empty(src, nbits);
+
+ return find_first_bit(src, nbits) == nbits;
}
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
- else
- return __bitmap_full(src, nbits);
+
+ return find_first_zero_bit(src, nbits) == nbits;
}
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
@@ -309,22 +302,22 @@ static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
return __bitmap_weight(src, nbits);
}
-static inline void bitmap_shift_right(unsigned long *dst,
- const unsigned long *src, int n, int nbits)
+static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, int nbits)
{
if (small_const_nbits(nbits))
- *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
+ *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
else
- __bitmap_shift_right(dst, src, n, nbits);
+ __bitmap_shift_right(dst, src, shift, nbits);
}
-static inline void bitmap_shift_left(unsigned long *dst,
- const unsigned long *src, int n, int nbits)
+static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
- *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
+ *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
else
- __bitmap_shift_left(dst, src, n, nbits);
+ __bitmap_shift_left(dst, src, shift, nbits);
}
static inline int bitmap_parse(const char *buf, unsigned int buflen,
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 5d858e0..297f5bd 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -218,9 +218,9 @@ static inline unsigned long __ffs64(u64 word)
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
- * @size: The maximum size to search
+ * @size: The number of bits to search
*
- * Returns the bit number of the first set bit, or size.
+ * Returns the bit number of the last set bit, or size.
*/
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index 7ffe03f..fb790b8 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -3,14 +3,83 @@
#include <linux/types.h>
-extern u8 const byte_rev_table[256];
+#ifdef CONFIG_HAVE_ARCH_BITREVERSE
+#include <asm/bitrev.h>
+
+#define __bitrev32 __arch_bitrev32
+#define __bitrev16 __arch_bitrev16
+#define __bitrev8 __arch_bitrev8
-static inline u8 bitrev8(u8 byte)
+#else
+extern u8 const byte_rev_table[256];
+static inline u8 __bitrev8(u8 byte)
{
return byte_rev_table[byte];
}
-extern u16 bitrev16(u16 in);
-extern u32 bitrev32(u32 in);
+static inline u16 __bitrev16(u16 x)
+{
+ return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8);
+}
+
+static inline u32 __bitrev32(u32 x)
+{
+ return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16);
+}
+
+#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
+
+#define __constant_bitrev32(x) \
+({ \
+ u32 __x = x; \
+ __x = (__x >> 16) | (__x << 16); \
+ __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
+ __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
+ __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
+ __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
+ __x; \
+})
+
+#define __constant_bitrev16(x) \
+({ \
+ u16 __x = x; \
+ __x = (__x >> 8) | (__x << 8); \
+ __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
+ __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
+ __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
+ __x; \
+})
+
+#define __constant_bitrev8(x) \
+({ \
+ u8 __x = x; \
+ __x = (__x >> 4) | (__x << 4); \
+ __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
+ __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
+ __x; \
+})
+
+#define bitrev32(x) \
+({ \
+ u32 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev32(__x) : \
+ __bitrev32(__x); \
+})
+
+#define bitrev16(x) \
+({ \
+ u16 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev16(__x) : \
+ __bitrev16(__x); \
+ })
+#define bitrev8(x) \
+({ \
+ u8 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev8(__x) : \
+ __bitrev8(__x) ; \
+ })
#endif /* _LINUX_BITREV_H */
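A small illustrative sketch of the constant-folding behaviour these macros add; example_bitrev() is hypothetical.

static u32 example_bitrev(u32 x)
{
	u32 c = bitrev32(0x80000001);	/* constant argument: folds at compile time */

	return c ^ bitrev32(x);		/* runtime argument: table or arch helper */
}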
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
new file mode 100644
index 0000000..1b62d76
--- /dev/null
+++ b/include/linux/blk-cgroup.h
@@ -0,0 +1,650 @@
+#ifndef _BLK_CGROUP_H
+#define _BLK_CGROUP_H
+/*
+ * Common Block IO controller cgroup interface
+ *
+ * Based on ideas and code from CFQ, CFS and BFQ:
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
+ *
+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
+ * Paolo Valente <paolo.valente@unimore.it>
+ *
+ * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
+ * Nauman Rafique <nauman@google.com>
+ */
+
+#include <linux/cgroup.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
+#include <linux/blkdev.h>
+#include <linux/atomic.h>
+
+/* Max limits for throttle policy */
+#define THROTL_IOPS_MAX UINT_MAX
+
+#ifdef CONFIG_BLK_CGROUP
+
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
+
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+};
+
+struct blkcg_gq;
+
+struct blkcg {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
+
+ struct radix_tree_root blkg_tree;
+ struct blkcg_gq *blkg_hint;
+ struct hlist_head blkg_list;
+
+ struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
+
+ struct list_head all_blkcgs_node;
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct list_head cgwb_list;
+#endif
+};
+
+struct blkg_stat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt;
+};
+
+struct blkg_rwstat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt[BLKG_RWSTAT_NR];
+};
+
+/*
+ * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
+ * request_queue (q). This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size. blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning and pd_size can't be smaller than pd.
+ */
+struct blkg_policy_data {
+ /* the blkg and policy id this per-policy data belongs to */
+ struct blkcg_gq *blkg;
+ int plid;
+
+ /* used during policy activation */
+ struct list_head alloc_node;
+};
+
+/*
+ * Policies that need to keep per-blkcg data which is independent of
+ * any request_queue associated with the blkcg must specify its size
+ * with the cpd_size field of the blkcg_policy structure and
+ * embed a blkcg_policy_data in it. cpd_init() is invoked to let
+ * each policy handle its per-blkcg data.
+ */
+struct blkcg_policy_data {
+ /* the policy id this per-policy data belongs to */
+ int plid;
+};
+
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+ /* Pointer to the associated request_queue */
+ struct request_queue *q;
+ struct list_head q_node;
+ struct hlist_node blkcg_node;
+ struct blkcg *blkcg;
+
+ /*
+ * Each blkg gets congested separately and the congestion state is
+ * propagated to the matching bdi_writeback_congested.
+ */
+ struct bdi_writeback_congested *wb_congested;
+
+ /* all non-root blkcg_gq's are guaranteed to have access to parent */
+ struct blkcg_gq *parent;
+
+ /* request allocation list for this blkcg-q pair */
+ struct request_list rl;
+
+ /* reference count */
+ atomic_t refcnt;
+
+ /* is this blkg online? protected by both blkcg and q locks */
+ bool online;
+
+ struct blkg_policy_data *pd[BLKCG_MAX_POLS];
+
+ struct rcu_head rcu_head;
+};
+
+typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+ int plid;
+ /* policy specific private data size */
+ size_t pd_size;
+ /* policy specific per-blkcg data size */
+ size_t cpd_size;
+ /* cgroup files for the policy */
+ struct cftype *cftypes;
+
+ /* operations */
+ blkcg_pol_init_cpd_fn *cpd_init_fn;
+ blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_online_pd_fn *pd_online_fn;
+ blkcg_pol_offline_pd_fn *pd_offline_fn;
+ blkcg_pol_exit_pd_fn *pd_exit_fn;
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
+};
+
+extern struct blkcg blkcg_root;
+extern struct cgroup_subsys_state * const blkcg_root_css;
+
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
+
+/* Blkio controller policy registration */
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+ int off);
+
+struct blkg_conf_ctx {
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ u64 v;
+};
+
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+
+
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct blkcg, css) : NULL;
+}
+
+static inline struct blkcg *task_blkcg(struct task_struct *tsk)
+{
+ return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
+}
+
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_css)
+ return css_to_blkcg(bio->bi_css);
+ return task_blkcg(current);
+}
+
+static inline struct cgroup_subsys_state *
+task_get_blkcg_css(struct task_struct *task)
+{
+ return task_get_css(task, blkio_cgrp_id);
+}
+
+/**
+ * blkcg_parent - get the parent of a blkcg
+ * @blkcg: blkcg of interest
+ *
+ * Return the parent blkcg of @blkcg. Can be called anytime.
+ */
+static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
+{
+ return css_to_blkcg(blkcg->css.parent);
+}
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol)
+{
+ return blkg ? blkg->pd[pol->plid] : NULL;
+}
+
+static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
+ struct blkcg_policy *pol)
+{
+ return blkcg ? blkcg->pd[pol->plid] : NULL;
+}
+
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data. Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+ return pd ? pd->blkg : NULL;
+}
+
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+ char *p;
+
+ p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ if (!p) {
+ strncpy(buf, "<unavailable>", buflen);
+ return -ENAMETOOLONG;
+ }
+
+ memmove(buf, p, buf + buflen - p);
+ return 0;
+}
+
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
+{
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ atomic_inc(&blkg->refcnt);
+}
+
+void __blkg_release_rcu(struct rcu_head *rcu);
+
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+ if (atomic_dec_and_test(&blkg->refcnt))
+ call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+}
+
+struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
+ bool update_hint);
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
+ * read locked. If called under either blkcg or queue lock, the iteration
+ * is guaranteed to include all and only online blkgs. The caller may
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
+ */
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_css: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Similar to blkg_for_each_descendant_pre() but performs post-order
+ * traversal instead. Synchronization rules are the same. @p_blkg is
+ * included in the iteration and the last node to be visited.
+ */
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
+ (p_blkg)->q, false)))
+
+/**
+ * blk_get_rl - get request_list to use
+ * @q: request_queue of interest
+ * @bio: bio which will be attached to the allocated request (may be %NULL)
+ *
+ * The caller wants to allocate a request from @q to use for @bio. Find
+ * the request_list to use and obtain a reference on it. Should be called
+ * under queue_lock. This function is guaranteed to return non-%NULL
+ * request_list.
+ */
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio)
+{
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+
+ /* bypass blkg lookup and use @q->root_rl directly for root */
+ if (blkcg == &blkcg_root)
+ goto root_rl;
+
+ /*
+ * Try to use blkg->rl. blkg lookup may fail under memory pressure
+ * or if either the blkcg or queue is going away. Fall back to
+ * root_rl in such cases.
+ */
+ blkg = blkg_lookup_create(blkcg, q);
+ if (unlikely(IS_ERR(blkg)))
+ goto root_rl;
+
+ blkg_get(blkg);
+ rcu_read_unlock();
+ return &blkg->rl;
+root_rl:
+ rcu_read_unlock();
+ return &q->root_rl;
+}
+
+/**
+ * blk_put_rl - put request_list
+ * @rl: request_list to put
+ *
+ * Put the reference acquired by blk_get_rl(). Should be called under
+ * queue_lock.
+ */
+static inline void blk_put_rl(struct request_list *rl)
+{
+ /* root_rl may not have blkg set */
+ if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+ blkg_put(rl->blkg);
+}
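A hedged sketch of the get/put pairing around request allocation; example_alloc_path() is illustrative only.

static struct request_list *example_alloc_path(struct request_queue *q,
					       struct bio *bio)
{
	struct request_list *rl;

	lockdep_assert_held(q->queue_lock);
	rl = blk_get_rl(q, bio);	/* never NULL, may fall back to q->root_rl */
	/* ... allocate a request, then blk_rq_set_rl(rq, rl) ... */
	return rl;			/* the freeing path balances with blk_put_rl(rl) */
}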
+
+/**
+ * blk_rq_set_rl - associate a request with a request_list
+ * @rq: request of interest
+ * @rl: target request_list
+ *
+ * Associate @rq with @rl so that accounting and freeing can know the
+ * request_list @rq came from.
+ */
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
+{
+ rq->rl = rl;
+}
+
+/**
+ * blk_rq_rl - return the request_list a request came from
+ * @rq: request of interest
+ *
+ * Return the request_list @rq is allocated from.
+ */
+static inline struct request_list *blk_rq_rl(struct request *rq)
+{
+ return rq->rl;
+}
+
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+ struct request_queue *q);
+/**
+ * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
+ *
+ * Should be used under queue_lock.
+ */
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
+
+static inline void blkg_stat_init(struct blkg_stat *stat)
+{
+ u64_stats_init(&stat->syncp);
+}
+
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+ u64_stats_update_begin(&stat->syncp);
+ stat->cnt += val;
+ u64_stats_update_end(&stat->syncp);
+}
+
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat. This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+ unsigned int start;
+ uint64_t v;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stat->syncp);
+ v = stat->cnt;
+ } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
+
+ return v;
+}
+
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+ stat->cnt = 0;
+}
+
+/**
+ * blkg_stat_merge - merge a blkg_stat into another
+ * @to: the destination blkg_stat
+ * @from: the source
+ *
+ * Add @from's count to @to.
+ */
+static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
+{
+ blkg_stat_add(to, blkg_stat_read(from));
+}
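A minimal sketch of a policy bumping and reading its own blkg_stat; example_account() is hypothetical.

static void example_account(struct blkg_stat *stat)
{
	blkg_stat_add(stat, 1);		/* caller serializes updates */
	pr_debug("count=%llu\n",
		 (unsigned long long)blkg_stat_read(stat));
}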
+
+static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
+{
+ u64_stats_init(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @rw. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ int rw, uint64_t val)
+{
+ u64_stats_update_begin(&rwstat->syncp);
+
+ if (rw & REQ_WRITE)
+ rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_READ] += val;
+ if (rw & REQ_SYNC)
+ rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+
+ u64_stats_update_end(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read the current snapshot of @rwstat and return it as the return value.
+ * This function can be called without synchronization and takes care of
+ * u64 atomicity.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+ unsigned int start;
+ struct blkg_rwstat tmp;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rwstat->syncp);
+ tmp = *rwstat;
+ } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
+
+ return tmp;
+}
+
+/**
+ * blkg_rwstat_total - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+ return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+}
+
+/**
+ * blkg_rwstat_merge - merge a blkg_rwstat into another
+ * @to: the destination blkg_rwstat
+ * @from: the source
+ *
+ * Add @from's counts to @to.
+ */
+static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
+ struct blkg_rwstat *from)
+{
+ struct blkg_rwstat v = blkg_rwstat_read(from);
+ int i;
+
+ u64_stats_update_begin(&to->syncp);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ to->cnt[i] += v.cnt[i];
+ u64_stats_update_end(&to->syncp);
+}
+
+#else /* CONFIG_BLK_CGROUP */
+
+struct blkcg {
+};
+
+struct blkg_policy_data {
+};
+
+struct blkcg_policy_data {
+};
+
+struct blkcg_gq {
+};
+
+struct blkcg_policy {
+};
+
+#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+
+static inline struct cgroup_subsys_state *
+task_get_blkcg_css(struct task_struct *task)
+{
+ return NULL;
+}
+
+#ifdef CONFIG_BLOCK
+
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { }
+
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio) { return &q->root_rl; }
+static inline void blk_put_rl(struct request_list *rl) { }
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
+static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+
+#endif /* CONFIG_BLOCK */
+#endif /* CONFIG_BLK_CGROUP */
+#endif /* _BLK_CGROUP_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 5735e71..37d1602 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -13,7 +13,7 @@ struct blk_mq_cpu_notifier {
};
struct blk_mq_ctxmap {
- unsigned int map_size;
+ unsigned int size;
unsigned int bits_per_word;
struct blk_align_bitmap *map;
};
@@ -96,6 +96,7 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
+typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
struct blk_mq_ops {
/*
@@ -146,6 +147,8 @@ enum {
BLK_MQ_F_SG_MERGE = 1 << 2,
BLK_MQ_F_SYSFS_UP = 1 << 3,
BLK_MQ_F_DEFER_ISSUE = 1 << 4,
+ BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
+ BLK_MQ_F_ALLOC_POLICY_BITS = 1,
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
@@ -154,8 +157,16 @@ enum {
BLK_MQ_CPU_WORK_BATCH = 8,
};
+#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
+ ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
+ ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
+#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
+ ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
+ << BLK_MQ_F_ALLOC_POLICY_START_BIT)
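For illustration, a blk-mq driver could request round-robin tag allocation when filling in its tag set; BLK_TAG_ALLOC_RR is defined in blkdev.h by this series, and example_init_tag_set() is hypothetical.

static void example_init_tag_set(struct blk_mq_tag_set *set)
{
	set->flags = BLK_MQ_F_SHOULD_MERGE |
		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
}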
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
@@ -166,13 +177,13 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_insert_request(struct request *, bool, bool, bool);
-void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -211,15 +222,19 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
void *priv);
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
+ void *priv);
+void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
/*
* Driver command data is immediately after the request. So subtract request
- * size to get back to the original request.
+ * size to get back to the original request, add request size to get the PDU.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
@@ -227,7 +242,7 @@ static inline struct request *blk_mq_rq_from_pdu(void *pdu)
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
- return (void *) rq + sizeof(*rq);
+ return rq + 1;
}
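A short sketch of the PDU round trip; struct example_cmd stands in for a driver's private per-command data.

struct example_cmd {
	int status;				/* hypothetical per-command field */
};

static void example_init_pdu(struct request *rq)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	WARN_ON(blk_mq_rq_from_pdu(cmd) != rq);	/* cmd sits right after rq */
	cmd->status = 0;
}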
#define queue_for_each_hw_ctx(q, hctx, i) \
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index c294e3e..7303b34 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -65,7 +65,7 @@ struct bio {
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
- atomic_t bi_remaining;
+ atomic_t __bi_remaining;
bio_end_io_t *bi_end_io;
@@ -92,7 +92,7 @@ struct bio {
unsigned short bi_max_vecs; /* max bvl_vecs we can hold */
- atomic_t bi_cnt; /* pin count */
+ atomic_t __bi_cnt; /* pin count */
struct bio_vec *bi_io_vec; /* the actual vec list */
@@ -112,16 +112,15 @@ struct bio {
* bio flags
*/
#define BIO_UPTODATE 0 /* ok after I/O completion */
-#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
-#define BIO_EOF 2 /* out-out-bounds error */
-#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
-#define BIO_CLONED 4 /* doesn't own data */
-#define BIO_BOUNCED 5 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 6 /* contains user pages */
-#define BIO_EOPNOTSUPP 7 /* not supported */
-#define BIO_NULL_MAPPED 8 /* contains invalid user pages */
-#define BIO_QUIET 9 /* Make BIO Quiet */
-#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
+#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
+#define BIO_CLONED 2 /* doesn't own data */
+#define BIO_BOUNCED 3 /* bio is a bounce bio */
+#define BIO_USER_MAPPED 4 /* contains user pages */
+#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
+#define BIO_QUIET 6 /* Make BIO Quiet */
+#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */
+#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
/*
* Flags starting here get preserved by bio_reset() - this includes
@@ -181,7 +180,9 @@ enum rq_flag_bits {
__REQ_ELVPRIV, /* elevator private data attached */
__REQ_FAILED, /* set if the request failed */
__REQ_QUIET, /* don't worry about errors */
- __REQ_PREEMPT, /* set for "ide_preempt" requests */
+ __REQ_PREEMPT, /* set for "ide_preempt" requests and also
+ for requests for which the SCSI "quiesce"
+ state must be ignored. */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_FLUSH_SEQ, /* request for flush sequence */
@@ -218,7 +219,7 @@ enum rq_flag_bits {
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 92f4b4b..d4068c1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -12,7 +12,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
-#include <linux/backing-dev.h>
+#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
@@ -22,15 +22,13 @@
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
struct module;
struct scsi_ioctl_command;
struct request_queue;
struct elevator_queue;
-struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
@@ -75,18 +73,7 @@ struct request_list {
enum rq_cmd_type_bits {
REQ_TYPE_FS = 1, /* fs request */
REQ_TYPE_BLOCK_PC, /* scsi command */
- REQ_TYPE_SENSE, /* sense request */
- REQ_TYPE_PM_SUSPEND, /* suspend request */
- REQ_TYPE_PM_RESUME, /* resume request */
- REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
- REQ_TYPE_SPECIAL, /* driver defined type */
- /*
- * for ATA/ATAPI devices. this really doesn't belong here, ide should
- * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
- * private REQ_LB opcodes to differentiate what type of request this is
- */
- REQ_TYPE_ATA_TASKFILE,
- REQ_TYPE_ATA_PC,
+ REQ_TYPE_DRV_PRIV, /* driver defined types from here */
};
#define BLK_MAX_CDB 16
@@ -108,7 +95,7 @@ struct request {
struct blk_mq_ctx *mq_ctx;
u64 cmd_flags;
- enum rq_cmd_type_bits cmd_type;
+ unsigned cmd_type;
unsigned long atomic_flags;
int cpu;
@@ -216,19 +203,6 @@ static inline unsigned short req_get_ioprio(struct request *req)
return req->ioprio;
}
-/*
- * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
- * requests. Some step values could eventually be made generic.
- */
-struct request_pm_state
-{
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
-
#include <linux/elevator.h>
struct blk_queue_ctx;
@@ -272,7 +246,11 @@ struct blk_queue_tag {
int max_depth; /* what we will send to device */
int real_max_depth; /* what the array can hold */
atomic_t refcnt; /* map can be shared */
+ int alloc_policy; /* tag allocation policy */
+ int next_tag; /* next tag */
};
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
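
The new alloc_policy field is fed in through the reworked tagging entry points later in this file (blk_queue_init_tags() and blk_init_tags() both gain an extra int argument). A minimal sketch of a legacy-request driver opting into round-robin tag allocation, assuming 'q' is its request queue, a depth of 64 and no shared tag map:

	int err;

	/* round-robin reissue of tags; BLK_TAG_ALLOC_FIFO would always start from 0 */
	err = blk_queue_init_tags(q, 64, NULL, BLK_TAG_ALLOC_RR);
	if (err)
		return err;
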
@@ -465,7 +443,7 @@ struct request_queue {
struct mutex sysfs_lock;
int bypass_depth;
- int mq_freeze_depth;
+ atomic_t mq_freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
@@ -516,6 +494,7 @@ struct request_queue {
(1 << QUEUE_FLAG_ADD_RANDOM))
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP))
static inline void queue_lockdep_assert_held(struct request_queue *q)
@@ -605,10 +584,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
(((rq)->cmd_flags & REQ_STARTED) && \
((rq)->cmd_type == REQ_TYPE_FS))
-#define blk_pm_request(rq) \
- ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
- (rq)->cmd_type == REQ_TYPE_PM_RESUME)
-
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
@@ -816,32 +791,12 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
-extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
-
-/*
- * A queue has just exitted congestion. Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
-{
- clear_bdi_congested(&q->backing_dev_info, sync);
-}
-
-/*
- * A queue has just entered congestion. Flag that in the queue's VM-visible
- * state flags and increment the global gounter of congested queues.
- */
-static inline void blk_set_queue_congested(struct request_queue *q, int sync)
-{
- set_bdi_congested(&q->backing_dev_info, sync);
-}
-
extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
+extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
@@ -850,8 +805,8 @@ extern int blk_rq_map_user(struct request_queue *, struct request *,
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct sg_iovec *,
- int, unsigned int, gfp_t);
+ struct rq_map_data *, const struct iov_iter *,
+ gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@@ -930,7 +885,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
return q->limits.max_hw_sectors;
- if (!q->limits.chunk_sectors)
+ if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
return blk_queue_get_max_sectors(q, rq->cmd_flags);
return min(blk_max_size_offset(q, blk_rq_pos(rq)),
@@ -1044,8 +999,6 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
-extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
- struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
@@ -1053,6 +1006,7 @@ bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
+extern void blk_set_queue_dying(struct request_queue *);
/*
* block layer runtime pm functions
@@ -1139,11 +1093,11 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
-extern struct blk_queue_tag *blk_init_tags(int);
+extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);
static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
@@ -1162,7 +1116,7 @@ extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask);
+ sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
@@ -1176,7 +1130,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
return blkdev_issue_zeroout(sb->s_bdev,
block << (sb->s_blocksize_bits - 9),
nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask);
+ gfp_mask, true);
}
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
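
blkdev_issue_zeroout() grows a 'discard' flag, and sb_issue_zeroout() above now passes true so the block layer may satisfy the request with a discard when the device guarantees that discarded blocks read back as zeroes. A caller-side sketch, with 'bdev', 'sector' and 'nr_sects' assumed to be in scope:

	/* prefer discard-based zeroing where the device supports it and zeroes
	 * discarded data; passing false instead forces explicit zero writes */
	int ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS, true);
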
@@ -1601,8 +1555,8 @@ struct block_device_operations {
int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- int (*direct_access) (struct block_device *, sector_t,
- void **, unsigned long *);
+ long (*direct_access)(struct block_device *, sector_t,
+ void **, unsigned long *pfn, long size);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1620,6 +1574,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
+extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
+ unsigned long *pfn, long size);
#else /* CONFIG_BLOCK */
struct block_device;
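
The widened direct_access()/bdev_direct_access() interface above returns how many bytes are addressable at the translated kernel address and also hands back a pfn. A sketch of a caller mapping one page of persistent memory, with 'bdev' and 'sector' assumed valid:

	void *kaddr;
	unsigned long pfn;
	long avail;

	avail = bdev_direct_access(bdev, sector, &kaddr, &pfn, PAGE_SIZE);
	if (avail < 0)
		return avail;		/* no ->direct_access or the range is not mappable */

	/* up to 'avail' bytes at kaddr can now be accessed, bypassing the page cache */
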
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 0995c2d..f589222 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -357,12 +357,12 @@ extern void *alloc_large_system_hash(const char *tablename,
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
*/
-#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
-#define HASHDIST_DEFAULT 1
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
#else
-#define HASHDIST_DEFAULT 0
+#define hashdist (0)
#endif
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 86c12c9..8fdcb78 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
#define _LINUX_BH_H
#include <linux/preempt.h>
-#include <linux/preempt_mask.h>
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bbfceb7..4383476 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -32,23 +32,19 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
- struct bpf_map_ops *ops;
+ const struct bpf_map_ops *ops;
struct work_struct work;
};
struct bpf_map_type_list {
struct list_head list_node;
- struct bpf_map_ops *ops;
+ const struct bpf_map_ops *ops;
enum bpf_map_type type;
};
-void bpf_register_map_type(struct bpf_map_type_list *tl);
-void bpf_map_put(struct bpf_map *map);
-struct bpf_map *bpf_map_get(struct fd f);
-
/* function argument constraints */
enum bpf_arg_type {
- ARG_ANYTHING = 0, /* any argument is ok */
+ ARG_DONTCARE = 0, /* unused argument in helper function */
/* the following constraints used to prototype
* bpf_map_lookup/update/delete_elem() functions
@@ -62,6 +58,9 @@ enum bpf_arg_type {
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+
+ ARG_PTR_TO_CTX, /* pointer to context */
+ ARG_ANYTHING, /* any (initialized) argument is ok */
};
/* type of values returned from helper functions */
@@ -105,41 +104,93 @@ struct bpf_verifier_ops {
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+
+ u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn);
};
struct bpf_prog_type_list {
struct list_head list_node;
- struct bpf_verifier_ops *ops;
+ const struct bpf_verifier_ops *ops;
enum bpf_prog_type type;
};
-void bpf_register_prog_type(struct bpf_prog_type_list *tl);
-
struct bpf_prog;
struct bpf_prog_aux {
atomic_t refcnt;
- bool is_gpl_compatible;
- enum bpf_prog_type prog_type;
- struct bpf_verifier_ops *ops;
- struct bpf_map **used_maps;
u32 used_map_cnt;
+ const struct bpf_verifier_ops *ops;
+ struct bpf_map **used_maps;
struct bpf_prog *prog;
- struct work_struct work;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
+};
+
+struct bpf_array {
+ struct bpf_map map;
+ u32 elem_size;
+ /* 'ownership' of prog_array is claimed by the first program that
+ * is going to use this map or by the first program which FD is stored
+ * in the map to make sure that all callers and callees have the same
+ * prog_type and JITed flag
+ */
+ enum bpf_prog_type owner_prog_type;
+ bool owner_jited;
+ union {
+ char value[0] __aligned(8);
+ struct bpf_prog *prog[0] __aligned(8);
+ };
};
+#define MAX_TAIL_CALL_CNT 32
+
+u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+void bpf_prog_array_map_clear(struct bpf_map *map);
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
#ifdef CONFIG_BPF_SYSCALL
-void bpf_prog_put(struct bpf_prog *prog);
-#else
-static inline void bpf_prog_put(struct bpf_prog *prog) {}
-#endif
+void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+void bpf_register_map_type(struct bpf_map_type_list *tl);
+
struct bpf_prog *bpf_prog_get(u32 ufd);
+void bpf_prog_put(struct bpf_prog *prog);
+void bpf_prog_put_rcu(struct bpf_prog *prog);
+
+struct bpf_map *bpf_map_get(struct fd f);
+void bpf_map_put(struct bpf_map *map);
+
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+#else
+static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
+{
+}
+
+static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_prog_put(struct bpf_prog *prog)
+{
+}
+#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
-extern struct bpf_func_proto bpf_map_lookup_elem_proto;
-extern struct bpf_func_proto bpf_map_update_elem_proto;
-extern struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
+extern const struct bpf_func_proto bpf_map_update_elem_proto;
+extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+
+extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
+extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_tail_call_proto;
+extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
+extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
+extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
+extern const struct bpf_func_proto bpf_get_current_comm_proto;
#endif /* _LINUX_BPF_H */
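
With the registration helpers now stubbed out when CONFIG_BPF_SYSCALL is disabled, a backend can register unconditionally from an initcall. A sketch using hypothetical callbacks (bodies not shown) and an assumed program type:

static const struct bpf_verifier_ops my_verifier_ops = {
	.get_func_proto		= my_get_func_proto,	/* hypothetical */
	.is_valid_access	= my_is_valid_access,	/* hypothetical */
};

static struct bpf_prog_type_list my_prog_type = {
	.ops	= &my_verifier_ops,
	.type	= BPF_PROG_TYPE_KPROBE,			/* assumed program type */
};

static int __init register_my_prog_type(void)
{
	bpf_register_prog_type(&my_prog_type);		/* no-op without CONFIG_BPF_SYSCALL */
	return 0;
}
late_initcall(register_my_prog_type);
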
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 7ccd928..697ca77 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
#ifndef _LINUX_BRCMPHY_H
#define _LINUX_BRCMPHY_H
+#include <linux/phy.h>
+
+/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used
+ * to configure the switch internal registers via MDIO accesses.
+ */
+#define BRCM_PSEUDO_PHY_ADDR 30
+
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
#define PHY_ID_BCM5241 0x0143bc30
@@ -11,14 +18,16 @@
#define PHY_ID_BCM5421 0x002060e0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
+#define PHY_ID_BCM54616S 0x03625d10
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
-#define PHY_ID_BCM7425 0x03625e60
+#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7439 0x600d8480
+#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
#define PHY_BCM_OUI_MASK 0xfffffc00
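
A sketch of how the pseudo-PHY address would be used to reach the switch's internal registers over MDIO, assuming an existing struct mii_bus *bus and a register number 'reg'; the bit being set is purely illustrative:

	int val;

	val = mdiobus_read(bus, BRCM_PSEUDO_PHY_ADDR, reg);
	if (val < 0)
		return val;					/* MDIO access failed */
	mdiobus_write(bus, BRCM_PSEUDO_PHY_ADDR, reg, val | BIT(0));	/* hypothetical bit */
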
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 73b4522..e6797de 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
+
+static inline struct buffer_head *
+sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+{
+ return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+}
+
static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
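
The new sb_getblk_gfp() helper keeps sb_getblk()'s behaviour but lets the caller choose the allocation flags, for example to avoid recursing into the filesystem while holding journal state. A sketch, assuming 'sb' and 'block' are in scope:

	struct buffer_head *bh;

	bh = sb_getblk_gfp(sb, block, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;		/* buffer_head allocation failed */
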
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 3daf5ed..2189935 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -19,7 +19,7 @@ enum cache_type {
/**
* struct cacheinfo - represent a cache leaf node
* @type: type of the cache - data, inst or unified
- * @level: represents the hierarcy in the multi-level cache
+ * @level: represents the hierarchy in the multi-level cache
* @coherency_line_size: size of each cache line usually representing
* the minimum amount of data that gets transferred from memory
* @number_of_sets: total number of sets, a set is a collection of cache
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index c05ff0f..c3a9c8f 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -61,6 +61,8 @@ struct can_priv {
char tx_led_trig_name[CAN_LED_NAME_SZ];
struct led_trigger *rx_led_trig;
char rx_led_trig_name[CAN_LED_NAME_SZ];
+ struct led_trigger *rxtx_led_trig;
+ char rxtx_led_trig_name[CAN_LED_NAME_SZ];
#endif
};
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
index e0475c5..146de45 100644
--- a/include/linux/can/led.h
+++ b/include/linux/can/led.h
@@ -21,8 +21,10 @@ enum can_led_event {
#ifdef CONFIG_CAN_LEDS
-/* keep space for interface name + "-tx"/"-rx" suffix and null terminator */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 4)
+/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
+ * suffix and null terminator
+ */
+#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
void can_led_event(struct net_device *netdev, enum can_led_event event);
void devm_can_led_init(struct net_device *netdev);
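
The two extra bytes cover the worst case of the new combined trigger: an interface name of up to IFNAMSIZ - 1 characters, the five-character "-rxtx" suffix and the terminating NUL still fit within IFNAMSIZ + 6. A sketch of how such a trigger name might be composed, assuming 'priv' and 'netdev' as in the struct can_priv users:

	snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name),
		 "%s-rxtx", netdev->name);
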
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index cc00d15..51bb653 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -27,10 +27,12 @@
/**
* struct can_skb_priv - private additional data inside CAN sk_buffs
* @ifindex: ifindex of the first interface the CAN frame appeared on
+ * @skbcnt: atomic counter to provide a unique id together with the skb pointer
* @cf: align to the following CAN frame at skb->data
*/
struct can_skb_priv {
int ifindex;
+ int skbcnt;
struct can_frame cf[0];
};
@@ -44,16 +46,11 @@ static inline void can_skb_reserve(struct sk_buff *skb)
skb_reserve(skb, sizeof(struct can_skb_priv));
}
-static inline void can_skb_destructor(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
if (sk) {
sock_hold(sk);
- skb->destructor = can_skb_destructor;
+ skb->destructor = sock_efree;
skb->sk = sk;
}
}
diff --git a/include/linux/capability.h b/include/linux/capability.h
index aa93e5e..af9f0b9 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -205,6 +205,7 @@ static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
cap_intersect(permitted, __cap_nfsd_set));
}
+#ifdef CONFIG_MULTIUSER
extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
@@ -213,6 +214,34 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
+#else
+static inline bool has_capability(struct task_struct *t, int cap)
+{
+ return true;
+}
+static inline bool has_ns_capability(struct task_struct *t,
+ struct user_namespace *ns, int cap)
+{
+ return true;
+}
+static inline bool has_capability_noaudit(struct task_struct *t, int cap)
+{
+ return true;
+}
+static inline bool has_ns_capability_noaudit(struct task_struct *t,
+ struct user_namespace *ns, int cap)
+{
+ return true;
+}
+static inline bool capable(int cap)
+{
+ return true;
+}
+static inline bool ns_capable(struct user_namespace *ns, int cap)
+{
+ return true;
+}
+#endif /* CONFIG_MULTIUSER */
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
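
With the !CONFIG_MULTIUSER stubs above always returning true, existing privilege checks keep compiling unchanged and simply always pass on single-user kernels, e.g.:

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;		/* never taken when CONFIG_MULTIUSER is disabled */
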
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index fb45919..f876361 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -30,6 +30,4 @@ void cdev_del(struct cdev *);
void cd_forget(struct inode *);
-extern struct backing_dev_info directly_mappable_cdev_bdi;
-
#endif
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 71e05bb..4763ad6 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -50,6 +50,19 @@
#define CEPH_FEATURE_MDS_INLINE_DATA (1ULL<<40)
#define CEPH_FEATURE_CRUSH_TUNABLES3 (1ULL<<41)
#define CEPH_FEATURE_OSD_PRIMARY_AFFINITY (1ULL<<41) /* overlap w/ tunables3 */
+#define CEPH_FEATURE_MSGR_KEEPALIVE2 (1ULL<<42)
+#define CEPH_FEATURE_OSD_POOLRESEND (1ULL<<43)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 (1ULL<<44)
+#define CEPH_FEATURE_OSD_SET_ALLOC_HINT (1ULL<<45)
+#define CEPH_FEATURE_OSD_FADVISE_FLAGS (1ULL<<46)
+#define CEPH_FEATURE_OSD_REPOP (1ULL<<46) /* overlap with fadvise */
+#define CEPH_FEATURE_OSD_OBJECT_DIGEST (1ULL<<46) /* overlap with fadvise */
+#define CEPH_FEATURE_OSD_TRANSACTION_MAY_LAYOUT (1ULL<<46) /* overlap w/ fadvise */
+#define CEPH_FEATURE_MDS_QUOTA (1ULL<<47)
+#define CEPH_FEATURE_CRUSH_V4 (1ULL<<48) /* straw2 buckets */
+#define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
+// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
+#define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -93,7 +106,8 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
- CEPH_FEATURE_OSD_PRIMARY_AFFINITY)
+ CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
+ CEPH_FEATURE_CRUSH_V4)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index c0dadaa..d7d072a 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -158,17 +158,6 @@ enum {
};
-/* pool operations */
-enum {
- POOL_OP_CREATE = 0x01,
- POOL_OP_DELETE = 0x02,
- POOL_OP_AUID_CHANGE = 0x03,
- POOL_OP_CREATE_SNAP = 0x11,
- POOL_OP_DELETE_SNAP = 0x12,
- POOL_OP_CREATE_UNMANAGED_SNAP = 0x21,
- POOL_OP_DELETE_UNMANAGED_SNAP = 0x22,
-};
-
struct ceph_mon_request_header {
__le64 have_version;
__le16 session_mon;
@@ -191,31 +180,6 @@ struct ceph_mon_statfs_reply {
struct ceph_statfs st;
} __attribute__ ((packed));
-const char *ceph_pool_op_name(int op);
-
-struct ceph_mon_poolop {
- struct ceph_mon_request_header monhdr;
- struct ceph_fsid fsid;
- __le32 pool;
- __le32 op;
- __le64 auid;
- __le64 snapid;
- __le32 name_len;
-} __attribute__ ((packed));
-
-struct ceph_mon_poolop_reply {
- struct ceph_mon_request_header monhdr;
- struct ceph_fsid fsid;
- __le32 reply_code;
- __le32 epoch;
- char has_data;
- char data[0];
-} __attribute__ ((packed));
-
-struct ceph_mon_unmanaged_snap {
- __le64 snapid;
-} __attribute__ ((packed));
-
struct ceph_osd_getmap {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
@@ -307,6 +271,7 @@ enum {
CEPH_SESSION_RECALL_STATE,
CEPH_SESSION_FLUSHMSG,
CEPH_SESSION_FLUSHMSG_ACK,
+ CEPH_SESSION_FORCE_RO,
};
extern const char *ceph_session_op_name(int op);
@@ -358,6 +323,7 @@ enum {
CEPH_MDS_OP_MKSNAP = 0x01400,
CEPH_MDS_OP_RMSNAP = 0x01401,
CEPH_MDS_OP_LSSNAP = 0x00402,
+ CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
extern const char *ceph_mds_op_name(int op);
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
index 1df086d..29cf897 100644
--- a/include/linux/ceph/debugfs.h
+++ b/include/linux/ceph/debugfs.h
@@ -7,13 +7,7 @@
#define CEPH_DEFINE_SHOW_FUNC(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
- struct seq_file *sf; \
- int ret; \
- \
- ret = single_open(file, name, NULL); \
- sf = file->private_data; \
- sf->private = inode->i_private; \
- return ret; \
+ return single_open(file, name, inode->i_private); \
} \
\
static const struct file_operations name##_fops = { \
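
The simplified macro just forwards inode->i_private to single_open(). A usage sketch with the show body elided; the macro expands to name##_open() and name##_fops as before:

static int monc_show(struct seq_file *s, void *p)
{
	/* ... dump monitor-client state into 's' ... */
	return 0;
}

CEPH_DEFINE_SHOW_FUNC(monc_show)	/* generates monc_show_open() and monc_show_fops */
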
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 8b11a79..9ebee53 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -30,8 +30,9 @@
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
#define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */
+#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
-#define CEPH_OPT_DEFAULT (0)
+#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
#define ceph_set_opt(client, opt) \
(client)->options->flags |= CEPH_OPT_##opt;
@@ -42,9 +43,9 @@ struct ceph_options {
int flags;
struct ceph_fsid fsid;
struct ceph_entity_addr my_addr;
- int mount_timeout;
- int osd_idle_ttl;
- int osd_keepalive_timeout;
+ unsigned long mount_timeout; /* jiffies */
+ unsigned long osd_idle_ttl; /* jiffies */
+ unsigned long osd_keepalive_timeout; /* jiffies */
/*
 * any type that can't be simply compared or doesn't need
@@ -62,9 +63,9 @@ struct ceph_options {
/*
* defaults
*/
-#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
-#define CEPH_OSD_KEEPALIVE_DEFAULT 5
-#define CEPH_OSD_IDLE_TTL_DEFAULT 60
+#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
+#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
+#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
@@ -92,13 +93,9 @@ enum {
CEPH_MOUNT_SHUTDOWN,
};
-/*
- * subtract jiffies
- */
-static inline unsigned long time_sub(unsigned long a, unsigned long b)
+static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
- BUG_ON(time_after(b, a));
- return (long)a - (long)b;
+ return timeout ?: MAX_SCHEDULE_TIMEOUT;
}
struct ceph_mds_client;
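
With the option fields now held in jiffies and ceph_timeout_jiffies() mapping 0 to "wait forever", a possibly-zero timeout can be passed straight to the scheduler helpers. A sketch, assuming a struct ceph_options *opt and a request object with a completion field:

	long ret;

	ret = wait_for_completion_killable_timeout(&req->completion,
				ceph_timeout_jiffies(opt->mount_timeout));
	if (ret < 0)
		return ret;		/* interrupted by a fatal signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* the (non-zero) timeout elapsed */
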
@@ -134,6 +131,7 @@ struct ceph_client {
struct dentry *debugfs_dir;
struct dentry *debugfs_monmap;
struct dentry *debugfs_osdmap;
+ struct dentry *debugfs_options;
#endif
};
@@ -176,6 +174,7 @@ static inline int calc_pages_for(u64 off, u64 len)
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
@@ -190,6 +189,7 @@ extern struct ceph_options *ceph_parse_options(char *options,
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
void *private);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index d9d396c..3775327 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -8,6 +8,7 @@
#include <linux/radix-tree.h>
#include <linux/uio.h>
#include <linux/workqueue.h>
+#include <net/net_namespace.h>
#include <linux/ceph/types.h>
#include <linux/ceph/buffer.h>
@@ -56,7 +57,9 @@ struct ceph_messenger {
struct ceph_entity_addr my_enc_addr;
atomic_t stopping;
+ possible_net_t net;
bool nocrc;
+ bool tcp_nodelay;
/*
* the global_seq counts connections i (attempt to) initiate
@@ -264,7 +267,9 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr,
struct ceph_entity_addr *myaddr,
u64 supported_features,
u64 required_features,
- bool nocrc);
+ bool nocrc,
+ bool tcp_nodelay);
+extern void ceph_messenger_fini(struct ceph_messenger *msgr);
extern void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index deb47e4..81810dc 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -40,7 +40,7 @@ struct ceph_mon_request {
};
/*
- * ceph_mon_generic_request is being used for the statfs, poolop and
+ * ceph_mon_generic_request is being used for the statfs and
* mon_get_version requests which are being done a bit differently
* because we need to get data back to the caller
*/
@@ -50,7 +50,6 @@ struct ceph_mon_generic_request {
struct rb_node node;
int result;
void *buf;
- int buf_len;
struct completion completion;
struct ceph_msg *request; /* original request */
struct ceph_msg *reply; /* and reply */
@@ -117,10 +116,4 @@ extern int ceph_monc_open_session(struct ceph_mon_client *monc);
extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
-extern int ceph_monc_create_snapid(struct ceph_mon_client *monc,
- u32 pool, u64 *snapid);
-
-extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
- u32 pool, u64 snapid);
-
#endif
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 61b19c4..7506b48 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -249,7 +249,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode);
+ unsigned int which, u16 opcode, u32 flags);
extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
unsigned int which,
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 561ea89..e55c08b 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -175,13 +175,12 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
__u8 version;
if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) {
- pr_warning("incomplete pg encoding");
-
+ pr_warn("incomplete pg encoding\n");
return -EINVAL;
}
version = ceph_decode_8(p);
if (version > 1) {
- pr_warning("do not understand pg encoding %d > 1",
+ pr_warn("do not understand pg encoding %d > 1\n",
(int)version);
return -EINVAL;
}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
new file mode 100644
index 0000000..93755a6
--- /dev/null
+++ b/include/linux/cgroup-defs.h
@@ -0,0 +1,501 @@
+/*
+ * linux/cgroup-defs.h - basic definitions for cgroup
+ *
+ * This file provides basic type and interface. Include this file directly
+ * only if necessary to avoid cyclic dependencies.
+ */
+#ifndef _LINUX_CGROUP_DEFS_H
+#define _LINUX_CGROUP_DEFS_H
+
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/percpu-refcount.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/workqueue.h>
+
+#ifdef CONFIG_CGROUPS
+
+struct cgroup;
+struct cgroup_root;
+struct cgroup_subsys;
+struct cgroup_taskset;
+struct kernfs_node;
+struct kernfs_ops;
+struct kernfs_open_file;
+struct seq_file;
+
+#define MAX_CGROUP_TYPE_NAMELEN 32
+#define MAX_CGROUP_ROOT_NAMELEN 64
+#define MAX_CFTYPE_NAME 64
+
+/* define the enumeration of all cgroup subsystems */
+#define SUBSYS(_x) _x ## _cgrp_id,
+enum cgroup_subsys_id {
+#include <linux/cgroup_subsys.h>
+ CGROUP_SUBSYS_COUNT,
+};
+#undef SUBSYS
+
+/* bits in struct cgroup_subsys_state flags field */
+enum {
+ CSS_NO_REF = (1 << 0), /* no reference counting for this css */
+ CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
+ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
+};
+
+/* bits in struct cgroup flags field */
+enum {
+ /* Control Group requires release notifications to userspace */
+ CGRP_NOTIFY_ON_RELEASE,
+ /*
+ * Clone the parent's configuration when creating a new child
+ * cpuset cgroup. For historical reasons, this option can be
+ * specified at mount time and thus is implemented here.
+ */
+ CGRP_CPUSET_CLONE_CHILDREN,
+};
+
+/* cgroup_root->flags */
+enum {
+ CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
+ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
+ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
+};
+
+/* cftype->flags */
+enum {
+ CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
+ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
+ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
+
+ /* internal flags, do not use outside cgroup core proper */
+ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
+ __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+};
+
+/*
+ * Per-subsystem/per-cgroup state maintained by the system. This is the
+ * fundamental structural building block that controllers deal with.
+ *
+ * Fields marked with "PI:" are public and immutable and may be accessed
+ * directly without synchronization.
+ */
+struct cgroup_subsys_state {
+ /* PI: the cgroup that this css is attached to */
+ struct cgroup *cgroup;
+
+ /* PI: the cgroup subsystem that this css is attached to */
+ struct cgroup_subsys *ss;
+
+ /* reference count - access via css_[try]get() and css_put() */
+ struct percpu_ref refcnt;
+
+ /* PI: the parent css */
+ struct cgroup_subsys_state *parent;
+
+ /* siblings list anchored at the parent's ->children */
+ struct list_head sibling;
+ struct list_head children;
+
+ /*
+ * PI: Subsys-unique ID. 0 is unused and root is always 1. The
+ * matching css can be looked up using css_from_id().
+ */
+ int id;
+
+ unsigned int flags;
+
+ /*
+ * Monotonically increasing unique serial number which defines a
+ * uniform order among all csses. It's guaranteed that all
+ * ->children lists are in the ascending order of ->serial_nr and
+ * used to allow interrupting and resuming iterations.
+ */
+ u64 serial_nr;
+
+ /* percpu_ref killing and RCU release */
+ struct rcu_head rcu_head;
+ struct work_struct destroy_work;
+};
+
+/*
+ * A css_set is a structure holding pointers to a set of
+ * cgroup_subsys_state objects. This saves space in the task struct
+ * object and speeds up fork()/exit(), since a single inc/dec and a
+ * list_add()/del() can bump the reference count on the entire cgroup
+ * set for a task.
+ */
+struct css_set {
+ /* Reference count */
+ atomic_t refcount;
+
+ /*
+ * List running through all cgroup groups in the same hash
+ * slot. Protected by css_set_lock
+ */
+ struct hlist_node hlist;
+
+ /*
+ * Lists running through all tasks using this cgroup group.
+ * mg_tasks lists tasks which belong to this cset but are in the
+ * process of being migrated out or in. Protected by
+ * css_set_rwsem, but, during migration, once tasks are moved to
+ * mg_tasks, it can be read safely while holding cgroup_mutex.
+ */
+ struct list_head tasks;
+ struct list_head mg_tasks;
+
+ /*
+ * List of cgrp_cset_links pointing at cgroups referenced from this
+ * css_set. Protected by css_set_lock.
+ */
+ struct list_head cgrp_links;
+
+ /* the default cgroup associated with this css_set */
+ struct cgroup *dfl_cgrp;
+
+ /*
+ * Set of subsystem states, one for each subsystem. This array is
+ * immutable after creation apart from the init_css_set during
+ * subsystem registration (at boot time).
+ */
+ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+ /*
+ * List of csets participating in the on-going migration either as
+ * source or destination. Protected by cgroup_mutex.
+ */
+ struct list_head mg_preload_node;
+ struct list_head mg_node;
+
+ /*
+ * If this cset is acting as the source of migration the following
+ * two fields are set. mg_src_cgrp is the source cgroup of the
+ * on-going migration and mg_dst_cset is the destination cset the
+ * target tasks on this cset should be migrated to. Protected by
+ * cgroup_mutex.
+ */
+ struct cgroup *mg_src_cgrp;
+ struct css_set *mg_dst_cset;
+
+ /*
+ * On the default hierarchy, ->subsys[ssid] may point to a css
+ * attached to an ancestor instead of the cgroup this css_set is
+ * associated with. The following node is anchored at
+ * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
+ * iterate through all css's attached to a given cgroup.
+ */
+ struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
+
+ /* For RCU-protected deletion */
+ struct rcu_head rcu_head;
+};
+
+struct cgroup {
+ /* self css with NULL ->ss, points back to this cgroup */
+ struct cgroup_subsys_state self;
+
+ unsigned long flags; /* "unsigned long" so bitops work */
+
+ /*
+ * idr allocated in-hierarchy ID.
+ *
+ * ID 0 is not used, the ID of the root cgroup is always 1, and a
+ * new cgroup will be assigned the smallest available ID.
+ *
+ * Allocating/Removing ID must be protected by cgroup_mutex.
+ */
+ int id;
+
+ /*
+ * If this cgroup contains any tasks, it contributes one to
+ * populated_cnt. All children with non-zero populated_cnt of
+ * their own contribute one. The count is zero iff there's no task
+ * in this cgroup or its subtree.
+ */
+ int populated_cnt;
+
+ struct kernfs_node *kn; /* cgroup kernfs entry */
+ struct kernfs_node *procs_kn; /* kn for "cgroup.procs" */
+ struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
+
+ /*
+ * The bitmask of subsystems enabled on the child cgroups.
+ * ->subtree_control is the one configured through
+ * "cgroup.subtree_control" while ->child_subsys_mask is the
+ * effective one which may have more subsystems enabled.
+ * Controller knobs are made available iff it's enabled in
+ * ->subtree_control.
+ */
+ unsigned int subtree_control;
+ unsigned int child_subsys_mask;
+
+ /* Private pointers for each registered subsystem */
+ struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+
+ struct cgroup_root *root;
+
+ /*
+ * List of cgrp_cset_links pointing at css_sets with tasks in this
+ * cgroup. Protected by css_set_lock.
+ */
+ struct list_head cset_links;
+
+ /*
+ * On the default hierarchy, a css_set for a cgroup with some
+ * subsys disabled will point to css's which are associated with
+ * the closest ancestor which has the subsys enabled. The
+ * following lists all css_sets which point to this cgroup's css
+ * for the given subsystem.
+ */
+ struct list_head e_csets[CGROUP_SUBSYS_COUNT];
+
+ /*
+ * list of pidlists, up to two for each namespace (one for procs, one
+ * for tasks); created on demand.
+ */
+ struct list_head pidlists;
+ struct mutex pidlist_mutex;
+
+ /* used to wait for offlining of csses */
+ wait_queue_head_t offline_waitq;
+
+ /* used to schedule release agent */
+ struct work_struct release_agent_work;
+};
+
+/*
+ * A cgroup_root represents the root of a cgroup hierarchy, and may be
+ * associated with a kernfs_root to form an active hierarchy. This is
+ * internal to cgroup core. Don't access directly from controllers.
+ */
+struct cgroup_root {
+ struct kernfs_root *kf_root;
+
+ /* The bitmask of subsystems attached to this hierarchy */
+ unsigned int subsys_mask;
+
+ /* Unique id for this hierarchy. */
+ int hierarchy_id;
+
+ /* The root cgroup. Root is destroyed on its release. */
+ struct cgroup cgrp;
+
+ /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
+ atomic_t nr_cgrps;
+
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+
+ /* Hierarchy-specific flags */
+ unsigned int flags;
+
+ /* IDs for cgroups in this hierarchy */
+ struct idr cgroup_idr;
+
+ /* The path to use for release notifications. */
+ char release_agent_path[PATH_MAX];
+
+ /* The name for this hierarchy - may be empty */
+ char name[MAX_CGROUP_ROOT_NAMELEN];
+};
+
+/*
+ * struct cftype: handler definitions for cgroup control files
+ *
+ * When reading/writing to a file:
+ * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
+ * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
+ */
+struct cftype {
+ /*
+ * By convention, the name should begin with the name of the
+ * subsystem, followed by a period. Zero length string indicates
+ * end of cftype array.
+ */
+ char name[MAX_CFTYPE_NAME];
+ int private;
+ /*
+ * If not 0, file mode is set to this value, otherwise it will
+ * be figured out automatically
+ */
+ umode_t mode;
+
+ /*
+ * The maximum length of string, excluding trailing nul, that can
+ * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
+ */
+ size_t max_write_len;
+
+ /* CFTYPE_* flags */
+ unsigned int flags;
+
+ /*
+ * Fields used for internal bookkeeping. Initialized automatically
+ * during registration.
+ */
+ struct cgroup_subsys *ss; /* NULL for cgroup core files */
+ struct list_head node; /* anchored at ss->cfts */
+ struct kernfs_ops *kf_ops;
+
+ /*
+ * read_u64() is a shortcut for the common case of returning a
+ * single integer. Use it in place of read()
+ */
+ u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
+ /*
+ * read_s64() is a signed version of read_u64()
+ */
+ s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
+
+ /* generic seq_file read interface */
+ int (*seq_show)(struct seq_file *sf, void *v);
+
+ /* optional ops, implement all or none */
+ void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
+ void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
+ void (*seq_stop)(struct seq_file *sf, void *v);
+
+ /*
+ * write_u64() is a shortcut for the common case of accepting
+ * a single integer (as parsed by simple_strtoull) from
+ * userspace. Use in place of write(); return 0 or error.
+ */
+ int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val);
+ /*
+ * write_s64() is a signed version of write_u64()
+ */
+ int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
+ s64 val);
+
+ /*
+ * write() is the generic write callback which maps directly to
+ * kernfs write operation and overrides all other operations.
+ * Maximum write size is determined by ->max_write_len. Use
+ * of_css/cft() to access the associated css and cft.
+ */
+ ssize_t (*write)(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lockdep_key;
+#endif
+};
+
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */
+struct cgroup_subsys {
+ struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
+ int (*css_online)(struct cgroup_subsys_state *css);
+ void (*css_offline)(struct cgroup_subsys_state *css);
+ void (*css_released)(struct cgroup_subsys_state *css);
+ void (*css_free)(struct cgroup_subsys_state *css);
+ void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_e_css_changed)(struct cgroup_subsys_state *css);
+
+ int (*can_attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
+ void (*cancel_attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
+ void (*attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
+ void (*fork)(struct task_struct *task);
+ void (*exit)(struct cgroup_subsys_state *css,
+ struct cgroup_subsys_state *old_css,
+ struct task_struct *task);
+ void (*bind)(struct cgroup_subsys_state *root_css);
+
+ int disabled;
+ int early_init;
+
+ /*
+ * If %false, this subsystem is properly hierarchical -
+ * configuration, resource accounting and restriction on a parent
+ * cgroup cover those of its children. If %true, hierarchy support
+ * is broken in some ways - some subsystems ignore hierarchy
+ * completely while others are only implemented half-way.
+ *
+ * It's now disallowed to create nested cgroups if the subsystem is
+ * broken and cgroup core will emit a warning message on such
+ * cases. Eventually, all subsystems will be made properly
+ * hierarchical and this will go away.
+ */
+ bool broken_hierarchy;
+ bool warned_broken_hierarchy;
+
+ /* the following two fields are initialized automatically during boot */
+ int id;
+ const char *name;
+
+ /* link to parent, protected by cgroup_lock() */
+ struct cgroup_root *root;
+
+ /* idr for css->id */
+ struct idr css_idr;
+
+ /*
+ * List of cftypes. Each entry is the first entry of an array
+ * terminated by zero length name.
+ */
+ struct list_head cfts;
+
+ /*
+ * Base cftypes which are automatically registered. The two can
+ * point to the same array.
+ */
+ struct cftype *dfl_cftypes; /* for the default hierarchy */
+ struct cftype *legacy_cftypes; /* for the legacy hierarchies */
+
+ /*
+ * A subsystem may depend on other subsystems. When such subsystem
+ * is enabled on a cgroup, the depended-upon subsystems are enabled
+ * together if available. Subsystems enabled due to dependency are
+ * not visible to userland until explicitly enabled. The following
+ * specifies the mask of subsystems that this one depends on.
+ */
+ unsigned int depends_on;
+};
+
+extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+
+/**
+ * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_begin() and allows cgroup operations to
+ * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ */
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+ percpu_down_read(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_end(). Counterpart of
+ * cgroup_threadgroup_change_begin().
+ */
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+ percpu_up_read(&cgroup_threadgroup_rwsem);
+}
+
+#else /* CONFIG_CGROUPS */
+
+#define CGROUP_SUBSYS_COUNT 0
+
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
+
+#endif /* CONFIG_CGROUPS */
+
+#endif /* _LINUX_CGROUP_DEFS_H */
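
A sketch of how a controller might describe its files with the cftype callbacks above; the names and values are hypothetical, and the array is terminated by a zero-length name as the structure comment requires. Such an array would then be hooked up through the dfl_cftypes/legacy_cftypes pointers of struct cgroup_subsys, or registered with the cgroup_add_*_cftypes() helpers declared in cgroup.h:

static u64 my_weight_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return 100;				/* hypothetical per-cgroup value */
}

static int my_weight_write(struct cgroup_subsys_state *css, struct cftype *cft,
			   u64 val)
{
	return val ? 0 : -EINVAL;		/* hypothetical validation only */
}

static struct cftype my_files[] = {
	{
		.name		= "weight",
		.read_u64	= my_weight_read,
		.write_u64	= my_weight_write,
	},
	{ }	/* zero-length name terminates the array */
};
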
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index da0dae0..a593e29 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -11,94 +11,200 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
-#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/rwsem.h>
-#include <linux/idr.h>
-#include <linux/workqueue.h>
#include <linux/fs.h>
-#include <linux/percpu-refcount.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
-#include <linux/wait.h>
+
+#include <linux/cgroup-defs.h>
#ifdef CONFIG_CGROUPS
-struct cgroup_root;
-struct cgroup_subsys;
-struct cgroup;
+/* a css_task_iter should be treated as an opaque object */
+struct css_task_iter {
+ struct cgroup_subsys *ss;
-extern int cgroup_init_early(void);
-extern int cgroup_init(void);
-extern void cgroup_fork(struct task_struct *p);
-extern void cgroup_post_fork(struct task_struct *p);
-extern void cgroup_exit(struct task_struct *p);
-extern int cgroupstats_build(struct cgroupstats *stats,
- struct dentry *dentry);
+ struct list_head *cset_pos;
+ struct list_head *cset_head;
-extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *tsk);
+ struct list_head *task_pos;
+ struct list_head *tasks_head;
+ struct list_head *mg_tasks_head;
+};
-/* define the enumeration of all cgroup subsystems */
-#define SUBSYS(_x) _x ## _cgrp_id,
-enum cgroup_subsys_id {
+extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
+
+#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
- CGROUP_SUBSYS_COUNT,
-};
#undef SUBSYS
+bool css_has_online_children(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
+struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
+ struct cgroup_subsys *ss);
+
+bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_rm_cftypes(struct cftype *cfts);
+
+char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
+int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk);
+
+void cgroup_fork(struct task_struct *p);
+void cgroup_post_fork(struct task_struct *p);
+void cgroup_exit(struct task_struct *p);
+
+int cgroup_init_early(void);
+int cgroup_init(void);
+
/*
- * Per-subsystem/per-cgroup state maintained by the system. This is the
- * fundamental structural building block that controllers deal with.
+ * Iteration helpers and macros.
+ */
+
+struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *parent);
+struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
+struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *css);
+
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+
+void css_task_iter_start(struct cgroup_subsys_state *css,
+ struct css_task_iter *it);
+struct task_struct *css_task_iter_next(struct css_task_iter *it);
+void css_task_iter_end(struct css_task_iter *it);
+
+/**
+ * css_for_each_child - iterate through children of a css
+ * @pos: the css * to use as the loop cursor
+ * @parent: css whose children to walk
*
- * Fields marked with "PI:" are public and immutable and may be accessed
- * directly without synchronization.
+ * Walk @parent's children. Must be called under rcu_read_lock().
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
*/
-struct cgroup_subsys_state {
- /* PI: the cgroup that this css is attached to */
- struct cgroup *cgroup;
-
- /* PI: the cgroup subsystem that this css is attached to */
- struct cgroup_subsys *ss;
-
- /* reference count - access via css_[try]get() and css_put() */
- struct percpu_ref refcnt;
-
- /* PI: the parent css */
- struct cgroup_subsys_state *parent;
-
- /* siblings list anchored at the parent's ->children */
- struct list_head sibling;
- struct list_head children;
-
- /*
- * PI: Subsys-unique ID. 0 is unused and root is always 1. The
- * matching css can be looked up using css_from_id().
- */
- int id;
-
- unsigned int flags;
-
- /*
- * Monotonically increasing unique serial number which defines a
- * uniform order among all csses. It's guaranteed that all
- * ->children lists are in the ascending order of ->serial_nr and
- * used to allow interrupting and resuming iterations.
- */
- u64 serial_nr;
-
- /* percpu_ref killing and RCU release */
- struct rcu_head rcu_head;
- struct work_struct destroy_work;
-};
+#define css_for_each_child(pos, parent) \
+ for ((pos) = css_next_child(NULL, (parent)); (pos); \
+ (pos) = css_next_child((pos), (parent)))
-/* bits in struct cgroup_subsys_state flags field */
-enum {
- CSS_NO_REF = (1 << 0), /* no reference counting for this css */
- CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
- CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
-};
+/**
+ * css_for_each_descendant_pre - pre-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @root: css whose descendants to walk
+ *
+ * Walk @root's descendants. @root is included in the iteration and the
+ * first node to be visited. Must be called under rcu_read_lock().
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * For example, the following guarantees that a descendant can't escape
+ * state updates of its ancestors.
+ *
+ * my_online(@css)
+ * {
+ * Lock @css's parent and @css;
+ * Inherit state from the parent;
+ * Unlock both.
+ * }
+ *
+ * my_update_state(@css)
+ * {
+ * css_for_each_descendant_pre(@pos, @css) {
+ * Lock @pos;
+ * if (@pos == @css)
+ * Update @css's state;
+ * else
+ * Verify @pos is alive and inherit state from its parent;
+ * Unlock @pos;
+ * }
+ * }
+ *
+ * As long as the inheriting step, including checking the parent state, is
+ * enclosed inside @pos locking, double-locking the parent isn't necessary
+ * while inheriting. The state update to the parent is guaranteed to be
+ * visible by walking order and, as long as inheriting operations to the
+ * same @pos are atomic to each other, multiple updates racing each other
+ * still result in the correct state. It's guaranteed that at least one
+ * inheritance happens for any css after the latest update to its parent.
+ *
+ * If checking parent's state requires locking the parent, each inheriting
+ * iteration should lock and unlock both @pos->parent and @pos.
+ *
+ * Alternatively, a subsystem may choose to use a single global lock to
+ * synchronize ->css_online() and ->css_offline() against tree-walking
+ * operations.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
+ */
+#define css_for_each_descendant_pre(pos, css) \
+ for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
+ (pos) = css_next_descendant_pre((pos), (css)))
+
+/**
+ * css_for_each_descendant_post - post-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @css: css whose descendants to walk
+ *
+ * Similar to css_for_each_descendant_pre() but performs post-order
+ * traversal instead. @root is included in the iteration and the last
+ * node to be visited.
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * Note that the walk visibility guarantee example described in pre-order
+ * walk doesn't apply the same to post-order walks.
+ */
+#define css_for_each_descendant_post(pos, css) \
+ for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
+ (pos) = css_next_descendant_post((pos), (css)))
+
+/**
+ * cgroup_taskset_for_each - iterate cgroup_taskset
+ * @task: the loop cursor
+ * @tset: taskset to iterate
+ */
+#define cgroup_taskset_for_each(task, tset) \
+ for ((task) = cgroup_taskset_first((tset)); (task); \
+ (task) = cgroup_taskset_next((tset)))
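
A sketch of the taskset iterator inside a ->can_attach() callback, rejecting kernel threads purely as a hypothetical policy:

static int my_can_attach(struct cgroup_subsys_state *css,
			 struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset) {
		if (task->flags & PF_KTHREAD)	/* hypothetical restriction */
			return -EINVAL;
	}
	return 0;
}
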
+
+/*
+ * Inline functions.
+ */
/**
* css_get - obtain a reference on the specified css
@@ -185,309 +291,112 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
percpu_ref_put_many(&css->refcnt, n);
}
-/* bits in struct cgroup flags field */
-enum {
- /* Control Group requires release notifications to userspace */
- CGRP_NOTIFY_ON_RELEASE,
- /*
- * Clone the parent's configuration when creating a new child
- * cpuset cgroup. For historical reasons, this option can be
- * specified at mount time and thus is implemented here.
- */
- CGRP_CPUSET_CLONE_CHILDREN,
-};
-
-struct cgroup {
- /* self css with NULL ->ss, points back to this cgroup */
- struct cgroup_subsys_state self;
-
- unsigned long flags; /* "unsigned long" so bitops work */
-
- /*
- * idr allocated in-hierarchy ID.
- *
- * ID 0 is not used, the ID of the root cgroup is always 1, and a
- * new cgroup will be assigned with a smallest available ID.
- *
- * Allocating/Removing ID must be protected by cgroup_mutex.
- */
- int id;
-
- /*
- * If this cgroup contains any tasks, it contributes one to
- * populated_cnt. All children with non-zero popuplated_cnt of
- * their own contribute one. The count is zero iff there's no task
- * in this cgroup or its subtree.
- */
- int populated_cnt;
-
- struct kernfs_node *kn; /* cgroup kernfs entry */
- struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
-
- /*
- * The bitmask of subsystems enabled on the child cgroups.
- * ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_subsys_mask is the
- * effective one which may have more subsystems enabled.
- * Controller knobs are made available iff it's enabled in
- * ->subtree_control.
- */
- unsigned int subtree_control;
- unsigned int child_subsys_mask;
-
- /* Private pointers for each registered subsystem */
- struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
-
- struct cgroup_root *root;
-
- /*
- * List of cgrp_cset_links pointing at css_sets with tasks in this
- * cgroup. Protected by css_set_lock.
- */
- struct list_head cset_links;
-
- /*
- * On the default hierarchy, a css_set for a cgroup with some
- * susbsys disabled will point to css's which are associated with
- * the closest ancestor which has the subsys enabled. The
- * following lists all css_sets which point to this cgroup's css
- * for the given subsystem.
- */
- struct list_head e_csets[CGROUP_SUBSYS_COUNT];
-
- /*
- * list of pidlists, up to two for each namespace (one for procs, one
- * for tasks); created on demand.
- */
- struct list_head pidlists;
- struct mutex pidlist_mutex;
-
- /* used to wait for offlining of csses */
- wait_queue_head_t offline_waitq;
-
- /* used to schedule release agent */
- struct work_struct release_agent_work;
-};
-
-#define MAX_CGROUP_ROOT_NAMELEN 64
-
-/* cgroup_root->flags */
-enum {
- CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
- CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
- CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
-};
-
-/*
- * A cgroup_root represents the root of a cgroup hierarchy, and may be
- * associated with a kernfs_root to form an active hierarchy. This is
- * internal to cgroup core. Don't access directly from controllers.
+/**
+ * task_css_set_check - obtain a task's css_set with extra access conditions
+ * @task: the task to obtain css_set for
+ * @__c: extra condition expression to be passed to rcu_dereference_check()
+ *
+ * A task's css_set is RCU protected, initialized and exited while holding
+ * task_lock(), and can only be modified while holding both cgroup_mutex
+ * and task_lock() while the task is alive. This macro verifies that the
+ * caller is inside proper critical section and returns @task's css_set.
+ *
+ * The caller can also specify additional allowed conditions via @__c, such
+ * as locks used during the cgroup_subsys::attach() methods.
*/
-struct cgroup_root {
- struct kernfs_root *kf_root;
-
- /* The bitmask of subsystems attached to this hierarchy */
- unsigned int subsys_mask;
-
- /* Unique id for this hierarchy. */
- int hierarchy_id;
-
- /* The root cgroup. Root is destroyed on its release. */
- struct cgroup cgrp;
-
- /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
- atomic_t nr_cgrps;
-
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
- /* Hierarchy-specific flags */
- unsigned int flags;
-
- /* IDs for cgroups in this hierarchy */
- struct idr cgroup_idr;
-
- /* The path to use for release notifications. */
- char release_agent_path[PATH_MAX];
-
- /* The name for this hierarchy - may be empty */
- char name[MAX_CGROUP_ROOT_NAMELEN];
-};
+#ifdef CONFIG_PROVE_RCU
+extern struct mutex cgroup_mutex;
+extern struct rw_semaphore css_set_rwsem;
+#define task_css_set_check(task, __c) \
+ rcu_dereference_check((task)->cgroups, \
+ lockdep_is_held(&cgroup_mutex) || \
+ lockdep_is_held(&css_set_rwsem) || \
+ ((task)->flags & PF_EXITING) || (__c))
+#else
+#define task_css_set_check(task, __c) \
+ rcu_dereference((task)->cgroups)
+#endif
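
As a sketch of the @__c hook: a controller that serializes ->attach() with its own lock could pass that lock's lockdep expression as the extra condition. my_attach_mutex is an assumption for the example, not part of this patch:

	static struct css_set *my_cur_css_set(struct task_struct *task)
	{
		/* also legal while only my_attach_mutex is held */
		return task_css_set_check(task,
					  lockdep_is_held(&my_attach_mutex));
	}
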
-/*
- * A css_set is a structure holding pointers to a set of
- * cgroup_subsys_state objects. This saves space in the task struct
- * object and speeds up fork()/exit(), since a single inc/dec and a
- * list_add()/del() can bump the reference count on the entire cgroup
- * set for a task.
+/**
+ * task_css_check - obtain css for (task, subsys) w/ extra access conds
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ * @__c: extra condition expression to be passed to rcu_dereference_check()
+ *
+ * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
+ * synchronization rules are the same as task_css_set_check().
*/
+#define task_css_check(task, subsys_id, __c) \
+ task_css_set_check((task), (__c))->subsys[(subsys_id)]
-struct css_set {
-
- /* Reference count */
- atomic_t refcount;
-
- /*
- * List running through all cgroup groups in the same hash
- * slot. Protected by css_set_lock
- */
- struct hlist_node hlist;
-
- /*
- * Lists running through all tasks using this cgroup group.
- * mg_tasks lists tasks which belong to this cset but are in the
- * process of being migrated out or in. Protected by
- * css_set_rwsem, but, during migration, once tasks are moved to
- * mg_tasks, it can be read safely while holding cgroup_mutex.
- */
- struct list_head tasks;
- struct list_head mg_tasks;
-
- /*
- * List of cgrp_cset_links pointing at cgroups referenced from this
- * css_set. Protected by css_set_lock.
- */
- struct list_head cgrp_links;
-
- /* the default cgroup associated with this css_set */
- struct cgroup *dfl_cgrp;
-
- /*
- * Set of subsystem states, one for each subsystem. This array is
- * immutable after creation apart from the init_css_set during
- * subsystem registration (at boot time).
- */
- struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
-
- /*
- * List of csets participating in the on-going migration either as
- * source or destination. Protected by cgroup_mutex.
- */
- struct list_head mg_preload_node;
- struct list_head mg_node;
-
- /*
- * If this cset is acting as the source of migration the following
- * two fields are set. mg_src_cgrp is the source cgroup of the
- * on-going migration and mg_dst_cset is the destination cset the
- * target tasks on this cset should be migrated to. Protected by
- * cgroup_mutex.
- */
- struct cgroup *mg_src_cgrp;
- struct css_set *mg_dst_cset;
-
- /*
- * On the default hierarchy, ->subsys[ssid] may point to a css
- * attached to an ancestor instead of the cgroup this css_set is
- * associated with. The following node is anchored at
- * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
- * iterate through all css's attached to a given cgroup.
- */
- struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
-
- /* For RCU-protected deletion */
- struct rcu_head rcu_head;
-};
-
-/*
- * struct cftype: handler definitions for cgroup control files
+/**
+ * task_css_set - obtain a task's css_set
+ * @task: the task to obtain css_set for
*
- * When reading/writing to a file:
- * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
- * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
+ * See task_css_set_check().
*/
+static inline struct css_set *task_css_set(struct task_struct *task)
+{
+ return task_css_set_check(task, false);
+}
-/* cftype->flags */
-enum {
- CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
- CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
- CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
+/**
+ * task_css - obtain css for (task, subsys)
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * See task_css_check().
+ */
+static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
+ int subsys_id)
+{
+ return task_css_check(task, subsys_id, false);
+}
- /* internal flags, do not use outside cgroup core proper */
- __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
- __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
-};
+/**
+ * task_get_css - find and get the css for (task, subsys)
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Find the css for the (@task, @subsys_id) combination, increment a
+ * reference on and return it. This function is guaranteed to return a
+ * valid css.
+ */
+static inline struct cgroup_subsys_state *
+task_get_css(struct task_struct *task, int subsys_id)
+{
+ struct cgroup_subsys_state *css;
+
+ rcu_read_lock();
+ while (true) {
+ css = task_css(task, subsys_id);
+ if (likely(css_tryget_online(css)))
+ break;
+ cpu_relax();
+ }
+ rcu_read_unlock();
+ return css;
+}
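
A hedged usage sketch: the css returned by task_get_css() carries a reference that the caller must drop with css_put(); my_subsys_id and my_account() are placeholders:

	static void my_account_task(struct task_struct *task)
	{
		struct cgroup_subsys_state *css;

		css = task_get_css(task, my_subsys_id);	/* never NULL */
		my_account(css);
		css_put(css);		/* balance the tryget taken above */
	}
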
-#define MAX_CFTYPE_NAME 64
-
-struct cftype {
- /*
- * By convention, the name should begin with the name of the
- * subsystem, followed by a period. Zero length string indicates
- * end of cftype array.
- */
- char name[MAX_CFTYPE_NAME];
- int private;
- /*
- * If not 0, file mode is set to this value, otherwise it will
- * be figured out automatically
- */
- umode_t mode;
-
- /*
- * The maximum length of string, excluding trailing nul, that can
- * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
- */
- size_t max_write_len;
-
- /* CFTYPE_* flags */
- unsigned int flags;
-
- /*
- * Fields used for internal bookkeeping. Initialized automatically
- * during registration.
- */
- struct cgroup_subsys *ss; /* NULL for cgroup core files */
- struct list_head node; /* anchored at ss->cfts */
- struct kernfs_ops *kf_ops;
-
- /*
- * read_u64() is a shortcut for the common case of returning a
- * single integer. Use it in place of read()
- */
- u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
- /*
- * read_s64() is a signed version of read_u64()
- */
- s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
-
- /* generic seq_file read interface */
- int (*seq_show)(struct seq_file *sf, void *v);
-
- /* optional ops, implement all or none */
- void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
- void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
- void (*seq_stop)(struct seq_file *sf, void *v);
-
- /*
- * write_u64() is a shortcut for the common case of accepting
- * a single integer (as parsed by simple_strtoull) from
- * userspace. Use in place of write(); return 0 or error.
- */
- int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 val);
- /*
- * write_s64() is a signed version of write_u64()
- */
- int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
- s64 val);
-
- /*
- * write() is the generic write callback which maps directly to
- * kernfs write operation and overrides all other operations.
- * Maximum write size is determined by ->max_write_len. Use
- * of_css/cft() to access the associated css and cft.
- */
- ssize_t (*write)(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lockdep_key;
-#endif
-};
+/**
+ * task_css_is_root - test whether a task belongs to the root css
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Test whether @task belongs to the root css on the specified subsystem.
+ * May be invoked in any context.
+ */
+static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
+{
+ return task_css_check(task, subsys_id, true) ==
+ init_css_set.subsys[subsys_id];
+}
-extern struct cgroup_root cgrp_dfl_root;
-extern struct css_set init_css_set;
+static inline struct cgroup *task_cgroup(struct task_struct *task,
+ int subsys_id)
+{
+ return task_css(task, subsys_id)->cgroup;
+}
/**
* cgroup_on_dfl - test whether a cgroup is on the default hierarchy
@@ -604,363 +513,22 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
-
-int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
-int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
-int cgroup_rm_cftypes(struct cftype *cfts);
-
-bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
-
-/*
- * Control Group taskset, used to pass around set of tasks to cgroup_subsys
- * methods.
- */
-struct cgroup_taskset;
-struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
-struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
-
-/**
- * cgroup_taskset_for_each - iterate cgroup_taskset
- * @task: the loop cursor
- * @tset: taskset to iterate
- */
-#define cgroup_taskset_for_each(task, tset) \
- for ((task) = cgroup_taskset_first((tset)); (task); \
- (task) = cgroup_taskset_next((tset)))
-
-/*
- * Control Group subsystem type.
- * See Documentation/cgroups/cgroups.txt for details
- */
-
-struct cgroup_subsys {
- struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
- int (*css_online)(struct cgroup_subsys_state *css);
- void (*css_offline)(struct cgroup_subsys_state *css);
- void (*css_released)(struct cgroup_subsys_state *css);
- void (*css_free)(struct cgroup_subsys_state *css);
- void (*css_reset)(struct cgroup_subsys_state *css);
- void (*css_e_css_changed)(struct cgroup_subsys_state *css);
-
- int (*can_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*cancel_attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*attach)(struct cgroup_subsys_state *css,
- struct cgroup_taskset *tset);
- void (*fork)(struct task_struct *task);
- void (*exit)(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task);
- void (*bind)(struct cgroup_subsys_state *root_css);
-
- int disabled;
- int early_init;
-
- /*
- * If %false, this subsystem is properly hierarchical -
- * configuration, resource accounting and restriction on a parent
- * cgroup cover those of its children. If %true, hierarchy support
- * is broken in some ways - some subsystems ignore hierarchy
- * completely while others are only implemented half-way.
- *
- * It's now disallowed to create nested cgroups if the subsystem is
- * broken and cgroup core will emit a warning message on such
- * cases. Eventually, all subsystems will be made properly
- * hierarchical and this will go away.
- */
- bool broken_hierarchy;
- bool warned_broken_hierarchy;
-
- /* the following two fields are initialized automatically during boot */
- int id;
-#define MAX_CGROUP_TYPE_NAMELEN 32
- const char *name;
-
- /* link to parent, protected by cgroup_lock() */
- struct cgroup_root *root;
-
- /* idr for css->id */
- struct idr css_idr;
-
- /*
- * List of cftypes. Each entry is the first entry of an array
- * terminated by zero length name.
- */
- struct list_head cfts;
-
- /*
- * Base cftypes which are automatically registered. The two can
- * point to the same array.
- */
- struct cftype *dfl_cftypes; /* for the default hierarchy */
- struct cftype *legacy_cftypes; /* for the legacy hierarchies */
-
- /*
- * A subsystem may depend on other subsystems. When such subsystem
- * is enabled on a cgroup, the depended-upon subsystems are enabled
- * together if available. Subsystems enabled due to dependency are
- * not visible to userland until explicitly enabled. The following
- * specifies the mask of subsystems that this one depends on.
- */
- unsigned int depends_on;
-};
-
-#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
-#include <linux/cgroup_subsys.h>
-#undef SUBSYS
-
-/**
- * task_css_set_check - obtain a task's css_set with extra access conditions
- * @task: the task to obtain css_set for
- * @__c: extra condition expression to be passed to rcu_dereference_check()
- *
- * A task's css_set is RCU protected, initialized and exited while holding
- * task_lock(), and can only be modified while holding both cgroup_mutex
- * and task_lock() while the task is alive. This macro verifies that the
- * caller is inside proper critical section and returns @task's css_set.
- *
- * The caller can also specify additional allowed conditions via @__c, such
- * as locks used during the cgroup_subsys::attach() methods.
- */
-#ifdef CONFIG_PROVE_RCU
-extern struct mutex cgroup_mutex;
-extern struct rw_semaphore css_set_rwsem;
-#define task_css_set_check(task, __c) \
- rcu_dereference_check((task)->cgroups, \
- lockdep_is_held(&cgroup_mutex) || \
- lockdep_is_held(&css_set_rwsem) || \
- ((task)->flags & PF_EXITING) || (__c))
-#else
-#define task_css_set_check(task, __c) \
- rcu_dereference((task)->cgroups)
-#endif
-
-/**
- * task_css_check - obtain css for (task, subsys) w/ extra access conds
- * @task: the target task
- * @subsys_id: the target subsystem ID
- * @__c: extra condition expression to be passed to rcu_dereference_check()
- *
- * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
- * synchronization rules are the same as task_css_set_check().
- */
-#define task_css_check(task, subsys_id, __c) \
- task_css_set_check((task), (__c))->subsys[(subsys_id)]
-
-/**
- * task_css_set - obtain a task's css_set
- * @task: the task to obtain css_set for
- *
- * See task_css_set_check().
- */
-static inline struct css_set *task_css_set(struct task_struct *task)
-{
- return task_css_set_check(task, false);
-}
-
-/**
- * task_css - obtain css for (task, subsys)
- * @task: the target task
- * @subsys_id: the target subsystem ID
- *
- * See task_css_check().
- */
-static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
- int subsys_id)
-{
- return task_css_check(task, subsys_id, false);
-}
-
-/**
- * task_css_is_root - test whether a task belongs to the root css
- * @task: the target task
- * @subsys_id: the target subsystem ID
- *
- * Test whether @task belongs to the root css on the specified subsystem.
- * May be invoked in any context.
- */
-static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
-{
- return task_css_check(task, subsys_id, true) ==
- init_css_set.subsys[subsys_id];
-}
-
-static inline struct cgroup *task_cgroup(struct task_struct *task,
- int subsys_id)
-{
- return task_css(task, subsys_id)->cgroup;
-}
-
-struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
- struct cgroup_subsys_state *parent);
-
-struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
-
-/**
- * css_for_each_child - iterate through children of a css
- * @pos: the css * to use as the loop cursor
- * @parent: css whose children to walk
- *
- * Walk @parent's children. Must be called under rcu_read_lock().
- *
- * If a subsystem synchronizes ->css_online() and the start of iteration, a
- * css which finished ->css_online() is guaranteed to be visible in the
- * future iterations and will stay visible until the last reference is put.
- * A css which hasn't finished ->css_online() or already finished
- * ->css_offline() may show up during traversal. It's each subsystem's
- * responsibility to synchronize against on/offlining.
- *
- * It is allowed to temporarily drop RCU read lock during iteration. The
- * caller is responsible for ensuring that @pos remains accessible until
- * the start of the next iteration by, for example, bumping the css refcnt.
- */
-#define css_for_each_child(pos, parent) \
- for ((pos) = css_next_child(NULL, (parent)); (pos); \
- (pos) = css_next_child((pos), (parent)))
-
-struct cgroup_subsys_state *
-css_next_descendant_pre(struct cgroup_subsys_state *pos,
- struct cgroup_subsys_state *css);
-
-struct cgroup_subsys_state *
-css_rightmost_descendant(struct cgroup_subsys_state *pos);
-
-/**
- * css_for_each_descendant_pre - pre-order walk of a css's descendants
- * @pos: the css * to use as the loop cursor
- * @root: css whose descendants to walk
- *
- * Walk @root's descendants. @root is included in the iteration and the
- * first node to be visited. Must be called under rcu_read_lock().
- *
- * If a subsystem synchronizes ->css_online() and the start of iteration, a
- * css which finished ->css_online() is guaranteed to be visible in the
- * future iterations and will stay visible until the last reference is put.
- * A css which hasn't finished ->css_online() or already finished
- * ->css_offline() may show up during traversal. It's each subsystem's
- * responsibility to synchronize against on/offlining.
- *
- * For example, the following guarantees that a descendant can't escape
- * state updates of its ancestors.
- *
- * my_online(@css)
- * {
- * Lock @css's parent and @css;
- * Inherit state from the parent;
- * Unlock both.
- * }
- *
- * my_update_state(@css)
- * {
- * css_for_each_descendant_pre(@pos, @css) {
- * Lock @pos;
- * if (@pos == @css)
- * Update @css's state;
- * else
- * Verify @pos is alive and inherit state from its parent;
- * Unlock @pos;
- * }
- * }
- *
- * As long as the inheriting step, including checking the parent state, is
- * enclosed inside @pos locking, double-locking the parent isn't necessary
- * while inheriting. The state update to the parent is guaranteed to be
- * visible by walking order and, as long as inheriting operations to the
- * same @pos are atomic to each other, multiple updates racing each other
- * still result in the correct state. It's guaranteed that at least one
- * inheritance happens for any css after the latest update to its parent.
- *
- * If checking parent's state requires locking the parent, each inheriting
- * iteration should lock and unlock both @pos->parent and @pos.
- *
- * Alternatively, a subsystem may choose to use a single global lock to
- * synchronize ->css_online() and ->css_offline() against tree-walking
- * operations.
- *
- * It is allowed to temporarily drop RCU read lock during iteration. The
- * caller is responsible for ensuring that @pos remains accessible until
- * the start of the next iteration by, for example, bumping the css refcnt.
- */
-#define css_for_each_descendant_pre(pos, css) \
- for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
- (pos) = css_next_descendant_pre((pos), (css)))
-
-struct cgroup_subsys_state *
-css_next_descendant_post(struct cgroup_subsys_state *pos,
- struct cgroup_subsys_state *css);
-
-/**
- * css_for_each_descendant_post - post-order walk of a css's descendants
- * @pos: the css * to use as the loop cursor
- * @css: css whose descendants to walk
- *
- * Similar to css_for_each_descendant_pre() but performs post-order
- * traversal instead. @root is included in the iteration and the last
- * node to be visited.
- *
- * If a subsystem synchronizes ->css_online() and the start of iteration, a
- * css which finished ->css_online() is guaranteed to be visible in the
- * future iterations and will stay visible until the last reference is put.
- * A css which hasn't finished ->css_online() or already finished
- * ->css_offline() may show up during traversal. It's each subsystem's
- * responsibility to synchronize against on/offlining.
- *
- * Note that the walk visibility guarantee example described in pre-order
- * walk doesn't apply the same to post-order walks.
- */
-#define css_for_each_descendant_post(pos, css) \
- for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
- (pos) = css_next_descendant_post((pos), (css)))
-
-bool css_has_online_children(struct cgroup_subsys_state *css);
-
-/* A css_task_iter should be treated as an opaque object */
-struct css_task_iter {
- struct cgroup_subsys *ss;
-
- struct list_head *cset_pos;
- struct list_head *cset_head;
-
- struct list_head *task_pos;
- struct list_head *tasks_head;
- struct list_head *mg_tasks_head;
-};
-
-void css_task_iter_start(struct cgroup_subsys_state *css,
- struct css_task_iter *it);
-struct task_struct *css_task_iter_next(struct css_task_iter *it);
-void css_task_iter_end(struct css_task_iter *it);
+#else /* !CONFIG_CGROUPS */
-int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
-int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+struct cgroup_subsys_state;
-struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
- struct cgroup_subsys *ss);
-struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
- struct cgroup_subsys *ss);
-
-#else /* !CONFIG_CGROUPS */
+static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline int cgroup_attach_task_all(struct task_struct *from,
+ struct task_struct *t) { return 0; }
+static inline int cgroupstats_build(struct cgroupstats *stats,
+ struct dentry *dentry) { return -EINVAL; }
-static inline int cgroup_init_early(void) { return 0; }
-static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
-static inline int cgroupstats_build(struct cgroupstats *stats,
- struct dentry *dentry)
-{
- return -EINVAL;
-}
-
-/* No cgroups - nothing to do */
-static inline int cgroup_attach_task_all(struct task_struct *from,
- struct task_struct *t)
-{
- return 0;
-}
+static inline int cgroup_init_early(void) { return 0; }
+static inline int cgroup_init(void) { return 0; }
#endif /* !CONFIG_CGROUPS */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 98c4f9b..e4a96fb 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -15,6 +15,10 @@ SUBSYS(cpu)
SUBSYS(cpuacct)
#endif
+#if IS_ENABLED(CONFIG_BLK_CGROUP)
+SUBSYS(blkio)
+#endif
+
#if IS_ENABLED(CONFIG_MEMCG)
SUBSYS(memory)
#endif
@@ -31,10 +35,6 @@ SUBSYS(freezer)
SUBSYS(net_cls)
#endif
-#if IS_ENABLED(CONFIG_BLK_CGROUP)
-SUBSYS(blkio)
-#endif
-
#if IS_ENABLED(CONFIG_CGROUP_PERF)
SUBSYS(perf_event)
#endif
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index 4ce9056..bda5ec0b4 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -5,6 +5,10 @@
#include <linux/exportfs.h>
#include <linux/mm.h>
+#define CLEANCACHE_NO_POOL -1
+#define CLEANCACHE_NO_BACKEND -2
+#define CLEANCACHE_NO_BACKEND_SHARED -3
+
#define CLEANCACHE_KEY_MAX 6
/*
@@ -33,10 +37,9 @@ struct cleancache_ops {
void (*invalidate_fs)(int);
};
-extern struct cleancache_ops *
- cleancache_register_ops(struct cleancache_ops *ops);
+extern int cleancache_register_ops(struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(char *, struct super_block *);
+extern void __cleancache_init_shared_fs(struct super_block *);
extern int __cleancache_get_page(struct page *);
extern void __cleancache_put_page(struct page *);
extern void __cleancache_invalidate_page(struct address_space *, struct page *);
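
Since registration now returns an int rather than the previous ops pointer, a backend would check the result directly. A sketch, assuming a hypothetical my_cleancache_ops:

	static int __init my_backend_init(void)
	{
		int ret = cleancache_register_ops(&my_cleancache_ops);

		if (ret)
			pr_err("my_backend: cleancache registration failed: %d\n",
			       ret);
		return ret;
	}
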
@@ -78,10 +81,10 @@ static inline void cleancache_init_fs(struct super_block *sb)
__cleancache_init_fs(sb);
}
-static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb)
+static inline void cleancache_init_shared_fs(struct super_block *sb)
{
if (cleancache_enabled)
- __cleancache_init_shared_fs(uuid, sb);
+ __cleancache_init_shared_fs(sb);
}
static inline int cleancache_get_page(struct page *page)
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
deleted file mode 100644
index 0ca5f60..0000000
--- a/include/linux/clk-private.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * linux/include/linux/clk-private.h
- *
- * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
- * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __LINUX_CLK_PRIVATE_H
-#define __LINUX_CLK_PRIVATE_H
-
-#include <linux/clk-provider.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-
-/*
- * WARNING: Do not include clk-private.h from any file that implements struct
- * clk_ops. Doing so is a layering violation!
- *
- * This header exists only to allow for statically initialized clock data. Any
- * static clock data must be defined in a separate file from the logic that
- * implements the clock operations for that same data.
- */
-
-#ifdef CONFIG_COMMON_CLK
-
-struct module;
-
-struct clk {
- const char *name;
- const struct clk_ops *ops;
- struct clk_hw *hw;
- struct module *owner;
- struct clk *parent;
- const char **parent_names;
- struct clk **parents;
- u8 num_parents;
- u8 new_parent_index;
- unsigned long rate;
- unsigned long new_rate;
- struct clk *new_parent;
- struct clk *new_child;
- unsigned long flags;
- unsigned int enable_count;
- unsigned int prepare_count;
- unsigned long accuracy;
- int phase;
- struct hlist_head children;
- struct hlist_node child_node;
- struct hlist_node debug_node;
- unsigned int notifier_count;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *dentry;
-#endif
- struct kref ref;
-};
-
-/*
- * DOC: Basic clock implementations common to many platforms
- *
- * Each basic clock hardware type is comprised of a structure describing the
- * clock hardware, implementations of the relevant callbacks in struct clk_ops,
- * unique flags for that hardware type, a registration function and an
- * alternative macro for static initialization
- */
-
-#define DEFINE_CLK(_name, _ops, _flags, _parent_names, \
- _parents) \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _parent_names, \
- .num_parents = ARRAY_SIZE(_parent_names), \
- .parents = _parents, \
- .flags = _flags | CLK_IS_BASIC, \
- }
-
-#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \
- _fixed_rate_flags) \
- static struct clk _name; \
- static const char *_name##_parent_names[] = {}; \
- static struct clk_fixed_rate _name##_hw = { \
- .hw = { \
- .clk = &_name, \
- }, \
- .fixed_rate = _rate, \
- .flags = _fixed_rate_flags, \
- }; \
- DEFINE_CLK(_name, clk_fixed_rate_ops, _flags, \
- _name##_parent_names, NULL);
-
-#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \
- _flags, _reg, _bit_idx, \
- _gate_flags, _lock) \
- static struct clk _name; \
- static const char *_name##_parent_names[] = { \
- _parent_name, \
- }; \
- static struct clk *_name##_parents[] = { \
- _parent_ptr, \
- }; \
- static struct clk_gate _name##_hw = { \
- .hw = { \
- .clk = &_name, \
- }, \
- .reg = _reg, \
- .bit_idx = _bit_idx, \
- .flags = _gate_flags, \
- .lock = _lock, \
- }; \
- DEFINE_CLK(_name, clk_gate_ops, _flags, \
- _name##_parent_names, _name##_parents);
-
-#define _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
- _flags, _reg, _shift, _width, \
- _divider_flags, _table, _lock) \
- static struct clk _name; \
- static const char *_name##_parent_names[] = { \
- _parent_name, \
- }; \
- static struct clk *_name##_parents[] = { \
- _parent_ptr, \
- }; \
- static struct clk_divider _name##_hw = { \
- .hw = { \
- .clk = &_name, \
- }, \
- .reg = _reg, \
- .shift = _shift, \
- .width = _width, \
- .flags = _divider_flags, \
- .table = _table, \
- .lock = _lock, \
- }; \
- DEFINE_CLK(_name, clk_divider_ops, _flags, \
- _name##_parent_names, _name##_parents);
-
-#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
- _flags, _reg, _shift, _width, \
- _divider_flags, _lock) \
- _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
- _flags, _reg, _shift, _width, \
- _divider_flags, NULL, _lock)
-
-#define DEFINE_CLK_DIVIDER_TABLE(_name, _parent_name, \
- _parent_ptr, _flags, _reg, \
- _shift, _width, _divider_flags, \
- _table, _lock) \
- _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
- _flags, _reg, _shift, _width, \
- _divider_flags, _table, _lock) \
-
-#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
- _reg, _shift, _width, \
- _mux_flags, _lock) \
- static struct clk _name; \
- static struct clk_mux _name##_hw = { \
- .hw = { \
- .clk = &_name, \
- }, \
- .reg = _reg, \
- .shift = _shift, \
- .mask = BIT(_width) - 1, \
- .flags = _mux_flags, \
- .lock = _lock, \
- }; \
- DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names, \
- _parents);
-
-#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name, \
- _parent_ptr, _flags, \
- _mult, _div) \
- static struct clk _name; \
- static const char *_name##_parent_names[] = { \
- _parent_name, \
- }; \
- static struct clk *_name##_parents[] = { \
- _parent_ptr, \
- }; \
- static struct clk_fixed_factor _name##_hw = { \
- .hw = { \
- .clk = &_name, \
- }, \
- .mult = _mult, \
- .div = _div, \
- }; \
- DEFINE_CLK(_name, clk_fixed_factor_ops, _flags, \
- _name##_parent_names, _name##_parents);
-
-/**
- * __clk_init - initialize the data structures in a struct clk
- * @dev: device initializing this clk, placeholder for now
- * @clk: clk being initialized
- *
- * Initializes the lists in struct clk, queries the hardware for the
- * parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- * .name
- * .ops
- * .hw
- * .parent_names
- * .num_parents
- * .flags
- *
- * It is not necessary to call clk_register if __clk_init is used directly with
- * statically initialized clock data.
- *
- * Returns 0 on success, otherwise an error code.
- */
-int __clk_init(struct device *dev, struct clk *clk);
-
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
-
-#endif /* CONFIG_COMMON_CLK */
-#endif /* CLK_PRIVATE_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index d936409..78842f4 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -31,8 +31,10 @@
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
+#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
struct clk_hw;
+struct clk_core;
struct dentry;
/**
@@ -174,9 +176,12 @@ struct clk_ops {
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate);
- long (*determine_rate)(struct clk_hw *hw, unsigned long rate,
- unsigned long *best_parent_rate,
- struct clk_hw **best_parent_hw);
+ long (*determine_rate)(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_hw);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -205,7 +210,7 @@ struct clk_ops {
struct clk_init_data {
const char *name;
const struct clk_ops *ops;
- const char **parent_names;
+ const char * const *parent_names;
u8 num_parents;
unsigned long flags;
};
@@ -216,13 +221,17 @@ struct clk_init_data {
* clk_foo and then referenced by the struct clk instance that uses struct
* clk_foo's clk_ops
*
- * @clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
+ * @core: pointer to the struct clk_core instance that points back to this
+ * struct clk_hw instance
+ *
+ * @clk: pointer to the per-user struct clk instance that can be used to call
+ * into the clk API
*
* @init: pointer to struct clk_init_data that contains the init data shared
* with the common clock framework.
*/
struct clk_hw {
+ struct clk_core *core;
struct clk *clk;
const struct clk_init_data *init;
};
@@ -294,6 +303,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+void clk_unregister_gate(struct clk *clk);
struct clk_div_table {
unsigned int val;
@@ -352,6 +362,17 @@ struct clk_divider {
#define CLK_DIVIDER_READ_ONLY BIT(5)
extern const struct clk_ops clk_divider_ops;
+
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+ unsigned int val, const struct clk_div_table *table,
+ unsigned long flags);
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate, const struct clk_div_table *table,
+ u8 width, unsigned long flags);
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags);
+
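
A possible use of the newly exported divider helpers in a custom divider's recalc hook; struct my_div, to_my_div() and its reg/shift/width/table/flags fields are assumptions for the sketch:

	static unsigned long my_div_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
	{
		struct my_div *md = to_my_div(hw);
		unsigned int val;

		val = (readl(md->reg) >> md->shift) & ((1 << md->width) - 1);
		return divider_recalc_rate(hw, parent_rate, val,
					   md->table, md->flags);
	}
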
struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -361,6 +382,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock);
+void clk_unregister_divider(struct clk *clk);
/**
* struct clk_mux - multiplexer clock
@@ -382,6 +404,8 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
* register, and mask of mux bits are in higher 16-bit of this register.
* While setting the mux bits, higher 16-bit should also be updated to
* indicate changing mux bits.
+ * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
+ * frequency.
*/
struct clk_mux {
struct clk_hw hw;
@@ -396,21 +420,26 @@ struct clk_mux {
#define CLK_MUX_INDEX_ONE BIT(0)
#define CLK_MUX_INDEX_BIT BIT(1)
#define CLK_MUX_HIWORD_MASK BIT(2)
-#define CLK_MUX_READ_ONLY BIT(3) /* mux setting cannot be changed */
+#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */
+#define CLK_MUX_ROUND_CLOSEST BIT(4)
extern const struct clk_ops clk_mux_ops;
extern const struct clk_ops clk_mux_ro_ops;
struct clk *clk_register_mux(struct device *dev, const char *name,
- const char **parent_names, u8 num_parents, unsigned long flags,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
- const char **parent_names, u8 num_parents, unsigned long flags,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+void clk_unregister_mux(struct clk *clk);
+
void of_fixed_factor_clk_setup(struct device_node *node);
/**
@@ -431,7 +460,7 @@ struct clk_fixed_factor {
unsigned int div;
};
-extern struct clk_ops clk_fixed_factor_ops;
+extern const struct clk_ops clk_fixed_factor_ops;
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
@@ -492,7 +521,7 @@ struct clk_composite {
};
struct clk *clk_register_composite(struct device *dev, const char *name,
- const char **parent_names, int num_parents,
+ const char * const *parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
@@ -515,7 +544,7 @@ struct clk_gpio {
extern const struct clk_ops clk_gpio_gate_ops;
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, struct gpio_desc *gpio,
+ const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags);
void of_gpio_clk_gate_setup(struct device_node *node);
@@ -550,15 +579,30 @@ bool __clk_is_prepared(struct clk *clk);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
unsigned long *best_parent_rate,
struct clk_hw **best_parent_p);
+unsigned long __clk_determine_rate(struct clk_hw *core,
+ unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate);
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_p);
+void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+
+static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
+{
+ dst->clk = src->clk;
+ dst->core = src->core;
+}
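
To illustrate the widened determine_rate contract introduced above (an editor's sketch, not from the patch): a driver callback is expected to honour the new per-user min_rate/max_rate bounds, e.g. by clamping before rounding. my_round_to_hw() is hypothetical:

	static long my_determine_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long min_rate,
				      unsigned long max_rate,
				      unsigned long *best_parent_rate,
				      struct clk_hw **best_parent_hw)
	{
		rate = clamp(rate, min_rate, max_rate);
		return my_round_to_hw(hw, rate, best_parent_rate);
	}
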
/*
* FIXME clock api without lock protection
*/
-int __clk_prepare(struct clk *clk);
-void __clk_unprepare(struct clk *clk);
-void __clk_reparent(struct clk *clk, struct clk *new_parent);
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
struct of_device_id;
@@ -584,6 +628,8 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
int of_clk_get_parent_count(struct device_node *np);
+int of_clk_parent_fill(struct device_node *np, const char **parents,
+ unsigned int size);
const char *of_clk_get_parent_name(struct device_node *np, int index);
void of_clk_init(const struct of_device_id *matches);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index c7f258a..0df4a51 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees);
*/
int clk_get_phase(struct clk *clk);
+/**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q);
+
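
As a usage sketch (not part of the patch), a driver can skip a redundant re-parent by comparing handles at the clk_core level:

	static int my_switch_parent(struct clk *clk, struct clk *new_parent)
	{
		/* two handles may wrap the same clk_core even if pointers differ */
		if (clk_is_match(clk_get_parent(clk), new_parent))
			return 0;
		return clk_set_parent(clk, new_parent);
	}
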
#else
static inline long clk_get_accuracy(struct clk *clk)
@@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk)
return -ENOTSUPP;
}
+static inline bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+ return p == q;
+}
+
#endif
/**
@@ -288,6 +306,20 @@ void devm_clk_put(struct device *dev, struct clk *clk);
* @clk: clock source
* @rate: desired clock rate in Hz
*
+ * This answers the question "if I were to pass @rate to clk_set_rate(),
+ * what clock rate would I end up with?" without changing the hardware
+ * in any way. In other words:
+ *
+ * rate = clk_round_rate(clk, r);
+ *
+ * and:
+ *
+ * clk_set_rate(clk, r);
+ * rate = clk_get_rate(clk);
+ *
+ * are equivalent except the former does not modify the clock hardware
+ * in any way.
+ *
* Returns rounded clock rate in Hz, or negative errno.
*/
long clk_round_rate(struct clk *clk, unsigned long rate);
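
A short, hedged example of the query-then-commit pattern the comment describes; the 5% tolerance is an arbitrary driver policy, not something the API mandates:

	static int my_set_close_rate(struct clk *clk, unsigned long want)
	{
		long got = clk_round_rate(clk, want);

		if (got < 0)
			return got;
		if (got < want - want / 20 || got > want + want / 20)
			return -ERANGE;	/* hardware can't get within 5% */
		return clk_set_rate(clk, got);
	}
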
@@ -302,6 +334,46 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
int clk_set_rate(struct clk *clk, unsigned long rate);
/**
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
+ *
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
+ */
+bool clk_has_parent(struct clk *clk, struct clk *parent);
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
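
A hedged sketch of constraining a clock with the new range API; the 200 MHz to 1.2 GHz window is just an example value for an assumed CPU clock:

	static int my_limit_cpu_clk(struct clk *cpu_clk)
	{
		/* both bounds are inclusive */
		return clk_set_rate_range(cpu_clk, 200000000, 1200000000);
	}
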
+/**
* clk_set_parent - set the parent clock source for this clock
* @clk: clock source
* @parent: parent clock source
@@ -374,6 +446,11 @@ static inline long clk_round_rate(struct clk *clk, unsigned long rate)
return 0;
}
+static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+ return true;
+}
+
static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
return 0;
@@ -408,19 +485,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-/**
- * clk_add_alias - add a new clock alias
- * @alias: name for clock alias
- * @alias_dev_name: device name
- * @id: platform specific clock name
- * @dev: device
- *
- * Allows using generic clock names for drivers by adding a new alias.
- * Assumes clkdev, see clkdev.h for more info.
- */
-int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
- struct device *dev);
-
struct device_node;
struct of_phandle_args;
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index c8e3b3d..7669f76 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -20,10 +20,10 @@
extern void __iomem *at91_pmc_base;
#define at91_pmc_read(field) \
- __raw_readl(at91_pmc_base + field)
+ readl_relaxed(at91_pmc_base + field)
#define at91_pmc_write(field, value) \
- __raw_writel(value, at91_pmc_base + field)
+ writel_relaxed(value, at91_pmc_base + field)
#else
.extern at91_pmc_base
#endif
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
index 9f8a140..63a8159 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/shmobile.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
+void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 3ca9fca..19c4208 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -120,6 +120,4 @@ static inline void tegra_cpu_clock_resume(void)
}
#endif
-void tegra_clocks_apply_init_table(void);
-
#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 55ef529..79b76e1 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -15,6 +15,7 @@
#ifndef __LINUX_CLK_TI_H__
#define __LINUX_CLK_TI_H__
+#include <linux/clk-provider.h>
#include <linux/clkdev.h>
/**
@@ -214,8 +215,15 @@ struct ti_dt_clk {
.node_name = name, \
}
-/* Maximum number of clock memmaps */
-#define CLK_MAX_MEMMAPS 4
+/* Static memmap indices */
+enum {
+ TI_CLKM_CM = 0,
+ TI_CLKM_CM2,
+ TI_CLKM_PRM,
+ TI_CLKM_SCRM,
+ TI_CLKM_CTRL,
+ CLK_MAX_MEMMAPS
+};
typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
@@ -263,6 +271,8 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
u8 index);
long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
unsigned long *best_parent_rate,
struct clk_hw **best_parent_clk);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
@@ -272,6 +282,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
unsigned long *parent_rate);
long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
unsigned long *best_parent_rate,
struct clk_hw **best_parent_clk);
u8 omap2_init_dpll_parent(struct clk_hw *hw);
@@ -348,4 +360,17 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
+#ifdef CONFIG_ATAGS
+int omap3430_clk_legacy_init(void);
+int omap3430es1_clk_legacy_init(void);
+int omap36xx_clk_legacy_init(void);
+int am35xx_clk_legacy_init(void);
+#else
+static inline int omap3430_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; }
+static inline int am35xx_clk_legacy_init(void) { return -ENXIO; }
+#endif
+
+
#endif
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 94bad77..08bffcc 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -22,6 +22,7 @@ struct clk_lookup {
const char *dev_id;
const char *con_id;
struct clk *clk;
+ struct clk_hw *clk_hw;
};
#define CLKDEV_INIT(d, n, c) \
@@ -32,15 +33,19 @@ struct clk_lookup {
}
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...);
+ const char *dev_fmt, ...) __printf(3, 4);
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
+struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
+ const char *dev_fmt, ...) __printf(3, 4);
+
void clkdev_add_table(struct clk_lookup *, size_t);
-int clk_add_alias(const char *, const char *, char *, struct device *);
+int clk_add_alias(const char *, const char *, const char *, struct device *);
-int clk_register_clkdev(struct clk *, const char *, const char *, ...);
+int clk_register_clkdev(struct clk *, const char *, const char *, ...)
+ __printf(3, 4);
int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
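
For illustration, the new clkdev_create() collapses the old clkdev_alloc()+clkdev_add() pair into one call; the device name and con_id below are made up:

	static int my_add_uart_lookup(struct clk *clk, int id)
	{
		struct clk_lookup *cl;

		cl = clkdev_create(clk, "uart", "serial8250.%d", id);
		return cl ? 0 : -ENOMEM;
	}
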
#ifdef CONFIG_COMMON_CLK
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 2e4cb67..597a1e8 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -8,33 +8,19 @@
#ifndef _LINUX_CLOCKCHIPS_H
#define _LINUX_CLOCKCHIPS_H
-/* Clock event notification values */
-enum clock_event_nofitiers {
- CLOCK_EVT_NOTIFY_ADD,
- CLOCK_EVT_NOTIFY_BROADCAST_ON,
- CLOCK_EVT_NOTIFY_BROADCAST_OFF,
- CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
- CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
- CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
- CLOCK_EVT_NOTIFY_SUSPEND,
- CLOCK_EVT_NOTIFY_RESUME,
- CLOCK_EVT_NOTIFY_CPU_DYING,
- CLOCK_EVT_NOTIFY_CPU_DEAD,
-};
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
-#include <linux/clocksource.h>
-#include <linux/cpumask.h>
-#include <linux/ktime.h>
-#include <linux/notifier.h>
+# include <linux/clocksource.h>
+# include <linux/cpumask.h>
+# include <linux/ktime.h>
+# include <linux/notifier.h>
struct clock_event_device;
struct module;
-/* Clock event mode commands */
+/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
enum clock_event_mode {
- CLOCK_EVT_MODE_UNUSED = 0,
+ CLOCK_EVT_MODE_UNUSED,
CLOCK_EVT_MODE_SHUTDOWN,
CLOCK_EVT_MODE_PERIODIC,
CLOCK_EVT_MODE_ONESHOT,
@@ -42,30 +28,52 @@ enum clock_event_mode {
};
/*
+ * Possible states of a clock event device.
+ *
+ * DETACHED: Device is not used by clockevents core. Initial state or can be
+ * reached from SHUTDOWN.
+ * SHUTDOWN: Device is powered-off. Can be reached from PERIODIC or ONESHOT.
+ * PERIODIC: Device is programmed to generate events periodically. Can be
+ * reached from DETACHED or SHUTDOWN.
+ * ONESHOT: Device is programmed to generate event only once. Can be reached
+ * from DETACHED or SHUTDOWN.
+ * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
+ * stopped.
+ */
+enum clock_event_state {
+ CLOCK_EVT_STATE_DETACHED,
+ CLOCK_EVT_STATE_SHUTDOWN,
+ CLOCK_EVT_STATE_PERIODIC,
+ CLOCK_EVT_STATE_ONESHOT,
+ CLOCK_EVT_STATE_ONESHOT_STOPPED,
+};
+
+/*
* Clock event features
*/
-#define CLOCK_EVT_FEAT_PERIODIC 0x000001
-#define CLOCK_EVT_FEAT_ONESHOT 0x000002
-#define CLOCK_EVT_FEAT_KTIME 0x000004
+# define CLOCK_EVT_FEAT_PERIODIC 0x000001
+# define CLOCK_EVT_FEAT_ONESHOT 0x000002
+# define CLOCK_EVT_FEAT_KTIME 0x000004
+
/*
- * x86(64) specific misfeatures:
+ * x86(64) specific (mis)features:
*
* - Clockevent source stops in C3 State and needs broadcast support.
* - Local APIC timer is used as a dummy device.
*/
-#define CLOCK_EVT_FEAT_C3STOP 0x000008
-#define CLOCK_EVT_FEAT_DUMMY 0x000010
+# define CLOCK_EVT_FEAT_C3STOP 0x000008
+# define CLOCK_EVT_FEAT_DUMMY 0x000010
/*
* Core shall set the interrupt affinity dynamically in broadcast mode
*/
-#define CLOCK_EVT_FEAT_DYNIRQ 0x000020
-#define CLOCK_EVT_FEAT_PERCPU 0x000040
+# define CLOCK_EVT_FEAT_DYNIRQ 0x000020
+# define CLOCK_EVT_FEAT_PERCPU 0x000040
/*
* Clockevent device is based on a hrtimer for broadcast
*/
-#define CLOCK_EVT_FEAT_HRTIMER 0x000080
+# define CLOCK_EVT_FEAT_HRTIMER 0x000080
/**
* struct clock_event_device - clock event device descriptor
@@ -78,10 +86,16 @@ enum clock_event_mode {
* @min_delta_ns: minimum delta value in ns
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
- * @mode: operating mode assigned by the management code
+ * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
+ * @state_use_accessors:current state of the device, assigned by the core code
* @features: features
* @retries: number of forced programming retries
- * @set_mode: set mode function
+ * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
+ * @set_state_periodic: switch state to periodic, if !set_mode
+ * @set_state_oneshot: switch state to oneshot, if !set_mode
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
+ * @set_state_shutdown: switch state to shutdown, if !set_mode
+ * @tick_resume: resume clkevt device, if !set_mode
* @broadcast: function to broadcast events
* @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
* @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
@@ -95,22 +109,32 @@ enum clock_event_mode {
*/
struct clock_event_device {
void (*event_handler)(struct clock_event_device *);
- int (*set_next_event)(unsigned long evt,
- struct clock_event_device *);
- int (*set_next_ktime)(ktime_t expires,
- struct clock_event_device *);
+ int (*set_next_event)(unsigned long evt, struct clock_event_device *);
+ int (*set_next_ktime)(ktime_t expires, struct clock_event_device *);
ktime_t next_event;
u64 max_delta_ns;
u64 min_delta_ns;
u32 mult;
u32 shift;
enum clock_event_mode mode;
+ enum clock_event_state state_use_accessors;
unsigned int features;
unsigned long retries;
+ /*
+ * State transition callback(s): Only one of the two groups should be
+ * defined:
+ * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
+ * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
+ */
+ void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
+ int (*set_state_periodic)(struct clock_event_device *);
+ int (*set_state_oneshot)(struct clock_event_device *);
+ int (*set_state_oneshot_stopped)(struct clock_event_device *);
+ int (*set_state_shutdown)(struct clock_event_device *);
+ int (*tick_resume)(struct clock_event_device *);
+
void (*broadcast)(const struct cpumask *mask);
- void (*set_mode)(enum clock_event_mode mode,
- struct clock_event_device *);
void (*suspend)(struct clock_event_device *);
void (*resume)(struct clock_event_device *);
unsigned long min_delta_ticks;
@@ -125,6 +149,32 @@ struct clock_event_device {
struct module *owner;
} ____cacheline_aligned;
+/* Helpers to verify state of a clockevent device */
+static inline bool clockevent_state_detached(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
+}
+
+static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
+}
+
+static inline bool clockevent_state_periodic(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
+}
+
+static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
+}
+
+static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
+{
+ return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
+}
+
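
A sketch of how a clockevent driver might use one of these helpers; my_timer_hw_arm() is hypothetical and the check only mirrors what the core already guarantees:

	static int my_set_next_event(unsigned long delta,
				     struct clock_event_device *dev)
	{
		if (WARN_ON_ONCE(!clockevent_state_oneshot(dev)))
			return -EINVAL;
		my_timer_hw_arm(delta);
		return 0;
	}
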
/*
* Calculate a multiplication factor for scaled math, which is used to convert
* nanoseconds based values to clock ticks:
@@ -136,18 +186,18 @@ struct clock_event_device {
*
* factor = (clock_ticks << shift) / nanoseconds
*/
-static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
- int shift)
+static inline unsigned long
+div_sc(unsigned long ticks, unsigned long nsec, int shift)
{
- uint64_t tmp = ((uint64_t)ticks) << shift;
+ u64 tmp = ((u64)ticks) << shift;
do_div(tmp, nsec);
+
return (unsigned long) tmp;
}
/* Clock event layer functions */
-extern u64 clockevent_delta2ns(unsigned long latch,
- struct clock_event_device *evt);
+extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt);
extern void clockevents_register_device(struct clock_event_device *dev);
extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
@@ -158,57 +208,42 @@ extern void clockevents_config_and_register(struct clock_event_device *dev,
extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
-extern void clockevents_exchange_device(struct clock_event_device *old,
- struct clock_event_device *new);
-extern void clockevents_set_mode(struct clock_event_device *dev,
- enum clock_event_mode mode);
-extern int clockevents_program_event(struct clock_event_device *dev,
- ktime_t expires, bool force);
-
-extern void clockevents_handle_noop(struct clock_event_device *dev);
-
static inline void
clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
{
- return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC,
- freq, minsec);
+ return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
}
extern void clockevents_suspend(void);
extern void clockevents_resume(void);
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-#ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
+# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+# ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
extern void tick_broadcast(const struct cpumask *mask);
-#else
-#define tick_broadcast NULL
-#endif
+# else
+# define tick_broadcast NULL
+# endif
extern int tick_receive_broadcast(void);
-#endif
+# endif
-#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+# if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void tick_setup_hrtimer_broadcast(void);
extern int tick_check_broadcast_expired(void);
-#else
+# else
static inline int tick_check_broadcast_expired(void) { return 0; }
-static inline void tick_setup_hrtimer_broadcast(void) {};
-#endif
+static inline void tick_setup_hrtimer_broadcast(void) { }
+# endif
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int clockevents_notify(unsigned long reason, void *arg);
-#else
-static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
-#endif
-
-#else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
-static inline void clockevents_suspend(void) {}
-static inline void clockevents_resume(void) {}
+#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
+static inline void clockevents_suspend(void) { }
+static inline void clockevents_resume(void) { }
static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
static inline int tick_check_broadcast_expired(void) { return 0; }
-static inline void tick_setup_hrtimer_broadcast(void) {};
+static inline void tick_setup_hrtimer_broadcast(void) { }
-#endif
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
-#endif
+#endif /* _LINUX_CLOCKCHIPS_H */
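For illustration (not part of the patch): the mult/shift arithmetic that div_sc() and the comment above describe can be checked in plain C. A minimal userspace sketch, using uint64_t in place of the kernel's u64/do_div(); the 1 MHz clock and the 1500 ns figure are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Userspace model of div_sc(): factor = (clock_ticks << shift) / nanoseconds */
static unsigned long div_sc(unsigned long ticks, unsigned long nsec, int shift)
{
	uint64_t tmp = ((uint64_t)ticks) << shift;

	return (unsigned long)(tmp / nsec);	/* stands in for do_div() */
}

int main(void)
{
	/* A 1 MHz clock: 1000 ticks per 1000000 ns, scaled by 2^32. */
	unsigned long mult = div_sc(1000, 1000000, 32);

	/* Convert 1500 ns to ticks: ticks = (nsec * mult) >> shift. */
	uint64_t ticks = ((uint64_t)1500 * mult) >> 32;

	printf("mult=%lu, 1500 ns -> %llu tick(s)\n",
	       mult, (unsigned long long)ticks);
	return 0;
}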
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index abcafaa..278dd27 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -18,8 +18,6 @@
#include <asm/div64.h>
#include <asm/io.h>
-/* clocksource cycle base type */
-typedef u64 cycle_t;
struct clocksource;
struct module;
@@ -28,106 +26,6 @@ struct module;
#endif
/**
- * struct cyclecounter - hardware abstraction for a free running counter
- * Provides completely state-free accessors to the underlying hardware.
- * Depending on which hardware it reads, the cycle counter may wrap
- * around quickly. Locking rules (if necessary) have to be defined
- * by the implementor and user of specific instances of this API.
- *
- * @read: returns the current cycle value
- * @mask: bitmask for two's complement
- * subtraction of non 64 bit counters,
- * see CLOCKSOURCE_MASK() helper macro
- * @mult: cycle to nanosecond multiplier
- * @shift: cycle to nanosecond divisor (power of two)
- */
-struct cyclecounter {
- cycle_t (*read)(const struct cyclecounter *cc);
- cycle_t mask;
- u32 mult;
- u32 shift;
-};
-
-/**
- * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
- * Contains the state needed by timecounter_read() to detect
- * cycle counter wrap around. Initialize with
- * timecounter_init(). Also used to convert cycle counts into the
- * corresponding nanosecond counts with timecounter_cyc2time(). Users
- * of this code are responsible for initializing the underlying
- * cycle counter hardware, locking issues and reading the time
- * more often than the cycle counter wraps around. The nanosecond
- * counter will only wrap around after ~585 years.
- *
- * @cc: the cycle counter used by this instance
- * @cycle_last: most recent cycle counter value seen by
- * timecounter_read()
- * @nsec: continuously increasing count
- */
-struct timecounter {
- const struct cyclecounter *cc;
- cycle_t cycle_last;
- u64 nsec;
-};
-
-/**
- * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @cc: Pointer to cycle counter.
- * @cycles: Cycles
- *
- * XXX - This could use some mult_lxl_ll() asm optimization. Same code
- * as in cyc2ns, but with unsigned result.
- */
-static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
- cycle_t cycles)
-{
- u64 ret = (u64)cycles;
- ret = (ret * cc->mult) >> cc->shift;
- return ret;
-}
-
-/**
- * timecounter_init - initialize a time counter
- * @tc: Pointer to time counter which is to be initialized/reset
- * @cc: A cycle counter, ready to be used.
- * @start_tstamp: Arbitrary initial time stamp.
- *
- * After this call the current cycle register (roughly) corresponds to
- * the initial time stamp. Every call to timecounter_read() increments
- * the time stamp counter by the number of elapsed nanoseconds.
- */
-extern void timecounter_init(struct timecounter *tc,
- const struct cyclecounter *cc,
- u64 start_tstamp);
-
-/**
- * timecounter_read - return nanoseconds elapsed since timecounter_init()
- * plus the initial time stamp
- * @tc: Pointer to time counter.
- *
- * In other words, keeps track of time since the same epoch as
- * the function which generated the initial time stamp.
- */
-extern u64 timecounter_read(struct timecounter *tc);
-
-/**
- * timecounter_cyc2time - convert a cycle counter to same
- * time base as values returned by
- * timecounter_read()
- * @tc: Pointer to time counter.
- * @cycle_tstamp: a value returned by tc->cc->read()
- *
- * Cycle counts that are converted correctly as long as they
- * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
- * with "max cycle count" == cs->mask+1.
- *
- * This allows conversion of cycle counter values which were generated
- * in the past.
- */
-extern u64 timecounter_cyc2time(struct timecounter *tc,
- cycle_t cycle_tstamp);
-
-/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
@@ -158,6 +56,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @shift: cycle to nanosecond divisor (power of two)
* @max_idle_ns: max idle time permitted by the clocksource (nsecs)
* @maxadj: maximum adjustment value to mult (~11%)
+ * @max_cycles: maximum safe cycle value which won't overflow on multiplication
* @flags: flags describing special properties
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
@@ -178,7 +77,7 @@ struct clocksource {
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
-
+ u64 max_cycles;
const char *name;
struct list_head list;
int rating;
@@ -280,10 +179,8 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
}
-extern int clocksource_register(struct clocksource*);
extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
-extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
@@ -291,7 +188,7 @@ extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
extern u64
-clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask);
+clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
@@ -302,7 +199,16 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
-__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);
+__clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq);
+
+/*
+ * Don't call this unless you are a default clocksource
+ * (AKA: jiffies) and absolutely have to.
+ */
+static inline int __clocksource_register(struct clocksource *cs)
+{
+ return __clocksource_register_scale(cs, 1, 0);
+}
static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
@@ -314,14 +220,14 @@ static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
return __clocksource_register_scale(cs, 1000, khz);
}
-static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
+static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz)
{
- __clocksource_updatefreq_scale(cs, 1, hz);
+ __clocksource_update_freq_scale(cs, 1, hz);
}
-static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
+static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)
{
- __clocksource_updatefreq_scale(cs, 1000, khz);
+ __clocksource_update_freq_scale(cs, 1000, khz);
}
@@ -346,4 +252,10 @@ extern void clocksource_of_init(void);
static inline void clocksource_of_init(void) {}
#endif
+#ifdef CONFIG_ACPI
+void acpi_generic_timer_init(void);
+#else
+static inline void acpi_generic_timer_init(void) { }
+#endif
+
#endif /* _LINUX_CLOCKSOURCE_H */
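For illustration (not part of the patch): a minimal sketch of how a clocksource driver would use the registration helpers touched above, assuming a hypothetical memory-mapped counter ("foo"). Everything foo_* is invented for the example, and the read() return type is assumed to be u64 here (it follows the cycle type of the kernel version in question).

#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *foo_counter;	/* hypothetical MMIO counter register */

static u64 foo_read(struct clocksource *cs)
{
	return readl_relaxed(foo_counter);
}

static struct clocksource foo_clocksource = {
	.name	= "foo-timer",
	.rating	= 300,
	.read	= foo_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init foo_clocksource_init(u32 rate_hz)
{
	/* Derives mult/shift (and now max_cycles) from the frequency. */
	return clocksource_register_hz(&foo_clocksource, rate_hz);
}

static void foo_clocksource_recalibrate(u32 new_rate_hz)
{
	/* Renamed in this series from __clocksource_updatefreq_hz(). */
	__clocksource_update_freq_hz(&foo_clocksource, new_rate_hz);
}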
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 9384ba6..f7ef093 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -16,16 +16,16 @@
struct cma;
extern unsigned long totalcma_pages;
-extern phys_addr_t cma_get_base(struct cma *cma);
-extern unsigned long cma_get_size(struct cma *cma);
+extern phys_addr_t cma_get_base(const struct cma *cma);
+extern unsigned long cma_get_size(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
bool fixed, struct cma **res_cma);
-extern int cma_init_reserved_mem(phys_addr_t base,
- phys_addr_t size, int order_per_bit,
+extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ unsigned int order_per_bit,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
-extern bool cma_release(struct cma *cma, struct page *pages, int count);
+extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
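For illustration (not part of the patch): the cma_alloc()/cma_release() prototypes above now take unsigned counts and a const page pointer. A minimal sketch of a caller, assuming the struct cma was set up earlier via cma_declare_contiguous() or cma_init_reserved_mem(); the foo_* helpers are hypothetical.

#include <linux/cma.h>
#include <linux/mm.h>
#include <linux/printk.h>

static struct page *foo_alloc_dma_pages(struct cma *cma, unsigned int nr_pages)
{
	/* count and align are unsigned int after this change; align is an order. */
	return cma_alloc(cma, nr_pages, 0);
}

static void foo_free_dma_pages(struct cma *cma, const struct page *pages,
			       unsigned int nr_pages)
{
	if (!cma_release(cma, pages, nr_pages))
		pr_warn("foo: pages were not allocated from this CMA area\n");
}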
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 3238ffa..aa8f61c 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -12,6 +12,10 @@
#define COMPACT_PARTIAL 3
/* The full zone was compacted */
#define COMPACT_COMPLETE 4
+/* For more detailed tracepoint output */
+#define COMPACT_NO_SUITABLE_PAGE 5
+#define COMPACT_NOT_SUITABLE_ZONE 6
+/* When adding new state, please change compaction_status_string, too */
/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
@@ -21,6 +25,8 @@
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK 2
+struct alloc_context; /* in mm/internal.h */
+
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -28,83 +34,28 @@ extern int sysctl_compaction_handler(struct ctl_table *table, int write,
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_compact_unevictable_allowed;
extern int fragmentation_index(struct zone *zone, unsigned int order);
-extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *mask,
- enum migrate_mode mode, int *contended,
- int alloc_flags, int classzone_idx);
+extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
+ int alloc_flags, const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order,
int alloc_flags, int classzone_idx);
-/* Do not skip compaction more than 64 times */
-#define COMPACT_MAX_DEFER_SHIFT 6
-
-/*
- * Compaction is deferred when compaction fails to result in a page
- * allocation success. 1 << compact_defer_limit compactions are skipped up
- * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
- */
-static inline void defer_compaction(struct zone *zone, int order)
-{
- zone->compact_considered = 0;
- zone->compact_defer_shift++;
-
- if (order < zone->compact_order_failed)
- zone->compact_order_failed = order;
-
- if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
- zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
-}
-
-/* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone, int order)
-{
- unsigned long defer_limit = 1UL << zone->compact_defer_shift;
-
- if (order < zone->compact_order_failed)
- return false;
-
- /* Avoid possible overflow */
- if (++zone->compact_considered > defer_limit)
- zone->compact_considered = defer_limit;
-
- return zone->compact_considered < defer_limit;
-}
-
-/*
- * Update defer tracking counters after successful compaction of given order,
- * which means an allocation either succeeded (alloc_success == true) or is
- * expected to succeed.
- */
-static inline void compaction_defer_reset(struct zone *zone, int order,
- bool alloc_success)
-{
- if (alloc_success) {
- zone->compact_considered = 0;
- zone->compact_defer_shift = 0;
- }
- if (order >= zone->compact_order_failed)
- zone->compact_order_failed = order + 1;
-}
-
-/* Returns true if restarting compaction after many failures */
-static inline bool compaction_restarting(struct zone *zone, int order)
-{
- if (order < zone->compact_order_failed)
- return false;
-
- return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
- zone->compact_considered >= 1UL << zone->compact_defer_shift;
-}
+extern void defer_compaction(struct zone *zone, int order);
+extern bool compaction_deferred(struct zone *zone, int order);
+extern void compaction_defer_reset(struct zone *zone, int order,
+ bool alloc_success);
+extern bool compaction_restarting(struct zone *zone, int order);
#else
-static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
- int order, gfp_t gfp_mask, nodemask_t *nodemask,
- enum migrate_mode mode, int *contended,
- int alloc_flags, int classzone_idx)
+static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
+ unsigned int order, int alloc_flags,
+ const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended)
{
return COMPACT_CONTINUE;
}
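For illustration (not part of the patch): the deferral helpers this hunk turns into out-of-line declarations keep the same logic as the removed inlines. A minimal userspace model of those counters, using a stand-in struct for the relevant struct zone fields.

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* never skip compaction more than 64 times */

/* Stand-in for the compact_* bookkeeping fields of struct zone. */
struct zone_defer {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

/* After a failed compaction: back off exponentially for this order. */
static void defer_compaction(struct zone_defer *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift++;

	if (order < z->compact_order_failed)
		z->compact_order_failed = order;

	if (z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time. */
static bool compaction_deferred(struct zone_defer *z, int order)
{
	unsigned long defer_limit = 1UL << z->compact_defer_shift;

	if (order < z->compact_order_failed)
		return false;

	if (++z->compact_considered > defer_limit)
		z->compact_considered = defer_limit;

	return z->compact_considered < defer_limit;
}

int main(void)
{
	struct zone_defer z = { .compact_order_failed = 10 };
	int skipped = 0;

	defer_compaction(&z, 5);	/* two failures at order 5 ... */
	defer_compaction(&z, 5);	/* ... so compact_defer_shift is now 2 */

	while (compaction_deferred(&z, 5))
		skipped++;

	printf("skipped %d attempts before retrying\n", skipped);
	return 0;
}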
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 7450ca2..a76c917 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -424,7 +424,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
-extern int compat_printk(const char *fmt, ...);
+extern __printf(1, 2) int compat_printk(const char *fmt, ...);
extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
@@ -689,6 +689,15 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
compat_stack_t __user *uoss_ptr);
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
+#endif
+
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
+ compat_old_sigset_t __user *oset);
+#endif
+
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
#define compat_save_altstack_ex(uss, sp) do { \
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 02ae99e..dfaa7b3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -5,14 +5,28 @@
/*
* Common definitions for all gcc versions go here.
*/
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
-
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
/* Optimization barrier */
+
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
+/*
+ * This version is used e.g. to prevent dead store elimination on @ptr,
+ * where gcc and llvm may behave differently than with a plain
+ * barrier(): gcc gets along with a normal barrier(), while llvm needs
+ * an explicit input variable to be assumed clobbered. The issue is as
+ * follows: while the inline asm might access any memory it wants, the
+ * compiler could have fit all of @ptr into registers instead, and
+ * since @ptr never escaped from them, it could prove that the inline
+ * asm wasn't touching any of it. This version works well with both
+ * compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
/*
* This macro obfuscates arithmetic on a variable address so that gcc
@@ -32,54 +46,63 @@
* the inline assembly constraint from =g to =r, in this particular
* case either is valid.
*/
-#define RELOC_HIDE(ptr, off) \
- ({ unsigned long __ptr; \
- __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
- (typeof(ptr)) (__ptr + (off)); })
+#define RELOC_HIDE(ptr, off) \
+({ \
+ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); \
+})
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#ifdef __CHECKER__
-#define __must_be_array(arr) 0
+#define __must_be_array(a) 0
#else
/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif
/*
* Force always-inline if the user requests it so via the .config,
* or if gcc is too old:
*/
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-# define inline inline __attribute__((always_inline)) notrace
-# define __inline__ __inline__ __attribute__((always_inline)) notrace
-# define __inline __inline __attribute__((always_inline)) notrace
+#define inline inline __attribute__((always_inline)) notrace
+#define __inline__ __inline__ __attribute__((always_inline)) notrace
+#define __inline __inline __attribute__((always_inline)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
-# define inline inline notrace
-# define __inline__ __inline__ notrace
-# define __inline __inline notrace
+#define inline inline notrace
+#define __inline__ __inline__ notrace
+#define __inline __inline notrace
#endif
-#define __deprecated __attribute__((deprecated))
-#define __packed __attribute__((packed))
-#define __weak __attribute__((weak))
+#define __always_inline inline __attribute__((always_inline))
+#define noinline __attribute__((noinline))
+
+#define __deprecated __attribute__((deprecated))
+#define __packed __attribute__((packed))
+#define __weak __attribute__((weak))
+#define __alias(symbol) __attribute__((alias(#symbol)))
/*
- * it doesn't make sense on ARM (currently the only user of __naked) to trace
- * naked functions because then mcount is called without stack and frame pointer
- * being set up and there is no chance to restore the lr register to the value
- * before mcount was called.
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ *
+ * The asm() bodies of naked functions often depend on standard calling
+ * conventions, therefore they must be noinline and noclone.
*
- * The asm() bodies of naked functions often depend on standard calling conventions,
- * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
- * this, so we must do so ourselves. See GCC PR44290.
+ * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
+ * See GCC PR44290.
*/
-#define __naked __attribute__((naked)) noinline __noclone notrace
+#define __naked __attribute__((naked)) noinline __noclone notrace
-#define __noreturn __attribute__((noreturn))
+#define __noreturn __attribute__((noreturn))
/*
* From the GCC manual:
@@ -91,19 +114,130 @@
* would be.
* [...]
*/
-#define __pure __attribute__((pure))
-#define __aligned(x) __attribute__((aligned(x)))
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-#define noinline __attribute__((noinline))
-#define __attribute_const__ __attribute__((__const__))
-#define __maybe_unused __attribute__((unused))
-#define __always_unused __attribute__((unused))
-
-#define __gcc_header(x) #x
-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
-#define gcc_header(x) _gcc_header(x)
-#include gcc_header(__GNUC__)
+#define __pure __attribute__((pure))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __printf(a, b) __attribute__((format(printf, a, b)))
+#define __scanf(a, b) __attribute__((format(scanf, a, b)))
+#define __attribute_const__ __attribute__((__const__))
+#define __maybe_unused __attribute__((unused))
+#define __always_unused __attribute__((unused))
+
+/* gcc version specific checks */
+
+#if GCC_VERSION < 30200
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if GCC_VERSION < 30300
+# define __used __attribute__((__unused__))
+#else
+# define __used __attribute__((__used__))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if GCC_VERSION < 30400
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
+
+#if GCC_VERSION >= 30400
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#if GCC_VERSION >= 40000
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __compiler_offsetof(a, b) \
+ __builtin_offsetof(a, b)
+
+#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+
+#if GCC_VERSION >= 40300
+/* Mark functions as cold. gcc will assume any path leading to a call
+ * to them will be unlikely. This means a lot of manual unlikely()s
+ * are unnecessary now for any paths leading to the usual suspects
+ * like BUG(), printk(), panic() etc. [but let's keep them for now for
+ * older compilers]
+ *
+ * Early snapshots of gcc 4.3 don't support this and we can't detect this
+ * in the preprocessor, but we can live with this because they're unreleased.
+ * Maketime probing would be overkill here.
+ *
+ * gcc also has a __attribute__((__hot__)) to move hot functions into
+ * a special section, but I don't see any sense in this right now in
+ * the kernel context
+ */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+#endif /* GCC_VERSION >= 40300 */
+
+#if GCC_VERSION >= 40500
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+#endif /* GCC_VERSION >= 40500 */
+
+#if GCC_VERSION >= 40600
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+#endif
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if GCC_VERSION >= 40400
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 50000
+#define KASAN_ABI_VERSION 4
+#elif GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
+
+#endif /* gcc version >= 40000 specific checks */
#if !defined(__noclone)
#define __noclone /* not needed */
@@ -114,5 +248,3 @@
* code
*/
#define uninitialized_var(x) x = x
-
-#define __always_inline inline __attribute__((always_inline))
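For illustration (not part of the patch): the dead-store case described in the new barrier_data() comment. A minimal kernel-context sketch; wipe_secret() is a hypothetical helper (the in-tree pattern is the one used by memzero_explicit()-style code).

#include <linux/compiler.h>
#include <linux/string.h>

/*
 * Clear a buffer that is about to go out of scope. A bare memset() may be
 * optimized away as a dead store; barrier_data(buf) tells the compiler the
 * asm may read *buf, so the clearing stores must actually be emitted.
 */
static void wipe_secret(void *buf, size_t len)
{
	memset(buf, 0, len);
	barrier_data(buf);
}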
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
deleted file mode 100644
index 7d89feb..0000000
--- a/include/linux/compiler-gcc3.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-#if GCC_VERSION >= 30300
-# define __used __attribute__((__used__))
-#else
-# define __used __attribute__((__unused__))
-#endif
-
-#if GCC_VERSION >= 30400
-#define __must_check __attribute__((warn_unused_result))
-#endif
-
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-# error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
deleted file mode 100644
index d1a5582..0000000
--- a/include/linux/compiler-gcc4.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
-#endif
-
-/* GCC 4.1.[01] miscompiles __weak */
-#ifdef __KERNEL__
-# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
-# error Your version of gcc miscompiles the __weak directive
-# endif
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
-
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#endif
-
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40500
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-#endif /* GCC_VERSION >= 40500 */
-
-#if GCC_VERSION >= 40600
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-#endif
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#if GCC_VERSION >= 40400
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
-#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
deleted file mode 100644
index c8c5659..0000000
--- a/include/linux/compiler-gcc5.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#define __HAVE_BUILTIN_BSWAP16__
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index ba147a1..d4c7113 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -13,9 +13,14 @@
/* Intel ECC compiler doesn't support gcc specific asm stmts.
* It uses intrinsics to do the equivalent things.
*/
+#undef barrier
+#undef barrier_data
#undef RELOC_HIDE
#undef OPTIMIZER_HIDE_VAR
+#define barrier() __memory_barrier()
+#define barrier_data(ptr) barrier()
+
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
__ptr = (unsigned long) (ptr); \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 33063f8..e08a6ae 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,6 +17,7 @@
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
+# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
@@ -42,6 +43,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
+# define __pmem
#endif
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
@@ -54,7 +56,11 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#include <linux/compiler-gcc.h>
#endif
+#ifdef CC_USING_HOTPATCH
+#define notrace __attribute__((hotpatch(0,0)))
+#else
#define notrace __attribute__((no_instrument_function))
+#endif
/* Intel compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
@@ -165,6 +171,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define barrier() __memory_barrier()
#endif
+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
+
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
@@ -188,29 +198,16 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#include <uapi/linux/types.h>
-static __always_inline void data_access_exceeds_word_size(void)
-#ifdef __compiletime_warning
-__compiletime_warning("data access exceeds word size and won't be atomic")
-#endif
-;
-
-static __always_inline void data_access_exceeds_word_size(void)
-{
-}
-
-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-#ifdef CONFIG_64BIT
case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
-#endif
default:
barrier();
__builtin_memcpy((void *)res, (const void *)p, size);
- data_access_exceeds_word_size();
barrier();
}
}
@@ -221,13 +218,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-#ifdef CONFIG_64BIT
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
-#endif
default:
barrier();
__builtin_memcpy((void *)p, (const void *)res, size);
- data_access_exceeds_word_size();
barrier();
}
}
@@ -255,10 +249,26 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*/
#define READ_ONCE(x) \
- ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+ ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
#define WRITE_ONCE(x, val) \
- ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
+ ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+/**
+ * READ_ONCE_CTRL - Read a value heading a control dependency
+ * @x: The value to be read, heading the control dependency
+ *
+ * Control dependencies are tricky. See Documentation/memory-barriers.txt
+ * for important information on how to use them. Note that in many cases,
+ * use of smp_load_acquire() will be much simpler. Control dependencies
+ * should be avoided except on the hottest of hotpaths.
+ */
+#define READ_ONCE_CTRL(x) \
+({ \
+ typeof(x) __val = READ_ONCE(x); \
+ smp_read_barrier_depends(); /* Enforce control dependency. */ \
+ __val; \
+})
#endif /* __KERNEL__ */
@@ -385,7 +395,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
/* Compile time object size, -1 for unknown */
@@ -447,12 +457,38 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time. Its main intended
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
+ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
+ * on a union member will work as long as the size of the member matches the
+ * size of the union and the size is smaller than word size.
+ *
+ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
+ * between process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ *
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
+ */
+#define __ACCESS_ONCE(x) ({ \
+ __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
+ (volatile typeof(x) *)&(x); })
+#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
+
+/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU. That
+ * "something other" might be reference counting or simple immortality.
*/
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define lockless_dereference(p) \
+({ \
+ typeof(p) _________p1 = READ_ONCE(p); \
+ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+ (_________p1); \
+})
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
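For illustration (not part of the patch): a minimal sketch of the accessors changed above. The foo_* names are hypothetical; the point is only that READ_ONCE()/WRITE_ONCE() are the preferred single-access primitives (ACCESS_ONCE() only works on scalar types, per the updated comment) and that lockless_dereference() pairs a READ_ONCE() with smp_read_barrier_depends().

#include <linux/compiler.h>
#include <linux/types.h>

struct foo_cfg {
	int threshold;
};

static struct foo_cfg *foo_cfg;	/* pointer published by an updater */
static int foo_stop;		/* shared stop flag */

/* Writer side: a single, non-mergeable store of the flag. */
static void foo_request_stop(void)
{
	WRITE_ONCE(foo_stop, 1);
}

/* Reader side: the load cannot be hoisted out of a polling loop. */
static bool foo_should_stop(void)
{
	return READ_ONCE(foo_stop);
}

/* Dereference a published pointer whose lifetime is not RCU-managed. */
static int foo_current_threshold(void)
{
	struct foo_cfg *cfg = lockless_dereference(foo_cfg);

	return cfg ? cfg->threshold : 0;
}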
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 34025df..63a36e8 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -64,14 +64,14 @@ struct config_item {
struct dentry *ci_dentry;
};
-extern int config_item_set_name(struct config_item *, const char *, ...);
+extern __printf(2, 3)
+int config_item_set_name(struct config_item *, const char *, ...);
static inline char *config_item_name(struct config_item * item)
{
return item->ci_name;
}
-extern void config_item_init(struct config_item *);
extern void config_item_init_type_name(struct config_item *item,
const char *name,
struct config_item_type *type);
diff --git a/include/linux/console.h b/include/linux/console.h
index 7571a16..bd19434 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -115,6 +115,7 @@ static inline int con_debug_leave(void)
#define CON_BOOT (8)
#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
#define CON_BRL (32) /* Used for a braille device */
+#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
struct console {
char name[16];
@@ -123,7 +124,7 @@ struct console {
struct tty_driver *(*device)(struct console *, int *);
void (*unblank)(void);
int (*setup)(struct console *, char *);
- int (*early_setup)(void);
+ int (*match)(struct console *, char *name, int idx, char *options);
short flags;
short index;
int cflag;
@@ -141,7 +142,6 @@ extern int console_set_on_cmdline;
extern struct console *early_console;
extern int add_preferred_console(char *name, int idx, char *options);
-extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
extern struct console *console_drivers;
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index e859c98..e329ee2 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -104,6 +104,7 @@ struct vc_data {
unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
+ unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
struct uni_pagedir *vc_uni_pagedir;
struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 37b81bd..b96bd29 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,10 +10,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
extern void context_tracking_cpu_set(int cpu);
+extern void context_tracking_enter(enum ctx_state state);
+extern void context_tracking_exit(enum ctx_state state);
extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);
-extern void __context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next);
static inline void user_enter(void)
{
@@ -35,7 +35,8 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
- context_tracking_user_exit();
+ if (prev_ctx != CONTEXT_KERNEL)
+ context_tracking_exit(prev_ctx);
return prev_ctx;
}
@@ -43,24 +44,16 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
if (context_tracking_is_enabled()) {
- if (prev_ctx == IN_USER)
- context_tracking_user_enter();
+ if (prev_ctx != CONTEXT_KERNEL)
+ context_tracking_enter(prev_ctx);
}
}
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next)
-{
- if (context_tracking_is_enabled())
- __context_tracking_task_switch(prev, next);
-}
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next) { }
#endif /* !CONFIG_CONTEXT_TRACKING */
@@ -78,10 +71,16 @@ static inline void guest_enter(void)
vtime_guest_enter(current);
else
current->flags |= PF_VCPU;
+
+ if (context_tracking_is_enabled())
+ context_tracking_enter(CONTEXT_GUEST);
}
static inline void guest_exit(void)
{
+ if (context_tracking_is_enabled())
+ context_tracking_exit(CONTEXT_GUEST);
+
if (vtime_accounting_enabled())
vtime_guest_exit(current);
else
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 97a8122..678ecdf 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -12,9 +12,11 @@ struct context_tracking {
* may be further optimized using static keys.
*/
bool active;
+ int recursion;
enum ctx_state {
- IN_KERNEL = 0,
- IN_USER,
+ CONTEXT_KERNEL = 0,
+ CONTEXT_USER,
+ CONTEXT_GUEST,
} state;
};
@@ -34,11 +36,13 @@ static inline bool context_tracking_cpu_is_enabled(void)
static inline bool context_tracking_in_user(void)
{
- return __this_cpu_read(context_tracking.state) == IN_USER;
+ return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
+static inline bool context_tracking_is_enabled(void) { return false; }
+static inline bool context_tracking_cpu_is_enabled(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
#endif
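For illustration (not part of the patch): exception_enter()/exception_exit() now save and restore whichever CONTEXT_* state the CPU was tracked in (user or guest) instead of special-casing IN_USER. A minimal sketch of an arch-level exception path; foo_handle_trap() is hypothetical.

#include <linux/context_tracking.h>

static void foo_handle_trap(void)
{
	enum ctx_state prev_state = exception_enter();

	/*
	 * Kernel work runs in CONTEXT_KERNEL here; whatever state the CPU
	 * was in before (CONTEXT_USER or CONTEXT_GUEST) is restored below.
	 */

	exception_exit(prev_state);
}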
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 5d3c543..3486b90 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -179,15 +179,6 @@ struct coresight_device {
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
-#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \
- __mode, __get, __set, __fmt) \
-DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \
-static const struct coresight_ops_entry __name ## _entry = { \
- .name = __entry_name, \
- .mode = __mode, \
- .ops = &__name ## _ops \
-}
-
/**
* struct coresight_ops_sink - basic operations for a sink
* Operations available for sinks
@@ -236,13 +227,8 @@ coresight_register(struct coresight_desc *desc);
extern void coresight_unregister(struct coresight_device *csdev);
extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_is_bit_set(u32 val, int position, int value);
extern int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value);
-#ifdef CONFIG_OF
-extern struct coresight_platform_data *of_get_coresight_platform_data(
- struct device *dev, struct device_node *node);
-#endif
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -250,14 +236,16 @@ static inline void coresight_unregister(struct coresight_device *csdev) {}
static inline int
coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
-static inline int coresight_is_bit_set(u32 val, int position, int value)
- { return 0; }
static inline int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value) { return 1; }
+#endif
+
#ifdef CONFIG_OF
+extern struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node);
+#else
static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
#endif
-#endif
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4260e85..23c30bd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -40,9 +40,10 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
-extern struct device *cpu_device_create(struct device *parent, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...);
+extern __printf(4, 5)
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
@@ -73,6 +74,7 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
+ CPU_PRI_SMPBOOT = 9,
/* bring up workqueues before normal notifiers and down after */
CPU_PRI_WORKQUEUE_UP = 5,
CPU_PRI_WORKQUEUE_DOWN = -5,
@@ -95,6 +97,10 @@ enum {
* Called on the new cpu, just before
* enabling interrupts. Must not sleep,
* must not fail */
+#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
+ * idle loop. */
+#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
+ * perhaps due to preemption. */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
* operation in progress
@@ -161,6 +167,7 @@ static inline void __unregister_cpu_notifier(struct notifier_block *nb)
}
#endif
+void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
@@ -208,6 +215,10 @@ static inline void cpu_notifier_register_done(void)
{
}
+static inline void smpboot_thread_init(void)
+{
+}
+
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -271,4 +282,14 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
+DECLARE_PER_CPU(bool, cpu_dead_idle);
+
+int cpu_report_state(int cpu);
+int cpu_check_up_prepare(int cpu);
+void cpu_set_state_online(int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+bool cpu_wait_death(unsigned int cpu, int seconds);
+bool cpu_report_death(void);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
#endif /* _LINUX_CPU_H_ */
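For illustration (not part of the patch): a hedged sketch of how an architecture might use the new hotplug death handshake declared above (CONFIG_HOTPLUG_CPU only). The foo_* functions and the 5-second timeout are invented; the split of duties (survivor calls cpu_wait_death(), dying CPU calls cpu_report_death()) is inferred from the names and may differ in detail from the actual users.

#include <linux/cpu.h>
#include <linux/printk.h>

/* Runs on a surviving CPU once the victim has been told to go offline. */
static void foo_cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("foo: CPU%u did not report its death\n", cpu);
		return;
	}
	/* ... safe to cut power/clocks to @cpu here ... */
}

/* Runs on the dying CPU, late in its idle/offline path. */
static void foo_cpu_report_dead(void)
{
	(void)cpu_report_death();
	/* ... park the CPU in a low-power loop, never to return ... */
}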
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index bd95527..c156f50 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -28,6 +28,9 @@
#include <linux/thermal.h>
#include <linux/cpumask.h>
+typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
+ unsigned long voltage, u32 *power);
+
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -36,6 +39,10 @@
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus);
+struct thermal_cooling_device *
+cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
+ u32 capacitance, get_static_t plat_static_func);
+
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @np: a valid struct device_node to the cooling device device tree node.
@@ -45,6 +52,12 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus);
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus);
+
+struct thermal_cooling_device *
+of_cpufreq_power_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus,
+ u32 capacitance,
+ get_static_t plat_static_func);
#else
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
@@ -52,6 +65,15 @@ of_cpufreq_cooling_register(struct device_node *np,
{
return ERR_PTR(-ENOSYS);
}
+
+static inline struct thermal_cooling_device *
+of_cpufreq_power_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus,
+ u32 capacitance,
+ get_static_t plat_static_func)
+{
+ return NULL;
+}
#endif
/**
@@ -68,11 +90,28 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
+cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
+ u32 capacitance, get_static_t plat_static_func)
+{
+ return NULL;
+}
+
+static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
return ERR_PTR(-ENOSYS);
}
+
+static inline struct thermal_cooling_device *
+of_cpufreq_power_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus,
+ u32 capacitance,
+ get_static_t plat_static_func)
+{
+ return NULL;
+}
+
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
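For illustration (not part of the patch): registering the new power-aware cpufreq cooling device declared above. A minimal sketch; foo_get_static_power() and the capacitance value are hypothetical, and a platform with no static-power model can simply report zero.

#include <linux/cpu_cooling.h>
#include <linux/cpumask.h>
#include <linux/of.h>

/* Platform callback matching get_static_t: report static (leakage) power. */
static int foo_get_static_power(cpumask_t *cpumask, int interval,
				unsigned long voltage, u32 *power)
{
	*power = 0;	/* no static-power model on this hypothetical platform */
	return 0;
}

static struct thermal_cooling_device *
foo_register_cpu_cooling(struct device_node *np, const struct cpumask *cpus)
{
	/* capacitance scales the dynamic power estimate for these CPUs */
	return of_cpufreq_power_cooling_register(np, cpus, 100,
						 foo_get_static_power);
}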
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4d078ce..29ad97c 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -65,9 +65,9 @@ struct cpufreq_policy {
unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */
- unsigned int cpu; /* cpu nr of CPU managing this policy */
- unsigned int last_cpu; /* cpu nr of previous CPU that managed
- * this policy */
+ unsigned int cpu; /* cpu managing this policy, must be online */
+ unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */
+
struct clk *clk;
struct cpufreq_cpuinfo cpuinfo;/* see above */
@@ -82,6 +82,7 @@ struct cpufreq_policy {
struct cpufreq_governor *governor; /* see below */
void *governor_data;
bool governor_enabled; /* governor start/stop flag */
+ char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
@@ -113,6 +114,9 @@ struct cpufreq_policy {
wait_queue_head_t transition_wait;
struct task_struct *transition_task; /* Task which is doing the transition */
+ /* cpufreq-stats */
+ struct cpufreq_stats *stats;
+
/* For cpufreq driver's internal use */
void *driver_data;
};
@@ -367,9 +371,8 @@ static inline void cpufreq_resume(void) {}
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
-#define CPUFREQ_UPDATE_POLICY_CPU (4)
-#define CPUFREQ_CREATE_POLICY (5)
-#define CPUFREQ_REMOVE_POLICY (6)
+#define CPUFREQ_CREATE_POLICY (4)
+#define CPUFREQ_REMOVE_POLICY (5)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index ab70f3b..d075d34 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -50,6 +50,15 @@ struct cpuidle_state {
int index);
int (*enter_dead) (struct cpuidle_device *dev, int index);
+
+ /*
+ * CPUs execute ->enter_freeze with the local tick or entire timekeeping
+ * suspended, so the callback must not re-enable interrupts at any point
+ * (even temporarily) or attempt to change the state of clock event devices.
+ */
+ void (*enter_freeze) (struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
};
/* Idle State Flags */
@@ -68,7 +77,6 @@ struct cpuidle_device {
unsigned int cpu;
int last_residency;
- int state_count;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
@@ -117,6 +125,8 @@ struct cpuidle_driver {
#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
+extern bool cpuidle_not_available(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
extern int cpuidle_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
@@ -141,11 +151,13 @@ extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
-extern void cpuidle_use_deepest_state(bool enable);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
#else
static inline void disable_cpuidle(void) { }
+static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
@@ -174,11 +186,28 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable) {}
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
struct cpuidle_device *dev) {return NULL; }
#endif
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+#else
+static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return -ENODEV; }
+#endif
+
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
+
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
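For illustration (not part of the patch): a minimal sketch of a driver state that provides both ->enter and the new ->enter_freeze callback. Everything foo_* (and the latency numbers) is hypothetical, and states[0] (e.g. a WFI state) is omitted for brevity. Per the comment added above, the freeze variant runs with timekeeping suspended and must not re-enable interrupts or touch clock event devices.

#include <linux/cpuidle.h>

static void foo_enter_deep_hw(void)
{
	/* ... program the hardware to enter the deep idle state ... */
}

static int foo_enter_deep(struct cpuidle_device *dev,
			  struct cpuidle_driver *drv, int index)
{
	foo_enter_deep_hw();
	return index;
}

/* Suspend-to-idle path: no interrupt enabling, no clockevent changes. */
static void foo_enter_deep_freeze(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	foo_enter_deep_hw();
}

static struct cpuidle_driver foo_idle_driver = {
	.name		= "foo_idle",
	.states[1]	= {
		.name			= "DEEP",
		.exit_latency		= 300,
		.target_residency	= 1000,
		.enter			= foo_enter_deep,
		.enter_freeze		= foo_enter_deep_freeze,
	},
	.state_count	= 2,
};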
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b950e9d..59915ea 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -11,6 +11,7 @@
#include <linux/bitmap.h>
#include <linux/bug.h>
+/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
/**
@@ -22,6 +23,14 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_bits(maskp) ((maskp)->bits)
+/**
+ * cpumask_pr_args - printf args to output a cpumask
+ * @maskp: cpumask to be printed
+ *
+ * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
+ */
+#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
+
#if NR_CPUS == 1
#define nr_cpu_ids 1
#else
@@ -142,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
return 1;
}
-static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
- set_bit(0, cpumask_bits(dstp));
-
return 0;
}
@@ -199,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
+unsigned int cpumask_local_spread(unsigned int i, int node);
/**
* for_each_cpu - iterate over every cpu in a mask
@@ -281,11 +288,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpumask: the cpumask pointer
*
* Returns 1 if @cpu is set in @cpumask, else returns 0
- *
- * No static inline type checking - see Subtlety (1) above.
*/
-#define cpumask_test_cpu(cpu, cpumask) \
- test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
+static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+{
+ return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
+}
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
@@ -539,21 +546,6 @@ static inline void cpumask_copy(struct cpumask *dstp,
#define cpumask_of(cpu) (get_cpu_mask(cpu))
/**
- * cpumask_scnprintf - print a cpumask into a string as comma-separated hex
- * @buf: the buffer to sprintf into
- * @len: the length of the buffer
- * @srcp: the cpumask to print
- *
- * If len is zero, returns zero. Otherwise returns the length of the
- * (nul-terminated) @buf string.
- */
-static inline int cpumask_scnprintf(char *buf, int len,
- const struct cpumask *srcp)
-{
- return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
-}
-
-/**
* cpumask_parse_user - extract a cpumask from a user string
* @buf: the buffer to extract from
* @len: the length of the buffer
@@ -564,7 +556,7 @@ static inline int cpumask_scnprintf(char *buf, int len,
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
- return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
}
/**
@@ -579,23 +571,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
- nr_cpumask_bits);
-}
-
-/**
- * cpulist_scnprintf - print a cpumask into a string as comma-separated list
- * @buf: the buffer to sprintf into
- * @len: the length of the buffer
- * @srcp: the cpumask to print
- *
- * If len is zero, returns zero. Otherwise returns the length of the
- * (nul-terminated) @buf string.
- */
-static inline int cpulist_scnprintf(char *buf, int len,
- const struct cpumask *srcp)
-{
- return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
- nr_cpumask_bits);
+ nr_cpu_ids);
}
/**
@@ -610,7 +586,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
}
/**
@@ -622,7 +598,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
- return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
}
/**
@@ -632,9 +608,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*/
static inline size_t cpumask_size(void)
{
- /* FIXME: Once all cpumask assignments are eliminated, this
- * can be nr_cpumask_bits */
- return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
+ return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
/*
@@ -791,7 +765,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL \
{ \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#else /* NR_CPUS > BITS_PER_LONG */
@@ -799,7 +773,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
#define CPU_BITS_ALL \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#endif /* NR_CPUS > BITS_PER_LONG */
@@ -817,35 +791,21 @@ static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
- nr_cpumask_bits);
+ nr_cpu_ids);
}
-/*
- *
- * From here down, all obsolete. Use cpumask_ variants!
- *
- */
-#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
-
-#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
-
#if NR_CPUS <= BITS_PER_LONG
-
#define CPU_MASK_ALL \
(cpumask_t) { { \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
-
#else
-
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
- [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
-
-#endif
+#endif /* NR_CPUS > BITS_PER_LONG */
#define CPU_MASK_NONE \
(cpumask_t) { { \
@@ -857,143 +817,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
-#if NR_CPUS == 1
-#define first_cpu(src) ({ (void)(src); 0; })
-#define next_cpu(n, src) ({ (void)(src); 1; })
-#define any_online_cpu(mask) 0
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#else /* NR_CPUS > 1 */
-int __first_cpu(const cpumask_t *srcp);
-int __next_cpu(int n, const cpumask_t *srcp);
-
-#define first_cpu(src) __first_cpu(&(src))
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = next_cpu((cpu), (mask)), \
- (cpu) < NR_CPUS; )
-#endif /* SMP */
-
-#if NR_CPUS <= 64
-
-#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
-
-#else /* NR_CPUS > 64 */
-
-int __next_cpu_nr(int n, const cpumask_t *srcp);
-#define for_each_cpu_mask_nr(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = __next_cpu_nr((cpu), &(mask)), \
- (cpu) < nr_cpu_ids; )
-
-#endif /* NR_CPUS > 64 */
-
-#define cpus_addr(src) ((src).bits)
-
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
-{
- set_bit(cpu, dstp->bits);
-}
-
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
-{
- clear_bit(cpu, dstp->bits);
-}
-
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
-{
- bitmap_fill(dstp->bits, nbits);
-}
-
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
-{
- bitmap_zero(dstp->bits, nbits);
-}
-
-/* No static inline type checking - see Subtlety (1) above. */
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
-{
- return test_and_set_bit(cpu, addr->bits);
-}
-
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_andnot(dst, src1, src2) \
- __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_equal(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_equal(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_intersects(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_intersects(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
-static inline int __cpus_subset(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
-{
- return bitmap_subset(src1p->bits, src2p->bits, nbits);
-}
-
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
-{
- return bitmap_empty(srcp->bits, nbits);
-}
-
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
-{
- return bitmap_weight(srcp->bits, nbits);
-}
-
-#define cpus_shift_left(dst, src, n) \
- __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
- const cpumask_t *srcp, int n, int nbits)
-{
- bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
-
#endif /* __LINUX_CPUMASK_H */
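
With cpumask_scnprintf() and cpulist_scnprintf() removed, printing goes through the %*pb/%*pbl bitmap format specifiers fed by the new cpumask_pr_args() macro. A small sketch, assuming nothing beyond the macro defined above and the standard scnprintf()/pr_info() helpers (buffer size and messages are arbitrary examples):

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_print_mask(const struct cpumask *mask)
{
	char buf[128];

	/* Hex word dump, the old cpumask_scnprintf() output style. */
	scnprintf(buf, sizeof(buf), "%*pb", cpumask_pr_args(mask));
	pr_info("mask (hex):  %s\n", buf);

	/* Human-readable range list ("0-3,8"), the old cpulist_scnprintf() style. */
	scnprintf(buf, sizeof(buf), "%*pbl", cpumask_pr_args(mask));
	pr_info("mask (list): %s\n", buf);

	/* Or skip the intermediate buffer entirely. */
	pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));
}
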
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index 84920f3..a9953c7 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -3,7 +3,7 @@
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^5 + 1)
* Init 0
*
* This source code is licensed under the GNU General Public License,
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index cf53d07..d81961e 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -9,5 +9,6 @@
extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
size_t len);
extern __u16 crc_t10dif(unsigned char const *, size_t);
+extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t);
#endif
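
A short sketch of how the new incremental interface chains, assuming crc_t10dif_update() continues a CRC seeded with 0 (the same starting value the one-shot crc_t10dif() uses); the chunk split is arbitrary:

#include <linux/crc-t10dif.h>
#include <linux/types.h>

static __u16 example_crc_in_chunks(const unsigned char *buf, size_t len)
{
	size_t half = len / 2;
	__u16 crc;

	crc = crc_t10dif_update(0, buf, half);			/* first chunk */
	crc = crc_t10dif_update(crc, buf + half, len - half);	/* remainder */

	/* For the same data this should equal crc_t10dif(buf, len). */
	return crc;
}
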
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 2fb2ca2..8b6c083 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -62,9 +62,27 @@ do { \
groups_free(group_info); \
} while (0)
-extern struct group_info *groups_alloc(int);
extern struct group_info init_groups;
+#ifdef CONFIG_MULTIUSER
+extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
+
+extern int in_group_p(kgid_t);
+extern int in_egroup_p(kgid_t);
+#else
+static inline void groups_free(struct group_info *group_info)
+{
+}
+
+static inline int in_group_p(kgid_t grp)
+{
+ return 1;
+}
+static inline int in_egroup_p(kgid_t grp)
+{
+ return 1;
+}
+#endif
extern int set_current_groups(struct group_info *);
extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
@@ -74,9 +92,6 @@ extern bool may_setgroups(void);
#define GROUP_AT(gi, i) \
((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
-extern int in_group_p(kgid_t);
-extern int in_egroup_p(kgid_t);
-
/*
* The security context of a task
*
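
Callers of in_group_p()/in_egroup_p() do not change with this split; when CONFIG_MULTIUSER is disabled the stubs above simply always report membership (everything effectively runs as root). A hedged example of the usual calling pattern, with a made-up helper name and the standard VFS inode fields:

#include <linux/cred.h>
#include <linux/fs.h>

static bool example_may_write_group(const struct inode *inode)
{
	/* True if the caller belongs to the file's owning group and the
	 * group write bit is set; always true for the !CONFIG_MULTIUSER
	 * stub of in_group_p(). */
	return in_group_p(inode->i_gid) && (inode->i_mode & S_IWGRP);
}
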
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 4fad5f8..48b4930 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -1,7 +1,11 @@
#ifndef CEPH_CRUSH_CRUSH_H
#define CEPH_CRUSH_CRUSH_H
-#include <linux/types.h>
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include "crush_compat.h"
+#endif
/*
* CRUSH is a pseudo-random data distribution algorithm that
@@ -20,7 +24,11 @@
#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
+#define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */
+#define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */
+#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u)
+#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u)
#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */
#define CRUSH_ITEM_NONE 0x7fffffff /* no result */
@@ -96,16 +104,27 @@ struct crush_rule {
* uniform O(1) poor poor
* list O(n) optimal poor
* tree O(log n) good good
- * straw O(n) optimal optimal
+ * straw O(n) better better
+ * straw2 O(n) optimal optimal
*/
enum {
CRUSH_BUCKET_UNIFORM = 1,
CRUSH_BUCKET_LIST = 2,
CRUSH_BUCKET_TREE = 3,
- CRUSH_BUCKET_STRAW = 4
+ CRUSH_BUCKET_STRAW = 4,
+ CRUSH_BUCKET_STRAW2 = 5,
};
extern const char *crush_bucket_alg_name(int alg);
+/*
+ * although tree was a legacy algorithm, it has been buggy, so
+ * exclude it.
+ */
+#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \
+ (1 << CRUSH_BUCKET_UNIFORM) | \
+ (1 << CRUSH_BUCKET_LIST) | \
+ (1 << CRUSH_BUCKET_STRAW))
+
struct crush_bucket {
__s32 id; /* this'll be negative */
__u16 type; /* non-zero; type=0 is reserved for devices */
@@ -149,6 +168,11 @@ struct crush_bucket_straw {
__u32 *straws; /* 16-bit fixed point */
};
+struct crush_bucket_straw2 {
+ struct crush_bucket h;
+ __u32 *item_weights; /* 16-bit fixed point */
+};
+
/*
@@ -167,7 +191,7 @@ struct crush_map {
/* choose local attempts using a fallback permutation before
* re-descent */
__u32 choose_local_fallback_tries;
- /* choose attempts before giving up */
+ /* choose attempts before giving up */
__u32 choose_total_tries;
/* attempt chooseleaf inner descent once for firstn mode; on
* reject retry outer descent. Note that this does *not*
@@ -180,6 +204,25 @@ struct crush_map {
* that want to limit reshuffling, a value of 3 or 4 will make the
* mappings line up a bit better with previous mappings. */
__u8 chooseleaf_vary_r;
+
+#ifndef __KERNEL__
+ /*
+ * version 0 (original) of straw_calc has various flaws. version 1
+ * fixes a few of them.
+ */
+ __u8 straw_calc_version;
+
+ /*
+ * allowed bucket algs is a bitmask, here the bit positions
+ * are CRUSH_BUCKET_*. note that these are *bits* and
+ * CRUSH_BUCKET_* values are not, so we need to or together (1
+ * << CRUSH_BUCKET_WHATEVER). The 0th bit is not used to
+ * minimize confusion (bucket type values start at 1).
+ */
+ __u32 allowed_bucket_algs;
+
+ __u32 *choose_tries;
+#endif
};
@@ -189,6 +232,7 @@ extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
+extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy_rule(struct crush_rule *r);
extern void crush_destroy(struct crush_map *map);
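
The allowed_bucket_algs comment above stresses that the field is indexed by bit position, not by raw CRUSH_BUCKET_* value. A small userspace-side sketch (the field is only compiled for !__KERNEL__), with a hypothetical helper name:

#include "crush.h"

static int example_bucket_alg_allowed(const struct crush_map *map, int alg)
{
	if (alg <= 0 || alg >= 32)
		return 0;
	/* e.g. alg == CRUSH_BUCKET_STRAW2 tests bit 5, matching how
	 * CRUSH_LEGACY_ALLOWED_BUCKET_ALGS is built above. */
	return (map->allowed_bucket_algs & (1u << alg)) != 0;
}
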
diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h
index 91e8842..d1d9025 100644
--- a/include/linux/crush/hash.h
+++ b/include/linux/crush/hash.h
@@ -1,6 +1,12 @@
#ifndef CEPH_CRUSH_HASH_H
#define CEPH_CRUSH_HASH_H
+#ifdef __KERNEL__
+# include <linux/types.h>
+#else
+# include "crush_compat.h"
+#endif
+
#define CRUSH_HASH_RJENKINS1 0
#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
index eab3674..5dfd5b1 100644
--- a/include/linux/crush/mapper.h
+++ b/include/linux/crush/mapper.h
@@ -8,7 +8,7 @@
* LGPL2
*/
-#include <linux/crush/crush.h>
+#include "crush.h"
extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
extern int crush_do_rule(const struct crush_map *map,
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 9c8776d..81ef938 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -53,6 +53,7 @@
#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
@@ -95,6 +96,18 @@
#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
/*
+ * Mark a cipher as a service implementation only usable by another
+ * cipher and never by a normal user of the kernel crypto API
+ */
+#define CRYPTO_ALG_INTERNAL 0x00002000
+
+/*
+ * Temporary flag used to prevent legacy AEAD implementations from
+ * being used by user-space.
+ */
+#define CRYPTO_ALG_AEAD_NEW 0x00004000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -132,9 +145,9 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
-struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
+struct aead_request;
struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
@@ -169,32 +182,6 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/**
- * struct aead_request - AEAD request
- * @base: Common attributes for async crypto requests
- * @assoclen: Length in bytes of associated data for authentication
- * @cryptlen: Length of data to be encrypted or decrypted
- * @iv: Initialisation vector
- * @assoc: Associated data
- * @src: Source data
- * @dst: Destination data
- * @__ctx: Start of private context data
- */
-struct aead_request {
- struct crypto_async_request base;
-
- unsigned int assoclen;
- unsigned int cryptlen;
-
- u8 *iv;
-
- struct scatterlist *assoc;
- struct scatterlist *src;
- struct scatterlist *dst;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
struct blkcipher_desc {
struct crypto_blkcipher *tfm;
void *info;
@@ -288,7 +275,7 @@ struct ablkcipher_alg {
};
/**
- * struct aead_alg - AEAD cipher definition
+ * struct old_aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
* transformation. A transformation may support smaller tag sizes.
* As the authentication tag is a message digest to ensure the
@@ -313,7 +300,7 @@ struct ablkcipher_alg {
* All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
* mandatory and must be filled.
*/
-struct aead_alg {
+struct old_aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
@@ -420,40 +407,12 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
-/**
- * struct rng_alg - random number generator definition
- * @rng_make_random: The function defined by this variable obtains a random
- * number. The random number generator transform must generate
- * the random number out of the context provided with this
- * call.
- * @rng_reset: Reset of the random number generator by clearing the entire state.
- * With the invocation of this function call, the random number
- * generator shall completely reinitialize its state. If the random
- * number generator requires a seed for setting up a new state,
- * the seed must be provided by the consumer while invoking this
- * function. The required size of the seed is defined with
- * @seedsize .
- * @seedsize: The seed size required for a random number generator
- * initialization defined with this variable. Some random number
- * generators like the SP800-90A DRBG does not require a seed as the
- * seeding is implemented internally without the need of support by
- * the consumer. In this case, the seed size is set to zero.
- */
-struct rng_alg {
- int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-
- unsigned int seedsize;
-};
-
#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
-#define cra_rng cra_u.rng
/**
* struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -499,7 +458,7 @@ struct rng_alg {
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
- * trasnformation types. There are multiple options:
+ * transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
* &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
@@ -549,11 +508,10 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
- struct aead_alg aead;
+ struct old_aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
- struct rng_alg rng;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -561,7 +519,7 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
-};
+} CRYPTO_MINALIGN_ATTR;
/*
* Algorithm registration interface.
@@ -596,21 +554,6 @@ struct ablkcipher_tfm {
unsigned int reqsize;
};
-struct aead_tfm {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- struct crypto_aead *base;
-
- unsigned int ivsize;
- unsigned int authsize;
- unsigned int reqsize;
-};
-
struct blkcipher_tfm {
void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -649,19 +592,11 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
-struct rng_tfm {
- int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-};
-
#define crt_ablkcipher crt_u.ablkcipher
-#define crt_aead crt_u.aead
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
#define crt_compress crt_u.compress
-#define crt_rng crt_u.rng
struct crypto_tfm {
@@ -669,12 +604,10 @@ struct crypto_tfm {
union {
struct ablkcipher_tfm ablkcipher;
- struct aead_tfm aead;
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
struct compress_tfm compress;
- struct rng_tfm rng;
} crt_u;
void (*exit)(struct crypto_tfm *tfm);
@@ -688,10 +621,6 @@ struct crypto_ablkcipher {
struct crypto_tfm base;
};
-struct crypto_aead {
- struct crypto_tfm base;
-};
-
struct crypto_blkcipher {
struct crypto_tfm base;
};
@@ -708,10 +637,6 @@ struct crypto_hash {
struct crypto_tfm base;
};
-struct crypto_rng {
- struct crypto_tfm base;
-};
-
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -1147,7 +1072,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
* cipher operation completes.
*
* The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template:
+ * must comply with the following template
*
* void callback_function(struct crypto_async_request *req, int error)
*/
@@ -1174,7 +1099,7 @@ static inline void ablkcipher_request_set_callback(
*
* For encryption, the source is treated as the plaintext and the
* destination is the ciphertext. For a decryption operation, the use is
- * reversed: the source is the ciphertext and the destination is the plaintext.
+ * reversed - the source is the ciphertext and the destination is the plaintext.
*/
static inline void ablkcipher_request_set_crypt(
struct ablkcipher_request *req,
@@ -1188,397 +1113,6 @@ static inline void ablkcipher_request_set_crypt(
}
/**
- * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
- *
- * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
- * (listed as type "aead" in /proc/crypto)
- *
- * The most prominent examples for this type of encryption is GCM and CCM.
- * However, the kernel supports other types of AEAD ciphers which are defined
- * with the following cipher string:
- *
- * authenc(keyed message digest, block cipher)
- *
- * For example: authenc(hmac(sha256), cbc(aes))
- *
- * The example code provided for the asynchronous block cipher operation
- * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addtion, for the AEAD
- * operation, the aead_request_set_assoc function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
- * operation is that the caller should explicitly check for -EBADMSG of the
- * crypto_aead_decrypt. That error indicates an authentication error, i.e.
- * a breach in the integrity of the message. In essence, that -EBADMSG error
- * code is the key bonus an AEAD cipher has over "standard" block chaining
- * modes.
- */
-
-static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_aead *)tfm;
-}
-
-/**
- * crypto_alloc_aead() - allocate AEAD cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * AEAD cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for an AEAD. The returned struct
- * crypto_aead is the cipher handle that is required for any subsequent
- * API invocation for that AEAD.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
-
-static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_aead() - zeroize and free aead handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_aead(struct crypto_aead *tfm)
-{
- crypto_free_tfm(crypto_aead_tfm(tfm));
-}
-
-static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->crt_aead;
-}
-
-/**
- * crypto_aead_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the aead referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->ivsize;
-}
-
-/**
- * crypto_aead_authsize() - obtain maximum authentication data size
- * @tfm: cipher handle
- *
- * The maximum size of the authentication data for the AEAD cipher referenced
- * by the AEAD cipher handle is returned. The authentication data size may be
- * zero if the cipher implements a hard-coded maximum.
- *
- * The authentication data may also be known as "tag value".
- *
- * Return: authentication data size / tag size in bytes
- */
-static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->authsize;
-}
-
-/**
- * crypto_aead_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the AEAD referenced with the cipher handle is returned.
- * The caller may use that information to allocate appropriate memory for the
- * data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
-}
-
-static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
-}
-
-static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
-{
- return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
-}
-
-static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
-{
- crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
-}
-
-static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
-{
- crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
-}
-
-/**
- * crypto_aead_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the AEAD referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct aead_tfm *crt = crypto_aead_crt(tfm);
-
- return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_aead_setauthsize() - set authentication data size
- * @tfm: cipher handle
- * @authsize: size of the authentication data / tag in bytes
- *
- * Set the authentication data size / tag size. AEAD requires an authentication
- * tag (or MAC) in addition to the associated data.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
-
-static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
-{
- return __crypto_aead_cast(req->base.tfm);
-}
-
-/**
- * crypto_aead_encrypt() - encrypt plaintext
- * @req: reference to the aead_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Encrypt plaintext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The encryption operation creates the authentication data /
- * tag. That data is concatenated with the created ciphertext.
- * The ciphertext memory size is therefore the given number of
- * block cipher blocks + the size defined by the
- * crypto_aead_setauthsize invocation. The caller must ensure
- * that sufficient memory is available for the ciphertext and
- * the authentication tag.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_encrypt(struct aead_request *req)
-{
- return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
-}
-
-/**
- * crypto_aead_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
- * authentication data / tag. That authentication data / tag
- * must have the size defined by the crypto_aead_setauthsize
- * invocation.
- *
- *
- * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
- * cipher operation performs the authentication of the data during the
- * decryption operation. Therefore, the function returns this error if
- * the authentication of the ciphertext was unsuccessful (i.e. the
- * integrity of the ciphertext or the associated data was violated);
- * < 0 if an error occurred.
- */
-static inline int crypto_aead_decrypt(struct aead_request *req)
-{
- return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
-}
-
-/**
- * DOC: Asynchronous AEAD Request Handle
- *
- * The aead_request data structure contains all pointers to data required for
- * the AEAD cipher operation. This includes the cipher handle (which can be
- * used by multiple aead_request instances), pointer to plaintext and
- * ciphertext, asynchronous callback function, etc. It acts as a handle to the
- * aead_request_* API calls in a similar way as AEAD handle to the
- * crypto_aead_* API calls.
- */
-
-/**
- * crypto_aead_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
-{
- return crypto_aead_crt(tfm)->reqsize;
-}
-
-/**
- * aead_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing aead handle in the request
- * data structure with a different one.
- */
-static inline void aead_request_set_tfm(struct aead_request *req,
- struct crypto_aead *tfm)
-{
- req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
-}
-
-/**
- * aead_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the AEAD
- * encrypt and decrypt API calls. During the allocation, the provided aead
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
- gfp_t gfp)
-{
- struct aead_request *req;
-
- req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
-
- if (likely(req))
- aead_request_set_tfm(req, tfm);
-
- return req;
-}
-
-/**
- * aead_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void aead_request_free(struct aead_request *req)
-{
- kzfree(req);
-}
-
-/**
- * aead_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- * increase the wait queue beyond the initial maximum size;
- * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- * crypto API, but provided to the callback function for it to use. Here,
- * the caller can provide a reference to memory the callback function can
- * operate on. As the callback function is invoked asynchronously to the
- * related functionality, it may need to access data structures of the
- * related functionality which can be referenced using this pointer. The
- * callback function can access the memory via the "data" field in the
- * crypto_async_request data structure provided to the callback function.
- *
- * Setting the callback function that is triggered once the cipher operation
- * completes
- *
- * The callback function is registered with the aead_request handle and
- * must comply with the following template:
- *
- * void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void aead_request_set_callback(struct aead_request *req,
- u32 flags,
- crypto_completion_t compl,
- void *data)
-{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
-}
-
-/**
- * aead_request_set_crypt - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @cryptlen: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_aead_ivsize()
- *
- * Setting the source data and destination data scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed: the source is the ciphertext and the destination is the plaintext.
- *
- * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
- * the caller must concatenate the ciphertext followed by the
- * authentication tag and provide the entire data stream to the
- * decryption operation (i.e. the data length used for the
- * initialization of the scatterlist and the data length for the
- * decryption operation is identical). For encryption, however,
- * the authentication tag is created while encrypting the data.
- * The destination buffer must hold sufficient space for the
- * ciphertext and the authentication tag while the encryption
- * invocation must only point to the plaintext data size. The
- * following code snippet illustrates the memory usage
- * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
- * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
- * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
- */
-static inline void aead_request_set_crypt(struct aead_request *req,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int cryptlen, u8 *iv)
-{
- req->src = src;
- req->dst = dst;
- req->cryptlen = cryptlen;
- req->iv = iv;
-}
-
-/**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * For encryption, the memory is filled with the associated data. For
- * decryption, the memory must point to the associated data.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
- struct scatterlist *assoc,
- unsigned int assoclen)
-{
- req->assoc = assoc;
- req->assoclen = assoclen;
-}
-
-/**
* DOC: Synchronous Block Cipher API
*
* The synchronous block cipher API is used with the ciphers of type
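
With the AEAD definitions dropped from <linux/crypto.h>, AEAD users are expected to include the dedicated <crypto/aead.h> header instead (which grows correspondingly in this series). A hedged sketch of basic handle setup through that header; "gcm(aes)", the all-zero key and the 16-byte tag are placeholder values, and request handling is omitted:

#include <crypto/aead.h>
#include <linux/err.h>

static int example_aead_setup(void)
{
	struct crypto_aead *tfm;
	static const u8 key[16];	/* example AES-128 key (all zero) */
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);

	/* aead_request_alloc()/crypto_aead_encrypt() etc. would follow here. */
	crypto_free_aead(tfm);
	return err;
}
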
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index 2cd9f1c..f475428 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -1,6 +1,8 @@
#ifndef __CRYPTOHASH_H
#define __CRYPTOHASH_H
+#include <uapi/linux/types.h>
+
#define SHA_DIGEST_WORDS 5
#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
#define SHA_WORKSPACE_WORDS 16
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 5a81398..d67ae11 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -160,6 +160,7 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool);
+ struct inode *(*d_select_inode)(struct dentry *, unsigned);
} ____cacheline_aligned;
/*
@@ -215,13 +216,17 @@ struct dentry_operations {
#define DCACHE_LRU_LIST 0x00080000
#define DCACHE_ENTRY_TYPE 0x00700000
-#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry */
-#define DCACHE_DIRECTORY_TYPE 0x00100000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE 0x00200000 /* Lookupless directory (presumed automount) */
-#define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */
-#define DCACHE_FILE_TYPE 0x00400000 /* Other file type */
+#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
+#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */
+#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */
+#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */
+#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */
#define DCACHE_MAY_FREE 0x00800000
+#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
+#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
extern seqlock_t rename_lock;
@@ -319,13 +324,11 @@ static inline unsigned d_count(const struct dentry *dentry)
return dentry->d_lockref.count;
}
-/* validate "insecure" dentry pointer */
-extern int d_validate(struct dentry *, struct dentry *);
-
/*
* helper function for dentry_operations.d_dname() members
*/
-extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern __printf(4, 5)
+char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
@@ -404,26 +407,21 @@ static inline bool d_mountpoint(const struct dentry *dentry)
/*
* Directory cache entry type accessor functions.
*/
-static inline void __d_set_type(struct dentry *dentry, unsigned type)
-{
- dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
-}
-
-static inline void __d_clear_type(struct dentry *dentry)
+static inline unsigned __d_entry_type(const struct dentry *dentry)
{
- __d_set_type(dentry, DCACHE_MISS_TYPE);
+ unsigned type = READ_ONCE(dentry->d_flags);
+ smp_rmb();
+ return type & DCACHE_ENTRY_TYPE;
}
-static inline void d_set_type(struct dentry *dentry, unsigned type)
+static inline bool d_is_miss(const struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
- __d_set_type(dentry, type);
- spin_unlock(&dentry->d_lock);
+ return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
}
-static inline unsigned __d_entry_type(const struct dentry *dentry)
+static inline bool d_is_whiteout(const struct dentry *dentry)
{
- return dentry->d_flags & DCACHE_ENTRY_TYPE;
+ return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
}
static inline bool d_can_lookup(const struct dentry *dentry)
@@ -446,14 +444,25 @@ static inline bool d_is_symlink(const struct dentry *dentry)
return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
}
+static inline bool d_is_reg(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
+}
+
+static inline bool d_is_special(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
+}
+
static inline bool d_is_file(const struct dentry *dentry)
{
- return __d_entry_type(dentry) == DCACHE_FILE_TYPE;
+ return d_is_reg(dentry) || d_is_special(dentry);
}
static inline bool d_is_negative(const struct dentry *dentry)
{
- return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+ // TODO: check d_is_whiteout(dentry) also.
+ return d_is_miss(dentry);
}
static inline bool d_is_positive(const struct dentry *dentry)
@@ -461,10 +470,118 @@ static inline bool d_is_positive(const struct dentry *dentry)
return !d_is_negative(dentry);
}
+/**
+ * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents either an absent name or a name that
+ * doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent
+ * a true miss, a whiteout that isn't represented by a 0,0 chardev or a
+ * fallthrough marker in an opaque directory.
+ *
+ * Note! (1) This should be used *only* by a filesystem to examine its own
+ * dentries. It should not be used to look at some other filesystem's
+ * dentries. (2) It should also be used in combination with d_inode() to get
+ * the inode. (3) The dentry may have something attached to ->d_lower and the
+ * type field of the flags may be set to something other than miss or whiteout.
+ */
+static inline bool d_really_is_negative(const struct dentry *dentry)
+{
+ return dentry->d_inode == NULL;
+}
+
+/**
+ * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents a name that maps to an inode
+ * (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if
+ * that is represented on medium as a 0,0 chardev.
+ *
+ * Note! (1) This should be used *only* by a filesystem to examine its own
+ * dentries. It should not be used to look at some other filesystem's
+ * dentries. (2) It should also be used in combination with d_inode() to get
+ * the inode.
+ */
+static inline bool d_really_is_positive(const struct dentry *dentry)
+{
+ return dentry->d_inode != NULL;
+}
+
+static inline int simple_positive(struct dentry *dentry)
+{
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
+}
+
+extern void d_set_fallthru(struct dentry *dentry);
+
+static inline bool d_is_fallthru(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_FALLTHRU;
+}
+
+
extern int sysctl_vfs_cache_pressure;
static inline unsigned long vfs_pressure_ratio(unsigned long val)
{
return mult_frac(val, sysctl_vfs_cache_pressure, 100);
}
+
+/**
+ * d_inode - Get the actual inode of this dentry
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+ return dentry->d_inode;
+}
+
+/**
+ * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode_rcu(const struct dentry *dentry)
+{
+ return ACCESS_ONCE(dentry->d_inode);
+}
+
+/**
+ * d_backing_inode - Get upper or lower inode we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get at the inode that will be used
+ * if this dentry were to be opened as a file. The inode may be on the upper
+ * dentry or it may be on a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own inodes.
+ */
+static inline struct inode *d_backing_inode(const struct dentry *upper)
+{
+ struct inode *inode = upper->d_inode;
+
+ return inode;
+}
+
+/**
+ * d_backing_dentry - Get upper or lower dentry we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get the dentry of the inode that
+ * will be used if this dentry were opened as a file. It may be the upper
+ * dentry or it may be a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own dentries.
+ */
+static inline struct dentry *d_backing_dentry(struct dentry *upper)
+{
+ return upper;
+}
+
#endif /* __LINUX_DCACHE_H */
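
A short sketch of how an ordinary (non-union) filesystem is expected to use the new accessors: d_really_is_positive()/d_really_is_negative() and d_inode() for its own dentries, leaving d_backing_inode()/d_backing_dentry() to code that wants "whatever would actually be opened". The helper name is invented for the example:

#include <linux/dcache.h>
#include <linux/fs.h>

static umode_t example_own_dentry_mode(const struct dentry *dentry)
{
	/* Own-filesystem check: does this name map to one of our inodes? */
	if (d_really_is_negative(dentry))
		return 0;

	/* Own-filesystem access: this dentry's own inode, ignoring any
	 * layering stacked on top of it. */
	return d_inode(dentry)->i_mode;
}
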
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 439ff69..2210254 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -43,6 +43,7 @@ enum dccp_state {
DCCP_CLOSING = TCP_CLOSING,
DCCP_TIME_WAIT = TCP_TIME_WAIT,
DCCP_CLOSED = TCP_CLOSE,
+ DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
DCCP_PARTOPEN = TCP_MAX_STATES,
DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
DCCP_MAX_STATES
@@ -57,6 +58,7 @@ enum {
DCCPF_CLOSING = TCPF_CLOSING,
DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
DCCPF_CLOSED = TCPF_CLOSE,
+ DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
};
@@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk)
return NULL;
}
-extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+extern void dccp_syn_ack_timeout(const struct request_sock *req);
#endif /* _LINUX_DCCP_H */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index da4c498..420311b 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,17 +45,26 @@ extern struct dentry *arch_debugfs_dir;
/* declared over in file.c */
extern const struct file_operations debugfs_file_operations;
-extern const struct inode_operations debugfs_link_operations;
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
+struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size);
+
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
const char *dest);
+struct dentry *debugfs_create_automount(const char *name,
+ struct dentry *parent,
+ struct vfsmount *(*f)(void *),
+ void *data);
+
void debugfs_remove(struct dentry *dentry);
void debugfs_remove_recursive(struct dentry *dentry);
@@ -124,6 +133,14 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_dir(const char *name,
struct dentry *parent)
{
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
new file mode 100644
index 0000000..0a83a1e
--- /dev/null
+++ b/include/linux/devfreq-event.h
@@ -0,0 +1,196 @@
+/*
+ * devfreq-event: a framework to provide raw data and events of devfreq devices
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DEVFREQ_EVENT_H__
+#define __LINUX_DEVFREQ_EVENT_H__
+
+#include <linux/device.h>
+
+/**
+ * struct devfreq_event_dev - the devfreq-event device
+ *
+ * @node : the node in the list of registered devfreq-event devices.
+ * @dev : the device registered by devfreq-event class. dev.parent is
+ * the device using devfreq-event.
+ * @lock : a mutex to protect accessing devfreq-event.
+ * @enable_count: the number of times the enable function has been called.
+ * @desc : the description for devfreq-event device.
+ *
+ * This structure contains devfreq-event device information.
+ */
+struct devfreq_event_dev {
+ struct list_head node;
+
+ struct device dev;
+ struct mutex lock;
+ u32 enable_count;
+
+ const struct devfreq_event_desc *desc;
+};
+
+/**
+ * struct devfreq_event_data - the devfreq-event data
+ *
+ * @load_count : load count of devfreq-event device for the given period.
+ * @total_count : total count of devfreq-event device for the given period.
+ * Each count may represent a clock cycle, a time unit
+ * (ns/us/...), or anything the device driver wants.
+ * Generally, utilization is load_count / total_count.
+ *
+ * This structure contains the data of devfreq-event device for polling period.
+ */
+struct devfreq_event_data {
+ unsigned long load_count;
+ unsigned long total_count;
+};
+
+/**
+ * struct devfreq_event_ops - the operations of devfreq-event device
+ *
+ * @enable : Enable the devfreq-event device.
+ * @disable : Disable the devfreq-event device.
+ * @reset : Reset all setting of the devfreq-event device.
+ * @set_event : Set the specific event type for the devfreq-event device.
+ * @get_event : Get the result of the devfreq-event device for the
+ * specific event type.
+ *
+ * This structure contains devfreq-event device operations which can be
+ * implemented by devfreq-event device drivers.
+ */
+struct devfreq_event_ops {
+ /* Optional functions */
+ int (*enable)(struct devfreq_event_dev *edev);
+ int (*disable)(struct devfreq_event_dev *edev);
+ int (*reset)(struct devfreq_event_dev *edev);
+
+ /* Mandatory functions */
+ int (*set_event)(struct devfreq_event_dev *edev);
+ int (*get_event)(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata);
+};
+
+/**
+ * struct devfreq_event_desc - the descriptor of devfreq-event device
+ *
+ * @name : the name of devfreq-event device.
+ * @driver_data : the private data for devfreq-event driver.
+ * @ops : the operation to control devfreq-event device.
+ *
+ * Each devfreq-event device is described with this structure.
+ * This structure contains the various data for devfreq-event device.
+ */
+struct devfreq_event_desc {
+ const char *name;
+ void *driver_data;
+
+ const struct devfreq_event_ops *ops;
+};
+
+#if defined(CONFIG_PM_DEVFREQ_EVENT)
+extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev);
+extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev);
+extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev);
+extern int devfreq_event_set_event(struct devfreq_event_dev *edev);
+extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata);
+extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
+extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+ struct device *dev, int index);
+extern int devfreq_event_get_edev_count(struct device *dev);
+extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc);
+extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
+extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc);
+extern void devm_devfreq_event_remove_edev(struct device *dev,
+ struct devfreq_event_dev *edev);
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+ return edev->desc->driver_data;
+}
+#else
+static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
+{
+ return false;
+}
+
+static inline int devfreq_event_set_event(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+ struct device *dev, int index)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_event_get_edev_count(struct device *dev)
+{
+ return -EINVAL;
+}
+
+static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline struct devfreq_event_dev *devm_devfreq_event_add_edev(
+ struct device *dev,
+ struct devfreq_event_desc *desc)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void devm_devfreq_event_remove_edev(struct device *dev,
+ struct devfreq_event_dev *edev)
+{
+}
+
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+ return NULL;
+}
+#endif /* CONFIG_PM_DEVFREQ_EVENT */
+
+#endif /* __LINUX_DEVFREQ_EVENT_H__ */
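
A hedged consumer-side sketch of the framework above, following the struct devfreq_event_data comment that utilization is load_count / total_count. The phandle index, the percentage scaling and the helper name are assumptions; error handling is trimmed:

#include <linux/devfreq-event.h>
#include <linux/err.h>

static int example_read_utilization(struct device *dev, unsigned long *util_pct)
{
	struct devfreq_event_dev *edev;
	struct devfreq_event_data edata;
	int ret;

	edev = devfreq_event_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devfreq_event_enable_edev(edev);
	if (ret < 0)
		return ret;

	ret = devfreq_event_set_event(edev);		/* arm the counters */
	if (!ret)
		ret = devfreq_event_get_event(edev, &edata);	/* read them back */

	if (!ret && edata.total_count)
		*util_pct = edata.load_count * 100 / edata.total_count;

	devfreq_event_disable_edev(edev);
	return ret;
}
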
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ca6d2ac..51cc1de 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -48,6 +48,11 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
union map_info *map_context);
+typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
+ struct request *rq,
+ union map_info *map_context,
+ struct request **clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone);
/*
* Returns:
@@ -143,6 +148,8 @@ struct target_type {
dm_dtr_fn dtr;
dm_map_fn map;
dm_map_request_fn map_rq;
+ dm_clone_and_map_request_fn clone_and_map_rq;
+ dm_release_clone_request_fn release_clone_rq;
dm_endio_fn end_io;
dm_request_endio_fn rq_end_io;
dm_presuspend_fn presuspend;
@@ -368,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md);
*/
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
+int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);
/*
@@ -597,12 +605,4 @@ static inline unsigned long to_bytes(sector_t n)
return (n << SECTOR_SHIFT);
}
-/*-----------------------------------------------------------------
- * Helper for block layer and dm core operations
- *---------------------------------------------------------------*/
-void dm_dispatch_request(struct request *rq);
-void dm_requeue_unmapped_request(struct request *rq);
-void dm_kill_unmapped_request(struct request *rq, int error);
-int dm_underlying_device_busy(struct request_queue *q);
-
#endif /* _LINUX_DEVICE_MAPPER_H */
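As an illustration of the new request-based hooks, a target could wire them up roughly as below. This is a sketch only: example_build_clone()/example_free_clone() are hypothetical helpers, ctr/dtr and the end_io callbacks are omitted, and the DM_MAPIO_* return values follow the existing map_rq convention.

    static int example_clone_and_map(struct dm_target *ti, struct request *rq,
                                     union map_info *map_context,
                                     struct request **clone)
    {
        /* Build a clone of rq aimed at the underlying device. */
        *clone = example_build_clone(ti, rq);
        if (!*clone)
            return DM_MAPIO_REQUEUE;    /* ask the core to retry later */

        return DM_MAPIO_REMAPPED;       /* clone is ready to be dispatched */
    }

    static void example_release_clone(struct request *clone)
    {
        example_free_clone(clone);
    }

    static struct target_type example_target = {
        .name              = "example-rq",
        .version           = {1, 0, 0},
        .module            = THIS_MODULE,
        .clone_and_map_rq  = example_clone_and_map,
        .release_clone_rq  = example_release_clone,
    };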
diff --git a/include/linux/device.h b/include/linux/device.h
index fb50673..a2b4ea7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -38,6 +38,7 @@ struct class;
struct subsys_private;
struct bus_type;
struct device_node;
+struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
@@ -195,12 +196,41 @@ extern struct kset *bus_get_kset(struct bus_type *bus);
extern struct klist *bus_get_device_klist(struct bus_type *bus);
/**
+ * enum probe_type - device driver probe type to try
+ * Device drivers may opt in for special handling of their
+ * respective probe routines. This tells the core what to
+ * expect and prefer.
+ *
+ * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
+ * whether probed synchronously or asynchronously.
+ * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
+ * probing order is not essential for booting the system may
+ * opt into executing their probes asynchronously.
+ * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
+ * their probe routines to run synchronously with driver and
+ * device registration (with the exception of -EPROBE_DEFER
+ * handling - re-probing always ends up being done asynchronously).
+ *
+ * Note that the end goal is to switch the kernel to use asynchronous
+ * probing by default, so annotating drivers with
+ * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
+ * to speed up boot process while we are validating the rest of the
+ * drivers.
+ */
+enum probe_type {
+ PROBE_DEFAULT_STRATEGY,
+ PROBE_PREFER_ASYNCHRONOUS,
+ PROBE_FORCE_SYNCHRONOUS,
+};
+
+/**
* struct device_driver - The basic device driver structure
* @name: Name of the device driver.
* @bus: The bus which the device of this driver belongs to.
* @owner: The module owner.
* @mod_name: Used for built-in modules.
* @suppress_bind_attrs: Disables bind/unbind via sysfs.
+ * @probe_type: Type of the probe (synchronous or asynchronous) to use.
* @of_match_table: The open firmware table.
* @acpi_match_table: The ACPI match table.
* @probe: Called to query the existence of a specific device,
@@ -234,6 +264,7 @@ struct device_driver {
const char *mod_name; /* used for built-in modules */
bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
+ enum probe_type probe_type;
const struct of_device_id *of_match_table;
const struct acpi_device_id *acpi_match_table;
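A driver opts in through the embedded struct device_driver. For instance, a platform driver that works equally well when probed asynchronously could be annotated as in the sketch below; the "example" names are illustrative and not part of this patch.

    static struct platform_driver example_driver = {
        .probe  = example_probe,
        .remove = example_remove,
        .driver = {
            .name       = "example",
            .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
    };
    module_platform_driver(example_driver);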
@@ -606,8 +637,9 @@ extern int devres_release_group(struct device *dev, void *id);
/* managed devm_k.alloc/kfree for device drivers */
extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
-extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap);
+extern __printf(3, 0)
+char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+ va_list ap);
extern __printf(3, 4)
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
@@ -650,14 +682,6 @@ struct device_dma_parameters {
unsigned long segment_boundary_mask;
};
-struct acpi_device;
-
-struct acpi_dev_node {
-#ifdef CONFIG_ACPI
- struct acpi_device *companion;
-#endif
-};
-
/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
@@ -703,7 +727,7 @@ struct acpi_dev_node {
* @cma_area: Contiguous memory area for dma allocations
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
- * @acpi_node: Associated ACPI device node.
+ * @fwnode: Associated device node supplied by platform firmware.
* @devt: For creating the sysfs "dev".
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
@@ -779,7 +803,7 @@ struct device {
struct dev_archdata archdata;
struct device_node *of_node; /* associated device tree node */
- struct acpi_dev_node acpi_node; /* associated ACPI device node */
+ struct fwnode_handle *fwnode; /* firmware device node */
dev_t devt; /* dev_t, creates the sysfs "dev" */
u32 id; /* device instance */
@@ -916,6 +940,13 @@ static inline void device_lock_assert(struct device *dev)
lockdep_assert_held(&dev->mutex);
}
+static inline struct device_node *dev_of_node(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_OF))
+ return NULL;
+ return dev->of_node;
+}
+
void driver_init(void);
/*
@@ -947,6 +978,9 @@ extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
+extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+
/*
* Root device objects for grouping under /sys/devices
*/
@@ -972,17 +1006,16 @@ extern int __must_check device_bind_driver(struct device *dev);
extern void device_release_driver(struct device *dev);
extern int __must_check device_attach(struct device *dev);
extern int __must_check driver_attach(struct device_driver *drv);
+extern void device_initial_probe(struct device *dev);
extern int __must_check device_reprobe(struct device *dev);
/*
* Easy functions for dynamically creating devices on the fly
*/
-extern struct device *device_create_vargs(struct class *cls,
- struct device *parent,
- dev_t devt,
- void *drvdata,
- const char *fmt,
- va_list vargs);
+extern __printf(5, 0)
+struct device *device_create_vargs(struct class *cls, struct device *parent,
+ dev_t devt, void *drvdata,
+ const char *fmt, va_list vargs);
extern __printf(5, 6)
struct device *device_create(struct class *cls, struct device *parent,
dev_t devt, void *drvdata,
@@ -1038,22 +1071,22 @@ extern __printf(3, 4)
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
extern __printf(3, 4)
-int dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...);
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
extern __printf(2, 3)
-int dev_emerg(const struct device *dev, const char *fmt, ...);
+void dev_emerg(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_alert(const struct device *dev, const char *fmt, ...);
+void dev_alert(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_crit(const struct device *dev, const char *fmt, ...);
+void dev_crit(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_err(const struct device *dev, const char *fmt, ...);
+void dev_err(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_warn(const struct device *dev, const char *fmt, ...);
+void dev_warn(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int dev_notice(const struct device *dev, const char *fmt, ...);
+void dev_notice(const struct device *dev, const char *fmt, ...);
extern __printf(2, 3)
-int _dev_info(const struct device *dev, const char *fmt, ...);
+void _dev_info(const struct device *dev, const char *fmt, ...);
#else
@@ -1065,35 +1098,35 @@ static inline __printf(3, 4)
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{ return 0; }
-static inline int __dev_printk(const char *level, const struct device *dev,
- struct va_format *vaf)
-{ return 0; }
+static inline void __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{}
static inline __printf(3, 4)
-int dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...)
-{ return 0; }
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_emerg(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_emerg(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_crit(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_crit(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_alert(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_alert(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_err(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_err(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_warn(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_warn(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int dev_notice(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void dev_notice(const struct device *dev, const char *fmt, ...)
+{}
static inline __printf(2, 3)
-int _dev_info(const struct device *dev, const char *fmt, ...)
-{ return 0; }
+void _dev_info(const struct device *dev, const char *fmt, ...)
+{}
#endif
@@ -1119,7 +1152,6 @@ do { \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, format, ##arg); \
- 0; \
})
#endif
@@ -1156,7 +1188,7 @@ do { \
#define dev_info_once(dev, fmt, ...) \
dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
#define dev_dbg_once(dev, fmt, ...) \
- dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
do { \
@@ -1215,7 +1247,6 @@ do { \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, format, ##arg); \
- 0; \
})
#endif
@@ -1268,4 +1299,26 @@ static void __exit __driver##_exit(void) \
} \
module_exit(__driver##_exit);
+/**
+ * builtin_driver() - Helper macro for drivers that don't do anything
+ * special in init and have no exit. This eliminates some boilerplate.
+ * Each driver may only use this macro once, and calling it replaces
+ * device_initcall (or in some cases, the legacy __initcall). This is
+ * meant to be a direct parallel of module_driver() above but without
+ * the __exit stuff that is not used for builtin cases.
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @...: Additional arguments to be passed to __register
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define builtin_driver(__driver, __register, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+device_initcall(__driver##_init);
+
#endif /* _DEVICE_H_ */
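Per the comment above, builtin_driver() is meant as a building block for bus-specific registration macros rather than for direct use. A platform-bus wrapper could be defined along these lines (sketch only; the wrapper name mirrors the later builtin_platform_driver() helper and is not introduced by this patch):

    #define builtin_platform_driver(__platform_driver) \
        builtin_driver(__platform_driver, platform_driver_register)

    /* A built-in-only driver then registers itself with: */
    builtin_platform_driver(example_driver);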
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 694e1fe..f98bd70 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -115,6 +115,8 @@ struct dma_buf_ops {
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
* @exp_name: name of the exporter; useful for debugging.
+ * @owner: pointer to exporter module; used for refcounting when exporter is a
+ * kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
@@ -129,6 +131,7 @@ struct dma_buf {
unsigned vmapping_counter;
void *vmap_ptr;
const char *exp_name;
+ struct module *owner;
struct list_head list_node;
void *priv;
struct reservation_object *resv;
@@ -163,6 +166,36 @@ struct dma_buf_attachment {
};
/**
+ * struct dma_buf_export_info - holds information needed to export a dma_buf
+ * @exp_name: name of the exporter - useful for debugging.
+ * @owner: pointer to exporter module - used for refcounting kernel module
+ * @ops: Attach allocator-defined dma buf ops to the new buffer
+ * @size: Size of the buffer
+ * @flags: mode flags for the file
+ * @resv: reservation-object, NULL to allocate default one
+ * @priv: Attach private data of allocator to this buffer
+ *
+ * This structure holds the information required to export the buffer. Used
+ * with dma_buf_export() only.
+ */
+struct dma_buf_export_info {
+ const char *exp_name;
+ struct module *owner;
+ const struct dma_buf_ops *ops;
+ size_t size;
+ int flags;
+ struct reservation_object *resv;
+ void *priv;
+};
+
+/**
+ * helper macro for exporters; zeroes the structure and fills in the
+ * most common values
+ */
+#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
+ struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
+ .owner = THIS_MODULE }
+
+/**
* get_dma_buf - convenience wrapper for get_file.
* @dmabuf: [in] pointer to dma_buf
*
@@ -181,12 +214,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
-struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
- size_t size, int flags, const char *,
- struct reservation_object *);
-
-#define dma_buf_export(priv, ops, size, flags, resv) \
- dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
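For an exporter, the conversion to the single-argument dma_buf_export() follows the pattern sketched below; example_dmabuf_ops and example_buf stand in for the exporter's own ops table and bookkeeping.

    struct dma_buf *dmabuf;
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);    /* pre-fills .exp_name and .owner */

    exp_info.ops   = &example_dmabuf_ops;
    exp_info.size  = example_buf->size;
    exp_info.flags = O_RDWR;
    exp_info.priv  = example_buf;

    dmabuf = dma_buf_export(&exp_info);
    if (IS_ERR(dmabuf))
        return PTR_ERR(dmabuf);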
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index c3007cb..ac07ff0 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -34,6 +34,10 @@ struct dma_map_ops {
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
+ /*
+ * map_sg returns 0 on error and a value > 0 on success.
+ * It should never return a value < 0.
+ */
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);
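An implementation honouring the documented convention returns the number of mapped entries and signals failure with 0, never a negative errno. A minimal sketch, where example_iommu_map() is a hypothetical helper that returns 0 on failure:

    static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                              int nents, enum dma_data_direction dir,
                              struct dma_attrs *attrs)
    {
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
            sg->dma_address = example_iommu_map(dev, sg_phys(sg),
                                                sg->length, dir);
            if (!sg->dma_address)
                return 0;           /* error: 0, not a negative value */
            sg_dma_len(sg) = sg->length;
        }
        return nents;
    }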
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
new file mode 100644
index 0000000..234393a
--- /dev/null
+++ b/include/linux/dma/hsu.h
@@ -0,0 +1,48 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DMA_HSU_H
+#define _DMA_HSU_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include <linux/platform_data/dma-hsu.h>
+
+struct hsu_dma;
+
+/**
+ * struct hsu_dma_chip - representation of HSU DMA hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @length: I/O space length
+ * @offset: offset of the I/O space where registers are located
+ * @hsu: struct hsu_dma that is filled by ->probe()
+ * @pdata: platform data for the DMA controller if provided
+ */
+struct hsu_dma_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ unsigned int length;
+ unsigned int offset;
+ struct hsu_dma *hsu;
+ struct hsu_dma_platform_data *pdata;
+};
+
+/* Export to the internal users */
+irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);
+
+/* Export to the platform drivers */
+int hsu_dma_probe(struct hsu_dma_chip *chip);
+int hsu_dma_remove(struct hsu_dma_chip *chip);
+
+#endif /* _DMA_HSU_H */
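The expected flow for a bus glue driver is to fill in a struct hsu_dma_chip, call hsu_dma_probe(), and forward interrupts to hsu_dma_irq(). A sketch follows, with resource discovery omitted and the channel number hard-coded purely for illustration; in practice the handler would dispatch per channel based on the controller's status register.

    static irqreturn_t example_hsu_irq(int irq, void *dev_id)
    {
        struct hsu_dma_chip *chip = dev_id;

        /* nr selects the DMA channel whose status should be checked */
        return hsu_dma_irq(chip, 0);
    }

    static int example_hsu_init(struct device *dev, void __iomem *regs, int irq)
    {
        struct hsu_dma_chip *chip;
        int ret;

        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
            return -ENOMEM;

        chip->dev  = dev;
        chip->regs = regs;
        chip->irq  = irq;

        ret = hsu_dma_probe(chip);
        if (ret)
            return ret;

        return devm_request_irq(dev, irq, example_hsu_irq, IRQF_SHARED,
                                "hsu_dma", chip);
    }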
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
new file mode 100644
index 0000000..3edc992
--- /dev/null
+++ b/include/linux/dma/pxa-dma.h
@@ -0,0 +1,27 @@
+#ifndef _PXA_DMA_H_
+#define _PXA_DMA_H_
+
+enum pxad_chan_prio {
+ PXAD_PRIO_HIGHEST = 0,
+ PXAD_PRIO_NORMAL,
+ PXAD_PRIO_LOW,
+ PXAD_PRIO_LOWEST,
+};
+
+struct pxad_param {
+ unsigned int drcmr;
+ enum pxad_chan_prio prio;
+};
+
+struct dma_chan;
+
+#ifdef CONFIG_PXA_DMA
+bool pxad_filter_fn(struct dma_chan *chan, void *param);
+#else
+static inline bool pxad_filter_fn(struct dma_chan *chan, void *param)
+{
+ return false;
+}
+#endif
+
+#endif /* _PXA_DMA_H_ */
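A client requests a channel through the ordinary dmaengine filter interface, passing a struct pxad_param to pxad_filter_fn. Sketch only; the DRCMR value 24 is purely illustrative.

    #include <linux/dmaengine.h>
    #include <linux/dma/pxa-dma.h>

    static struct dma_chan *example_request_pxad_chan(void)
    {
        struct pxad_param param = {
            .drcmr = 24,
            .prio  = PXAD_PRIO_LOWEST,
        };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, pxad_filter_fn, &param);
    }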
diff --git a/include/linux/amba/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 34b98f2..34b98f2 100644
--- a/include/linux/amba/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 40cd75e..e2f5eb4 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
@@ -69,6 +65,7 @@ enum dma_transaction_type {
DMA_PQ,
DMA_XOR_VAL,
DMA_PQ_VAL,
+ DMA_MEMSET,
DMA_INTERRUPT,
DMA_SG,
DMA_PRIVATE,
@@ -126,10 +123,18 @@ enum dma_transfer_direction {
* chunk and before first src/dst address for next chunk.
* Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
* Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ * chunk and before the first dst address for next chunk.
+ * Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ * chunk and before the first src address for next chunk.
+ * Ignored if src_inc is true and src_sgl is false.
*/
struct data_chunk {
size_t size;
size_t icg;
+ size_t dst_icg;
+ size_t src_icg;
};
/**
@@ -189,25 +194,6 @@ enum dma_ctrl_flags {
};
/**
- * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
- * on a running channel.
- * @DMA_TERMINATE_ALL: terminate all ongoing transfers
- * @DMA_PAUSE: pause ongoing transfers
- * @DMA_RESUME: resume paused transfer
- * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
- * that need to runtime reconfigure the slave channels (as opposed to passing
- * configuration data in statically from the platform). An additional
- * argument of struct dma_slave_config must be passed in with this
- * command.
- */
-enum dma_ctrl_cmd {
- DMA_TERMINATE_ALL,
- DMA_PAUSE,
- DMA_RESUME,
- DMA_SLAVE_CONFIG,
-};
-
-/**
* enum sum_check_bits - bit position of pq_check_flags
*/
enum sum_check_bits {
@@ -245,6 +231,16 @@ struct dma_chan_percpu {
};
/**
+ * struct dma_router - DMA router structure
+ * @dev: pointer to the DMA router device
+ * @route_free: function to be called when the route can be disconnected
+ */
+struct dma_router {
+ struct device *dev;
+ void (*route_free)(struct device *dev, void *route_data);
+};
+
+/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
* @cookie: last cookie value returned to client
@@ -255,6 +251,8 @@ struct dma_chan_percpu {
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
* @table_count: number of appearances in the mem-to-mem allocation table
+ * @router: pointer to the DMA router structure
+ * @route_data: channel specific data for the router
* @private: private data for certain client-channel associations
*/
struct dma_chan {
@@ -270,6 +268,11 @@ struct dma_chan {
struct dma_chan_percpu __percpu *local;
int client_count;
int table_count;
+
+ /* DMA router */
+ struct dma_router *router;
+ void *route_data;
+
void *private;
};
@@ -298,6 +301,9 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+ DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+ DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+ DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
};
/**
@@ -336,9 +342,8 @@ enum dma_slave_buswidth {
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
* The DMA device/engine has to provide support for an additional
- * command in the channel config interface, DMA_SLAVE_CONFIG
- * and this struct will then be passed in as an argument to the
- * DMA engine device_control() function.
+ * callback in the dma_device structure, device_config, and this struct
+ * will then be passed in as an argument to that callback.
*
* The rationale for adding configuration information to this struct is as
* follows: if it is likely that more than one DMA slave controllers in
@@ -387,7 +392,7 @@ enum dma_residue_granularity {
/* struct dma_slave_caps - expose capabilities of a slave channel only
*
* @src_addr_widths: bit mask of src addr widths the channel supports
- * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
* @directions: bit mask of slave direction the channel supported
* since the enum dma_transfer_direction is not defined as bits for each
* type of direction, the dma controller should fill (1 << <TYPE>) and same
@@ -398,7 +403,7 @@ enum dma_residue_granularity {
*/
struct dma_slave_caps {
u32 src_addr_widths;
- u32 dstn_addr_widths;
+ u32 dst_addr_widths;
u32 directions;
bool cmd_pause;
bool cmd_terminate;
@@ -594,6 +599,14 @@ struct dma_tx_state {
* @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave direction the device supports since
+ * the enum dma_transfer_direction is not defined as bits for
+ * each type of direction, the dma controller should fill (1 <<
+ * <TYPE>) and same should be checked by controller as well
+ * @residue_granularity: granularity of the transfer residue reported
+ * by tx_status
* @device_alloc_chan_resources: allocate resources and return the
* number of allocated descriptors
* @device_free_chan_resources: release DMA channel's resources
@@ -602,20 +615,26 @@ struct dma_tx_state {
* @device_prep_dma_xor_val: prepares a xor validation operation
* @device_prep_dma_pq: prepares a pq operation
* @device_prep_dma_pq_val: prepares a pqzero_sum operation
+ * @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_control: manipulate all pending operations on a channel, returns
- * zero or error code
+ * @device_config: Pushes a new configuration to a channel, return 0 or an error
+ * code
+ * @device_pause: Pauses any transfer happening on a channel. Returns
+ * 0 or an error code
+ * @device_resume: Resumes any transfer on a channel previously
+ * paused. Returns 0 or an error code
+ * @device_terminate_all: Aborts all transfers on a channel. Returns 0
+ * or an error code
* @device_tx_status: poll for transaction completion, the optional
* txstate parameter can be supplied with a pointer to get a
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
- * @device_slave_caps: return the slave channel capabilities
*/
struct dma_device {
@@ -635,14 +654,19 @@ struct dma_device {
int dev_id;
struct device *dev;
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
+ u32 directions;
+ enum dma_residue_granularity residue_granularity;
+
int (*device_alloc_chan_resources)(struct dma_chan *chan);
void (*device_free_chan_resources)(struct dma_chan *chan);
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
- struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
- struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
@@ -655,6 +679,9 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -674,31 +701,26 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
- int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
- unsigned long arg);
+
+ int (*device_config)(struct dma_chan *chan,
+ struct dma_slave_config *config);
+ int (*device_pause)(struct dma_chan *chan);
+ int (*device_resume)(struct dma_chan *chan);
+ int (*device_terminate_all)(struct dma_chan *chan);
enum dma_status (*device_tx_status)(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
- int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
};
-static inline int dmaengine_device_control(struct dma_chan *chan,
- enum dma_ctrl_cmd cmd,
- unsigned long arg)
-{
- if (chan->device->device_control)
- return chan->device->device_control(chan, cmd, arg);
-
- return -ENOSYS;
-}
-
static inline int dmaengine_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
- return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
- (unsigned long)config);
+ if (chan->device->device_config)
+ return chan->device->device_config(chan, config);
+
+ return -ENOSYS;
}
static inline bool is_slave_direction(enum dma_transfer_direction direction)
@@ -755,6 +777,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags)
+{
+ if (!chan || !chan->device)
+ return NULL;
+
+ return chan->device->device_prep_dma_memset(chan, dest, value,
+ len, flags);
+}
+
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
@@ -765,34 +798,28 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
src_sg, src_nents, flags);
}
-static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
-{
- if (!chan || !caps)
- return -EINVAL;
-
- /* check if the channel supports slave transactions */
- if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
- return -ENXIO;
-
- if (chan->device->device_slave_caps)
- return chan->device->device_slave_caps(chan, caps);
-
- return -ENXIO;
-}
-
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
- return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+ if (chan->device->device_terminate_all)
+ return chan->device->device_terminate_all(chan);
+
+ return -ENOSYS;
}
static inline int dmaengine_pause(struct dma_chan *chan)
{
- return dmaengine_device_control(chan, DMA_PAUSE, 0);
+ if (chan->device->device_pause)
+ return chan->device->device_pause(chan);
+
+ return -ENOSYS;
}
static inline int dmaengine_resume(struct dma_chan *chan)
{
- return dmaengine_device_control(chan, DMA_RESUME, 0);
+ if (chan->device->device_resume)
+ return chan->device->device_resume(chan);
+
+ return -ENOSYS;
}
static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
@@ -896,6 +923,33 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
BUG();
}
+static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
+ size_t dir_icg)
+{
+ if (inc) {
+ if (dir_icg)
+ return dir_icg;
+ else if (sgl)
+ return icg;
+ }
+
+ return 0;
+}
+
+static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
+ chunk->icg, chunk->dst_icg);
+}
+
+static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
+ chunk->icg, chunk->src_icg);
+}
+
/* --- public DMA engine API --- */
#ifdef CONFIG_DMA_ENGINE
@@ -1059,6 +1113,7 @@ struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
const char *name);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
@@ -1093,6 +1148,11 @@ static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
static inline void dma_release_channel(struct dma_chan *chan)
{
}
+static inline int dma_get_slave_caps(struct dma_chan *chan,
+ struct dma_slave_caps *caps)
+{
+ return -ENXIO;
+}
#endif
/* --- DMA device --- */
@@ -1102,7 +1162,6 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
@@ -1120,27 +1179,4 @@ static inline struct dma_chan
return __dma_request_channel(mask, fn, fn_param);
}
-
-/* --- Helper iov-locking functions --- */
-
-struct dma_page_list {
- char __user *base_address;
- int nr_pages;
- struct page **pages;
-};
-
-struct dma_pinned_list {
- int nr_iovecs;
- struct dma_page_list page_list[0];
-};
-
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
-void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
-
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, struct page *page,
- unsigned int offset, size_t len);
-
#endif /* DMAENGINE_H */
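From a client's point of view nothing changes: dmaengine_slave_config(), dmaengine_pause(), dmaengine_resume() and dmaengine_terminate_all() keep their signatures and now dispatch to the per-operation callbacks instead of device_control(). On the driver side, the conversion looks roughly like the sketch below; the foo_* names are placeholders and the capability values are only examples of how the new dma_device fields are meant to be filled.

    static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg)
    {
        /* what used to live under the DMA_SLAVE_CONFIG case of device_control() */
        return 0;
    }

    static int foo_terminate_all(struct dma_chan *chan)
    {
        /* what used to live under the DMA_TERMINATE_ALL case */
        return 0;
    }

    static void foo_setup_dma_device(struct dma_device *dd)
    {
        dd->device_config        = foo_config;
        dd->device_terminate_all = foo_terminate_all;

        /* capabilities are now advertised directly instead of via
         * device_slave_caps() */
        dd->src_addr_widths      = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dd->dst_addr_widths      = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dd->directions           = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        dd->residue_granularity  = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
    }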
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 022e34f..e1043f7 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,8 +11,10 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
+#include <linux/scatterlist.h>
#include <asm/io.h>
-#include <asm/scatterlist.h>
+
+struct device;
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 3062495..e9bc929 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -185,33 +185,85 @@ static inline int dmar_device_remove(void *handle)
struct irte {
union {
+ /* Shared between remapped and posted modes */
struct {
- __u64 present : 1,
- fpd : 1,
- dst_mode : 1,
- redir_hint : 1,
- trigger_mode : 1,
- dlvry_mode : 3,
- avail : 4,
- __reserved_1 : 4,
- vector : 8,
- __reserved_2 : 8,
- dest_id : 32;
+ __u64 present : 1, /* 0 */
+ fpd : 1, /* 1 */
+ __res0 : 6, /* 2 - 7 */
+ avail : 4, /* 8 - 11 */
+ __res1 : 3, /* 12 - 14 */
+ pst : 1, /* 15 */
+ vector : 8, /* 16 - 23 */
+ __res2 : 40; /* 24 - 63 */
+ };
+
+ /* Remapped mode */
+ struct {
+ __u64 r_present : 1, /* 0 */
+ r_fpd : 1, /* 1 */
+ dst_mode : 1, /* 2 */
+ redir_hint : 1, /* 3 */
+ trigger_mode : 1, /* 4 */
+ dlvry_mode : 3, /* 5 - 7 */
+ r_avail : 4, /* 8 - 11 */
+ r_res0 : 4, /* 12 - 15 */
+ r_vector : 8, /* 16 - 23 */
+ r_res1 : 8, /* 24 - 31 */
+ dest_id : 32; /* 32 - 63 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_present : 1, /* 0 */
+ p_fpd : 1, /* 1 */
+ p_res0 : 6, /* 2 - 7 */
+ p_avail : 4, /* 8 - 11 */
+ p_res1 : 2, /* 12 - 13 */
+ p_urgent : 1, /* 14 */
+ p_pst : 1, /* 15 */
+ p_vector : 8, /* 16 - 23 */
+ p_res2 : 14, /* 24 - 37 */
+ pda_l : 26; /* 38 - 63 */
};
__u64 low;
};
union {
+ /* Shared between remapped and posted modes */
struct {
- __u64 sid : 16,
- sq : 2,
- svt : 2,
- __reserved_3 : 44;
+ __u64 sid : 16, /* 64 - 79 */
+ sq : 2, /* 80 - 81 */
+ svt : 2, /* 82 - 83 */
+ __res3 : 44; /* 84 - 127 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_sid : 16, /* 64 - 79 */
+ p_sq : 2, /* 80 - 81 */
+ p_svt : 2, /* 82 - 83 */
+ p_res3 : 12, /* 84 - 95 */
+ pda_h : 32; /* 96 - 127 */
};
__u64 high;
};
};
+static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
+{
+ dst->present = src->present;
+ dst->fpd = src->fpd;
+ dst->avail = src->avail;
+ dst->pst = src->pst;
+ dst->vector = src->vector;
+ dst->sid = src->sid;
+ dst->sq = src->sq;
+ dst->svt = src->svt;
+}
+
+#define PDA_LOW_BIT 26
+#define PDA_HIGH_BIT 32
+
enum {
IRQ_REMAP_XAPIC_MODE,
IRQ_REMAP_X2APIC_MODE,
@@ -227,6 +279,7 @@ extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
-extern int arch_setup_dmar_msi(unsigned int irq);
+extern int dmar_alloc_hwirq(int id, int node, void *arg);
+extern void dmar_free_hwirq(int irq);
#endif /* __DMAR_H__ */
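A sketch of how a posted-format entry could be populated from an existing remapped one: dmar_copy_shared_irte() carries over the fields the two layouts share, after which the posted-specific bits are filled in. The variables are illustrative, and the exact split of the posted-descriptor address across pda_l/pda_h (PDA_LOW_BIT/PDA_HIGH_BIT wide) is left out here.

    struct irte pi_irte;

    memset(&pi_irte, 0, sizeof(pi_irte));
    dmar_copy_shared_irte(&pi_irte, &cur_irte);  /* present, fpd, avail, vector, sid, sq, svt */
    pi_irte.pst      = 1;                        /* deliver as a posted interrupt */
    pi_irte.p_urgent = 0;
    /* pi_irte.pda_l / pi_irte.pda_h <- address of the posted descriptor */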
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index f820f0a..5055ac3 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -2,6 +2,7 @@
#define __DMI_H__
#include <linux/list.h>
+#include <linux/kobject.h>
#include <linux/mod_devicetable.h>
/* enum dmi_field is in mod_devicetable.h */
@@ -74,7 +75,7 @@ struct dmi_header {
u8 type;
u8 length;
u16 handle;
-};
+} __packed;
struct dmi_device {
struct list_head list;
@@ -93,6 +94,7 @@ struct dmi_dev_onboard {
int devfn;
};
+extern struct kobject *dmi_kobj;
extern int dmi_check_system(const struct dmi_system_id *list);
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
extern const char * dmi_get_system_info(int field);
diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h
index 3713a72..c0d4d1e 100644
--- a/include/linux/dqblk_v1.h
+++ b/include/linux/dqblk_v1.h
@@ -5,9 +5,6 @@
#ifndef _LINUX_DQBLK_V1_H
#define _LINUX_DQBLK_V1_H
-/* Root squash turned on */
-#define V1_DQF_RSQUASH 1
-
/* Numbers of blocks needed for updates */
#define V1_INIT_ALLOC 1
#define V1_INIT_REWRITE 1
diff --git a/include/linux/efi.h b/include/linux/efi.h
index b674837..85ef051 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -85,7 +85,8 @@ typedef struct {
#define EFI_MEMORY_MAPPED_IO 11
#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
#define EFI_PAL_CODE 13
-#define EFI_MAX_MEMORY_TYPE 14
+#define EFI_PERSISTENT_MEMORY 14
+#define EFI_MAX_MEMORY_TYPE 15
/* Attribute values: */
#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
@@ -96,6 +97,8 @@ typedef struct {
#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
+#define EFI_MEMORY_MORE_RELIABLE \
+ ((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -583,6 +586,9 @@ void efi_native_runtime_setup(void);
#define EFI_FILE_INFO_ID \
EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
+ EFI_GUID( 0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+
#define EFI_FILE_SYSTEM_GUID \
EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
@@ -823,6 +829,7 @@ extern struct efi {
unsigned long fw_vendor; /* fw_vendor */
unsigned long runtime; /* runtime table */
unsigned long config_table; /* config tables */
+ unsigned long esrt; /* ESRT table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -864,6 +871,7 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern void efi_late_init(void);
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
+extern void efi_find_mirror(void);
#else
static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
@@ -875,17 +883,27 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern int efi_config_init(efi_config_table_type_t *arch_tables);
+#ifdef CONFIG_EFI_ESRT
+extern void __init efi_esrt_init(void);
+#else
+static inline void efi_esrt_init(void) { }
+#endif
+extern int efi_config_parse_tables(void *config_tables, int count, int sz,
+ efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
+extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_get_time(struct timespec *now);
extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
extern struct efi_memory_map memmap;
+extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
extern bool efi_poweroff_required(void);
@@ -940,6 +958,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_64BIT 5 /* Is the firmware 64-bit? */
#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
#define EFI_ARCH_1 7 /* First arch-specific bit */
+#define EFI_DBG 8 /* Print additional debug info at runtime */
#ifdef CONFIG_EFI
/*
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 45a9147..638b324 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -39,6 +39,7 @@ typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct reques
typedef int (elevator_init_fn) (struct request_queue *,
struct elevator_type *e);
typedef void (elevator_exit_fn) (struct elevator_queue *);
+typedef void (elevator_registered_fn) (struct request_queue *);
struct elevator_ops
{
@@ -68,6 +69,7 @@ struct elevator_ops
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
+ elevator_registered_fn *elevator_registered_fn;
};
#define ELV_NAME_MAX (16)
diff --git a/include/linux/elf-randomize.h b/include/linux/elf-randomize.h
new file mode 100644
index 0000000..b5f0bda
--- /dev/null
+++ b/include/linux/elf-randomize.h
@@ -0,0 +1,22 @@
+#ifndef _ELF_RANDOMIZE_H
+#define _ELF_RANDOMIZE_H
+
+struct mm_struct;
+
+#ifndef CONFIG_ARCH_HAS_ELF_RANDOMIZE
+static inline unsigned long arch_mmap_rnd(void) { return 0; }
+# if defined(arch_randomize_brk) && defined(CONFIG_COMPAT_BRK)
+# define compat_brk_randomized
+# endif
+# ifndef arch_randomize_brk
+# define arch_randomize_brk(mm) (mm->brk)
+# endif
+#else
+extern unsigned long arch_mmap_rnd(void);
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+# ifdef CONFIG_COMPAT_BRK
+# define compat_brk_randomized
+# endif
+#endif
+
+#endif
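An architecture that selects CONFIG_ARCH_HAS_ELF_RANDOMIZE supplies both extern functions above. An arch_mmap_rnd() implementation might look roughly like this sketch; the 18-bit entropy width is purely illustrative.

    unsigned long arch_mmap_rnd(void)
    {
        unsigned long rnd;

        /* Illustrative: 18 bits of mmap base randomisation. */
        rnd = (unsigned long)get_random_int() & ((1UL << 18) - 1);

        return rnd << PAGE_SHIFT;
    }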
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 9a33c5f..7be22da 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -79,6 +79,12 @@ struct enclosure_component_callbacks {
int (*set_locate)(struct enclosure_device *,
struct enclosure_component *,
enum enclosure_component_setting);
+ void (*get_power_status)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_power_status)(struct enclosure_device *,
+ struct enclosure_component *,
+ int);
+ int (*show_id)(struct enclosure_device *, char *buf);
};
@@ -91,7 +97,9 @@ struct enclosure_component {
int fault;
int active;
int locate;
+ int slot;
enum enclosure_status status;
+ int power_status;
};
struct enclosure_device {
@@ -120,8 +128,9 @@ enclosure_register(struct device *, const char *, int,
struct enclosure_component_callbacks *);
void enclosure_unregister(struct enclosure_device *);
struct enclosure_component *
-enclosure_component_register(struct enclosure_device *, unsigned int,
- enum enclosure_component_type, const char *);
+enclosure_component_alloc(struct enclosure_device *, unsigned int,
+ enum enclosure_component_type, const char *);
+int enclosure_component_register(struct enclosure_component *);
int enclosure_add_device(struct enclosure_device *enclosure, int component,
struct device *dev);
int enclosure_remove_device(struct enclosure_device *, struct device *);
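Registration is now a two-step sequence: allocate the component, optionally fill in fields such as slot, then register it. A sketch, assuming the usual ERR_PTR convention on allocation failure; the variables are placeholders.

    struct enclosure_component *ecomp;
    int err;

    ecomp = enclosure_component_alloc(edev, number,
                                      ENCLOSURE_COMPONENT_DEVICE, name);
    if (IS_ERR(ecomp))
        return PTR_ERR(ecomp);

    ecomp->slot = slot_number;          /* new: report the physical slot */

    err = enclosure_component_register(ecomp);
    if (err)
        return err;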
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 41c891d..9012f87 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -35,7 +35,6 @@ extern const struct header_ops eth_header_ops;
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
const void *daddr, const void *saddr, unsigned len);
-int eth_rebuild_header(struct sk_buff *skb);
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
@@ -52,6 +51,10 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb);
+int eth_gro_complete(struct sk_buff *skb, int nhoff);
+
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
@@ -107,7 +110,29 @@ static inline bool is_zero_ether_addr(const u8 *addr)
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
{
- return 0x01 & addr[0];
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ u32 a = *(const u32 *)addr;
+#else
+ u16 a = *(const u16 *)addr;
+#endif
+#ifdef __BIG_ENDIAN
+ return 0x01 & (a >> ((sizeof(a) * 8) - 8));
+#else
+ return 0x01 & a;
+#endif
+}
+
+static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#ifdef __BIG_ENDIAN
+ return 0x01 & ((*(const u64 *)addr) >> 56);
+#else
+ return 0x01 & (*(const u64 *)addr);
+#endif
+#else
+ return is_multicast_ether_addr(addr);
+#endif
}
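The _64bits variant may read up to two bytes past the address, so it is only safe where that padding is known to be backed by real data. A typical spot is the destination address at the start of a received Ethernet header, as in this snippet (assumes an skb in scope; example_handle_multicast() is hypothetical):

    struct ethhdr *eth = eth_hdr(skb);

    /* h_dest is immediately followed by h_source, so the two-byte
     * over-read is always within the frame. */
    if (is_multicast_ether_addr_64bits(eth->h_dest))
        example_handle_multicast(skb);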
/**
@@ -166,6 +191,24 @@ static inline bool is_valid_ether_addr(const u8 *addr)
}
/**
+ * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
+ * @proto: Ethertype/length value to be tested
+ *
+ * Check that the value from the Ethertype/length field is a valid Ethertype.
+ *
+ * Return true if the value is an 802.3 supported Ethertype.
+ */
+static inline bool eth_proto_is_802_3(__be16 proto)
+{
+#ifndef __BIG_ENDIAN
+ /* if CPU is little endian mask off bits representing LSB */
+ proto &= htons(0xFF00);
+#endif
+ /* cast both to u16 and compare since LSB can be ignored */
+ return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
+}
+
+/**
* eth_random_addr - Generate software assigned random Ethernet address
* @addr: Pointer to a six-byte array containing the Ethernet address
*
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 41b223a..fa05e04 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
struct dentry;
+struct iattr;
struct inode;
struct super_block;
struct vfsmount;
@@ -180,6 +181,21 @@ struct fid {
* get_name is not (which is possibly inconsistent)
*/
+/* types of block ranges for multipage write mappings. */
+#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
+#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
+#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
+#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
+
+#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
+
+struct iomap {
+ sector_t blkno; /* first sector of mapping */
+ loff_t offset; /* file offset of mapping, bytes */
+ u64 length; /* length of mapping, bytes */
+ int type; /* type of mapping */
+};
+
struct export_operations {
int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
struct inode *parent);
@@ -191,6 +207,13 @@ struct export_operations {
struct dentry *child);
struct dentry * (*get_parent)(struct dentry *child);
int (*commit_metadata)(struct inode *inode);
+
+ int (*get_uuid)(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
+ int (*map_blocks)(struct inode *inode, loff_t offset,
+ u64 len, struct iomap *iomap,
+ bool write, u32 *device_generation);
+ int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
+ int nr_iomaps, struct iattr *iattr);
};
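A map_blocks implementation is expected to translate an (offset, len) range into a struct iomap as defined above. The sketch below makes several assumptions: example_resolve_extent() is a hypothetical filesystem helper, error handling is abbreviated, and the exact return convention is left to the filesystem.

    static int example_map_blocks(struct inode *inode, loff_t offset,
                                  u64 len, struct iomap *iomap,
                                  bool write, u32 *device_generation)
    {
        sector_t blkno;
        u64 mapped_len;
        bool allocated;

        /* Look up (and, for writes, allocate) the blocks backing
         * [offset, offset + len). */
        if (example_resolve_extent(inode, offset, len, write,
                                   &blkno, &mapped_len, &allocated))
            return -EIO;

        iomap->type   = allocated ? IOMAP_MAPPED : IOMAP_HOLE;
        iomap->blkno  = allocated ? blkno : IOMAP_NULL_BLOCK;
        iomap->offset = offset;
        iomap->length = mapped_len;
        return 0;
    }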
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 36f49c4..b16d929 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,6 +1,9 @@
/*
* External connector (extcon) class driver
*
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
* Copyright (C) 2012 Samsung Electronics
* Author: Donggeun Kim <dg77.kim@samsung.com>
* Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -27,50 +30,35 @@
#include <linux/notifier.h>
#include <linux/sysfs.h>
-#define SUPPORTED_CABLE_MAX 32
-#define CABLE_NAME_MAX 30
-
/*
- * The standard cable name is to help support general notifier
- * and notifiee device drivers to share the common names.
- * Please use standard cable names unless your notifier device has
- * a very unique and abnormal cable or
- * the cable type is supposed to be used with only one unique
- * pair of notifier/notifiee devices.
- *
- * Please add any other "standard" cables used with extcon dev.
- *
- * You may add a dot and number to specify version or specification
- * of the specific cable if it is required. (e.g., "Fast-charger.18"
- * and "Fast-charger.10" for 1.8A and 1.0A chargers)
- * However, the notifiee and notifier should be able to handle such
- * string and if the notifiee can negotiate the protocol or identify,
- * you don't need such convention. This convention is helpful when
- * notifier can distinguish but notifiee cannot.
+ * Define the unique id of supported external connectors
*/
-enum extcon_cable_name {
- EXTCON_USB = 0,
- EXTCON_USB_HOST,
- EXTCON_TA, /* Travel Adaptor */
- EXTCON_FAST_CHARGER,
- EXTCON_SLOW_CHARGER,
- EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */
- EXTCON_HDMI,
- EXTCON_MHL,
- EXTCON_DVI,
- EXTCON_VGA,
- EXTCON_DOCK,
- EXTCON_LINE_IN,
- EXTCON_LINE_OUT,
- EXTCON_MIC_IN,
- EXTCON_HEADPHONE_OUT,
- EXTCON_SPDIF_IN,
- EXTCON_SPDIF_OUT,
- EXTCON_VIDEO_IN,
- EXTCON_VIDEO_OUT,
- EXTCON_MECHANICAL,
-};
-extern const char extcon_cable_name[][CABLE_NAME_MAX + 1];
+#define EXTCON_NONE 0
+
+#define EXTCON_USB 1 /* USB connector */
+#define EXTCON_USB_HOST 2
+
+#define EXTCON_TA 3 /* Charger connector */
+#define EXTCON_FAST_CHARGER 4
+#define EXTCON_SLOW_CHARGER 5
+#define EXTCON_CHARGE_DOWNSTREAM 6
+
+#define EXTCON_LINE_IN 7 /* Audio/Video connector */
+#define EXTCON_LINE_OUT 8
+#define EXTCON_MICROPHONE 9
+#define EXTCON_HEADPHONE 10
+#define EXTCON_HDMI 11
+#define EXTCON_MHL 12
+#define EXTCON_DVI 13
+#define EXTCON_VGA 14
+#define EXTCON_SPDIF_IN 15
+#define EXTCON_SPDIF_OUT 16
+#define EXTCON_VIDEO_IN 17
+#define EXTCON_VIDEO_OUT 18
+
+#define EXTCON_DOCK 19 /* Misc connector */
+#define EXTCON_JIG 20
+#define EXTCON_MECHANICAL 21
struct extcon_cable;
@@ -78,7 +66,7 @@ struct extcon_cable;
* struct extcon_dev - An extcon device represents one external connector.
* @name: The name of this extcon device. Parent device name is
* used if NULL.
- * @supported_cable: Array of supported cable names ending with NULL.
+ * @supported_cable: Array of supported cable ids ending with EXTCON_NONE.
* If supported_cable is NULL, cable name related APIs
* are disabled.
* @mutually_exclusive: Array of mutually exclusive set of cables that cannot
@@ -89,16 +77,14 @@ struct extcon_cable;
* be attached simulataneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
- * @print_name: An optional callback to override the method to print the
- * name of the extcon device.
* @print_state: An optional callback to override the method to print the
* status of the extcon device.
* @dev: Device of this extcon.
* @state: Attach/detach state of this extcon. Do not provide at
* register-time.
* @nh: Notifier for the state change events from this extcon
- * @entry: To support list of extcon devices so that users can search
- * for extcon devices based on the extcon name.
+ * @entry: To support list of extcon devices so that users can
+ * search for extcon devices based on the extcon name.
* @lock:
* @max_supported: Internal value to store the number of cables.
* @extcon_dev_type: Device_type struct to provide attribute_groups
@@ -113,16 +99,15 @@ struct extcon_cable;
struct extcon_dev {
/* Optional user initializing data */
const char *name;
- const char **supported_cable;
+ const unsigned int *supported_cable;
const u32 *mutually_exclusive;
/* Optional callbacks to override class functions */
- ssize_t (*print_name)(struct extcon_dev *edev, char *buf);
ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
/* Internal data. Please do not set. */
struct device dev;
- struct raw_notifier_head nh;
+ struct raw_notifier_head *nh;
struct list_head entry;
int max_supported;
spinlock_t lock; /* could be called by irq handler */
@@ -161,8 +146,6 @@ struct extcon_cable {
/**
* struct extcon_specific_cable_nb - An internal data for
* extcon_register_interest().
- * @internal_nb: A notifier block bridging extcon notifier
- * and cable notifier.
* @user_nb: user provided notifier block for events from
* a specific cable.
* @cable_index: the target cable.
@@ -170,7 +153,6 @@ struct extcon_cable {
* @previous_value: the saved previous event value.
*/
struct extcon_specific_cable_nb {
- struct notifier_block internal_nb;
struct notifier_block *user_nb;
int cable_index;
struct extcon_dev *edev;
@@ -194,10 +176,10 @@ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
/*
* Following APIs control the memory of extcon device.
*/
-extern struct extcon_dev *extcon_dev_allocate(const char **cables);
+extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
extern void extcon_dev_free(struct extcon_dev *edev);
extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const char **cables);
+ const unsigned int *cable);
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
/*
@@ -216,13 +198,10 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
/*
* get/set_cable_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable_name
- * or cable_index, which is retrieved by extcon_find_cable_index
+ * They are used to access the status of each cable based on the cable_name.
*/
-extern int extcon_find_cable_index(struct extcon_dev *sdev,
- const char *cable_name);
-extern int extcon_get_cable_state_(struct extcon_dev *edev, int cable_index);
-extern int extcon_set_cable_state_(struct extcon_dev *edev, int cable_index,
+extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
+extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
bool cable_state);
extern int extcon_get_cable_state(struct extcon_dev *edev,
@@ -249,16 +228,21 @@ extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
* we do not recommend to use this for normal 'notifiee' device drivers who
* want to be notified by a specific external port of the notifier.
*/
-extern int extcon_register_notifier(struct extcon_dev *edev,
+extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb);
+extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int extcon_unregister_notifier(struct extcon_dev *edev,
- struct notifier_block *nb);
/*
* Following API get the extcon device from devicetree.
* This function use phandle of devicetree to get extcon device directly.
*/
-extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index);
+extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+ int index);
+
+/* Following API gets information about an extcon device */
+extern const char *extcon_get_edev_name(struct extcon_dev *edev);
+
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -276,7 +260,7 @@ static inline int devm_extcon_dev_register(struct device *dev,
static inline void devm_extcon_dev_unregister(struct device *dev,
struct extcon_dev *edev) { }
-static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
+static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -284,7 +268,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
static inline void extcon_dev_free(struct extcon_dev *edev) { }
static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const char **cables)
+ const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -307,20 +291,14 @@ static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
return 0;
}
-static inline int extcon_find_cable_index(struct extcon_dev *edev,
- const char *cable_name)
-{
- return 0;
-}
-
static inline int extcon_get_cable_state_(struct extcon_dev *edev,
- int cable_index)
+ unsigned int id)
{
return 0;
}
static inline int extcon_set_cable_state_(struct extcon_dev *edev,
- int cable_index, bool cable_state)
+ unsigned int id, bool cable_state)
{
return 0;
}
@@ -343,13 +321,15 @@ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
}
static inline int extcon_register_notifier(struct extcon_dev *edev,
- struct notifier_block *nb)
+ unsigned int id,
+ struct notifier_block *nb)
{
return 0;
}
static inline int extcon_unregister_notifier(struct extcon_dev *edev,
- struct notifier_block *nb)
+ unsigned int id,
+ struct notifier_block *nb)
{
return 0;
}
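With the unique ids, a provider describes its cables with the EXTCON_* constants terminated by EXTCON_NONE and reports state per id rather than per index. A sketch of the new registration pattern (the example_* names and the probe-time context are illustrative):

    static const unsigned int example_cables[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
        EXTCON_NONE,
    };

    /* in probe(): */
    edev = devm_extcon_dev_allocate(dev, example_cables);
    if (IS_ERR(edev))
        return PTR_ERR(edev);

    ret = devm_extcon_dev_register(dev, edev);
    if (ret)
        return ret;

    /* later, when a USB cable is detected: */
    extcon_set_cable_state_(edev, EXTCON_USB, true);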
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 9ca958c..53c6080 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -44,7 +44,7 @@ struct adc_jack_cond {
* @consumer_channel: Unique name to identify the channel on the consumer
* side. This typically describes the channels used within
* the consumer. E.g. 'battery_voltage'
- * @cable_names: array of cable names ending with null.
+ * @cable_names: array of extcon ids for the supported cables.
* @adc_contitions: array of struct adc_jack_cond conditions ending
* with .state = 0 entry. This describes how to decode
* adc values into extcon state.
@@ -58,8 +58,7 @@ struct adc_jack_pdata {
const char *name;
const char *consumer_channel;
- /* The last entry should be NULL */
- const char **cable_names;
+ const unsigned int *cable_names;
/* The last entry's state should be 0 */
struct adc_jack_cond *adc_conditions;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 87f14e9..920408a 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -19,12 +19,16 @@
#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
+#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
+
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -46,6 +50,8 @@
#define MAX_ACTIVE_NODE_LOGS 8
#define MAX_ACTIVE_DATA_LOGS 8
+#define VERSION_LEN 256
+
/*
* For superblock
*/
@@ -82,11 +88,18 @@ struct f2fs_super_block {
__le32 extension_count; /* # of extensions below */
__u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
__le32 cp_payload;
+ __u8 version[VERSION_LEN]; /* the kernel version */
+ __u8 init_version[VERSION_LEN]; /* the initial kernel version */
+ __le32 feature; /* defined features */
+ __u8 encryption_level; /* versioning level for encryption */
+ __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
+ __u8 reserved[871]; /* valid reserved region */
} __packed;
/*
* For checkpoint
*/
+#define CP_FASTBOOT_FLAG 0x00000020
#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
#define CP_COMPACT_SUM_FLAG 0x00000004
@@ -148,7 +161,7 @@ struct f2fs_orphan_block {
*/
struct f2fs_extent {
__le32 fofs; /* start file offset of the extent */
- __le32 blk_addr; /* start block address of the extent */
+ __le32 blk; /* start block address of the extent */
	__le32 len;		/* length of the extent */
} __packed;
@@ -173,6 +186,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
F2FS_INLINE_XATTR_ADDRS - 1))
@@ -224,6 +238,8 @@ enum {
OFFSET_BIT_SHIFT
};
+#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
+
struct node_footer {
__le32 nid; /* node id */
	__le32 ino;		/* inode number */
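A short sketch of what the new byte/block conversion helpers above evaluate to; with F2FS_BLKSIZE_BITS fixed at 12 they are plain shifts by the 4 KB block size.

	/* 8192 bytes span two 4 KB blocks; block 3 starts at byte 12288 */
	u64 nr_blocks = F2FS_BYTES_TO_BLK(8192);	/* == 2 */
	u64 byte_off  = F2FS_BLK_TO_BYTES(3);		/* == 12288 */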
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 3159168..9961110 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -21,4 +21,10 @@ struct space_resv {
#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE)
+
#endif /* _FALLOC_H_ */
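One way an ->fallocate() handler might use the new FALLOC_FL_SUPPORTED_MASK is sketched below; the myfs_* name and the particular subset of supported modes are assumptions for illustration only.

	#include <linux/falloc.h>
	#include <linux/fs.h>

	static long myfs_fallocate(struct file *file, int mode,
				   loff_t offset, loff_t len)
	{
		/* reject any mode bit outside the set the VFS defines */
		if (mode & ~FALLOC_FL_SUPPORTED_MASK)
			return -EOPNOTSUPP;

		/* this hypothetical filesystem only implements two of them */
		if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
			return -EOPNOTSUPP;

		/* ... perform the allocation or hole punch ... */
		return 0;
	}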
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 09bb7a1..043f328 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -726,7 +726,9 @@ extern int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode);
/* drivers/video/modedb.c */
-#define VESA_MODEDB_SIZE 34
+#define VESA_MODEDB_SIZE 43
+#define DMT_SIZE 0x50
+
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
@@ -777,9 +779,17 @@ struct fb_videomode {
u32 flag;
};
+struct dmt_videomode {
+ u32 dmt_id;
+ u32 std_2byte_code;
+ u32 cvt_3byte_code;
+ const struct fb_videomode *mode;
+};
+
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
+extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
struct list_head list;
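A possible consumer of the new dmt_modes[] table, assuming (as the DMT_SIZE define suggests) that the table exported by modedb.c holds DMT_SIZE entries and that entries without a usable timing leave ->mode NULL:

	#include <linux/fb.h>

	static const struct fb_videomode *find_dmt_mode(u32 dmt_id)
	{
		int i;

		for (i = 0; i < DMT_SIZE; i++)
			if (dmt_modes[i].dmt_id == dmt_id)
				return dmt_modes[i].mode;	/* may be NULL */
		return NULL;
	}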
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 230f87b..fbb8874 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -47,6 +47,9 @@ struct files_struct {
* read mostly part
*/
atomic_t count;
+ bool resize_in_progress;
+ wait_queue_head_t resize_wait;
+
struct fdtable __rcu *fdt;
struct fdtable fdtab;
/*
diff --git a/include/linux/fec.h b/include/linux/fec.h
index bcff455..1454a50 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -19,6 +19,7 @@
struct fec_platform_data {
phy_interface_t phy;
unsigned char mac[ETH_ALEN];
+ void (*sleep_mode_enable)(int enabled);
};
#endif
diff --git a/include/linux/filter.h b/include/linux/filter.h
index caac208..17724f6 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -145,8 +145,6 @@ struct bpf_prog_aux;
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
-#define BPF_PSEUDO_MAP_FD 1
-
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
@@ -209,6 +207,16 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
@@ -269,6 +277,14 @@ struct bpf_prog_aux;
.off = 0, \
.imm = 0 })
+/* Internal classic blocks for direct assignment */
+
+#define __BPF_STMT(CODE, K) \
+ ((struct sock_filter) BPF_STMT(CODE, K))
+
+#define __BPF_JUMP(CODE, K, JT, JF) \
+ ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
+
#define bytes_to_bpf_size(bytes) \
({ \
int bpf_size = -EINVAL; \
@@ -310,9 +326,11 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
bool jited; /* Is our filter JIT'ed? */
+ bool gpl_compatible; /* Is our filter GPL compatible? */
u32 len; /* Number of filter blocks */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct bpf_insn *filter);
/* Instructions for interpreter */
@@ -360,12 +378,9 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
int sk_filter(struct sock *sk, struct sk_buff *skb);
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);
-int bpf_convert_filter(struct sock_filter *prog, int len,
- struct bpf_insn *new_prog, int *new_len);
-
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -377,14 +392,17 @@ static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
__bpf_prog_free(fp);
}
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
+ unsigned int flen);
+
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+ bpf_aux_classic_check_t trans);
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
-
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
@@ -454,6 +472,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
BPF_ANCILLARY(VLAN_TAG_PRESENT);
BPF_ANCILLARY(PAY_OFFSET);
BPF_ANCILLARY(RANDOM);
+ BPF_ANCILLARY(VLAN_TPID);
}
/* Fallthrough. */
default:
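The new BPF_STX_XADD() macro above builds an eBPF atomic-add instruction. A small usage sketch, assuming the BPF_REG_* and BPF_W definitions from the uapi BPF headers:

	/* *(u32 *)(R1 + 0) += R2 */
	struct bpf_insn insn = BPF_STX_XADD(BPF_W, BPF_REG_1, BPF_REG_2, 0);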
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index 3089d73..d4686fe 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -1,6 +1,8 @@
#ifndef _FIXP_ARITH_H
#define _FIXP_ARITH_H
+#include <linux/math64.h>
+
/*
* Simplistic fixed-point arithmetics.
* Hmm, I'm probably duplicating some code :(
@@ -29,59 +31,126 @@
#include <linux/types.h>
-/* The type representing fixed-point values */
-typedef s16 fixp_t;
+static const s32 sin_table[] = {
+ 0x00000000, 0x023be165, 0x04779632, 0x06b2f1d2, 0x08edc7b6, 0x0b27eb5c,
+ 0x0d61304d, 0x0f996a26, 0x11d06c96, 0x14060b67, 0x163a1a7d, 0x186c6ddd,
+ 0x1a9cd9ac, 0x1ccb3236, 0x1ef74bf2, 0x2120fb82, 0x234815ba, 0x256c6f9e,
+ 0x278dde6e, 0x29ac379f, 0x2bc750e8, 0x2ddf003f, 0x2ff31bdd, 0x32037a44,
+ 0x340ff241, 0x36185aee, 0x381c8bb5, 0x3a1c5c56, 0x3c17a4e7, 0x3e0e3ddb,
+ 0x3fffffff, 0x41ecc483, 0x43d464fa, 0x45b6bb5d, 0x4793a20f, 0x496af3e1,
+ 0x4b3c8c11, 0x4d084650, 0x4ecdfec6, 0x508d9210, 0x5246dd48, 0x53f9be04,
+ 0x55a6125a, 0x574bb8e5, 0x58ea90c2, 0x5a827999, 0x5c135399, 0x5d9cff82,
+ 0x5f1f5ea0, 0x609a52d1, 0x620dbe8a, 0x637984d3, 0x64dd894f, 0x6639b039,
+ 0x678dde6d, 0x68d9f963, 0x6a1de735, 0x6b598ea1, 0x6c8cd70a, 0x6db7a879,
+ 0x6ed9eba0, 0x6ff389de, 0x71046d3c, 0x720c8074, 0x730baeec, 0x7401e4bf,
+ 0x74ef0ebb, 0x75d31a5f, 0x76adf5e5, 0x777f903b, 0x7847d908, 0x7906c0af,
+ 0x79bc384c, 0x7a6831b8, 0x7b0a9f8c, 0x7ba3751c, 0x7c32a67c, 0x7cb82884,
+ 0x7d33f0c8, 0x7da5f5a3, 0x7e0e2e31, 0x7e6c924f, 0x7ec11aa3, 0x7f0bc095,
+ 0x7f4c7e52, 0x7f834ecf, 0x7fb02dc4, 0x7fd317b3, 0x7fec09e1, 0x7ffb025e,
+ 0x7fffffff
+};
-#define FRAC_N 8
-#define FRAC_MASK ((1<<FRAC_N)-1)
+/**
+ * __fixp_sin32() returns the sin of an angle in degrees
+ *
+ * @degrees: angle, in degrees, from 0 to 360.
+ *
+ * The returned value ranges from -0x7fffffff to +0x7fffffff.
+ */
+static inline s32 __fixp_sin32(int degrees)
+{
+ s32 ret;
+ bool negative = false;
-/* Not to be used directly. Use fixp_{cos,sin} */
-static const fixp_t cos_table[46] = {
- 0x0100, 0x00FF, 0x00FF, 0x00FE, 0x00FD, 0x00FC, 0x00FA, 0x00F8,
- 0x00F6, 0x00F3, 0x00F0, 0x00ED, 0x00E9, 0x00E6, 0x00E2, 0x00DD,
- 0x00D9, 0x00D4, 0x00CF, 0x00C9, 0x00C4, 0x00BE, 0x00B8, 0x00B1,
- 0x00AB, 0x00A4, 0x009D, 0x0096, 0x008F, 0x0087, 0x0080, 0x0078,
- 0x0070, 0x0068, 0x005F, 0x0057, 0x004F, 0x0046, 0x003D, 0x0035,
- 0x002C, 0x0023, 0x001A, 0x0011, 0x0008, 0x0000
-};
+ if (degrees > 180) {
+ negative = true;
+ degrees -= 180;
+ }
+ if (degrees > 90)
+ degrees = 180 - degrees;
+ ret = sin_table[degrees];
-/* a: 123 -> 123.0 */
-static inline fixp_t fixp_new(s16 a)
-{
- return a<<FRAC_N;
+ return negative ? -ret : ret;
}
-/* a: 0xFFFF -> -1.0
- 0x8000 -> 1.0
- 0x0000 -> 0.0
-*/
-static inline fixp_t fixp_new16(s16 a)
+/**
+ * fixp_sin32() returns the sin of an angle in degrees
+ *
+ * @degrees: angle, in degrees. The angle can be positive or negative
+ *
+ * The returned value ranges from -0x7fffffff to +0x7fffffff.
+ */
+static inline s32 fixp_sin32(int degrees)
{
- return ((s32)a)>>(16-FRAC_N);
+ degrees = (degrees % 360 + 360) % 360;
+
+ return __fixp_sin32(degrees);
}
-static inline fixp_t fixp_cos(unsigned int degrees)
+/* cos(x) = sin(x + 90 degrees) */
+#define fixp_cos32(v) fixp_sin32((v) + 90)
+
+/*
+ * 16 bits variants
+ *
+ * The returned value ranges from -0x7fff to 0x7fff
+ */
+
+#define fixp_sin16(v) (fixp_sin32(v) >> 16)
+#define fixp_cos16(v) (fixp_cos32(v) >> 16)
+
+/**
+ * fixp_sin32_rad() - calculates the sin of an angle in radians
+ *
+ * @radians: angle, in radians
+ * @twopi: value to be used for 2*pi
+ *
+ * Provides a variant for the cases where a resolution of just 360
+ * values is not enough. This function uses linear interpolation over
+ * the wider range of values given by the twopi argument.
+ *
+ * Experimental tests gave a maximum difference of
+ * 0.000038 between the value calculated by sin() and
+ * the one produced by this function, when twopi is
+ * equal to 360000. That seems to be enough precision
+ * for practical purposes.
+ *
+ * Please note that too large values of twopi could cause
+ * overflows, so the routine will not allow values of twopi
+ * bigger than 2^18 (enforced by the BUG_ON below).
+ */
+static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
{
- int quadrant = (degrees / 90) & 3;
- unsigned int i = degrees % 90;
+ int degrees;
+ s32 v1, v2, dx, dy;
+ s64 tmp;
- if (quadrant == 1 || quadrant == 3)
- i = 90 - i;
+ /*
+ * Avoid too large values for twopi, as we don't want overflows.
+ */
+ BUG_ON(twopi > 1 << 18);
- i >>= 1;
+ degrees = (radians * 360) / twopi;
+ tmp = radians - (degrees * twopi) / 360;
- return (quadrant == 1 || quadrant == 2)? -cos_table[i] : cos_table[i];
-}
+ degrees = (degrees % 360 + 360) % 360;
+ v1 = __fixp_sin32(degrees);
-static inline fixp_t fixp_sin(unsigned int degrees)
-{
- return -fixp_cos(degrees + 90);
-}
+ v2 = fixp_sin32(degrees + 1);
-static inline fixp_t fixp_mult(fixp_t a, fixp_t b)
-{
- return ((s32)(a*b))>>FRAC_N;
+ dx = twopi / 360;
+ dy = v2 - v1;
+
+ tmp *= dy;
+
+ return v1 + div_s64(tmp, dx);
}
+/* cos(x) = sin(x + pi/2 radians) */
+
+#define fixp_cos32_rad(rad, twopi) \
+ fixp_sin32_rad(rad + twopi / 4, twopi)
+
#endif
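The rewritten helpers return values scaled so that 1.0 maps to 0x7fffffff (or 0x7fff for the 16-bit variants). A few example calls, derived directly from the code above:

	s32 a = fixp_sin32(90);			/* 0x7fffffff: sin(90 deg) == 1.0 */
	s32 b = fixp_cos16(0);			/* 0x7fff: cos(0) == 1.0, 16-bit */
	s32 c = fixp_sin32_rad(250, 1000);	/* sin(pi/2) with 2*pi scaled to 1000 */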
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 8293262..e65ef95 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -6,16 +6,16 @@
#include <linux/bitops.h>
struct frontswap_ops {
- void (*init)(unsigned);
- int (*store)(unsigned, pgoff_t, struct page *);
- int (*load)(unsigned, pgoff_t, struct page *);
- void (*invalidate_page)(unsigned, pgoff_t);
- void (*invalidate_area)(unsigned);
+ void (*init)(unsigned); /* this swap type was just swapon'ed */
+ int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
+ int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
+ void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
+ void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
+ struct frontswap_ops *next; /* private pointer to next ops */
};
extern bool frontswap_enabled;
-extern struct frontswap_ops *
- frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_register_ops(struct frontswap_ops *ops);
extern void frontswap_shrink(unsigned long);
extern unsigned long frontswap_curr_pages(void);
extern void frontswap_writethrough(bool);
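With the API change above, backends no longer receive the previously registered ops back from frontswap_register_ops(); chaining through the new ->next member is handled by the frontswap core. A registration sketch (all my_* callbacks are hypothetical):

	#include <linux/frontswap.h>
	#include <linux/init.h>

	static struct frontswap_ops my_frontswap_ops = {
		.init		 = my_init,		/* void (*)(unsigned) */
		.store		 = my_store,
		.load		 = my_load,
		.invalidate_page = my_invalidate_page,
		.invalidate_area = my_invalidate_area,
	};

	static int __init my_backend_init(void)
	{
		frontswap_register_ops(&my_frontswap_ops);	/* returns void now */
		return 0;
	}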
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 42efe13..cc008c3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -34,10 +34,11 @@
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
+struct backing_dev_info;
+struct bdi_writeback;
struct export_operations;
struct hd_geometry;
struct iovec;
-struct nameidata;
struct kiocb;
struct kobject;
struct pipe_inode_info;
@@ -50,6 +51,7 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
+struct vm_fault;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -68,6 +70,7 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
+typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
#define MAY_EXEC 0x00000001
#define MAY_WRITE 0x00000002
@@ -312,6 +315,33 @@ struct page;
struct address_space;
struct writeback_control;
+#define IOCB_EVENTFD (1 << 0)
+#define IOCB_APPEND (1 << 1)
+#define IOCB_DIRECT (1 << 2)
+
+struct kiocb {
+ struct file *ki_filp;
+ loff_t ki_pos;
+ void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void *private;
+ int ki_flags;
+};
+
+static inline bool is_sync_kiocb(struct kiocb *kiocb)
+{
+ return kiocb->ki_complete == NULL;
+}
+
+static inline int iocb_flags(struct file *file);
+
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+{
+ *kiocb = (struct kiocb) {
+ .ki_filp = filp,
+ .ki_flags = iocb_flags(filp),
+ };
+}
+
/*
* "descriptor" for what we're up to with a read.
* This allows us to use the same read code yet
@@ -359,9 +389,7 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
- ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
- void **, unsigned long *);
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
/*
* migrate the contents of a page to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -394,14 +422,12 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
-struct backing_dev_info;
struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root page_tree; /* radix tree of all pages */
spinlock_t tree_lock; /* and lock protecting it */
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root i_mmap; /* tree of private and shared mappings */
- struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
/* Protected by tree_lock together with the radix tree */
unsigned long nrpages; /* number of total pages */
@@ -409,7 +435,6 @@ struct address_space {
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
unsigned long flags; /* error bits/gfp mask */
- struct backing_dev_info *backing_dev_info; /* device readahead, etc */
spinlock_t private_lock; /* for use by the address_space */
struct list_head private_list; /* ditto */
void *private_data; /* ditto */
@@ -493,8 +518,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
*/
static inline int mapping_mapped(struct address_space *mapping)
{
- return !RB_EMPTY_ROOT(&mapping->i_mmap) ||
- !list_empty(&mapping->i_mmap_nonlinear);
+ return !RB_EMPTY_ROOT(&mapping->i_mmap);
}
/*
@@ -608,9 +632,18 @@ struct inode {
struct mutex i_mutex;
unsigned long dirtied_when; /* jiffies of first dirtying */
+ unsigned long dirtied_time_when;
struct hlist_node i_hash;
struct list_head i_wb_list; /* backing dev IO list */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct bdi_writeback *i_wb; /* the associated cgroup wb */
+
+ /* foreign inode detection, see wbc_detach_inode() */
+ int i_wb_frn_winner;
+ u16 i_wb_frn_avg_time;
+ u16 i_wb_frn_history;
+#endif
struct list_head i_lru; /* inode LRU list */
struct list_head i_sb_list;
union {
@@ -625,13 +658,14 @@ struct inode {
atomic_t i_readcount; /* struct files open RO */
#endif
const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
- struct file_lock *i_flock;
+ struct file_lock_context *i_flctx;
struct address_space i_data;
struct list_head i_devices;
union {
struct pipe_inode_info *i_pipe;
struct block_device *i_bdev;
struct cdev *i_cdev;
+ char *i_link;
};
__u32 i_generation;
@@ -851,6 +885,7 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)
@@ -875,6 +910,7 @@ static inline struct file *get_file(struct file *f)
#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
+#define FL_LAYOUT 2048 /* outstanding pNFS layout */
/*
* Special return value from posix_lock_file() and vfs_lock_file() for
@@ -885,6 +921,8 @@ static inline struct file *get_file(struct file *f)
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
+struct file_lock;
+
struct file_lock_operations {
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
@@ -893,12 +931,12 @@ struct file_lock_operations {
struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
unsigned long (*lm_owner_key)(struct file_lock *);
- void (*lm_get_owner)(struct file_lock *, struct file_lock *);
- void (*lm_put_owner)(struct file_lock *);
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, int);
bool (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock **, int, struct list_head *);
+ int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
};
@@ -923,17 +961,17 @@ int locks_in_grace(struct net *);
* FIXME: should we create a separate "struct lock_request" to help distinguish
* these two uses?
*
- * The i_flock list is ordered by:
+ * The various i_flctx lists are ordered by:
*
- * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
- * 2) lock owner
- * 3) lock range start
- * 4) lock range end
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
*
* Obviously, the last two criteria only matter for POSIX locks.
*/
struct file_lock {
struct file_lock *fl_next; /* singly linked list for this inode */
+ struct list_head fl_list; /* link into file_lock_context */
struct hlist_node fl_link; /* node in global lists */
struct list_head fl_block; /* circular list of blocked processes */
fl_owner_t fl_owner;
@@ -964,6 +1002,13 @@ struct file_lock {
} fl_u;
};
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+};
+
/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
@@ -990,6 +1035,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);
/* fs/locks.c */
+void locks_free_lock_context(struct file_lock_context *ctx);
void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
@@ -1000,17 +1046,20 @@ extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file_wait(struct file *, struct file_lock *);
+extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
extern int posix_unblock_lock(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock **, int, struct list_head *);
+extern int lease_modify(struct file_lock *, int, struct list_head *);
+struct files_struct;
+extern void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1047,6 +1096,11 @@ static inline int fcntl_getlease(struct file *filp)
return F_UNLCK;
}
+static inline void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+}
+
static inline void locks_init_lock(struct file_lock *fl)
{
return;
@@ -1083,7 +1137,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
return -ENOLCK;
}
-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+static inline int posix_lock_inode_wait(struct inode *inode,
+ struct file_lock *fl)
{
return -ENOLCK;
}
@@ -1109,8 +1164,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
return 0;
}
-static inline int flock_lock_file_wait(struct file *filp,
- struct file_lock *request)
+static inline int flock_lock_inode_wait(struct inode *inode,
+ struct file_lock *request)
{
return -ENOLCK;
}
@@ -1137,13 +1192,31 @@ static inline int vfs_setlease(struct file *filp, long arg,
return -EINVAL;
}
-static inline int lease_modify(struct file_lock **before, int arg,
+static inline int lease_modify(struct file_lock *fl, int arg,
struct list_head *dispose)
{
return -EINVAL;
}
+
+struct files_struct;
+static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
#endif /* !CONFIG_FILE_LOCKING */
+static inline struct inode *file_inode(const struct file *f)
+{
+ return f->f_inode;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return posix_lock_inode_wait(file_inode(filp), fl);
+}
+
+static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return flock_lock_inode_wait(file_inode(filp), fl);
+}
struct fasync_struct {
spinlock_t fa_lock;
@@ -1184,8 +1257,8 @@ struct mm_struct;
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
-extern struct list_head super_blocks;
-extern spinlock_t sb_lock;
+/* sb->s_iflags */
+#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
/* Possible states of 'frozen' field */
enum {
@@ -1224,6 +1297,7 @@ struct super_block {
const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
+ unsigned long s_iflags; /* internal SB_I_* flags */
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
@@ -1502,6 +1576,26 @@ struct block_device_operations;
#define HAVE_COMPAT_IOCTL 1
#define HAVE_UNLOCKED_IOCTL 1
+/*
+ * These flags let !MMU mmap() govern direct device mapping vs immediate
+ * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
+ *
+ * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
+ * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
+ * NOMMU_MAP_READ: Can be mapped for reading
+ * NOMMU_MAP_WRITE: Can be mapped for writing
+ * NOMMU_MAP_EXEC: Can be mapped for execution
+ */
+#define NOMMU_MAP_COPY 0x00000001
+#define NOMMU_MAP_DIRECT 0x00000008
+#define NOMMU_MAP_READ VM_MAYREAD
+#define NOMMU_MAP_WRITE VM_MAYWRITE
+#define NOMMU_MAP_EXEC VM_MAYEXEC
+
+#define NOMMU_VMFLAGS \
+ (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+
+
struct iov_iter;
struct file_operations {
@@ -1509,8 +1603,6 @@ struct file_operations {
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
- ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
@@ -1518,7 +1610,7 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- void (*mremap)(struct file *, struct vm_area_struct *);
+ int (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -1536,16 +1628,19 @@ struct file_operations {
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);
+#ifndef CONFIG_MMU
+ unsigned (*mmap_capabilities)(struct file *);
+#endif
};
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
- void * (*follow_link) (struct dentry *, struct nameidata *);
+ const char * (*follow_link) (struct dentry *, void **);
int (*permission) (struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
int (*readlink) (struct dentry *, char __user *,int);
- void (*put_link) (struct dentry *, struct nameidata *, void *);
+ void (*put_link) (struct inode *, void *);
int (*create) (struct inode *,struct dentry *, umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
@@ -1574,7 +1669,6 @@ struct inode_operations {
int (*set_acl)(struct inode *, struct posix_acl *, int);
/* WARNING: probably going away soon, do not use! */
- int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
} ____cacheline_aligned;
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1583,6 +1677,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec **ret_pointer);
extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
+extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -1618,8 +1713,10 @@ struct super_operations {
struct dquot **(*get_dquots)(struct inode *);
#endif
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
- long (*nr_cached_objects)(struct super_block *, int);
- long (*free_cached_objects)(struct super_block *, long, int);
+ long (*nr_cached_objects)(struct super_block *,
+ struct shrink_control *);
+ long (*free_cached_objects)(struct super_block *,
+ struct shrink_control *);
};
/*
@@ -1638,6 +1735,11 @@ struct super_operations {
#define S_IMA 1024 /* Inode has an associated IMA struct */
#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
#define S_NOSEC 4096 /* no suid or xattr security attributes */
+#ifdef CONFIG_FS_DAX
+#define S_DAX 8192 /* Direct Access, avoiding the page cache */
+#else
+#define S_DAX 0 /* Make all the DAX code disappear */
+#endif
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1675,6 +1777,7 @@ struct super_operations {
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
+#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
@@ -1730,6 +1833,11 @@ struct super_operations {
*
* I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
*
+ * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
+ * synchronize competing switching instances and to tell
+ * wb stat updates to grab mapping->tree_lock. See
+ * inode_switch_wb_work_fn() for details.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -1744,10 +1852,15 @@ struct super_operations {
#define I_SYNC (1 << __I_SYNC)
#define I_REFERENCED (1 << 8)
#define __I_DIO_WAKEUP 9
-#define I_DIO_WAKEUP (1 << I_DIO_WAKEUP)
+#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
#define I_LINKABLE (1 << 10)
+#define I_DIRTY_TIME (1 << 11)
+#define __I_DIRTY_TIME_EXPIRED 12
+#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
+#define I_WB_SWITCH (1 << 13)
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
extern void __mark_inode_dirty(struct inode *, int);
static inline void mark_inode_dirty(struct inode *inode)
@@ -1799,6 +1912,7 @@ enum file_time_flags {
S_VERSION = 8,
};
+extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
@@ -1817,6 +1931,7 @@ struct file_system_type {
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
+#define FS_USERNS_VISIBLE 32 /* FS must already be visible */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
@@ -1904,17 +2019,12 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);
-extern bool fs_fully_visible(struct file_system_type *);
extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
-
-static inline struct inode *file_inode(const struct file *f)
-{
- return f->f_inode;
-}
+extern int generic_update_time(struct inode *, struct timespec *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -1959,7 +2069,7 @@ static inline int locks_verify_truncate(struct inode *inode,
struct file *filp,
loff_t size)
{
- if (inode->i_flock && mandatory_lock(inode))
+ if (inode->i_flctx && mandatory_lock(inode))
return locks_mandatory_area(
FLOCK_VERIFY_WRITE, inode, filp,
size < inode->i_size ? size : inode->i_size,
@@ -1973,11 +2083,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
{
/*
* Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking inode->i_flock. Otherwise, we could
- * end up racing with tasks trying to set a new lease on this file.
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
*/
smp_mb();
- if (inode->i_flock)
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode, mode, FL_LEASE);
return 0;
}
@@ -1986,11 +2097,12 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
{
/*
* Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking inode->i_flock. Otherwise, we could
- * end up racing with tasks trying to set a new lease on this file.
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
*/
smp_mb();
- if (inode->i_flock)
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode, mode, FL_DELEG);
return 0;
}
@@ -2017,6 +2129,16 @@ static inline int break_deleg_wait(struct inode **delegated_inode)
return ret;
}
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode,
+ wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
+ FL_LAYOUT);
+ return 0;
+}
+
#else /* !CONFIG_FILE_LOCKING */
static inline int locks_mandatory_locked(struct file *file)
{
@@ -2072,6 +2194,11 @@ static inline int break_deleg_wait(struct inode **delegated_inode)
return 0;
}
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ return 0;
+}
+
#endif /* CONFIG_FILE_LOCKING */
/* fs/open.c */
@@ -2080,7 +2207,8 @@ struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
struct audit_names *aname;
- bool separate; /* should "name" be freed? */
+ int refcnt;
+ const char iname[];
};
extern long vfs_truncate(struct path *, loff_t);
@@ -2094,13 +2222,13 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
+extern void putname(struct filename *name);
enum {
FILE_CREATED = 1,
@@ -2121,15 +2249,8 @@ extern void __init vfs_caches_init(unsigned long);
extern struct kmem_cache *names_cachep;
-extern void final_putname(struct filename *name);
-
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-#ifndef CONFIG_AUDITSYSCALL
-#define putname(name) final_putname(name)
-#else
-extern void putname(struct filename *name);
-#endif
#ifdef CONFIG_BLOCK
extern int register_blkdev(unsigned int, const char *);
@@ -2147,7 +2268,13 @@ extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
-extern int sb_is_blkdev_sb(struct super_block *sb);
+
+extern struct super_block *blockdev_superblock;
+
+static inline bool sb_is_blkdev_sb(struct super_block *sb)
+{
+ return sb == blockdev_superblock;
+}
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2186,6 +2313,9 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
+extern int __blkdev_reread_part(struct block_device *bdev);
+extern int blkdev_reread_part(struct block_device *bdev);
+
#ifdef CONFIG_SYSFS
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
extern void bd_unlink_disk_holder(struct block_device *bdev,
@@ -2408,6 +2538,8 @@ extern struct file * open_exec(const char *);
extern int is_subdir(struct dentry *, struct dentry *);
extern int path_is_under(struct path *, struct path *);
+extern char *file_path(struct file *, char *, int);
+
#include <linux/err.h>
/* needed for stackable file system support */
@@ -2436,6 +2568,11 @@ extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
+extern struct inode *find_inode_nowait(struct super_block *,
+ unsigned long,
+ int (*match)(struct inode *,
+ unsigned long, void *),
+ void *data);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -2454,7 +2591,12 @@ extern struct inode *new_inode_pseudo(struct super_block *sb);
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int should_remove_suid(struct dentry *);
-extern int file_remove_suid(struct file *);
+extern int file_remove_privs(struct file *);
+extern int dentry_needs_remove_privs(struct dentry *dentry);
+static inline int file_needs_remove_privs(struct file *file)
+{
+ return dentry_needs_remove_privs(file->f_path.dentry);
+}
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
@@ -2481,18 +2623,15 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
- unsigned long size, pgoff_t pgoff);
-int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
-extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
+
+ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
+ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos);
/* fs/block_dev.c */
extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
@@ -2527,19 +2666,18 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
-#ifdef CONFIG_FS_XIP
-extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
- loff_t *ppos);
-extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
-extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
- size_t len, loff_t *ppos);
-extern int xip_truncate_page(struct address_space *mapping, loff_t from);
-#else
-static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
-{
- return 0;
-}
-#endif
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ get_block_t, dio_iodone_t, int flags);
+int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
+int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+ dax_iodone_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
+ dax_iodone_t);
+int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
+#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
@@ -2554,27 +2692,56 @@ enum {
/* filesystem can handle aio writes beyond i_size */
DIO_ASYNC_EXTEND = 0x04,
+
+ /* inode/fs/bdev does not need truncate protection */
+ DIO_SKIP_DIO_COUNT = 0x08,
};
void dio_end_io(struct bio *bio, int error);
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter, loff_t offset,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags);
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block,
+ dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags);
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
- struct inode *inode, struct iov_iter *iter, loff_t offset,
- get_block_t get_block)
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+ struct inode *inode,
+ struct iov_iter *iter, loff_t offset,
+ get_block_t get_block)
{
- return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+ return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
offset, get_block, NULL, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
void inode_dio_wait(struct inode *inode);
-void inode_dio_done(struct inode *inode);
+
+/*
+ * inode_dio_begin - signal start of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called before we start processing a direct I/O request, and
+ * elevates inode->i_dio_count so that callers of inode_dio_wait() block
+ * until the request has completed (see inode_dio_end()).
+ */
+static inline void inode_dio_begin(struct inode *inode)
+{
+ atomic_inc(&inode->i_dio_count);
+}
+
+/*
+ * inode_dio_end - signal finish of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+static inline void inode_dio_end(struct inode *inode)
+{
+ if (atomic_dec_and_test(&inode->i_dio_count))
+ wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -2585,13 +2752,14 @@ extern const struct file_operations generic_ro_fops;
extern int readlink_copy(char __user *, int, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern const char *page_follow_link_light(struct dentry *, void **);
+extern void page_put_link(struct inode *, void *);
extern int __page_symlink(struct inode *inode, const char *symname, int len,
int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
-extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
+extern void kfree_put_link(struct inode *, void *);
+extern void free_page_put_link(struct inode *, void *);
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
int vfs_getattr_nosec(struct path *path, struct kstat *stat);
@@ -2602,8 +2770,9 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
+const char *simple_follow_link(struct dentry *, void **);
+extern const struct inode_operations simple_symlink_inode_operations;
-extern int vfs_readdir(struct file *, filldir_t, void *);
extern int iterate_dir(struct file *, struct dir_context *);
extern int vfs_stat(const char __user *, struct kstat *);
@@ -2662,6 +2831,8 @@ extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned in
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
+extern void make_empty_dir_inode(struct inode *inode);
+extern bool is_empty_dir_inode(struct inode *inode);
struct tree_descr { char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
@@ -2696,6 +2867,21 @@ extern int generic_show_options(struct seq_file *m, struct dentry *root);
extern void save_mount_options(struct super_block *sb, char *options);
extern void replace_mount_options(struct super_block *sb, char *options);
+static inline bool io_is_direct(struct file *filp)
+{
+ return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+}
+
+static inline int iocb_flags(struct file *file)
+{
+ int res = 0;
+ if (file->f_flags & O_APPEND)
+ res |= IOCB_APPEND;
+ if (io_is_direct(file))
+ res |= IOCB_DIRECT;
+ return res;
+}
+
static inline ino_t parent_ino(struct dentry *dentry)
{
ino_t res;
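Among the many fs.h changes above, generic_write_checks() now takes the kiocb/iov_iter pair and returns the (possibly clamped) byte count, zero, or a negative error. A sketch of a ->write_iter() built on it, with sync handling omitted and the myfs_ prefix purely illustrative:

	#include <linux/fs.h>
	#include <linux/uio.h>

	static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		mutex_lock(&inode->i_mutex);
		ret = generic_write_checks(iocb, from);
		if (ret > 0)
			ret = __generic_file_write_iter(iocb, from);
		mutex_unlock(&inode->i_mutex);

		return ret;
	}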
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
index f66525e..3886b3b 100644
--- a/include/linux/fs_pin.h
+++ b/include/linux/fs_pin.h
@@ -1,17 +1,24 @@
-#include <linux/fs.h>
+#include <linux/wait.h>
struct fs_pin {
- atomic_long_t count;
- union {
- struct {
- struct hlist_node s_list;
- struct hlist_node m_list;
- };
- struct rcu_head rcu;
- };
+ wait_queue_head_t wait;
+ int done;
+ struct hlist_node s_list;
+ struct hlist_node m_list;
void (*kill)(struct fs_pin *);
};
-void pin_put(struct fs_pin *);
+struct vfsmount;
+
+static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
+{
+ init_waitqueue_head(&p->wait);
+ INIT_HLIST_NODE(&p->s_list);
+ INIT_HLIST_NODE(&p->m_list);
+ p->kill = kill;
+}
+
void pin_remove(struct fs_pin *);
+void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
void pin_insert(struct fs_pin *, struct vfsmount *);
+void pin_kill(struct fs_pin *);
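A minimal sketch of the reworked fs_pin usage, assuming the usual pattern in which the ->kill callback supplied to the new init_fs_pin() ends by calling pin_remove(); the my_* names are made up:

	#include <linux/fs_pin.h>

	static void my_pin_kill(struct fs_pin *p)
	{
		/* release whatever the pin protects, then detach it */
		pin_remove(p);
	}

	static struct fs_pin my_pin;

	static void my_attach(struct vfsmount *mnt)
	{
		init_fs_pin(&my_pin, my_pin_kill);
		pin_insert(&my_pin, mnt);
	}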
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 7714849..604e152 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -74,6 +74,7 @@ extern wait_queue_head_t fscache_cache_cleared_wq;
*/
typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
enum fscache_operation_state {
FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
@@ -109,6 +110,9 @@ struct fscache_operation {
* the op in a non-pool thread */
fscache_operation_processor_t processor;
+ /* Operation cancellation cleanup (optional) */
+ fscache_operation_cancel_t cancel;
+
/* operation releaser */
fscache_operation_release_t release;
};
@@ -119,33 +123,17 @@ extern void fscache_op_work_func(struct work_struct *work);
extern void fscache_enqueue_operation(struct fscache_operation *);
extern void fscache_op_complete(struct fscache_operation *, bool);
extern void fscache_put_operation(struct fscache_operation *);
-
-/**
- * fscache_operation_init - Do basic initialisation of an operation
- * @op: The operation to initialise
- * @release: The release function to assign
- *
- * Do basic initialisation of an operation. The caller must still set flags,
- * object and processor if needed.
- */
-static inline void fscache_operation_init(struct fscache_operation *op,
- fscache_operation_processor_t processor,
- fscache_operation_release_t release)
-{
- INIT_WORK(&op->work, fscache_op_work_func);
- atomic_set(&op->usage, 1);
- op->state = FSCACHE_OP_ST_INITIALISED;
- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
- op->processor = processor;
- op->release = release;
- INIT_LIST_HEAD(&op->pend_link);
-}
+extern void fscache_operation_init(struct fscache_operation *,
+ fscache_operation_processor_t,
+ fscache_operation_cancel_t,
+ fscache_operation_release_t);
/*
* data read operation
*/
struct fscache_retrieval {
struct fscache_operation op;
+ struct fscache_cookie *cookie; /* The netfs cookie */
struct address_space *mapping; /* netfs pages */
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
@@ -371,6 +359,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
+#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
@@ -410,17 +399,16 @@ static inline bool fscache_object_is_available(struct fscache_object *object)
return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
}
-static inline bool fscache_object_is_active(struct fscache_object *object)
+static inline bool fscache_cache_is_broken(struct fscache_object *object)
{
- return fscache_object_is_available(object) &&
- fscache_object_is_live(object) &&
- !test_bit(FSCACHE_IOERROR, &object->cache->flags);
+ return test_bit(FSCACHE_IOERROR, &object->cache->flags);
}
-static inline bool fscache_object_is_dead(struct fscache_object *object)
+static inline bool fscache_object_is_active(struct fscache_object *object)
{
- return fscache_object_is_dying(object) &&
- test_bit(FSCACHE_IOERROR, &object->cache->flags);
+ return fscache_object_is_available(object) &&
+ fscache_object_is_live(object) &&
+ !fscache_cache_is_broken(object);
}
/**
@@ -551,4 +539,15 @@ extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data,
uint16_t datalen);
+extern void fscache_object_retrying_stale(struct fscache_object *object);
+
+enum fscache_why_object_killed {
+ FSCACHE_OBJECT_IS_STALE,
+ FSCACHE_OBJECT_NO_SPACE,
+ FSCACHE_OBJECT_WAS_RETIRED,
+ FSCACHE_OBJECT_WAS_CULLED,
+};
+extern void fscache_object_mark_killed(struct fscache_object *object,
+ enum fscache_why_object_killed why);
+
#endif /* _LINUX_FSCACHE_CACHE_H */
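fscache_operation_init() is now out of line and takes a cancellation handler between the processor and release callbacks; the new ->cancel member is documented above as optional. A simplified call-site sketch for a retrieval operation, with all my_* routines assumed and the remaining retrieval setup omitted:

	#include <linux/fscache-cache.h>
	#include <linux/slab.h>

	static struct fscache_retrieval *my_alloc_retrieval(void)
	{
		struct fscache_retrieval *r;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return NULL;

		fscache_operation_init(&r->op, my_read_processor,
				       my_read_cancel, my_read_release);
		return r;
	}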
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index a82296a..2a2f56b 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -24,6 +24,7 @@
#define FSL_USB_VER_1_6 1
#define FSL_USB_VER_2_2 2
#define FSL_USB_VER_2_4 3
+#define FSL_USB_VER_2_5 4
#include <linux/types.h>
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 1c804b0..7ee1774 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
new_dir_mask |= FS_ISDIR;
}
- fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
- fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
+ fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
+ fs_cookie);
+ fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
+ fs_cookie);
if (target)
fsnotify_link_count(target);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0f313f9..65a517d 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -84,8 +84,6 @@ struct fsnotify_fname;
 * Each group must define these ops. The fsnotify infrastructure will call
* these operations for each relevant group.
*
- * should_send_event - given a group, inode, and mask this function determines
- * if the group is interested in this event.
* handle_event - main call for a group to handle an fs event
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
new file mode 100644
index 0000000..0408545
--- /dev/null
+++ b/include/linux/fwnode.h
@@ -0,0 +1,27 @@
+/*
+ * fwnode.h - Firmware device node object handle type definition.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_FWNODE_H_
+#define _LINUX_FWNODE_H_
+
+enum fwnode_type {
+ FWNODE_INVALID = 0,
+ FWNODE_OF,
+ FWNODE_ACPI,
+ FWNODE_PDATA,
+};
+
+struct fwnode_handle {
+ enum fwnode_type type;
+ struct fwnode_handle *secondary;
+};
+
+#endif
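Code that is handed a struct fwnode_handle can dispatch on the new type field; a trivial helper sketch (not part of this header):

	#include <linux/fwnode.h>

	static inline bool my_fwnode_is_of(struct fwnode_handle *fwnode)
	{
		return fwnode && fwnode->type == FWNODE_OF;
	}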
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 1ccaab4..5383bb1 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -119,16 +119,16 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
int min_alloc_order, int nid);
-extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+extern struct gen_pool *gen_pool_get(struct device *dev);
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
size_t size);
#ifdef CONFIG_OF
-extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+extern struct gen_pool *of_gen_pool_get(struct device_node *np,
const char *propname, int index);
#else
-static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
const char *propname, int index)
{
return NULL;
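Callers adapt to the renames mechanically; for example, a driver that previously used of_get_named_gen_pool() would now do something like the following (the "sram" property name is only an example):

	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);

	if (!pool)
		return -ENODEV;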
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b840e3b..ad35f30 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,7 @@ struct vm_area_struct;
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
+#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
@@ -57,8 +58,10 @@ struct vm_area_struct;
* _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. This modifier is deprecated and no new
- * users should be added.
+ * cannot handle allocation failures. New users should be evaluated carefully
+ * (and the flag should be used only when there is no reasonable failure policy)
+ * but it is definitely preferable to use the flag rather than opencode endless
+ * loop around allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*
@@ -85,6 +88,7 @@ struct vm_area_struct;
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
@@ -117,16 +121,6 @@ struct vm_area_struct;
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
-/*
- * GFP_THISNODE does not perform any reclaim, you most likely want to
- * use __GFP_THISNODE to allocate from a given node without fallback!
- */
-#ifdef CONFIG_NUMA
-#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
-#else
-#define GFP_THISNODE ((__force gfp_t)0)
-#endif
-
/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -334,18 +328,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node);
+ int node, bool hugepage);
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage) \
+ alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
@@ -370,6 +368,11 @@ extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+struct page_frag_cache;
+extern void *__alloc_page_frag(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask);
+extern void __free_page_frag(void *addr);
+
extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);
@@ -381,6 +384,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void page_alloc_init_late(void);
+#else
+static inline void page_alloc_init_late(void)
+{
+}
+#endif
+
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
* GFP flags are used before interrupts are enabled. Once interrupts are
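
Two of the additions above are self-contained enough to illustrate. __GFP_NOACCOUNT simply rides along with an ordinary allocation (e.g. kmalloc(size, GFP_KERNEL | __GFP_NOACCOUNT)) to keep it out of kmemcg accounting, and the page-fragment pair carves small buffers out of a cached page. A rough sketch of the latter, assuming struct page_frag_cache is provided by linux/mm_types.h as in this series; a real user keeps the cache in per-CPU or per-device state rather than a file-scope static:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page_frag_cache example_frag_cache;	/* zero-initialized */

static void *example_get_frag(unsigned int size)
{
	/* Returns a size-byte fragment backed by a refcounted page. */
	return __alloc_page_frag(&example_frag_cache, size, GFP_ATOMIC);
}

static void example_put_frag(void *data)
{
	/* Drops the page reference taken for this fragment. */
	__free_page_frag(data);
}
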
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 569236e..93e080b 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -3,13 +3,24 @@
/* Helpers for Goldfish virtual platform */
-static inline void gf_write64(unsigned long data,
- void __iomem *portl, void __iomem *porth)
+static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
+ void __iomem *porth)
{
- writel((u32)data, portl);
+ writel((u32)(unsigned long)ptr, portl);
#ifdef CONFIG_64BIT
- writel(data>>32, porth);
+ writel((unsigned long)ptr >> 32, porth);
#endif
}
+static inline void gf_write_dma_addr(const dma_addr_t addr,
+ void __iomem *portl,
+ void __iomem *porth)
+{
+ writel((u32)addr, portl);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(addr >> 32, porth);
+#endif
+}
+
+
#endif /* __LINUX_GOLDFISH_H */
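
A small sketch of how a Goldfish-style driver would use the two helpers; the register offsets and the MMIO base are made up for illustration. The point of the split is that gf_write_ptr() keys the high word off CONFIG_64BIT while gf_write_dma_addr() keys it off CONFIG_ARCH_DMA_ADDR_T_64BIT:

#include <linux/goldfish.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_REG_CMD_LOW	0x10	/* hypothetical register layout */
#define MY_REG_CMD_HIGH	0x14
#define MY_REG_DMA_LOW	0x18
#define MY_REG_DMA_HIGH	0x1c

static void example_post(void __iomem *base, void *cmd, dma_addr_t handle)
{
	/* Kernel pointer: high half written only on 64-bit builds. */
	gf_write_ptr(cmd, base + MY_REG_CMD_LOW, base + MY_REG_CMD_HIGH);

	/* DMA address: width follows dma_addr_t, not the pointer size. */
	gf_write_dma_addr(handle, base + MY_REG_DMA_LOW, base + MY_REG_DMA_HIGH);
}
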
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index ab81339..d12b5d5 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -196,13 +196,6 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -EINVAL;
}
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- /* GPIO can never have been requested */
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline void gpio_unexport(unsigned gpio)
{
/* GPIO can never have been exported */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index fd85cb1..adac255 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -16,6 +16,15 @@ struct device;
*/
struct gpio_desc;
+/**
+ * Struct containing an array of descriptors that can be obtained using
+ * gpiod_get_array().
+ */
+struct gpio_descs {
+ unsigned int ndescs;
+ struct gpio_desc *desc[];
+};
+
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
@@ -34,6 +43,9 @@ enum gpiod_flags {
#ifdef CONFIG_GPIOLIB
+/* Return the number of GPIOs associated with a device / function */
+int gpiod_count(struct device *dev, const char *con_id);
+
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
@@ -49,7 +61,14 @@ struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
+struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
void gpiod_put(struct gpio_desc *desc);
+void gpiod_put_array(struct gpio_descs *descs);
struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -64,7 +83,14 @@ struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
struct gpio_desc *__must_check
__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
+struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
@@ -74,24 +100,25 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
void gpiod_set_value(struct gpio_desc *desc, int value);
-void gpiod_set_array(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+void gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+void gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_array_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+void gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
@@ -110,9 +137,15 @@ struct fwnode_handle;
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname);
struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
+ const char *con_id,
struct fwnode_handle *child);
#else /* CONFIG_GPIOLIB */
+static inline int gpiod_count(struct device *dev, const char *con_id)
+{
+ return 0;
+}
+
static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
@@ -142,6 +175,20 @@ __gpiod_get_index_optional(struct device *dev, const char *con_id,
return ERR_PTR(-ENOSYS);
}
+static inline struct gpio_descs *__must_check
+gpiod_get_array(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void gpiod_put(struct gpio_desc *desc)
{
might_sleep();
@@ -150,6 +197,14 @@ static inline void gpiod_put(struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void gpiod_put_array(struct gpio_descs *descs)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline struct gpio_desc *__must_check
__devm_gpiod_get(struct device *dev,
const char *con_id,
@@ -181,6 +236,20 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
return ERR_PTR(-ENOSYS);
}
+static inline struct gpio_descs *__must_check
+devm_gpiod_get_array(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
might_sleep();
@@ -189,6 +258,15 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void devm_gpiod_put_array(struct device *dev,
+ struct gpio_descs *descs)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
@@ -227,9 +305,9 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline void gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -245,9 +323,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline void gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -264,7 +342,7 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array_cansleep(unsigned int array_size,
+static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
@@ -283,7 +361,7 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_raw_array_cansleep(unsigned int array_size,
+static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
@@ -329,6 +407,21 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
return -EINVAL;
}
+/* Child properties interface */
+struct fwnode_handle;
+
+static inline struct gpio_desc *fwnode_get_named_gpiod(
+ struct fwnode_handle *fwnode, const char *propname)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *devm_get_gpiod_from_child(
+ struct device *dev, const char *con_id, struct fwnode_handle *child)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
#endif /* CONFIG_GPIOLIB */
/*
@@ -340,38 +433,38 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
* etc.
*/
#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, 0)
+#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
#define __gpiod_get_index(dev, con_id, index, flags, ...) \
__gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0)
+#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
#define __gpiod_get_optional(dev, con_id, flags, ...) \
__gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0)
+#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
__gpiod_get_index_optional(dev, con_id, index, flags)
#define gpiod_get_index_optional(varargs...) \
- __gpiod_get_index_optional(varargs, 0)
+ __gpiod_get_index_optional(varargs, GPIOD_ASIS)
#define __devm_gpiod_get(dev, con_id, flags, ...) \
__devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0)
+#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
__devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0)
+#define devm_gpiod_get_index(varargs...) \
+ __devm_gpiod_get_index(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
__devm_gpiod_get_optional(dev, con_id, flags)
#define devm_gpiod_get_optional(varargs...) \
- __devm_gpiod_get_optional(varargs, 0)
+ __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
__devm_gpiod_get_index_optional(dev, con_id, index, flags)
#define devm_gpiod_get_index_optional(varargs...) \
- __devm_gpiod_get_index_optional(varargs, 0)
+ __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
-int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
void gpiod_unexport(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
@@ -388,11 +481,6 @@ static inline int gpiod_export_link(struct device *dev, const char *name,
return -ENOSYS;
}
-static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
-{
- return -ENOSYS;
-}
-
static inline void gpiod_unexport(struct gpio_desc *desc)
{
}
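
A minimal consumer sketch of the new array API together with the renamed setter; the "led" con_id, the bit pattern, and the eight-entry cap are illustrative, not part of the interface:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>

static int example_set_leds(struct device *dev, unsigned long pattern)
{
	struct gpio_descs *leds;
	int values[8];
	unsigned int i, n;

	leds = devm_gpiod_get_array(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(leds))
		return PTR_ERR(leds);

	n = min_t(unsigned int, leds->ndescs, ARRAY_SIZE(values));
	for (i = 0; i < n; i++)
		values[i] = !!(pattern & BIT(i));

	/* Renamed from gpiod_set_array() in this series. */
	gpiod_set_array_value(n, leds->desc, values);
	return 0;
}
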
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c497c62..c8393cd 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/pinctrl/pinctrl.h>
struct device;
struct gpio_desc;
@@ -19,6 +20,7 @@ struct seq_file;
* struct gpio_chip - abstract a GPIO controller
* @label: for diagnostics
* @dev: optional device providing the GPIOs
+ * @cdev: class device used by sysfs interface (may be NULL)
* @owner: helps prevent removal of modules exporting active GPIOs
* @list: links gpio_chips together for traversal
* @request: optional hook for chip-specific activation, such as
@@ -40,8 +42,12 @@ struct seq_file;
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
* state (such as pullup/pulldown configuration).
- * @base: identifies the first GPIO number handled by this chip; or, if
- * negative during registration, requests dynamic ID allocation.
+ * @base: identifies the first GPIO number handled by this chip;
+ * or, if negative during registration, requests dynamic ID allocation.
+ * DEPRECATION: providing anything non-negative and nailing the base
+ * offset of GPIO chips is deprecated. Please pass -1 as base to
+ * let gpiolib select the chip base in all possible cases. We want to
+ * get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
* @desc: array of ngpio descriptors. Private.
@@ -56,7 +62,6 @@ struct seq_file;
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @exported: flags if the gpiochip is exported for use from sysfs. Private.
* @irq_not_threaded: flag must be set if @can_sleep is set but the
* IRQs don't need to be threaded
*
@@ -73,6 +78,7 @@ struct seq_file;
struct gpio_chip {
const char *label;
struct device *dev;
+ struct device *cdev;
struct module *owner;
struct list_head list;
@@ -108,7 +114,6 @@ struct gpio_chip {
const char *const *names;
bool can_sleep;
bool irq_not_threaded;
- bool exported;
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
@@ -120,6 +125,7 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
+ int irq_parent;
#endif
#if defined(CONFIG_OF_GPIO)
@@ -173,6 +179,53 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
#endif /* CONFIG_GPIOLIB_IRQCHIP */
+#ifdef CONFIG_PINCTRL
+
+/**
+ * struct gpio_pin_range - pin range controlled by a gpio chip
+ * @head: list for maintaining set of pin ranges, used internally
+ * @pctldev: pinctrl device which handles corresponding pins
+ * @range: actual range of pins controlled by a gpio controller
+ */
+
+struct gpio_pin_range {
+ struct list_head node;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range range;
+};
+
+int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins);
+int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group);
+void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+
+#else
+
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins)
+{
+ return 0;
+}
+static inline int
+gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group)
+{
+ return 0;
+}
+
+static inline void
+gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+{
+}
+
+#endif /* CONFIG_PINCTRL */
+
struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label);
void gpiochip_free_own_desc(struct gpio_desc *desc);
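
For GPIO drivers the pin-range helpers are the interesting addition here; the !CONFIG_PINCTRL stubs above make them safe to call unconditionally. A rough sketch from a hypothetical probe path, with the pin-controller name and the offsets invented for illustration:

#include <linux/gpio/driver.h>

static int example_map_pins(struct gpio_chip *chip)
{
	/* GPIOs 0..15 of this chip sit on pins 32..47 of "pinctrl-foo". */
	int ret = gpiochip_add_pin_range(chip, "pinctrl-foo",
					 0 /* gpio_offset */,
					 32 /* pin_offset */,
					 16 /* npins */);
	if (ret)
		return ret;

	/* At remove time (or on a later error path): */
	/* gpiochip_remove_pin_ranges(chip); */
	return 0;
}
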
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index cba442e..dfd59d6 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,7 +1,7 @@
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
@@ -9,7 +9,7 @@
extern void synchronize_irq(unsigned int irq);
-extern void synchronize_hardirq(unsigned int irq);
+extern bool synchronize_hardirq(unsigned int irq);
#if defined(CONFIG_TINY_RCU)
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index cbb5790..e974420 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -25,6 +25,7 @@
#define __LINUX_HDMI_H_
#include <linux/types.h>
+#include <linux/device.h>
enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
@@ -52,12 +53,18 @@ enum hdmi_colorspace {
HDMI_COLORSPACE_RGB,
HDMI_COLORSPACE_YUV422,
HDMI_COLORSPACE_YUV444,
+ HDMI_COLORSPACE_YUV420,
+ HDMI_COLORSPACE_RESERVED4,
+ HDMI_COLORSPACE_RESERVED5,
+ HDMI_COLORSPACE_RESERVED6,
+ HDMI_COLORSPACE_IDO_DEFINED,
};
enum hdmi_scan_mode {
HDMI_SCAN_MODE_NONE,
HDMI_SCAN_MODE_OVERSCAN,
HDMI_SCAN_MODE_UNDERSCAN,
+ HDMI_SCAN_MODE_RESERVED,
};
enum hdmi_colorimetry {
@@ -71,6 +78,7 @@ enum hdmi_picture_aspect {
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
+ HDMI_PICTURE_ASPECT_RESERVED,
};
enum hdmi_active_aspect {
@@ -92,12 +100,18 @@ enum hdmi_extended_colorimetry {
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
+
+ /* The following EC values are only defined in CEA-861-F. */
+ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
+ HDMI_EXTENDED_COLORIMETRY_BT2020,
+ HDMI_EXTENDED_COLORIMETRY_RESERVED,
};
enum hdmi_quantization_range {
HDMI_QUANTIZATION_RANGE_DEFAULT,
HDMI_QUANTIZATION_RANGE_LIMITED,
HDMI_QUANTIZATION_RANGE_FULL,
+ HDMI_QUANTIZATION_RANGE_RESERVED,
};
/* non-uniform picture scaling */
@@ -114,7 +128,7 @@ enum hdmi_ycc_quantization_range {
};
enum hdmi_content_type {
- HDMI_CONTENT_TYPE_NONE,
+ HDMI_CONTENT_TYPE_GRAPHICS,
HDMI_CONTENT_TYPE_PHOTO,
HDMI_CONTENT_TYPE_CINEMA,
HDMI_CONTENT_TYPE_GAME,
@@ -194,6 +208,7 @@ enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_MLP,
HDMI_AUDIO_CODING_TYPE_DST,
HDMI_AUDIO_CODING_TYPE_WMA_PRO,
+ HDMI_AUDIO_CODING_TYPE_CXT,
};
enum hdmi_audio_sample_size {
@@ -215,10 +230,25 @@ enum hdmi_audio_sample_frequency {
};
enum hdmi_audio_coding_type_ext {
- HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
+ /* Refer to Audio Coding Type (CT) field in Data Byte 1 */
+ HDMI_AUDIO_CODING_TYPE_EXT_CT,
+
+ /*
+ * The next three CXT values are defined in CEA-861-E only.
+ * They do not exist in older versions, and in CEA-861-F they are
+ * defined as 'Not in use'.
+ */
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
+
+ /* The following CXT values are only defined in CEA-861-F. */
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC,
+ HDMI_AUDIO_CODING_TYPE_EXT_DRA,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
};
struct hdmi_audio_infoframe {
@@ -299,5 +329,8 @@ union hdmi_infoframe {
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+void hdmi_infoframe_log(const char *level, struct device *dev,
+ union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
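
The unpack/log pair gives drivers a symmetric counterpart to hdmi_infoframe_pack(). A short sketch, assuming buf holds one complete packed infoframe (header plus checksummed payload) read back from hardware:

#include <linux/device.h>
#include <linux/hdmi.h>
#include <linux/printk.h>

static int example_dump_infoframe(struct device *dev, void *buf)
{
	union hdmi_infoframe frame;
	int ret;

	ret = hdmi_infoframe_unpack(&frame, buf);	/* validates the checksum */
	if (ret < 0)
		return ret;

	hdmi_infoframe_log(KERN_DEBUG, dev, &frame);	/* human-readable dump */
	return 0;
}
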
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 51f7cca..c02b5ce 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -33,6 +33,8 @@
 * @units: Measurement unit for this attribute.
* @unit_expo: Exponent used in the data.
* @size: Size in bytes for data size.
+ * @logical_minimum: Logical minimum value for this attribute.
+ * @logical_maximum: Logical maximum value for this attribute.
*/
struct hid_sensor_hub_attribute_info {
u32 usage_id;
@@ -47,19 +49,43 @@ struct hid_sensor_hub_attribute_info {
};
/**
+ * struct sensor_hub_pending - Synchronous read pending information
+ * @status: Pending status true/false.
+ * @ready: Completion synchronization data.
+ * @usage_id: Usage id for physical device, E.g. Gyro usage id.
+ * @attr_usage_id: Usage Id of a field, E.g. X-AXIS for a gyro.
+ * @raw_size: Response size for a read request.
+ * @raw_data: Place holder for received response.
+ */
+struct sensor_hub_pending {
+ bool status;
+ struct completion ready;
+ u32 usage_id;
+ u32 attr_usage_id;
+ int raw_size;
+ u8 *raw_data;
+};
+
+/**
* struct hid_sensor_hub_device - Stores the hub instance data
* @hdev: Stores the hid instance.
* @vendor_id: Vendor id of hub device.
* @product_id: Product id of hub device.
+ * @usage: Usage id for this hub device instance.
* @start_collection_index: Starting index for a phy type collection
* @end_collection_index: Last index for a phy type collection
+ * @mutex_ptr: synchronizing mutex pointer.
+ * @pending: Holds information of pending sync read request.
*/
struct hid_sensor_hub_device {
struct hid_device *hdev;
u32 vendor_id;
u32 product_id;
+ u32 usage;
int start_collection_index;
int end_collection_index;
+ struct mutex *mutex_ptr;
+ struct sensor_hub_pending pending;
};
/**
@@ -146,41 +172,55 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
/**
* sensor_hub_input_attr_get_raw_value() - Synchronous read request
+* @hsdev: Hub device instance.
* @usage_id: Attribute usage id of parent physical device as per spec
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
+* @flag: Synchronous or asynchronous read
*
-* Issues a synchronous read request for an input attribute. Returns
-* data upto 32 bits. Since client can get events, so this call should
-* not be used for data paths, this will impact performance.
+* Issues a synchronous or asynchronous read request for an input attribute.
+* Returns data up to 32 bits.
*/
+enum sensor_hub_read_flags {
+ SENSOR_HUB_SYNC,
+ SENSOR_HUB_ASYNC,
+};
+
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
- u32 usage_id,
- u32 attr_usage_id, u32 report_id);
+ u32 usage_id,
+ u32 attr_usage_id, u32 report_id,
+ enum sensor_hub_read_flags flag
+);
+
/**
* sensor_hub_set_feature() - Feature set request
+* @hsdev: Hub device instance.
* @report_id: Report id to look for
* @field_index: Field index inside a report
-* @value: Value to set
+* @buffer_size: size of the buffer
+* @buffer: buffer to use in the feature set
*
* Used to set a field in feature report. For example this can set polling
* interval, sensitivity, activate/deactivate state.
*/
int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
- u32 field_index, s32 value);
+ u32 field_index, int buffer_size, void *buffer);
/**
* sensor_hub_get_feature() - Feature get request
+* @hsdev: Hub device instance.
* @report_id: Report id to look for
* @field_index: Field index inside a report
-* @value: Place holder for return value
+* @buffer_size: size of the buffer
+* @buffer: buffer to copy output
*
* Used to get a field in feature report. For example this can get polling
-* interval, sensitivity, activate/deactivate state.
+* interval, sensitivity, activate/deactivate state. On success it returns
+* number of bytes copied to the buffer. On failure, it returns a negative value.
*/
int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
- u32 field_index, s32 *value);
+ u32 field_index, int buffer_size, void *buffer);
/* hid-sensor-attributes */
@@ -190,6 +230,7 @@ struct hid_sensor_common {
struct platform_device *pdev;
unsigned usage_id;
atomic_t data_ready;
+ atomic_t user_requested_state;
struct iio_trigger *trigger;
struct hid_sensor_hub_attribute_info poll;
struct hid_sensor_hub_attribute_info report_state;
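
A sketch of how a sensor driver would use the extended interfaces: a blocking read via SENSOR_HUB_SYNC and a feature write through the new buffer-based setter. It assumes the attribute info (attrib_id, index, report_id) was filled in at probe time by sensor_hub_input_get_attribute_info(), and the 100 ms interval is just an example value:

#include <linux/hid-sensor-hub.h>
#include <linux/hid-sensor-ids.h>

static int example_read_accel_axis(struct hid_sensor_hub_device *hsdev,
				   struct hid_sensor_hub_attribute_info *attr,
				   struct hid_sensor_hub_attribute_info *poll)
{
	s32 interval_ms = 100;
	int value;

	/* SENSOR_HUB_SYNC sleeps on the completion until the report arrives. */
	value = sensor_hub_input_attr_get_raw_value(hsdev,
						    HID_USAGE_SENSOR_ACCEL_3D,
						    attr->attrib_id,
						    attr->report_id,
						    SENSOR_HUB_SYNC);

	/* The feature setter now takes a buffer and its size instead of one s32. */
	sensor_hub_set_feature(hsdev, poll->report_id, poll->index,
			       sizeof(interval_ms), &interval_ms);
	return value;
}
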
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 109f0e6..f2ee90a 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -21,6 +21,8 @@
#define HID_MAX_PHY_DEVICES 0xFF
+#define HID_USAGE_SENSOR_COLLECTION 0x200001
+
/* Accel 3D (200073) */
#define HID_USAGE_SENSOR_ACCEL_3D 0x200073
#define HID_USAGE_SENSOR_DATA_ACCELERATION 0x200452
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 06c4607..f17980d 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -159,6 +159,7 @@ struct hid_item {
#define HID_UP_LED 0x00080000
#define HID_UP_BUTTON 0x00090000
#define HID_UP_ORDINAL 0x000a0000
+#define HID_UP_TELEPHONY 0x000b0000
#define HID_UP_CONSUMER 0x000c0000
#define HID_UP_DIGITIZER 0x000d0000
#define HID_UP_PID 0x000f0000
@@ -269,6 +270,7 @@ struct hid_item {
#define HID_DG_DEVICEINDEX 0x000d0053
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
+#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
@@ -514,10 +516,10 @@ struct hid_device { /* device report descriptor */
#ifdef CONFIG_HID_BATTERY_STRENGTH
/*
* Power supply information for HID devices which report
- * battery strength. power_supply is registered iff
- * battery.name is non-NULL.
+ * battery strength. power_supply was successfully registered if
+ * battery is non-NULL.
*/
- struct power_supply battery;
+ struct power_supply *battery;
__s32 battery_min;
__s32 battery_max;
__s32 battery_report_type;
@@ -574,7 +576,9 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
#define HID_GLOBAL_STACK_SIZE 4
#define HID_COLLECTION_STACK_SIZE 4
-#define HID_SCAN_FLAG_MT_WIN_8 0x00000001
+#define HID_SCAN_FLAG_MT_WIN_8 BIT(0)
+#define HID_SCAN_FLAG_VENDOR_SPECIFIC BIT(1)
+#define HID_SCAN_FLAG_GD_POINTER BIT(2)
struct hid_parser {
struct hid_global global;
@@ -811,6 +815,8 @@ void hid_disconnect(struct hid_device *hid);
const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id);
s32 hid_snto32(__u32 value, unsigned n);
+__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
+ unsigned offset, unsigned n);
/**
* hid_device_io_start - enable HID input during probe, remove
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 9286a46..6aefcd0 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -65,6 +65,7 @@ static inline void kunmap(struct page *page)
static inline void *kmap_atomic(struct page *page)
{
+ preempt_disable();
pagefault_disable();
return page_address(page);
}
@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page)
static inline void __kunmap_atomic(void *addr)
{
pagefault_enable();
+ preempt_enable();
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
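
The change is invisible in the calling convention but matters for correctness: kmap_atomic() now disables preemption even on configurations without highmem, so the mapped section must stay atomic everywhere. The usual pattern, for reference:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void example_zero_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* disables pagefaults and preemption */

	memset(vaddr, 0, PAGE_SIZE);		/* no sleeping in this window */

	kunmap_atomic(vaddr);
}
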
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index bb9840f..d2ba7d3 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -135,6 +135,7 @@ struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
@@ -250,17 +251,29 @@ void host1x_job_unpin(struct host1x_job *job);
struct host1x_device;
struct host1x_driver {
+ struct device_driver driver;
+
const struct of_device_id *subdevs;
struct list_head list;
- const char *name;
int (*probe)(struct host1x_device *device);
int (*remove)(struct host1x_device *device);
+ void (*shutdown)(struct host1x_device *device);
};
-int host1x_driver_register(struct host1x_driver *driver);
+static inline struct host1x_driver *
+to_host1x_driver(struct device_driver *driver)
+{
+ return container_of(driver, struct host1x_driver, driver);
+}
+
+int host1x_driver_register_full(struct host1x_driver *driver,
+ struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);
+#define host1x_driver_register(driver) \
+ host1x_driver_register_full(driver, THIS_MODULE)
+
struct host1x_device {
struct host1x_driver *driver;
struct list_head list;
@@ -272,6 +285,8 @@ struct host1x_device {
struct mutex clients_lock;
struct list_head clients;
+
+ bool registered;
};
static inline struct host1x_device *to_host1x_device(struct device *dev)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index a036d05..76dd4f0 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -53,34 +53,25 @@ enum hrtimer_restart {
*
* 0x00 inactive
* 0x01 enqueued into rbtree
- * 0x02 callback function running
- * 0x04 timer is migrated to another cpu
*
- * Special cases:
- * 0x03 callback function running and enqueued
- * (was requeued on another CPU)
- * 0x05 timer was migrated on CPU hotunplug
+ * The callback state is not part of the timer->state because clearing it would
+ * mean touching the timer after the callback, this makes it impossible to free
+ * the timer from the callback function.
*
- * The "callback function running and enqueued" status is only possible on
- * SMP. It happens for example when a posix timer expired and the callback
+ * Therefore we track the callback state in:
+ *
+ * timer->base->cpu_base->running == timer
+ *
+ * On SMP it is possible to have a "callback function running and enqueued"
+ * status. It happens for example when a posix timer expired and the callback
* queued a signal. Between dropping the lock which protects the posix timer
* and reacquiring the base lock of the hrtimer, another CPU can deliver the
- * signal and rearm the timer. We have to preserve the callback running state,
- * as otherwise the timer could be removed before the softirq code finishes the
- * the handling of the timer.
- *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
- * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
- * also affects HRTIMER_STATE_MIGRATE where the preservation is not
- * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
- * enqueued on the new cpu.
+ * signal and rearm the timer.
*
* All state transitions are protected by cpu_base->lock.
*/
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
-#define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_MIGRATE 0x04
/**
* struct hrtimer - the basic hrtimer structure
@@ -130,6 +121,12 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
+#ifdef CONFIG_64BIT
+# define HRTIMER_CLOCK_BASE_ALIGN 64
+#else
+# define HRTIMER_CLOCK_BASE_ALIGN 32
+#endif
+
/**
* struct hrtimer_clock_base - the timer base for a specific clock
* @cpu_base: per cpu clock base
@@ -137,9 +134,7 @@ struct hrtimer_sleeper {
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @active: red black tree root node for the active timers
- * @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
- * @softirq_time: the time when running the hrtimer queue in the softirq
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
@@ -147,11 +142,9 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
- ktime_t resolution;
ktime_t (*get_time)(void);
- ktime_t softirq_time;
ktime_t offset;
-};
+} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
@@ -165,11 +158,17 @@ enum hrtimer_base_type {
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set: Indicates that clock was set from irq context.
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @migration_enabled: The migration of hrtimers to other cpus is enabled
+ * @nohz_active: The nohz functionality is enabled
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
+ * @next_timer: Pointer to the first expiring timer
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
* @hang_detected: The last hrtimer interrupt detected a hang
* @nr_events: Total number of hrtimer interrupt events
@@ -177,26 +176,38 @@ enum hrtimer_base_type {
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ * Do not dereference the pointer because it is not reliable on
+ * cross cpu removals.
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
+ seqcount_t seq;
+ struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
- unsigned int clock_was_set;
+ unsigned int clock_was_set_seq;
+ bool migration_enabled;
+ bool nohz_active;
#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int in_hrtirq : 1,
+ hres_active : 1,
+ hang_detected : 1;
ktime_t expires_next;
- int hres_active;
- int hang_detected;
- unsigned long nr_events;
- unsigned long nr_retries;
- unsigned long nr_hangs;
- ktime_t max_hang_time;
+ struct hrtimer *next_timer;
+ unsigned int nr_events;
+ unsigned int nr_retries;
+ unsigned int nr_hangs;
+ unsigned int max_hang_time;
#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-};
+} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
+ BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
+
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -260,19 +271,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
return ktime_sub(timer->node.expires, timer->base->get_time());
}
-#ifdef CONFIG_HIGH_RES_TIMERS
-struct clock_event_device;
-
-extern void hrtimer_interrupt(struct clock_event_device *dev);
-
-/*
- * In high resolution mode the time reference must be read accurate
- */
static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
return timer->base->get_time();
}
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
+
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return timer->base->cpu_base->hres_active;
@@ -293,21 +301,16 @@ extern void hrtimer_peek_ahead_timers(void);
extern void clock_was_set_delayed(void);
+extern unsigned int hrtimer_resolution;
+
#else
# define MONOTONIC_RES_NSEC LOW_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-static inline void hrtimer_peek_ahead_timers(void) { }
+#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-/*
- * In non high resolution mode the time reference is taken from
- * the base softirq time variable.
- */
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
-{
- return timer->base->softirq_time;
-}
+static inline void hrtimer_peek_ahead_timers(void) { }
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
@@ -351,49 +354,47 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
/* Basic timer operations: */
-extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode);
-extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unsigned long range_ns, const enum hrtimer_mode mode);
-extern int
-__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- unsigned long delta_ns,
- const enum hrtimer_mode mode, int wakeup);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL)
+ */
+static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+ hrtimer_start_range_ns(timer, tim, 0, mode);
+}
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
-static inline int hrtimer_start_expires(struct hrtimer *timer,
- enum hrtimer_mode mode)
+static inline void hrtimer_start_expires(struct hrtimer *timer,
+ enum hrtimer_mode mode)
{
unsigned long delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
- return hrtimer_start_range_ns(timer, soft, delta, mode);
+ hrtimer_start_range_ns(timer, soft, delta, mode);
}
-static inline int hrtimer_restart(struct hrtimer *timer)
+static inline void hrtimer_restart(struct hrtimer *timer)
{
- return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
-extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-extern ktime_t hrtimer_get_next_event(void);
+extern u64 hrtimer_get_next_event(void);
-/*
- * A timer is active, when it is enqueued into the rbtree or the
- * callback function is running or it's in the state of being migrated
- * to another cpu.
- */
-static inline int hrtimer_active(const struct hrtimer *timer)
-{
- return timer->state != HRTIMER_STATE_INACTIVE;
-}
+extern bool hrtimer_active(const struct hrtimer *timer);
/*
* Helper function to check, whether the timer is on one of the queues
@@ -409,14 +410,29 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_CALLBACK;
+ return timer->base->cpu_base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
-/* Forward a hrtimer so it expires after the hrtimer's current now */
+/**
+ * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * @timer: hrtimer to forward
+ * @interval: the interval to forward
+ *
+ * Forward the timer expiry so it will expire after the current time
+ * of the hrtimer clock base. Returns the number of overruns.
+ *
+ * Can be safely called from the callback function of @timer. If
+ * called from other contexts @timer must neither be enqueued nor
+ * running the callback and the caller needs to take care of
+ * serialization.
+ *
+ * Note: This only updates the timer expiry value and does not requeue
+ * the timer.
+ */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
@@ -441,7 +457,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
-extern void hrtimer_run_pending(void);
/* Bootup initialization: */
extern void __init hrtimers_init(void);
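
From a user's point of view the visible changes are that hrtimer_start() and friends no longer return a value and that "callback running" is now tracked via cpu_base->running instead of a state bit. A minimal periodic-timer sketch under the reworked API; the 100 ms period is illustrative:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* ... periodic work ... */
	hrtimer_forward_now(t, ms_to_ktime(100));	/* push expiry forward */
	return HRTIMER_RESTART;				/* requeue the timer */
}

static void example_timer_start(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_timer_fn;
	hrtimer_start(&example_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}
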
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 3ec0630..5dd60c2 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -135,9 +135,9 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info,
* @device: Driver model representation of the device
* @tx_cfg: HSI TX configuration
* @rx_cfg: HSI RX configuration
- * e_handler: Callback for handling port events (RX Wake High/Low)
+ * @pclaimed: Keeps track of whether the client claimed its associated HSI port
- * nb: Notifier block for port events
+ * @e_handler: Callback for handling port events (RX Wake High/Low)
+ * @pclaimed: Keeps tracks if the clients claimed its associated HSI port
+ * @nb: Notifier block for port events
*/
struct hsi_client {
struct device device;
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
index 70a1dbb..d4a527e 100644
--- a/include/linux/htirq.h
+++ b/include/linux/htirq.h
@@ -1,24 +1,38 @@
#ifndef LINUX_HTIRQ_H
#define LINUX_HTIRQ_H
+struct pci_dev;
+struct irq_data;
+
struct ht_irq_msg {
u32 address_lo; /* low 32 bits of the ht irq message */
 u32 address_hi; /* high 32 bits of the ht irq message */
};
+typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
+ struct ht_irq_msg *msg);
+
+struct ht_irq_cfg {
+ struct pci_dev *dev;
+ /* Update callback used to cope with buggy hardware */
+ ht_irq_update_t *update;
+ unsigned pos;
+ unsigned idx;
+ struct ht_irq_msg msg;
+};
+
/* Helper functions.. */
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-struct irq_data;
void mask_ht_irq(struct irq_data *data);
void unmask_ht_irq(struct irq_data *data);
/* The arch hook for getting things started */
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
+int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
+ ht_irq_update_t *update);
+void arch_teardown_ht_irq(unsigned int irq);
/* For drivers of buggy hardware */
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg);
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
#endif /* LINUX_HTIRQ_H */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ad9051b..f10b20f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -157,6 +157,13 @@ static inline int hpage_nr_pages(struct page *page)
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp);
+extern struct page *huge_zero_page;
+
+static inline bool is_huge_zero_page(struct page *page)
+{
+ return ACCESS_ONCE(huge_zero_page) == page;
+}
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -206,6 +213,11 @@ static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_str
return 0;
}
+static inline bool is_huge_zero_page(struct page *page)
+{
+ return false;
+}
+
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 431b7fc..d891f94 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -22,7 +22,13 @@ struct mmu_gather;
struct hugepage_subpool {
spinlock_t lock;
long count;
- long max_hpages, used_hpages;
+ long max_hpages; /* Maximum huge pages or -1 if no maximum. */
+ long used_hpages; /* Used count against maximum, includes */
+ /* both alloced and reserved pages. */
+ struct hstate *hstate;
+ long min_hpages; /* Minimum huge pages or -1 if no minimum. */
+ long rsv_hpages; /* Pages reserved against global pool to */
+ /* satisfy minimum size. */
};
struct resv_map {
@@ -38,11 +44,10 @@ extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
-struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
+ long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-int PageHuge(struct page *page);
-
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -79,14 +84,13 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
-bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif
-extern unsigned long hugepages_treat_as_movable;
+extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
@@ -99,9 +103,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write);
+ pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write);
+ pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pmd);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -109,11 +113,6 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
#else /* !CONFIG_HUGETLB_PAGE */
-static inline int PageHuge(struct page *page)
-{
- return 0;
-}
-
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -133,8 +132,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
static inline void hugetlb_show_meminfo(void)
{
}
-#define follow_huge_pmd(mm, addr, pmd, write) NULL
-#define follow_huge_pud(mm, addr, pud, write) NULL
+#define follow_huge_pmd(mm, addr, pmd, flags) NULL
+#define follow_huge_pud(mm, addr, pud, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
@@ -152,7 +151,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
return false;
}
#define putback_active_hugepage(p) do {} while (0)
-#define is_hugepage_active(x) false
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
@@ -462,15 +460,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
-static inline bool hugepages_supported(void)
-{
- /*
- * Some platform decide whether they support huge pages at boot
- * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
- * there is no such support
- */
- return HPAGE_SHIFT != 0;
-}
+#ifndef hugepages_supported
+/*
+ * Some platform decide whether they support huge pages at boot
+ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
+ * when there is no such support
+ */
+#define hugepages_supported() (HPAGE_SHIFT != 0)
+#endif
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
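
A sketch of the extended subpool constructor, roughly the shape of what hugetlbfs would do at mount time; the limits are illustrative and -1 leaves either bound unset:

#include <linux/hugetlb.h>

static struct hugepage_subpool *example_subpool(struct hstate *h)
{
	long max_hpages = 128;	/* cap on pages drawn from the global pool */
	long min_hpages = 16;	/* reserved up front; released by hugepage_put_subpool() */

	return hugepage_new_subpool(h, max_hpages, min_hpages);
}
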
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 914bb08..4f7d8f4 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -12,8 +12,10 @@
#ifndef LINUX_HWRANDOM_H_
#define LINUX_HWRANDOM_H_
+#include <linux/completion.h>
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/kref.h>
/**
* struct hwrng - Hardware Random Number Generator driver
@@ -44,12 +46,18 @@ struct hwrng {
/* internal. */
struct list_head list;
+ struct kref ref;
+ struct completion cleanup_done;
};
+struct device;
+
/** Register a new Hardware Random Number Generator driver. */
extern int hwrng_register(struct hwrng *rng);
+extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
+extern void devm_hwrng_unregister(struct device *dev, struct hwrng *rng);
/** Feed random bits into the pool. */
extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
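
With the device-managed variants a driver no longer needs an explicit unregister on its remove path. A skeleton of the registration side; the read callback body is the driver's business and left empty here:

#include <linux/device.h>
#include <linux/hw_random.h>

static int example_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* Fill @data with up to @max bytes from the hardware. */
	return 0;	/* number of bytes actually written */
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
};

static int example_rng_probe(struct device *dev)
{
	/* Torn down automatically when @dev is unbound. */
	return devm_hwrng_register(dev, &example_rng);
}
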
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 3343298..859d673 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -26,6 +26,7 @@
#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
struct device;
+struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;
@@ -66,6 +67,7 @@ int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
+int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
@@ -120,6 +122,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
+static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+{
+ return 0;
+}
+
static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
return 0;
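
A consumer sketch tying the new DT lookup to the existing request/lock calls; the phandle index and the 10 ms timeout are illustrative:

#include <linux/errno.h>
#include <linux/hwspinlock.h>
#include <linux/of.h>

static int example_take_lock(struct device_node *np)
{
	struct hwspinlock *lock;
	int id, ret;

	id = of_hwspin_lock_get_id(np, 0);	/* first lock phandle of @np */
	if (id < 0)
		return id;

	lock = hwspin_lock_request_specific(id);
	if (!lock)
		return -EBUSY;

	ret = hwspin_lock_timeout(lock, 10);	/* ms */
	if (!ret) {
		/* ... critical section shared with the remote side ... */
		hwspin_unlock(lock);
	}

	hwspin_lock_free(lock);
	return ret;
}
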
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 476c685..30d3a1f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -57,6 +57,18 @@ struct hv_multipage_buffer {
u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};
+/*
+ * Multiple-page buffer array; the pfn array is variable size:
+ * The number of entries in the PFN array is determined by
+ * "len" and "offset".
+ */
+struct hv_mpb_array {
+ /* Length and Offset determines the # of pfns in the array */
+ u32 len;
+ u32 offset;
+ u64 pfn_array[];
+};
+
/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET (0x18 + \
(sizeof(struct hv_page_buffer) * \
@@ -148,16 +160,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
* 1 . 1 (Windows 7)
* 2 . 4 (Windows 8)
* 3 . 0 (Windows 8 R2)
+ * 4 . 0 (Windows 10)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
+#define VERSION_WIN10 ((4 << 16) | (0))
#define VERSION_INVAL -1
-#define VERSION_CURRENT VERSION_WIN8_1
+#define VERSION_CURRENT VERSION_WIN10
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -377,10 +391,7 @@ enum vmbus_channel_message_type {
CHANNELMSG_INITIATE_CONTACT = 14,
CHANNELMSG_VERSION_RESPONSE = 15,
CHANNELMSG_UNLOAD = 16,
-#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
- CHANNELMSG_VIEWRANGE_ADD = 17,
- CHANNELMSG_VIEWRANGE_REMOVE = 18,
-#endif
+ CHANNELMSG_UNLOAD_RESPONSE = 17,
CHANNELMSG_COUNT
};
@@ -537,21 +548,6 @@ struct vmbus_channel_gpadl_torndown {
u32 gpadl;
} __packed;
-#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
-struct vmbus_channel_view_range_add {
- struct vmbus_channel_message_header header;
- PHYSICAL_ADDRESS viewrange_base;
- u64 viewrange_length;
- u32 child_relid;
-} __packed;
-
-struct vmbus_channel_view_range_remove {
- struct vmbus_channel_message_header header;
- PHYSICAL_ADDRESS viewrange_base;
- u32 child_relid;
-} __packed;
-#endif
-
struct vmbus_channel_relid_released {
struct vmbus_channel_message_header header;
u32 child_relid;
@@ -634,12 +630,13 @@ struct hv_input_signal_event_buffer {
};
struct vmbus_channel {
+ /* Unique channel id */
+ int id;
+
struct list_head listentry;
struct hv_device *device_obj;
- struct work_struct work;
-
enum vmbus_channel_state state;
struct vmbus_channel_offer_channel offermsg;
@@ -660,7 +657,6 @@ struct vmbus_channel {
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
spinlock_t inbound_lock;
- struct workqueue_struct *controlwq;
struct vmbus_close_msg close_msg;
@@ -701,6 +697,11 @@ struct vmbus_channel {
/* The corresponding CPUID in the guest */
u32 target_cpu;
/*
+ * State to manage the CPU affiliation of channels.
+ */
+ struct cpumask alloced_cpus_in_node;
+ int numa_node;
+ /*
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
* a scalable communication infrastructure with the host.
@@ -722,12 +723,26 @@ struct vmbus_channel {
*/
void (*sc_creation_callback)(struct vmbus_channel *new_sc);
- spinlock_t sc_lock;
+ /*
+ * The spinlock to protect the structure. It is being used to protect
+ * test-and-set access to various attributes of the structure as well
+ * as all sc_list operations.
+ */
+ spinlock_t lock;
/*
* All Sub-channels of a primary channel are linked here.
*/
struct list_head sc_list;
/*
+ * Current number of sub-channels.
+ */
+ int num_sc;
+ /*
+ * Number of a sub-channel (position within sc_list) which is supposed
+ * to be used as the next outgoing channel.
+ */
+ int next_oc;
+ /*
* The primary channel this sub-channel belongs to.
* This will be NULL for the primary channel.
*/
@@ -814,6 +829,18 @@ struct vmbus_channel_packet_multipage_buffer {
struct hv_multipage_buffer range;
} __packed;
+/* The format must be the same as struct vmdata_gpa_direct */
+struct vmbus_packet_mpb_array {
+ u16 type;
+ u16 dataoffset8;
+ u16 length8;
+ u16 flags;
+ u64 transactionid;
+ u32 reserved;
+ u32 rangecount; /* Always 1 in this case */
+ struct hv_mpb_array range;
+} __packed;
+
extern int vmbus_open(struct vmbus_channel *channel,
u32 send_ringbuffersize,
@@ -832,6 +859,14 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
+extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ enum vmbus_packet_type type,
+ u32 flags,
+ bool kick_q);
+
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount,
@@ -839,12 +874,28 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 bufferlen,
u64 requestid);
+extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid,
+ u32 flags,
+ bool kick_q);
+
extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct hv_multipage_buffer *mpb,
void *buffer,
u32 bufferlen,
u64 requestid);
+extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+ struct vmbus_packet_mpb_array *mpb,
+ u32 desc_size,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid);
+
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
@@ -1071,6 +1122,16 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
}
/*
+ * NetworkDirect. This is the guest RDMA service.
+ * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
+ */
+#define HV_ND_GUID \
+ .guid = { \
+ 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \
+ 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \
+ }
+
+/*
* Common header for Hyper-V ICs
*/
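As a usage sketch (not part of this patch), a VMBus driver for the guest RDMA service would typically match on this GUID through its id_table; the "hvnd" names below are hypothetical:

static const struct hv_vmbus_device_id hvnd_id_table[] = {
	{ HV_ND_GUID, },
	{ },
};

static struct hv_driver hvnd_drv = {
	.name = "hv_guest_rdma",	/* hypothetical driver name */
	.id_table = hvnd_id_table,
	/* .probe / .remove callbacks omitted in this sketch */
};

/* module init would then call vmbus_driver_register(&hvnd_drv) */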
@@ -1170,13 +1231,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *, int,
int);
-int hv_kvp_init(struct hv_util_service *);
-void hv_kvp_deinit(void);
-void hv_kvp_onchannelcallback(void *);
-
-int hv_vss_init(struct hv_util_service *);
-void hv_vss_deinit(void);
-void hv_vss_onchannelcallback(void *);
+void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
extern struct resource hyperv_mmio;
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e3a1721..e83a738 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -130,8 +130,6 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
- * @suspend: Callback for device suspend
- * @resume: Callback for device resume
* @alert: Alert callback, for example for the SMBus alert protocol
* @command: Callback for bus-wide signaling (optional)
* @driver: Device driver model driver
@@ -174,8 +172,6 @@ struct i2c_driver {
/* driver model interfaces that don't relate to enumeration */
void (*shutdown)(struct i2c_client *);
- int (*suspend)(struct i2c_client *, pm_message_t mesg);
- int (*resume)(struct i2c_client *);
/* Alert callback, for example for the SMBus alert protocol.
* The format and meaning of the data value depends on the protocol.
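With the legacy suspend/resume callbacks removed from struct i2c_driver, power management is expressed through dev_pm_ops on the embedded device_driver. A hedged sketch of the replacement pattern (the "foo" driver and its callbacks are hypothetical):

#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	/* quiesce the hardware behind 'client' here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore device state here */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* replaces the old suspend/resume hooks */
	},
	/* .probe / .remove as before */
};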
@@ -228,7 +224,9 @@ struct i2c_client {
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
i2c_slave_cb_t slave_cb; /* callback for slave mode */
+#endif
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -253,11 +251,12 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
/* I2C slave support */
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
- I2C_SLAVE_REQ_READ_START,
- I2C_SLAVE_REQ_READ_END,
- I2C_SLAVE_REQ_WRITE_START,
- I2C_SLAVE_REQ_WRITE_END,
+ I2C_SLAVE_READ_REQUESTED,
+ I2C_SLAVE_WRITE_REQUESTED,
+ I2C_SLAVE_READ_PROCESSED,
+ I2C_SLAVE_WRITE_RECEIVED,
I2C_SLAVE_STOP,
};
@@ -269,6 +268,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
{
return client->slave_cb(client, event, val);
}
+#endif
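A sketch of how a slave backend reacts to the renamed events, modelled loosely on the in-tree slave EEPROM backend but not copied from it; struct foo_data and the registration helper named in the trailing comment are assumptions:

struct foo_data {			/* hypothetical backend state */
	u8 buf[64];
	unsigned int off;
};

static int foo_slave_cb(struct i2c_client *client,
			enum i2c_slave_event event, u8 *val)
{
	struct foo_data *foo = i2c_get_clientdata(client);

	switch (event) {
	case I2C_SLAVE_WRITE_REQUESTED:
		foo->off = 0;				/* master starts writing */
		break;
	case I2C_SLAVE_WRITE_RECEIVED:
		foo->buf[foo->off] = *val;		/* byte from the master */
		foo->off = (foo->off + 1) % ARRAY_SIZE(foo->buf);
		break;
	case I2C_SLAVE_READ_PROCESSED:
		foo->off = (foo->off + 1) % ARRAY_SIZE(foo->buf);
		/* fall through */
	case I2C_SLAVE_READ_REQUESTED:
		*val = foo->buf[foo->off];		/* byte for the master */
		break;
	case I2C_SLAVE_STOP:
		break;
	}
	return 0;
}

/* registration, assuming the mainline helper:
 * i2c_slave_register(client, foo_slave_cb);
 */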
/**
* struct i2c_board_info - template for device creation
@@ -278,7 +278,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
- * @acpi_node: ACPI device node
+ * @fwnode: device node supplied by the platform firmware
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
@@ -299,7 +299,7 @@ struct i2c_board_info {
void *platform_data;
struct dev_archdata *archdata;
struct device_node *of_node;
- struct acpi_dev_node acpi_node;
+ struct fwnode_handle *fwnode;
int irq;
};
@@ -404,8 +404,10 @@ struct i2c_algorithm {
/* To determine what the adapter supports */
u32 (*functionality) (struct i2c_adapter *);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
int (*reg_slave)(struct i2c_client *client);
int (*unreg_slave)(struct i2c_client *client);
+#endif
};
/**
@@ -433,8 +435,8 @@ struct i2c_bus_recovery_info {
void (*set_scl)(struct i2c_adapter *, int val);
int (*get_sda)(struct i2c_adapter *);
- void (*prepare_recovery)(struct i2c_bus_recovery_info *bri);
- void (*unprepare_recovery)(struct i2c_bus_recovery_info *bri);
+ void (*prepare_recovery)(struct i2c_adapter *);
+ void (*unprepare_recovery)(struct i2c_adapter *);
/* gpio recovery */
int scl_gpio;
@@ -447,6 +449,48 @@ int i2c_recover_bus(struct i2c_adapter *adap);
int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
int i2c_generic_scl_recovery(struct i2c_adapter *adap);
+/**
+ * struct i2c_adapter_quirks - describe flaws of an i2c adapter
+ * @flags: see I2C_AQ_* for possible flags and read below
+ * @max_num_msgs: maximum number of messages per transfer
+ * @max_write_len: maximum length of a write message
+ * @max_read_len: maximum length of a read message
+ * @max_comb_1st_msg_len: maximum length of the first msg in a combined message
+ * @max_comb_2nd_msg_len: maximum length of the second msg in a combined message
+ *
+ * Note about combined messages: Some I2C controllers can only send one message
+ * per transfer, plus something called combined message or write-then-read.
+ * This is (usually) a small write message followed by a read message and
+ * barely enough to access register-based devices like EEPROMs. There is a flag
+ * to support this mode. It implies max_num_msgs = 2 and does the length checks
+ * with max_comb_*_len because combined message mode usually has its own
+ * limitations. Because of HW implementations, some controllers can actually do
+ * write-then-anything or other variants. To support that, write-then-read has
+ * been broken out into smaller bits like write-first and read-second which can
+ * be combined as needed.
+ */
+
+struct i2c_adapter_quirks {
+ u64 flags;
+ int max_num_msgs;
+ u16 max_write_len;
+ u16 max_read_len;
+ u16 max_comb_1st_msg_len;
+ u16 max_comb_2nd_msg_len;
+};
+
+/* enforce max_num_msgs = 2 and use max_comb_*_len for length checks */
+#define I2C_AQ_COMB BIT(0)
+/* first combined message must be write */
+#define I2C_AQ_COMB_WRITE_FIRST BIT(1)
+/* second combined message must be read */
+#define I2C_AQ_COMB_READ_SECOND BIT(2)
+/* both combined messages must have the same target address */
+#define I2C_AQ_COMB_SAME_ADDR BIT(3)
+/* convenience macro for the typical write-then-read case */
+#define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \
+ I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
+
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
* with the access algorithms necessary to access it.
@@ -472,6 +516,7 @@ struct i2c_adapter {
struct list_head userspace_clients;
struct i2c_bus_recovery_info *bus_recovery_info;
+ const struct i2c_adapter_quirks *quirks;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
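A hedged example of how a bus driver might describe such a limitation with the new struct; the I2C core is then expected to reject transfers that violate the quirks. The "foo" types and helper are hypothetical:

static const struct i2c_adapter_quirks foo_quirks = {
	.flags = I2C_AQ_COMB_WRITE_THEN_READ,
	.max_comb_1st_msg_len = 2,	/* e.g. a 16-bit register address */
	.max_comb_2nd_msg_len = 32,
};

static int foo_register_adapter(struct foo_i2c *foo)
{
	/* quirks must be set before the adapter is registered */
	foo->adap.quirks = &foo_quirks;
	return i2c_add_adapter(&foo->adap);
}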
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 0bc03f1..9ad7828 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -675,6 +675,7 @@ struct twl4030_power_data {
struct twl4030_resconfig *board_config;
#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
bool use_poweroff; /* Board is wired for TWL poweroff */
+ bool ac_charger_quirk; /* Disable AC charger on board */
};
extern int twl4030_remove_script(u8 flags);
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
deleted file mode 100644
index d23c3c2..0000000
--- a/include/linux/i2o.h
+++ /dev/null
@@ -1,988 +0,0 @@
-/*
- * I2O kernel space accessible structures/APIs
- *
- * (c) Copyright 1999, 2000 Red Hat Software
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- *************************************************************************
- *
- * This header file defined the I2O APIs/structures for use by
- * the I2O kernel modules.
- *
- */
-
-#ifndef _I2O_H
-#define _I2O_H
-
-#include <linux/i2o-dev.h>
-
-/* How many different OSM's are we allowing */
-#define I2O_MAX_DRIVERS 8
-
-#include <linux/pci.h>
-#include <linux/bug.h>
-#include <linux/dma-mapping.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h> /* work_struct */
-#include <linux/mempool.h>
-#include <linux/mutex.h>
-#include <linux/scatterlist.h>
-#include <linux/semaphore.h> /* Needed for MUTEX init macros */
-
-#include <asm/io.h>
-
-/* message queue empty */
-#define I2O_QUEUE_EMPTY 0xffffffff
-
-/*
- * Cache strategies
- */
-
-/* The NULL strategy leaves everything up to the controller. This tends to be a
- * pessimal but functional choice.
- */
-#define CACHE_NULL 0
-/* Prefetch data when reading. We continually attempt to load the next 32 sectors
- * into the controller cache.
- */
-#define CACHE_PREFETCH 1
-/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
- * into the controller cache. When an I/O is less <= 8K we assume its probably
- * not sequential and don't prefetch (default)
- */
-#define CACHE_SMARTFETCH 2
-/* Data is written to the cache and then out on to the disk. The I/O must be
- * physically on the medium before the write is acknowledged (default without
- * NVRAM)
- */
-#define CACHE_WRITETHROUGH 17
-/* Data is written to the cache and then out on to the disk. The controller
- * is permitted to write back the cache any way it wants. (default if battery
- * backed NVRAM is present). It can be useful to set this for swap regardless of
- * battery state.
- */
-#define CACHE_WRITEBACK 18
-/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
- * write large I/O's directly to disk bypassing the cache to avoid the extra
- * memory copy hits. Small writes are writeback cached
- */
-#define CACHE_SMARTBACK 19
-/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
- * write large I/O's directly to disk bypassing the cache to avoid the extra
- * memory copy hits. Small writes are writethrough cached. Suitable for devices
- * lacking battery backup
- */
-#define CACHE_SMARTTHROUGH 20
-
-/*
- * Ioctl structures
- */
-
-#define BLKI2OGRSTRAT _IOR('2', 1, int)
-#define BLKI2OGWSTRAT _IOR('2', 2, int)
-#define BLKI2OSRSTRAT _IOW('2', 3, int)
-#define BLKI2OSWSTRAT _IOW('2', 4, int)
-
-/*
- * I2O Function codes
- */
-
-/*
- * Executive Class
- */
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-/*
- * Utility Class
- */
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-/*
- * SCSI Host Bus Adapter Class
- */
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-/*
- * Bus Adapter Class
- */
-#define I2O_CMD_BUS_ADAPTER_RESET 0x85
-#define I2O_CMD_BUS_RESET 0x87
-#define I2O_CMD_BUS_SCAN 0x89
-#define I2O_CMD_BUS_QUIESCE 0x8b
-
-/*
- * Random Block Storage Class
- */
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-#define I2O_CMD_BLOCK_POWER 0x70
-
-#define I2O_CMD_PRIVATE 0xFF
-
-/* Command status values */
-
-#define I2O_CMD_IN_PROGRESS 0x01
-#define I2O_CMD_REJECTED 0x02
-#define I2O_CMD_FAILED 0x03
-#define I2O_CMD_COMPLETED 0x04
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
- Status Codes.*/
-
-#define I2O_BSA_DSC_SUCCESS 0x0000
-#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
-#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
-#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
-#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
-#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
-#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
-#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
-#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
-#define I2O_BSA_DSC_BUS_FAILURE 0x0009
-#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
-#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
-#define I2O_BSA_DSC_DEVICE_RESET 0x000C
-#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
-#define I2O_BSA_DSC_TIMEOUT 0x000E
-
-/* FailureStatusCodes, Table 3-3 Message Failure Codes */
-
-#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
-#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
-#define I2O_FSC_TRANSPORT_CONGESTION 0x83
-#define I2O_FSC_TRANSPORT_FAILURE 0x84
-#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
-#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
-#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
-#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
-#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
-#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
-#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
-#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
-#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
-#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-
-/* Default is 1.5 */
-#define I2OVERSION I2OVER15
-
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_FINAL 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define ELEVEN_WORD_MSG_SIZE 0x000B0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-/* special TID assignments */
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-/* outbound queue defines */
-#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
-#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
-
-/* inbound queue definitions */
-#define I2O_MSG_INPOOL_MIN 32
-#define I2O_INBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-#define I2O_CONTEXT_LIST_MIN_LENGTH 15
-#define I2O_CONTEXT_LIST_USED 0x01
-#define I2O_CONTEXT_LIST_DELETED 0x02
-
-/* timeouts */
-#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
-#define I2O_TIMEOUT_MESSAGE_GET 5
-#define I2O_TIMEOUT_RESET 30
-#define I2O_TIMEOUT_STATUS_GET 5
-#define I2O_TIMEOUT_LCT_GET 360
-#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
-
-/* retries */
-#define I2O_HRT_GET_TRIES 3
-#define I2O_LCT_GET_TRIES 3
-
-/* defines for max_sectors and max_phys_segments */
-#define I2O_MAX_SECTORS 1024
-#define I2O_MAX_SECTORS_LIMITED 128
-#define I2O_MAX_PHYS_SEGMENTS BLK_MAX_SEGMENTS
-
-/*
- * Message structures
- */
-struct i2o_message {
- union {
- struct {
- u8 version_offset;
- u8 flags;
- u16 size;
- u32 target_tid:12;
- u32 init_tid:12;
- u32 function:8;
- u32 icntxt; /* initiator context */
- u32 tcntxt; /* transaction context */
- } s;
- u32 head[4];
- } u;
- /* List follows */
- u32 body[0];
-};
-
-/* MFA and I2O message used by mempool */
-struct i2o_msg_mfa {
- u32 mfa; /* MFA returned by the controller */
- struct i2o_message msg; /* I2O message */
-};
-
-/*
- * Each I2O device entity has one of these. There is one per device.
- */
-struct i2o_device {
- i2o_lct_entry lct_data; /* Device LCT information */
-
- struct i2o_controller *iop; /* Controlling IOP */
- struct list_head list; /* node in IOP devices list */
-
- struct device device;
-
- struct mutex lock; /* device lock */
-};
-
-/*
- * Event structure provided to the event handling function
- */
-struct i2o_event {
- struct work_struct work;
- struct i2o_device *i2o_dev; /* I2O device pointer from which the
- event reply was initiated */
- u16 size; /* Size of data in 32-bit words */
- u32 tcntxt; /* Transaction context used at
- registration */
- u32 event_indicator; /* Event indicator from reply */
- u32 data[0]; /* Event data from reply */
-};
-
-/*
- * I2O classes which could be handled by the OSM
- */
-struct i2o_class_id {
- u16 class_id:12;
-};
-
-/*
- * I2O driver structure for OSMs
- */
-struct i2o_driver {
- char *name; /* OSM name */
- int context; /* Low 8 bits of the transaction info */
- struct i2o_class_id *classes; /* I2O classes that this OSM handles */
-
- /* Message reply handler */
- int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
-
- /* Event handler */
- work_func_t event;
-
- struct workqueue_struct *event_queue; /* Event queue */
-
- struct device_driver driver;
-
- /* notification of changes */
- void (*notify_controller_add) (struct i2o_controller *);
- void (*notify_controller_remove) (struct i2o_controller *);
- void (*notify_device_add) (struct i2o_device *);
- void (*notify_device_remove) (struct i2o_device *);
-
- struct semaphore lock;
-};
-
-/*
- * Contains DMA mapped address information
- */
-struct i2o_dma {
- void *virt;
- dma_addr_t phys;
- size_t len;
-};
-
-/*
- * Contains slab cache and mempool information
- */
-struct i2o_pool {
- char *name;
- struct kmem_cache *slab;
- mempool_t *mempool;
-};
-
-/*
- * Contains IO mapped address information
- */
-struct i2o_io {
- void __iomem *virt;
- unsigned long phys;
- unsigned long len;
-};
-
-/*
- * Context queue entry, used for 32-bit context on 64-bit systems
- */
-struct i2o_context_list_element {
- struct list_head list;
- u32 context;
- void *ptr;
- unsigned long timestamp;
-};
-
-/*
- * Each I2O controller has one of these objects
- */
-struct i2o_controller {
- char name[16];
- int unit;
- int type;
-
- struct pci_dev *pdev; /* PCI device */
-
- unsigned int promise:1; /* Promise controller */
- unsigned int adaptec:1; /* DPT / Adaptec controller */
- unsigned int raptor:1; /* split bar */
- unsigned int no_quiesce:1; /* dont quiesce before reset */
- unsigned int short_req:1; /* use small block sizes */
- unsigned int limit_sectors:1; /* limit number of sectors / request */
- unsigned int pae_support:1; /* controller has 64-bit SGL support */
-
- struct list_head devices; /* list of I2O devices */
- struct list_head list; /* Controller list */
-
- void __iomem *in_port; /* Inbout port address */
- void __iomem *out_port; /* Outbound port address */
- void __iomem *irq_status; /* Interrupt status register address */
- void __iomem *irq_mask; /* Interrupt mask register address */
-
- struct i2o_dma status; /* IOP status block */
-
- struct i2o_dma hrt; /* HW Resource Table */
- i2o_lct *lct; /* Logical Config Table */
- struct i2o_dma dlct; /* Temp LCT */
- struct mutex lct_lock; /* Lock for LCT updates */
- struct i2o_dma status_block; /* IOP status block */
-
- struct i2o_io base; /* controller messaging unit */
- struct i2o_io in_queue; /* inbound message queue Host->IOP */
- struct i2o_dma out_queue; /* outbound message queue IOP->Host */
-
- struct i2o_pool in_msg; /* mempool for inbound messages */
-
- unsigned int battery:1; /* Has a battery backup */
- unsigned int io_alloc:1; /* An I/O resource was allocated */
- unsigned int mem_alloc:1; /* A memory resource was allocated */
-
- struct resource io_resource; /* I/O resource allocated to the IOP */
- struct resource mem_resource; /* Mem resource allocated to the IOP */
-
- struct device device;
- struct i2o_device *exec; /* Executive */
-#if BITS_PER_LONG == 64
- spinlock_t context_list_lock; /* lock for context_list */
- atomic_t context_list_counter; /* needed for unique contexts */
- struct list_head context_list; /* list of context id's
- and pointers */
-#endif
- spinlock_t lock; /* lock for controller
- configuration */
- void *driver_data[I2O_MAX_DRIVERS]; /* storage for drivers */
-};
-
-/*
- * I2O System table entry
- *
- * The system table contains information about all the IOPs in the
- * system. It is sent to all IOPs so that they can create peer2peer
- * connections between them.
- */
-struct i2o_sys_tbl_entry {
- u16 org_id;
- u16 reserved1;
- u32 iop_id:12;
- u32 reserved2:20;
- u16 seg_num:12;
- u16 i2o_version:4;
- u8 iop_state;
- u8 msg_type;
- u16 frame_size;
- u16 reserved3;
- u32 last_changed;
- u32 iop_capabilities;
- u32 inbound_low;
- u32 inbound_high;
-};
-
-struct i2o_sys_tbl {
- u8 num_entries;
- u8 version;
- u16 reserved1;
- u32 change_ind;
- u32 reserved2;
- u32 reserved3;
- struct i2o_sys_tbl_entry iops[0];
-};
-
-extern struct list_head i2o_controllers;
-
-/* Message functions */
-extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
-extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
- unsigned long, struct i2o_dma *);
-
-/* IOP functions */
-extern int i2o_status_get(struct i2o_controller *);
-
-extern int i2o_event_register(struct i2o_device *, struct i2o_driver *, int,
- u32);
-extern struct i2o_device *i2o_iop_find_device(struct i2o_controller *, u16);
-extern struct i2o_controller *i2o_find_iop(int);
-
-/* Functions needed for handling 64-bit pointers in 32-bit context */
-#if BITS_PER_LONG == 64
-extern u32 i2o_cntxt_list_add(struct i2o_controller *, void *);
-extern void *i2o_cntxt_list_get(struct i2o_controller *, u32);
-extern u32 i2o_cntxt_list_remove(struct i2o_controller *, void *);
-extern u32 i2o_cntxt_list_get_ptr(struct i2o_controller *, void *);
-
-static inline u32 i2o_ptr_low(void *ptr)
-{
- return (u32) (u64) ptr;
-};
-
-static inline u32 i2o_ptr_high(void *ptr)
-{
- return (u32) ((u64) ptr >> 32);
-};
-
-static inline u32 i2o_dma_low(dma_addr_t dma_addr)
-{
- return (u32) (u64) dma_addr;
-};
-
-static inline u32 i2o_dma_high(dma_addr_t dma_addr)
-{
- return (u32) ((u64) dma_addr >> 32);
-};
-#else
-static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
-{
- return (u32) ptr;
-};
-
-static inline void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
-{
- return (void *)context;
-};
-
-static inline u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr)
-{
- return (u32) ptr;
-};
-
-static inline u32 i2o_cntxt_list_get_ptr(struct i2o_controller *c, void *ptr)
-{
- return (u32) ptr;
-};
-
-static inline u32 i2o_ptr_low(void *ptr)
-{
- return (u32) ptr;
-};
-
-static inline u32 i2o_ptr_high(void *ptr)
-{
- return 0;
-};
-
-static inline u32 i2o_dma_low(dma_addr_t dma_addr)
-{
- return (u32) dma_addr;
-};
-
-static inline u32 i2o_dma_high(dma_addr_t dma_addr)
-{
- return 0;
-};
-#endif
-
-extern u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size);
-extern dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
- size_t size,
- enum dma_data_direction direction,
- u32 ** sg_ptr);
-extern int i2o_dma_map_sg(struct i2o_controller *c,
- struct scatterlist *sg, int sg_count,
- enum dma_data_direction direction,
- u32 ** sg_ptr);
-extern int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len);
-extern void i2o_dma_free(struct device *dev, struct i2o_dma *addr);
-extern int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
- size_t len);
-extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
- size_t size, int min_nr);
-extern void i2o_pool_free(struct i2o_pool *pool);
-/* I2O driver (OSM) functions */
-extern int i2o_driver_register(struct i2o_driver *);
-extern void i2o_driver_unregister(struct i2o_driver *);
-
-/**
- * i2o_driver_notify_controller_add - Send notification of added controller
- * @drv: I2O driver
- * @c: I2O controller
- *
- * Send notification of added controller to a single registered driver.
- */
-static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv,
- struct i2o_controller *c)
-{
- if (drv->notify_controller_add)
- drv->notify_controller_add(c);
-};
-
-/**
- * i2o_driver_notify_controller_remove - Send notification of removed controller
- * @drv: I2O driver
- * @c: I2O controller
- *
- * Send notification of removed controller to a single registered driver.
- */
-static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv,
- struct i2o_controller *c)
-{
- if (drv->notify_controller_remove)
- drv->notify_controller_remove(c);
-};
-
-/**
- * i2o_driver_notify_device_add - Send notification of added device
- * @drv: I2O driver
- * @i2o_dev: the added i2o_device
- *
- * Send notification of added device to a single registered driver.
- */
-static inline void i2o_driver_notify_device_add(struct i2o_driver *drv,
- struct i2o_device *i2o_dev)
-{
- if (drv->notify_device_add)
- drv->notify_device_add(i2o_dev);
-};
-
-/**
- * i2o_driver_notify_device_remove - Send notification of removed device
- * @drv: I2O driver
- * @i2o_dev: the added i2o_device
- *
- * Send notification of removed device to a single registered driver.
- */
-static inline void i2o_driver_notify_device_remove(struct i2o_driver *drv,
- struct i2o_device *i2o_dev)
-{
- if (drv->notify_device_remove)
- drv->notify_device_remove(i2o_dev);
-};
-
-extern void i2o_driver_notify_controller_add_all(struct i2o_controller *);
-extern void i2o_driver_notify_controller_remove_all(struct i2o_controller *);
-extern void i2o_driver_notify_device_add_all(struct i2o_device *);
-extern void i2o_driver_notify_device_remove_all(struct i2o_device *);
-
-/* I2O device functions */
-extern int i2o_device_claim(struct i2o_device *);
-extern int i2o_device_claim_release(struct i2o_device *);
-
-/* Exec OSM functions */
-extern int i2o_exec_lct_get(struct i2o_controller *);
-
-/* device / driver / kobject conversion functions */
-#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
-#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
-#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
-
-/**
- * i2o_out_to_virt - Turn an I2O message to a virtual address
- * @c: controller
- * @m: message engine value
- *
- * Turn a receive message from an I2O controller bus address into
- * a Linux virtual address. The shared page frame is a linear block
- * so we simply have to shift the offset. This function does not
- * work for sender side messages as they are ioremap objects
- * provided by the I2O controller.
- */
-static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
- u32 m)
-{
- BUG_ON(m < c->out_queue.phys
- || m >= c->out_queue.phys + c->out_queue.len);
-
- return c->out_queue.virt + (m - c->out_queue.phys);
-};
-
-/**
- * i2o_msg_in_to_virt - Turn an I2O message to a virtual address
- * @c: controller
- * @m: message engine value
- *
- * Turn a send message from an I2O controller bus address into
- * a Linux virtual address. The shared page frame is a linear block
- * so we simply have to shift the offset. This function does not
- * work for receive side messages as they are kmalloc objects
- * in a different pool.
- */
-static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
- i2o_controller *c,
- u32 m)
-{
- return c->in_queue.virt + m;
-};
-
-/**
- * i2o_msg_get - obtain an I2O message from the IOP
- * @c: I2O controller
- *
- * This function tries to get a message frame. If no message frame is
- * available do not wait until one is available (see also i2o_msg_get_wait).
- * The returned pointer to the message frame is not in I/O memory, it is
- * allocated from a mempool. But because a MFA is allocated from the
- * controller too it is guaranteed that i2o_msg_post() will never fail.
- *
- * On a success a pointer to the message frame is returned. If the message
- * queue is empty -EBUSY is returned and if no memory is available -ENOMEM
- * is returned.
- */
-static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
-{
- struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);
- if (!mmsg)
- return ERR_PTR(-ENOMEM);
-
- mmsg->mfa = readl(c->in_port);
- if (unlikely(mmsg->mfa >= c->in_queue.len)) {
- u32 mfa = mmsg->mfa;
-
- mempool_free(mmsg, c->in_msg.mempool);
-
- if (mfa == I2O_QUEUE_EMPTY)
- return ERR_PTR(-EBUSY);
- return ERR_PTR(-EFAULT);
- }
-
- return &mmsg->msg;
-};
-
-/**
- * i2o_msg_post - Post I2O message to I2O controller
- * @c: I2O controller to which the message should be send
- * @msg: message returned by i2o_msg_get()
- *
- * Post the message to the I2O controller and return immediately.
- */
-static inline void i2o_msg_post(struct i2o_controller *c,
- struct i2o_message *msg)
-{
- struct i2o_msg_mfa *mmsg;
-
- mmsg = container_of(msg, struct i2o_msg_mfa, msg);
- memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg,
- (le32_to_cpu(msg->u.head[0]) >> 16) << 2);
- writel(mmsg->mfa, c->in_port);
- mempool_free(mmsg, c->in_msg.mempool);
-};
-
-/**
- * i2o_msg_post_wait - Post and wait a message and wait until return
- * @c: controller
- * @msg: message to post
- * @timeout: time in seconds to wait
- *
- * This API allows an OSM to post a message and then be told whether or
- * not the system received a successful reply. If the message times out
- * then the value '-ETIMEDOUT' is returned.
- *
- * Returns 0 on success or negative error code on failure.
- */
-static inline int i2o_msg_post_wait(struct i2o_controller *c,
- struct i2o_message *msg,
- unsigned long timeout)
-{
- return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
-};
-
-/**
- * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller
- * @c: I2O controller from which the MFA was fetched
- * @mfa: MFA which should be returned
- *
- * This function must be used for preserved messages, because i2o_msg_nop()
- * also returns the allocated memory back to the msg_pool mempool.
- */
-static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa)
-{
- struct i2o_message __iomem *msg;
- u32 nop[3] = {
- THREE_WORD_MSG_SIZE | SGL_OFFSET_0,
- I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
- 0x00000000
- };
-
- msg = i2o_msg_in_to_virt(c, mfa);
- memcpy_toio(msg, nop, sizeof(nop));
- writel(mfa, c->in_port);
-};
-
-/**
- * i2o_msg_nop - Returns a message which is not used
- * @c: I2O controller from which the message was created
- * @msg: message which should be returned
- *
- * If you fetch a message via i2o_msg_get, and can't use it, you must
- * return the message with this function. Otherwise the MFA is lost as well
- * as the allocated memory from the mempool.
- */
-static inline void i2o_msg_nop(struct i2o_controller *c,
- struct i2o_message *msg)
-{
- struct i2o_msg_mfa *mmsg;
- mmsg = container_of(msg, struct i2o_msg_mfa, msg);
-
- i2o_msg_nop_mfa(c, mmsg->mfa);
- mempool_free(mmsg, c->in_msg.mempool);
-};
-
-/**
- * i2o_flush_reply - Flush reply from I2O controller
- * @c: I2O controller
- * @m: the message identifier
- *
- * The I2O controller must be informed that the reply message is not needed
- * anymore. If you forget to flush the reply, the message frame can't be
- * used by the controller anymore and is therefore lost.
- */
-static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
-{
- writel(m, c->out_port);
-};
-
-/*
- * Endian handling wrapped into the macro - keeps the core code
- * cleaner.
- */
-
-#define i2o_raw_writel(val, mem) __raw_writel(cpu_to_le32(val), mem)
-
-extern int i2o_parm_field_get(struct i2o_device *, int, int, void *, int);
-extern int i2o_parm_table_get(struct i2o_device *, int, int, int, void *, int,
- void *, int);
-
-/* debugging and troubleshooting/diagnostic helpers. */
-#define osm_printk(level, format, arg...) \
- printk(level "%s: " format, OSM_NAME , ## arg)
-
-#ifdef DEBUG
-#define osm_debug(format, arg...) \
- osm_printk(KERN_DEBUG, format , ## arg)
-#else
-#define osm_debug(format, arg...) \
- do { } while (0)
-#endif
-
-#define osm_err(format, arg...) \
- osm_printk(KERN_ERR, format , ## arg)
-#define osm_info(format, arg...) \
- osm_printk(KERN_INFO, format , ## arg)
-#define osm_warn(format, arg...) \
- osm_printk(KERN_WARNING, format , ## arg)
-
-/* debugging functions */
-extern void i2o_report_status(const char *, const char *, struct i2o_message *);
-extern void i2o_dump_message(struct i2o_message *);
-extern void i2o_dump_hrt(struct i2o_controller *c);
-extern void i2o_debug_state(struct i2o_controller *c);
-
-#endif /* _I2O_H */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 93b5ca7..a633898 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -39,6 +39,19 @@
struct device;
+/* IDE-specific values for req->cmd_type */
+enum ata_cmd_type_bits {
+ REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1,
+ REQ_TYPE_ATA_PC,
+ REQ_TYPE_ATA_SENSE, /* sense request */
+ REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */
+ REQ_TYPE_ATA_PM_RESUME, /* resume request */
+};
+
+#define ata_pm_request(rq) \
+ ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
+ (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
+
/* Error codes returned in rq->errors to the higher part of the driver. */
enum {
IDE_DRV_ERROR_GENERAL = 101,
@@ -1314,6 +1327,19 @@ struct ide_port_info {
u8 udma_mask;
};
+/*
+ * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
+ * requests.
+ */
+struct ide_pm_state {
+ /* PM state machine step value, currently driver specific */
+ int pm_step;
+ /* requested PM state value (S1, S2, S3, S4, ...) */
+ u32 pm_state;
+	void *data; /* for driver use */
+};
+
+
int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
const struct ide_port_info *, void *);
@@ -1551,4 +1577,5 @@ static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
#define ide_host_for_each_port(i, port, host) \
for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
+
#endif /* _IDE_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 4f4eea8..b9c7897 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1017,6 +1017,15 @@ struct ieee80211_mmie {
u8 mic[8];
} __packed;
+/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */
+struct ieee80211_mmie_16 {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[16];
+} __packed;
+
struct ieee80211_vendor_ie {
u8 element_id;
u8 len;
@@ -1994,9 +2003,15 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
WLAN_KEY_LEN_WEP104 = 13,
WLAN_KEY_LEN_CCMP = 16,
+ WLAN_KEY_LEN_CCMP_256 = 32,
WLAN_KEY_LEN_TKIP = 32,
WLAN_KEY_LEN_AES_CMAC = 16,
WLAN_KEY_LEN_SMS4 = 32,
+ WLAN_KEY_LEN_GCMP = 16,
+ WLAN_KEY_LEN_GCMP_256 = 32,
+ WLAN_KEY_LEN_BIP_CMAC_256 = 32,
+ WLAN_KEY_LEN_BIP_GMAC_128 = 16,
+ WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
#define IEEE80211_WEP_IV_LEN 4
@@ -2004,9 +2019,16 @@ enum ieee80211_key_len {
#define IEEE80211_CCMP_HDR_LEN 8
#define IEEE80211_CCMP_MIC_LEN 8
#define IEEE80211_CCMP_PN_LEN 6
+#define IEEE80211_CCMP_256_HDR_LEN 8
+#define IEEE80211_CCMP_256_MIC_LEN 16
+#define IEEE80211_CCMP_256_PN_LEN 6
#define IEEE80211_TKIP_IV_LEN 8
#define IEEE80211_TKIP_ICV_LEN 4
#define IEEE80211_CMAC_PN_LEN 6
+#define IEEE80211_GMAC_PN_LEN 6
+#define IEEE80211_GCMP_HDR_LEN 8
+#define IEEE80211_GCMP_MIC_LEN 16
+#define IEEE80211_GCMP_PN_LEN 6
/* Public action codes */
enum ieee80211_pub_actioncode {
@@ -2230,6 +2252,11 @@ enum ieee80211_sa_query_action {
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
+#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
+#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
#define WLAN_CIPHER_SUITE_SMS4 0x00147201
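For illustration only (not code from this patch), the new suite selectors map onto the key lengths added earlier in this header; a driver-side lookup might read:

static int foo_cipher_key_len(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_CCMP_256:
		return WLAN_KEY_LEN_CCMP_256;		/* 32 */
	case WLAN_CIPHER_SUITE_GCMP:
		return WLAN_KEY_LEN_GCMP;		/* 16 */
	case WLAN_CIPHER_SUITE_GCMP_256:
		return WLAN_KEY_LEN_GCMP_256;		/* 32 */
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		return WLAN_KEY_LEN_BIP_GMAC_128;	/* 16 */
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		return WLAN_KEY_LEN_BIP_GMAC_256;	/* 32 */
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		return WLAN_KEY_LEN_BIP_CMAC_256;	/* 32 */
	default:
		return -EINVAL;	/* legacy suites handled elsewhere */
	}
}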
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 6e82d88..1dc1f4e 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -28,7 +28,9 @@
#include <asm/byteorder.h>
#define IEEE802154_MTU 127
-#define IEEE802154_MIN_PSDU_LEN 5
+#define IEEE802154_ACK_PSDU_LEN 5
+#define IEEE802154_MIN_PSDU_LEN 9
+#define IEEE802154_FCS_LEN 2
#define IEEE802154_PAN_ID_BROADCAST 0xffff
#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
@@ -38,6 +40,7 @@
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
+#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
#define IEEE802154_MAX_CHANNEL 26
#define IEEE802154_MAX_PAGE 31
@@ -204,26 +207,31 @@ enum {
/**
* ieee802154_is_valid_psdu_len - check if psdu len is valid
+ * available lengths:
+ * 0-4 Reserved
+ * 5 MPDU (Acknowledgment)
+ * 6-8 Reserved
+ * 9-127 MPDU
+ *
* @len: psdu len with (MHR + payload + MFR)
*/
static inline bool ieee802154_is_valid_psdu_len(const u8 len)
{
- return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
+ return (len == IEEE802154_ACK_PSDU_LEN ||
+ (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
}
/**
* ieee802154_is_valid_psdu_len - check if extended addr is valid
* @addr: extended addr to check
*/
-static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
{
- /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
- * is used internally as extended to short address broadcast mapping.
- * This is currently a workaround because neighbor discovery can't
- * deal with short addresses types right now.
+ /* Bail out if the address is all zero, or if the group
+ * address bit is set.
*/
return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
- (addr != cpu_to_le64(0xffffffffffffffffULL)));
+ !(addr & cpu_to_le64(0x0100000000000000ULL)));
}
/**
@@ -234,9 +242,9 @@ static inline void ieee802154_random_extended_addr(__le64 *addr)
{
get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
- /* toggle some bit if we hit an invalid extended addr */
- if (!ieee802154_is_valid_extended_addr(*addr))
- ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
+ /* clear the group bit, and set the locally administered bit */
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01;
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;
}
#endif /* LINUX_IEEE802154_H */
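A small sanity sketch (an assumption, not from the patch): an address produced by the updated helper always satisfies the renamed unicast check, because the group bit is cleared and the locally administered bit guarantees a non-zero value. The "foo_hw" container is hypothetical:

static void foo_assign_perm_addr(struct foo_hw *hw)
{
	ieee802154_random_extended_addr(&hw->extended_addr);
	/* group bit cleared + locally administered bit set => valid unicast */
	WARN_ON(!ieee802154_is_valid_extended_unicast_addr(hw->extended_addr));
}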
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0a8ce76..dad8b00 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -44,30 +44,13 @@ struct br_ip_list {
#define BR_PROMISC BIT(7)
#define BR_PROXYARP BIT(8)
#define BR_LEARNING_SYNC BIT(9)
+#define BR_PROXYARP_WIFI BIT(10)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
typedef int br_should_route_hook_t(struct sk_buff *skb);
extern br_should_route_hook_t __rcu *br_should_route_hook;
-#if IS_ENABLED(CONFIG_BRIDGE)
-int br_fdb_external_learn_add(struct net_device *dev,
- const unsigned char *addr, u16 vid);
-int br_fdb_external_learn_del(struct net_device *dev,
- const unsigned char *addr, u16 vid);
-#else
-static inline int br_fdb_external_learn_add(struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- return 0;
-}
-static inline int br_fdb_external_learn_del(struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- return 0;
-}
-#endif
-
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 119130e..ae5d0d2 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -5,6 +5,15 @@
/* We don't want this structure exposed to user space */
+struct ifla_vf_stats {
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 broadcast;
+ __u64 multicast;
+};
+
struct ifla_vf_info {
__u32 vf;
__u8 mac[32];
@@ -14,5 +23,6 @@ struct ifla_vf_info {
__u32 linkstate;
__u32 min_tx_rate;
__u32 max_tx_rate;
+ __u32 rss_query_en;
};
#endif /* _LINUX_IF_LINK_H */
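A hedged sketch of how the new counters are meant to be filled: the hook follows the mainline ndo_get_vf_stats convention, and the "foo" PF/VF hardware types are hypothetical:

static int foo_get_vf_stats(struct net_device *dev, int vf,
			    struct ifla_vf_stats *vf_stats)
{
	struct foo_pf *pf = netdev_priv(dev);
	const struct foo_vf_hw_stats *hw = &pf->vf[vf].stats;

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets = hw->rx_packets;
	vf_stats->tx_packets = hw->tx_packets;
	vf_stats->rx_bytes   = hw->rx_bytes;
	vf_stats->tx_bytes   = hw->tx_bytes;
	vf_stats->broadcast  = hw->bcast;
	vf_stats->multicast  = hw->mcast;
	return 0;
}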
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6f6929e..a4ccc31 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -29,7 +29,7 @@ struct macvtap_queue;
* Maximum times a macvtap device can be opened. This can be used to
* configure the number of receive queue, e.g. for multiqueue virtio.
*/
-#define MAX_MACVTAP_QUEUES 16
+#define MAX_MACVTAP_QUEUES 256
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index aff7ad8..b49cf92 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
#include <uapi/linux/if_pppox.h>
static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
@@ -32,6 +33,7 @@ struct pppoe_opt {
struct pppoe_addr pa; /* what this socket is bound to*/
struct sockaddr_pppox relay; /* what socket data will be
relayed to (PPPoE relaying) */
+	struct work_struct padt_work; /* Work item for handling PADT */
};
struct pptp_opt {
@@ -72,7 +74,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po)
struct module;
struct pppox_proto {
- int (*create)(struct net *net, struct socket *sock);
+ int (*create)(struct net *net, struct socket *sock, int kern);
int (*ioctl)(struct socket *sock, unsigned int cmd,
unsigned long arg);
struct module *owner;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 515a35e2..67ce5bd 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -78,9 +78,9 @@ static inline bool is_vlan_dev(struct net_device *dev)
return dev->priv_flags & IFF_802_1Q_VLAN;
}
-#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
-#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
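Usage of the renamed helpers is unchanged apart from the skb_ prefix; a small transmit-path sketch (the "foo" helper is hypothetical):

static u16 foo_tx_vlan_tci(const struct sk_buff *skb)
{
	/* read the hardware-accelerated tag, if any, for the TX descriptor */
	return skb_vlan_tag_present(skb) ? skb_vlan_tag_get(skb) : 0;
}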
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -376,7 +376,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
+ skb_vlan_tag_get(skb));
if (likely(skb))
skb->vlan_tci = 0;
return skb;
@@ -393,7 +393,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
*/
static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
{
- if (vlan_tx_tag_present(skb))
+ if (skb_vlan_tag_present(skb))
skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
@@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
/**
* __vlan_get_tag - get the VLAN ID that is part of the payload
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if the skb is not of VLAN type
*/
@@ -435,15 +435,15 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
/**
* __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
- if (vlan_tx_tag_present(skb)) {
- *vlan_tci = vlan_tx_tag_get(skb);
+ if (skb_vlan_tag_present(skb)) {
+ *vlan_tci = skb_vlan_tag_get(skb);
return 0;
} else {
*vlan_tci = 0;
@@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if the skb is not VLAN tagged
*/
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
/**
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
+ * @type: first vlan protocol
+ * @depth: buffer to store length of eth and vlan tags in bytes
*
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
-{
- __be16 protocol = 0;
-
- if (vlan_tx_tag_present(skb) ||
- skb->protocol != cpu_to_be16(ETH_P_8021Q))
- protocol = skb->protocol;
- else {
- __be16 proto, *protop;
- protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
- h_vlan_encapsulated_proto),
- sizeof(proto), &proto);
- if (likely(protop))
- protocol = *protop;
+static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+ int *depth)
+{
+ unsigned int vlan_depth = skb->mac_len;
+
+ /* if type is 802.1Q/AD then the header should already be
+ * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+ * ETH_HLEN otherwise
+ */
+ if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+ if (vlan_depth) {
+ if (WARN_ON(vlan_depth < VLAN_HLEN))
+ return 0;
+ vlan_depth -= VLAN_HLEN;
+ } else {
+ vlan_depth = ETH_HLEN;
+ }
+ do {
+ struct vlan_hdr *vh;
+
+ if (unlikely(!pskb_may_pull(skb,
+ vlan_depth + VLAN_HLEN)))
+ return 0;
+
+ vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+ type = vh->h_vlan_encapsulated_proto;
+ vlan_depth += VLAN_HLEN;
+ } while (type == htons(ETH_P_8021Q) ||
+ type == htons(ETH_P_8021AD));
}
- return protocol;
+ if (depth)
+ *depth = vlan_depth;
+
+ return type;
+}
+
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+{
+ return __vlan_get_protocol(skb, skb->protocol, NULL);
}
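As an illustration (an assumption, not code from this patch), the depth output can be used to locate the encapsulated header on the transmit path, where skb->data still points at the Ethernet header; the "foo" helper is hypothetical:

static struct iphdr *foo_inner_iphdr(struct sk_buff *skb)
{
	int depth;
	__be16 proto = __vlan_get_protocol(skb, skb->protocol, &depth);

	if (proto != htons(ETH_P_IP))	/* also covers the 0 error return */
		return NULL;
	return (struct iphdr *)(skb->data + depth);
}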
static inline void vlan_set_encap_proto(struct sk_buff *skb,
@@ -507,7 +539,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
*/
proto = vhdr->h_vlan_encapsulated_proto;
- if (ntohs(proto) >= ETH_P_802_3_MIN) {
+ if (eth_proto_is_802_3(proto)) {
skb->protocol = proto;
return;
}
@@ -529,4 +561,91 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
skb->protocol = htons(ETH_P_802_2);
}
+/**
+ * skb_vlan_tagged - check if skb is vlan tagged.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged, regardless of whether it is hardware
+ * accelerated or not.
+ */
+static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+{
+ if (!skb_vlan_tag_present(skb) &&
+ likely(skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)))
+ return false;
+
+ return true;
+}
+
+/**
+ * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * of whether it is hardware accelerated or not.
+ */
+static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+
+ if (!skb_vlan_tag_present(skb)) {
+ struct vlan_ethhdr *veh;
+
+ if (likely(protocol != htons(ETH_P_8021Q) &&
+ protocol != htons(ETH_P_8021AD)))
+ return false;
+
+ veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ }
+
+ if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+ return false;
+
+ return true;
+}
+
+/**
+ * vlan_features_check - drop unsafe features for skb with multiple tags.
+ * @skb: skbuff to query
+ * @features: features to be checked
+ *
+ * Returns features without unsafe ones if the skb has multiple tags.
+ */
+static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (skb_vlan_tagged_multi(skb))
+ features = netdev_intersect_features(features,
+ NETIF_F_SG |
+ NETIF_F_HIGHDMA |
+ NETIF_F_FRAGLIST |
+ NETIF_F_GEN_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ return features;
+}
+
+/**
+ * compare_vlan_header - Compare two vlan headers
+ * @h1: Pointer to vlan header
+ * @h2: Pointer to vlan header
+ *
+ * Compare two vlan headers, returns 0 if equal.
+ *
+ * Note that the alignment of h1 and h2 is only guaranteed to be 16 bits.
+ */
+static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
+ const struct vlan_hdr *h2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return *(u32 *)h1 ^ *(u32 *)h2;
+#else
+ return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
+ ((__force u32)h1->h_vlan_encapsulated_proto ^
+ (__force u32)h2->h_vlan_encapsulated_proto);
+#endif
+}
#endif /* !(_LINUX_IF_VLAN_H_) */
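A sketch of the intended consumer of vlan_features_check(), an assumption based on common driver practice rather than part of this patch: drivers wire it into their .ndo_features_check hook so multiply-tagged frames lose offloads the hardware cannot apply ("foo" is hypothetical):

static netdev_features_t foo_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}

/* in struct net_device_ops: .ndo_features_check = foo_features_check, */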
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 2c677af..193ad48 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -130,5 +130,6 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
#endif
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 5193927..1600c55 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -21,16 +21,15 @@ struct iio_buffer;
* struct iio_buffer_access_funcs - access functions for buffers.
* @store_to: actually store stuff to the buffer
* @read_first_n: try to get a specified number of bytes (must exist)
- * @data_available: indicates whether data for reading from the buffer is
- * available.
+ * @data_available: indicates how much data is available for reading from
+ * the buffer.
* @request_update: if a parameter change has been marked, update underlying
* storage.
- * @get_bytes_per_datum:get current bytes per datum
* @set_bytes_per_datum:set number of bytes per datum
- * @get_length: get number of datums in buffer
* @set_length: set number of datums in buffer
* @release: called when the last reference to the buffer is dropped,
* should free all resources allocated by the buffer.
+ * @modes:	Operating modes supported by this buffer type
*
* The purpose of this structure is to make the buffer element
* modular as event for a given driver, different usecases may require
@@ -45,16 +44,16 @@ struct iio_buffer_access_funcs {
int (*read_first_n)(struct iio_buffer *buffer,
size_t n,
char __user *buf);
- bool (*data_available)(struct iio_buffer *buffer);
+ size_t (*data_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
- int (*get_bytes_per_datum)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
- int (*get_length)(struct iio_buffer *buffer);
int (*set_length)(struct iio_buffer *buffer, int length);
void (*release)(struct iio_buffer *buffer);
+
+ unsigned int modes;
};
/**
@@ -76,6 +75,7 @@ struct iio_buffer_access_funcs {
* @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
* @buffer_list: [INTERN] entry in the devices list of current buffers.
* @ref: [INTERN] reference count of the buffer.
+ * @watermark: [INTERN] number of datums to wait for poll/read.
*/
struct iio_buffer {
int length;
@@ -85,14 +85,16 @@ struct iio_buffer {
bool scan_timestamp;
const struct iio_buffer_access_funcs *access;
struct list_head scan_el_dev_attr_list;
+ struct attribute_group buffer_group;
struct attribute_group scan_el_group;
wait_queue_head_t pollq;
bool stufftoread;
- const struct attribute_group *attrs;
+ const struct attribute **attrs;
struct list_head demux_list;
void *demux_bounce;
struct list_head buffer_list;
struct kref ref;
+ unsigned int watermark;
};
/**
@@ -117,15 +119,6 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
struct iio_buffer *buffer, int bit);
/**
- * iio_scan_mask_set() - set particular bit in the scan mask
- * @indio_dev IIO device structure
- * @buffer: the buffer whose scan mask we are interested in
- * @bit: the bit to be set.
- **/
-int iio_scan_mask_set(struct iio_dev *indio_dev,
- struct iio_buffer *buffer, int bit);
-
-/**
* iio_push_to_buffers() - push to a registered buffer.
* @indio_dev: iio_dev structure for device.
* @data: Full scan.
@@ -159,56 +152,6 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
int iio_update_demux(struct iio_dev *indio_dev);
-/**
- * iio_buffer_register() - register the buffer with IIO core
- * @indio_dev: device with the buffer to be registered
- * @channels: the channel descriptions used to construct buffer
- * @num_channels: the number of channels
- **/
-int iio_buffer_register(struct iio_dev *indio_dev,
- const struct iio_chan_spec *channels,
- int num_channels);
-
-/**
- * iio_buffer_unregister() - unregister the buffer from IIO core
- * @indio_dev: the device with the buffer to be unregistered
- **/
-void iio_buffer_unregister(struct iio_dev *indio_dev);
-
-/**
- * iio_buffer_read_length() - attr func to get number of datums in the buffer
- **/
-ssize_t iio_buffer_read_length(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-/**
- * iio_buffer_write_length() - attr func to set number of datums in the buffer
- **/
-ssize_t iio_buffer_write_length(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len);
-/**
- * iio_buffer_store_enable() - attr to turn the buffer on
- **/
-ssize_t iio_buffer_store_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len);
-/**
- * iio_buffer_show_enable() - attr to see if the buffer is on
- **/
-ssize_t iio_buffer_show_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
- iio_buffer_read_length, \
- iio_buffer_write_length)
-
-#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
- iio_buffer_show_enable, \
- iio_buffer_store_enable)
-
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
@@ -232,16 +175,6 @@ static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
#else /* CONFIG_IIO_BUFFER */
-static inline int iio_buffer_register(struct iio_dev *indio_dev,
- const struct iio_chan_spec *channels,
- int num_channels)
-{
- return 0;
-}
-
-static inline void iio_buffer_unregister(struct iio_dev *indio_dev)
-{}
-
static inline void iio_buffer_get(struct iio_buffer *buffer) {}
static inline void iio_buffer_put(struct iio_buffer *buffer) {}
diff --git a/include/linux/iio/common/ssp_sensors.h b/include/linux/iio/common/ssp_sensors.h
new file mode 100644
index 0000000..f4d1b0e
--- /dev/null
+++ b/include/linux/iio/common/ssp_sensors.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _SSP_SENSORS_H_
+#define _SSP_SENSORS_H_
+
+#include <linux/iio/iio.h>
+
+#define SSP_TIME_SIZE 4
+#define SSP_ACCELEROMETER_SIZE 6
+#define SSP_GYROSCOPE_SIZE 6
+#define SSP_BIO_HRM_RAW_SIZE 8
+#define SSP_BIO_HRM_RAW_FAC_SIZE 36
+#define SSP_BIO_HRM_LIB_SIZE 8
+
+/**
+ * enum ssp_sensor_type - SSP sensor type
+ */
+enum ssp_sensor_type {
+ SSP_ACCELEROMETER_SENSOR = 0,
+ SSP_GYROSCOPE_SENSOR,
+ SSP_GEOMAGNETIC_UNCALIB_SENSOR,
+ SSP_GEOMAGNETIC_RAW,
+ SSP_GEOMAGNETIC_SENSOR,
+ SSP_PRESSURE_SENSOR,
+ SSP_GESTURE_SENSOR,
+ SSP_PROXIMITY_SENSOR,
+ SSP_TEMPERATURE_HUMIDITY_SENSOR,
+ SSP_LIGHT_SENSOR,
+ SSP_PROXIMITY_RAW,
+ SSP_ORIENTATION_SENSOR,
+ SSP_STEP_DETECTOR,
+ SSP_SIG_MOTION_SENSOR,
+ SSP_GYRO_UNCALIB_SENSOR,
+ SSP_GAME_ROTATION_VECTOR,
+ SSP_ROTATION_VECTOR,
+ SSP_STEP_COUNTER,
+ SSP_BIO_HRM_RAW,
+ SSP_BIO_HRM_RAW_FAC,
+ SSP_BIO_HRM_LIB,
+ SSP_SENSOR_MAX,
+};
+
+struct ssp_data;
+
+/**
+ * struct ssp_sensor_data - Sensor object
+ * @process_data: Callback to feed sensor data.
+ * @type: Used sensor type.
+ * @buffer: Received data buffer.
+ */
+struct ssp_sensor_data {
+ int (*process_data)(struct iio_dev *indio_dev, void *buf,
+ int64_t timestamp);
+ enum ssp_sensor_type type;
+ u8 *buffer;
+};
+
+void ssp_register_consumer(struct iio_dev *indio_dev,
+ enum ssp_sensor_type type);
+
+int ssp_enable_sensor(struct ssp_data *data, enum ssp_sensor_type type,
+ u32 delay);
+
+int ssp_disable_sensor(struct ssp_data *data, enum ssp_sensor_type type);
+
+u32 ssp_get_sensor_delay(struct ssp_data *data, enum ssp_sensor_type);
+
+int ssp_change_delay(struct ssp_data *data, enum ssp_sensor_type type,
+ u32 delay);
+#endif /* _SSP_SENSORS_H_ */
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 651f9a0..26fb8f6 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -151,6 +151,16 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_write_channel_raw() - write to a given channel
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ *
+ * Note that raw writes to IIO channels are in DAC counts and hence a
+ * scale will need to be applied if standard units are required.
+ */
+int iio_write_channel_raw(struct iio_channel *chan, int val);
+
+/**
* iio_get_channel_type() - get the type of a channel
* @channel: The channel being queried.
* @type: The type of the channel.
@@ -191,7 +201,7 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val,
 * The scale factor allows increasing the precision of the returned value. For
* a scale factor of 1 the function will return the result in the normal IIO
* unit for the channel type. E.g. millivolt for voltage channels, if you want
- * nanovolts instead pass 1000 as the scale factor.
+ * nanovolts instead pass 1000000 as the scale factor.
*/
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
int *processed, unsigned int scale);
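To make the scale-factor convention concrete, here is a hedged consumer-side sketch. The channel is assumed to have been obtained earlier with iio_channel_get(), and foo_read_nanovolts() is an invented name.

#include <linux/iio/consumer.h>

/* Hypothetical consumer helper, not from this patch. */
static int foo_read_nanovolts(struct iio_channel *chan, int *nv)
{
	int raw, ret;

	ret = iio_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	/* A scale of 1000000 turns the default millivolt result of a
	 * voltage channel into nanovolts, as described above.
	 */
	return iio_convert_raw_to_processed(chan, raw, nv, 1000000);
}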
diff --git a/include/linux/iio/dac/max517.h b/include/linux/iio/dac/max517.h
index f6d1d25..7668716 100644
--- a/include/linux/iio/dac/max517.h
+++ b/include/linux/iio/dac/max517.h
@@ -9,7 +9,7 @@
#define IIO_DAC_MAX517_H_
struct max517_platform_data {
- u16 vref_mv[2];
+ u16 vref_mv[8];
};
#endif /* IIO_DAC_MAX517_H_ */
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 03fa332..8ad87d1 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -9,22 +9,8 @@
#ifndef _IIO_EVENTS_H_
#define _IIO_EVENTS_H_
-#include <linux/ioctl.h>
-#include <linux/types.h>
#include <linux/iio/types.h>
-
-/**
- * struct iio_event_data - The actual event being pushed to userspace
- * @id: event identifier
- * @timestamp: best estimate of time of event occurrence (often from
- * the interrupt handler)
- */
-struct iio_event_data {
- __u64 id;
- __s64 timestamp;
-};
-
-#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
+#include <uapi/linux/iio/events.h>
/**
* IIO_EVENT_CODE() - create event identifier
@@ -70,18 +56,4 @@ struct iio_event_data {
#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
-#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
-
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
-
-#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
-
-/* Event code number extraction depends on which type of event we have.
- * Perhaps review this function in the future*/
-#define IIO_EVENT_CODE_EXTRACT_CHAN(mask) ((__s16)(mask & 0xFFFF))
-#define IIO_EVENT_CODE_EXTRACT_CHAN2(mask) ((__s16)(((mask) >> 16) & 0xFFFF))
-
-#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
-#define IIO_EVENT_CODE_EXTRACT_DIFF(mask) (((mask) >> 55) & 0x1)
-
#endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 3642ce7..f791482 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -32,12 +32,20 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
IIO_CHAN_INFO_AVERAGE_RAW,
IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
IIO_CHAN_INFO_SAMP_FREQ,
IIO_CHAN_INFO_FREQUENCY,
IIO_CHAN_INFO_PHASE,
IIO_CHAN_INFO_HARDWAREGAIN,
IIO_CHAN_INFO_HYSTERESIS,
IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
+ IIO_CHAN_INFO_CALIBEMISSIVITY,
+ IIO_CHAN_INFO_OVERSAMPLING_RATIO,
};
enum iio_shared_by {
@@ -284,10 +292,11 @@ static inline s64 iio_get_time_ns(void)
/* Device operating modes */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
+#define INDIO_BUFFER_SOFTWARE 0x04
#define INDIO_BUFFER_HARDWARE 0x08
#define INDIO_ALL_BUFFER_MODES \
- (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE)
+ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)
#define INDIO_MAX_RAW_ELEMENTS 4
@@ -332,6 +341,16 @@ struct iio_dev;
* provide a custom of_xlate function that reads the
* *args* and returns the appropriate index in registered
* IIO channels array.
+ * @hwfifo_set_watermark: function pointer to set the current hardware
+ * fifo watermark level; see hwfifo_* entries in
+ * Documentation/ABI/testing/sysfs-bus-iio for details on
+ * how the hardware fifo operates
+ * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
+ * in the hardware fifo to the device buffer. The driver
+ * should not flush more than count samples. The function
+ * must return the number of samples flushed, 0 if no
+ * samples were flushed or a negative integer if no samples
+ * were flushed and there was an error.
**/
struct iio_info {
struct module *driver_module;
@@ -393,6 +412,9 @@ struct iio_info {
unsigned *readval);
int (*of_xlate)(struct iio_dev *indio_dev,
const struct of_phandle_args *iiospec);
+ int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
+ int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
+ unsigned count);
};
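As a rough illustration of the two new hwfifo callbacks (hypothetical driver, every foo_* name invented), a device with a hardware FIFO could wire them into its iio_info like this; the register accesses behind the helpers are omitted.

#include <linux/module.h>
#include <linux/iio/iio.h>

static int foo_hwfifo_set_watermark(struct iio_dev *indio_dev, unsigned val)
{
	/* program the device to interrupt once "val" samples are queued */
	return 0;
}

static int foo_hwfifo_flush(struct iio_dev *indio_dev, unsigned count)
{
	/* move at most "count" samples from the hardware FIFO into the
	 * device buffer and return the number actually flushed
	 */
	return 0;
}

static const struct iio_info foo_info = {
	.driver_module		= THIS_MODULE,
	.hwfifo_set_watermark	= foo_hwfifo_set_watermark,
	.hwfifo_flush_to_buffer	= foo_hwfifo_flush,
};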
/**
@@ -591,7 +613,8 @@ void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
return indio_dev->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
+ & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
+ INDIO_BUFFER_SOFTWARE);
}
/**
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 25eeac7..1683bc7 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -5,7 +5,10 @@
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
-struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev);
+struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
+struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
+void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
+
#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 4a2af8a..32b5795 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -10,77 +10,15 @@
#ifndef _IIO_TYPES_H_
#define _IIO_TYPES_H_
-enum iio_chan_type {
- IIO_VOLTAGE,
- IIO_CURRENT,
- IIO_POWER,
- IIO_ACCEL,
- IIO_ANGL_VEL,
- IIO_MAGN,
- IIO_LIGHT,
- IIO_INTENSITY,
- IIO_PROXIMITY,
- IIO_TEMP,
- IIO_INCLI,
- IIO_ROT,
- IIO_ANGL,
- IIO_TIMESTAMP,
- IIO_CAPACITANCE,
- IIO_ALTVOLTAGE,
- IIO_CCT,
- IIO_PRESSURE,
- IIO_HUMIDITYRELATIVE,
-};
-
-enum iio_modifier {
- IIO_NO_MOD,
- IIO_MOD_X,
- IIO_MOD_Y,
- IIO_MOD_Z,
- IIO_MOD_X_AND_Y,
- IIO_MOD_X_AND_Z,
- IIO_MOD_Y_AND_Z,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_MOD_X_OR_Y,
- IIO_MOD_X_OR_Z,
- IIO_MOD_Y_OR_Z,
- IIO_MOD_X_OR_Y_OR_Z,
- IIO_MOD_LIGHT_BOTH,
- IIO_MOD_LIGHT_IR,
- IIO_MOD_ROOT_SUM_SQUARED_X_Y,
- IIO_MOD_SUM_SQUARED_X_Y_Z,
- IIO_MOD_LIGHT_CLEAR,
- IIO_MOD_LIGHT_RED,
- IIO_MOD_LIGHT_GREEN,
- IIO_MOD_LIGHT_BLUE,
- IIO_MOD_QUATERNION,
- IIO_MOD_TEMP_AMBIENT,
- IIO_MOD_TEMP_OBJECT,
- IIO_MOD_NORTH_MAGN,
- IIO_MOD_NORTH_TRUE,
- IIO_MOD_NORTH_MAGN_TILT_COMP,
- IIO_MOD_NORTH_TRUE_TILT_COMP
-};
-
-enum iio_event_type {
- IIO_EV_TYPE_THRESH,
- IIO_EV_TYPE_MAG,
- IIO_EV_TYPE_ROC,
- IIO_EV_TYPE_THRESH_ADAPTIVE,
- IIO_EV_TYPE_MAG_ADAPTIVE,
-};
+#include <uapi/linux/iio/types.h>
enum iio_event_info {
IIO_EV_INFO_ENABLE,
IIO_EV_INFO_VALUE,
IIO_EV_INFO_HYSTERESIS,
IIO_EV_INFO_PERIOD,
-};
-
-enum iio_event_direction {
- IIO_EV_DIR_EITHER,
- IIO_EV_DIR_RISING,
- IIO_EV_DIR_FALLING,
+ IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
+ IIO_EV_INFO_LOW_PASS_FILTER_3DB,
};
#define IIO_VAL_INT 1
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 46da024..0e707f0 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -11,33 +11,35 @@ struct sk_buff;
struct netlink_callback;
struct inet_diag_handler {
- void (*dump)(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct inet_diag_req_v2 *r,
- struct nlattr *bc);
-
- int (*dump_one)(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req);
-
- void (*idiag_get_info)(struct sock *sk,
- struct inet_diag_msg *r,
- void *info);
- __u16 idiag_type;
+ void (*dump)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc);
+
+ int (*dump_one)(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req);
+
+ void (*idiag_get_info)(struct sock *sk,
+ struct inet_diag_msg *r,
+ void *info);
+ __u16 idiag_type;
+ __u16 idiag_info_size;
};
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh);
+ struct sk_buff *skb, const struct inet_diag_req_v2 *req,
+ struct user_namespace *user_ns,
+ u32 pid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
- struct netlink_callback *cb, struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req);
+ struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req);
int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0a21fbe..a4328ce 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -120,6 +120,9 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
|| (!IN_DEV_FORWARD(in_dev) && \
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
+#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
+ IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
diff --git a/include/linux/init.h b/include/linux/init.h
index 2df8e8d..b449f37 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -91,14 +91,6 @@
#define __exit __section(.exit.text) __exitused __cold notrace
-/* temporary, until all users are removed */
-#define __cpuinit
-#define __cpuinitdata
-#define __cpuinitconst
-#define __cpuexit
-#define __cpuexitdata
-#define __cpuexitconst
-
/* Used for MEMORY_HOTPLUG */
#define __meminit __section(.meminit.text) __cold notrace
#define __meminitdata __section(.meminit.data)
@@ -116,9 +108,6 @@
#define __INITRODATA .section ".init.rodata","a",%progbits
#define __FINITDATA .previous
-/* temporary, until all users are removed */
-#define __CPUINIT
-
#define __MEMINIT .section ".meminit.text", "ax"
#define __MEMINITDATA .section ".meminit.data", "aw"
#define __MEMINITRODATA .section ".meminit.rodata", "a"
@@ -253,88 +242,48 @@ struct obs_kernel_param {
* obs_kernel_param "array" too far apart in .init.setup.
*/
#define __setup_param(str, unique_id, fn, early) \
- static const char __setup_str_##unique_id[] __initconst \
- __aligned(1) = str; \
- static struct obs_kernel_param __setup_##unique_id \
- __used __section(.init.setup) \
- __attribute__((aligned((sizeof(long))))) \
+ static const char __setup_str_##unique_id[] __initconst \
+ __aligned(1) = str; \
+ static struct obs_kernel_param __setup_##unique_id \
+ __used __section(.init.setup) \
+ __attribute__((aligned((sizeof(long))))) \
= { __setup_str_##unique_id, fn, early }
-#define __setup(str, fn) \
+#define __setup(str, fn) \
__setup_param(str, fn, fn, 0)
-/* NOTE: fn is as per module_param, not __setup! Emits warning if fn
- * returns non-zero. */
-#define early_param(str, fn) \
+/*
+ * NOTE: fn is as per module_param, not __setup!
+ * Emits warning if fn returns non-zero.
+ */
+#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
+#define early_param_on_off(str_on, str_off, var, config) \
+ \
+ int var = IS_ENABLED(config); \
+ \
+ static int __init parse_##var##_on(char *arg) \
+ { \
+ var = 1; \
+ return 0; \
+ } \
+ __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \
+ \
+ static int __init parse_##var##_off(char *arg) \
+ { \
+ var = 0; \
+ return 0; \
+ } \
+ __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
+
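A hedged usage sketch of the new macro, with every identifier invented for illustration:

/* Hypothetical usage: expands to an int "foo_enabled" defaulting to
 * IS_ENABLED(CONFIG_FOO), which can be forced from the kernel command
 * line with the early parameters "foo" and "nofoo".
 */
early_param_on_off("foo", "nofoo", foo_enabled, CONFIG_FOO);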
/* Relies on boot_command_line being set */
void __init parse_early_param(void);
void __init parse_early_options(char *cmdline);
#endif /* __ASSEMBLY__ */
-/**
- * module_init() - driver initialization entry point
- * @x: function to be run at kernel boot time or module insertion
- *
- * module_init() will either be called during do_initcalls() (if
- * builtin) or at module insertion time (if a module). There can only
- * be one per module.
- */
-#define module_init(x) __initcall(x);
-
-/**
- * module_exit() - driver exit entry point
- * @x: function to be run when driver is removed
- *
- * module_exit() will wrap the driver clean-up code
- * with cleanup_module() when used with rmmod when
- * the driver is a module. If the driver is statically
- * compiled into the kernel, module_exit() has no effect.
- * There can only be one per module.
- */
-#define module_exit(x) __exitcall(x);
-
#else /* MODULE */
-/*
- * In most cases loadable modules do not need custom
- * initcall levels. There are still some valid cases where
- * a driver may be needed early if built in, and does not
- * matter when built as a loadable module. Like bus
- * snooping debug drivers.
- */
-#define early_initcall(fn) module_init(fn)
-#define core_initcall(fn) module_init(fn)
-#define core_initcall_sync(fn) module_init(fn)
-#define postcore_initcall(fn) module_init(fn)
-#define postcore_initcall_sync(fn) module_init(fn)
-#define arch_initcall(fn) module_init(fn)
-#define subsys_initcall(fn) module_init(fn)
-#define subsys_initcall_sync(fn) module_init(fn)
-#define fs_initcall(fn) module_init(fn)
-#define fs_initcall_sync(fn) module_init(fn)
-#define rootfs_initcall(fn) module_init(fn)
-#define device_initcall(fn) module_init(fn)
-#define device_initcall_sync(fn) module_init(fn)
-#define late_initcall(fn) module_init(fn)
-#define late_initcall_sync(fn) module_init(fn)
-
-#define console_initcall(fn) module_init(fn)
-#define security_initcall(fn) module_init(fn)
-
-/* Each module must use one module_init(). */
-#define module_init(initfn) \
- static inline initcall_t __inittest(void) \
- { return initfn; } \
- int init_module(void) __attribute__((alias(#initfn)));
-
-/* This is only required if you want to be unloadable. */
-#define module_exit(exitfn) \
- static inline exitcall_t __exittest(void) \
- { return exitfn; } \
- void cleanup_module(void) __attribute__((alias(#exitfn)));
-
#define __setup_param(str, unique_id, fn) /* nothing */
#define __setup(str, func) /* nothing */
#endif
@@ -342,24 +291,6 @@ void __init parse_early_options(char *cmdline);
/* Data marked not to be saved by software suspend */
#define __nosavedata __section(.data..nosave)
-/* This means "can be init if no module support, otherwise module load
- may call it." */
-#ifdef CONFIG_MODULES
-#define __init_or_module
-#define __initdata_or_module
-#define __initconst_or_module
-#define __INIT_OR_MODULE .text
-#define __INITDATA_OR_MODULE .data
-#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
-#else
-#define __init_or_module __init
-#define __initdata_or_module __initdata
-#define __initconst_or_module __initconst
-#define __INIT_OR_MODULE __INIT
-#define __INITDATA_OR_MODULE __INITDATA
-#define __INITRODATA_OR_MODULE __INITRODATA
-#endif /*CONFIG_MODULES*/
-
#ifdef MODULE
#define __exit_p(x) x
#else
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 3037fc0..e8493fe 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,13 +25,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-#ifdef CONFIG_CGROUPS
-#define INIT_GROUP_RWSEM(sig) \
- .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
-#else
-#define INIT_GROUP_RWSEM(sig)
-#endif
-
#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk) \
.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -50,13 +43,11 @@ extern struct fs_struct init_fs;
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
.cputimer = { \
- .cputime = INIT_CPUTIME, \
- .running = 0, \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ .cputime_atomic = INIT_CPUTIME_ATOMIC, \
+ .running = 0, \
}, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
- INIT_GROUP_RWSEM(sig) \
}
extern struct nsproxy init_nsproxy;
@@ -175,6 +166,13 @@ extern struct task_group root_task_group;
# define INIT_NUMA_BALANCING(tsk)
#endif
+#ifdef CONFIG_KASAN
+# define INIT_KASAN(tsk) \
+ .kasan_depth = 1,
+#else
+# define INIT_KASAN(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -193,6 +191,9 @@ extern struct task_group root_task_group;
.nr_cpus_allowed= NR_CPUS, \
.mm = NULL, \
.active_mm = &init_mm, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
.se = { \
.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
}, \
@@ -247,6 +248,7 @@ extern struct task_group root_task_group;
INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
+ INIT_KASAN(tsk) \
}
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index f583ff6..d7188de 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -119,7 +119,8 @@ struct input_mt_pos {
};
int input_mt_assign_slots(struct input_dev *dev, int *slots,
- const struct input_mt_pos *pos, int num_pos);
+ const struct input_mt_pos *pos, int num_pos,
+ int dmax);
int input_mt_get_slot_by_key(struct input_dev *dev, int key);
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index 08a5ef6..eecc9ea 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -12,9 +12,10 @@
#include <linux/input.h>
#ifdef CONFIG_OF
-void touchscreen_parse_of_params(struct input_dev *dev);
+void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
#else
-static inline void touchscreen_parse_of_params(struct input_dev *dev)
+static inline void touchscreen_parse_of_params(struct input_dev *dev,
+ bool multitouch)
{
}
#endif
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index a65208a..d9a366d 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -87,6 +87,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/*
* Decoding Capability Register
*/
+#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
@@ -115,10 +116,20 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
* Extended Capability Register
*/
-#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
+#define ecap_pasid(e) ((e >> 40) & 0x1)
+#define ecap_pss(e) ((e >> 35) & 0x1f)
+#define ecap_eafs(e) ((e >> 34) & 0x1)
+#define ecap_nwfs(e) ((e >> 33) & 0x1)
+#define ecap_srs(e) ((e >> 31) & 0x1)
+#define ecap_ers(e) ((e >> 30) & 0x1)
+#define ecap_prs(e) ((e >> 29) & 0x1)
+/* PASID support used to be on bit 28 */
+#define ecap_dis(e) ((e >> 27) & 0x1)
+#define ecap_nest(e) ((e >> 26) & 0x1)
+#define ecap_mts(e) ((e >> 25) & 0x1)
+#define ecap_ecs(e) ((e >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) \
- (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
+#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) ((e >> 6) & 0x1)
@@ -180,6 +191,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)
+/* DMA_RTADDR_REG */
+#define DMA_RTADDR_RTT (((u64)1) << 11)
+
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
@@ -283,9 +297,12 @@ struct q_inval {
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
+#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
#define INTR_REMAP_TABLE_ENTRIES 65536
+struct irq_domain;
+
struct ir_table {
struct irte *base;
unsigned long *bitmap;
@@ -307,6 +324,9 @@ enum {
MAX_SR_DMAR_REGS
};
+#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
+#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
@@ -335,9 +355,12 @@ struct intel_iommu {
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
+ struct irq_domain *ir_domain;
+ struct irq_domain *ir_msi_domain;
#endif
struct device *iommu_dev; /* IOMMU-sysfs device */
int node;
+ u32 flags; /* Software defined flags */
};
static inline void __iommu_flush_cache(
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h
deleted file mode 100644
index 10496bd..0000000
--- a/include/linux/intel_mid_dma.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * intel_mid_dma.h - Intel MID DMA Drivers
- *
- * Copyright (C) 2008-10 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#ifndef __INTEL_MID_DMA_H__
-#define __INTEL_MID_DMA_H__
-
-#include <linux/dmaengine.h>
-
-#define DMA_PREP_CIRCULAR_LIST (1 << 10)
-
-/*DMA mode configurations*/
-enum intel_mid_dma_mode {
- LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/
- LNW_DMA_MEM_TO_PER, /*memory to periphral configuration*/
- LNW_DMA_MEM_TO_MEM, /*mem to mem confg (testing only)*/
-};
-
-/*DMA handshaking*/
-enum intel_mid_dma_hs_mode {
- LNW_DMA_HW_HS = 0, /*HW Handshaking only*/
- LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/
-};
-
-/*Burst size configuration*/
-enum intel_mid_dma_msize {
- LNW_DMA_MSIZE_1 = 0x0,
- LNW_DMA_MSIZE_4 = 0x1,
- LNW_DMA_MSIZE_8 = 0x2,
- LNW_DMA_MSIZE_16 = 0x3,
- LNW_DMA_MSIZE_32 = 0x4,
- LNW_DMA_MSIZE_64 = 0x5,
-};
-
-/**
- * struct intel_mid_dma_slave - DMA slave structure
- *
- * @dirn: DMA trf direction
- * @src_width: tx register width
- * @dst_width: rx register width
- * @hs_mode: HW/SW handshaking mode
- * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
- * @src_msize: Source DMA burst size
- * @dst_msize: Dst DMA burst size
- * @per_addr: Periphral address
- * @device_instance: DMA peripheral device instance, we can have multiple
- * peripheral device connected to single DMAC
- */
-struct intel_mid_dma_slave {
- enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/
- enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
- unsigned int device_instance; /*0, 1 for periphral instance*/
- struct dma_slave_config dma_slave;
-};
-
-#endif /*__INTEL_MID_DMA_H__*/
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index d9b05b5..be7e75c 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -39,8 +39,6 @@
* These flags used only by the kernel as part of the
* irq handling routines.
*
- * IRQF_DISABLED - keep irqs disabled when calling the action handler.
- * DEPRECATED. This flag is a NOOP and scheduled to be removed
* IRQF_SHARED - allow sharing the irq among several devices
* IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
* IRQF_TIMER - Flag to mark this interrupt as timer interrupt
@@ -52,13 +50,18 @@
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
- * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
+ * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
+ * that this interrupt will wake the system from a suspended
+ * state. See Documentation/power/suspend-and-interrupts.txt
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
* resume time.
+ * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
+ * interrupt handler after suspending interrupts. For system
+ * wakeup devices, users need to implement wakeup detection in
+ * their interrupt handlers.
*/
-#define IRQF_DISABLED 0x00000020
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
@@ -70,6 +73,7 @@
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
+#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
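For reference, a minimal sketch of how a driver might opt into the new flag on a shared line; the names foo_irq_handler and foo_request_irq are hypothetical and not taken from this patch.

#include <linux/interrupt.h>

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	/* with IRQF_COND_SUSPEND this may run while the system is
	 * suspending, so wakeup detection has to happen here
	 */
	return IRQ_HANDLED;
}

static int foo_request_irq(unsigned int irq, void *dev)
{
	return request_irq(irq, foo_irq_handler,
			   IRQF_SHARED | IRQF_COND_SUSPEND, "foo", dev);
}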
@@ -184,6 +188,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
#endif
extern void disable_irq_nosync(unsigned int irq);
+extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
@@ -356,6 +361,20 @@ static inline int disable_irq_wake(unsigned int irq)
return irq_set_irq_wake(irq, 0);
}
+/*
+ * irq_get_irqchip_state/irq_set_irqchip_state specific flags
+ */
+enum irqchip_irq_state {
+ IRQCHIP_STATE_PENDING, /* Is interrupt pending? */
+ IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */
+ IRQCHIP_STATE_MASKED, /* Is interrupt masked? */
+ IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */
+};
+
+extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+ bool *state);
+extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+ bool state);
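A small hedged example of the new accessors; the wrapper foo_irq_is_pending() and its use case (e.g. checking a line before forwarding it to a guest) are invented.

#include <linux/interrupt.h>

static bool foo_irq_is_pending(unsigned int irq)
{
	bool pending = false;

	/* non-zero return means the irqchip does not implement the callback */
	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		return false;

	return pending;
}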
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
@@ -394,7 +413,8 @@ enum
BLOCK_IOPOLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ,
+ HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
+ numbering. Sigh! */
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
NR_SOFTIRQS
@@ -573,10 +593,10 @@ tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
clockid_t which_clock, enum hrtimer_mode mode);
static inline
-int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
+void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+ const enum hrtimer_mode mode)
{
- return hrtimer_start(&ttimer->timer, time, mode);
+ hrtimer_start(&ttimer->timer, time, mode);
}
static inline
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 657fab4..c27dde7 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -141,6 +141,7 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
+ preempt_disable();
pagefault_disable();
return ((char __force __iomem *) mapping) + offset;
}
@@ -149,6 +150,7 @@ static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
pagefault_enable();
+ preempt_enable();
}
/* Non-atomic map/unmap */
diff --git a/include/linux/io.h b/include/linux/io.h
index fa02e55..fb5a998 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -19,6 +19,7 @@
#define _LINUX_IO_H
#include <linux/types.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -38,6 +39,14 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
}
#endif
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+void __init ioremap_huge_init(void);
+int arch_ioremap_pud_supported(void);
+int arch_ioremap_pmd_supported(void);
+#else
+static inline void ioremap_huge_init(void) { }
+#endif
+
/*
* Managed iomap interface
*/
@@ -64,6 +73,8 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
resource_size_t size);
+void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
+ resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
@@ -101,6 +112,13 @@ static inline void arch_phys_wc_del(int handle)
}
#define arch_phys_wc_add arch_phys_wc_add
+#ifndef arch_phys_wc_index
+static inline int arch_phys_wc_index(int handle)
+{
+ return -1;
+}
+#define arch_phys_wc_index arch_phys_wc_index
+#endif
#endif
#endif /* _LINUX_IO_H */
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
new file mode 100644
index 0000000..bbced83
--- /dev/null
+++ b/include/linux/iommu-common.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_IOMMU_COMMON_H
+#define _LINUX_IOMMU_COMMON_H
+
+#include <linux/spinlock_types.h>
+#include <linux/device.h>
+#include <asm/page.h>
+
+#define IOMMU_POOL_HASHBITS 4
+#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+ unsigned long start;
+ unsigned long end;
+ unsigned long hint;
+ spinlock_t lock;
+};
+
+struct iommu_map_table {
+ unsigned long table_map_base;
+ unsigned long table_shift;
+ unsigned long nr_pools;
+ void (*lazy_flush)(struct iommu_map_table *);
+ unsigned long poolsize;
+ struct iommu_pool pools[IOMMU_NR_POOLS];
+ u32 flags;
+#define IOMMU_HAS_LARGE_POOL 0x00000001
+#define IOMMU_NO_SPAN_BOUND 0x00000002
+#define IOMMU_NEED_FLUSH 0x00000004
+ struct iommu_pool large_pool;
+ unsigned long *map;
+};
+
+extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+ unsigned long num_entries,
+ u32 table_shift,
+ void (*lazy_flush)(struct iommu_map_table *),
+ bool large_pool, u32 npools,
+ bool skip_span_boundary_check);
+
+extern unsigned long iommu_tbl_range_alloc(struct device *dev,
+ struct iommu_map_table *iommu,
+ unsigned long npages,
+ unsigned long *handle,
+ unsigned long mask,
+ unsigned int align_order);
+
+extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
+ u64 dma_addr, unsigned long npages,
+ unsigned long entry);
+
+#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 38daa45..f9c1b6d 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -51,9 +51,33 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
+/* Domain feature flags */
+#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
+#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
+ implementation */
+#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+
+/*
+ * These are the possible domain types
+ *
+ * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
+ * devices
+ * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
+ * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
+ * for VMs
+ * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
+ * This flag allows IOMMU drivers to implement
+ * certain optimizations for these domains
+ */
+#define IOMMU_DOMAIN_BLOCKED (0U)
+#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
+#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
+#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API)
+
struct iommu_domain {
+ unsigned type;
const struct iommu_ops *ops;
- void *priv;
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
@@ -90,6 +114,20 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};
+/**
+ * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * @list: Linked list pointers
+ * @start: System physical start address of the region
+ * @length: Length of the region in bytes
+ * @prot: IOMMU Protection flags (READ/WRITE/...)
+ */
+struct iommu_dm_region {
+ struct list_head list;
+ phys_addr_t start;
+ size_t length;
+ int prot;
+};
+
#ifdef CONFIG_IOMMU_API
/**
@@ -113,8 +151,11 @@ enum iommu_attr {
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
- int (*domain_init)(struct iommu_domain *domain);
- void (*domain_destroy)(struct iommu_domain *domain);
+
+ /* Domain allocation and freeing by the iommu driver */
+ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+ void (*domain_free)(struct iommu_domain *);
+
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
@@ -132,6 +173,10 @@ struct iommu_ops {
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
+ /* Request/Free a list of direct mapping requirements for a device */
+ void (*get_dm_regions)(struct device *dev, struct list_head *list);
+ void (*put_dm_regions)(struct device *dev, struct list_head *list);
+
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
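To show how the new allocation callbacks replace domain_init/domain_destroy and the removed ->priv pointer, here is a bare-bones sketch for a hypothetical IOMMU driver; struct foo_domain and both callbacks are invented for illustration.

#include <linux/kernel.h>
#include <linux/iommu.h>
#include <linux/slab.h>

/* The driver-private domain embeds the generic struct iommu_domain. */
struct foo_domain {
	struct iommu_domain	domain;
	/* driver page-table state would live here */
};

static struct iommu_domain *foo_domain_alloc(unsigned type)
{
	struct foo_domain *fd;

	/* this sketch only supports API-managed (unmanaged) domains */
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return NULL;

	return &fd->domain;
}

static void foo_domain_free(struct iommu_domain *dom)
{
	kfree(container_of(dom, struct foo_domain, domain));
}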
@@ -166,6 +211,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -177,6 +223,10 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
+extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern int iommu_request_dm_for_dev(struct device *dev);
+
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
@@ -200,6 +250,7 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
@@ -207,7 +258,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
struct device *iommu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
- const char *fmt, ...);
+ const char *fmt, ...) __printf(4, 5);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
@@ -305,6 +356,11 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
{
}
+static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
+{
+ return NULL;
+}
+
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, int gfp_order, int prot)
{
@@ -346,6 +402,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
+static inline void iommu_get_dm_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
+static inline void iommu_put_dm_regions(struct device *dev,
+ struct list_head *list)
+{
+}
+
+static inline int iommu_request_dm_for_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
static inline int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
new file mode 100644
index 0000000..1c30014
--- /dev/null
+++ b/include/linux/iopoll.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_IOPOLL_H
+#define _LINUX_IOPOLL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+/**
+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ for (;;) { \
+ (val) = op(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ (val) = op(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+/**
+ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
+ * be less than ~10us since udelay is used (see
+ * Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ for (;;) { \
+ (val) = op(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ (val) = op(addr); \
+ break; \
+ } \
+ if (delay_us) \
+ udelay(delay_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+
+#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
+
+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
+
+#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)
+
+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
+
+#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)
+
+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
+
+#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)
+
+#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)
+
+#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#endif /* _LINUX_IOPOLL_H */
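A hedged usage example of the polling helpers above; the FOO_STATUS register offset, the ready bit, and the 10 us / 100 ms budget are invented for illustration.

#include <linux/iopoll.h>
#include <linux/bitops.h>

#define FOO_STATUS		0x04		/* hypothetical status register */
#define FOO_STATUS_READY	BIT(0)		/* hypothetical ready bit */

static int foo_wait_ready(void __iomem *base)
{
	u32 val;

	/* sleep up to 10 us between reads, give up after 100 ms */
	return readl_poll_timeout(base + FOO_STATUS, val,
				  val & FOO_STATUS_READY, 10, 100000);
}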
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 2c525022..388e3ae 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -196,10 +196,8 @@ extern struct resource * __request_region(struct resource *,
/* Compatibility cruft */
#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
-#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n))
#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
-extern int __check_region(struct resource *, resource_size_t, resource_size_t);
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -207,12 +205,6 @@ extern int release_mem_region_adjustable(struct resource *, resource_size_t,
resource_size_t);
#endif
-static inline int __deprecated check_region(resource_size_t s,
- resource_size_t n)
-{
- return __check_region(&ioport_resource, s, n);
-}
-
/* Wrappers for managed devices */
struct device;
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 19e81d5..3920a19 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -16,9 +16,6 @@
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN (1)
-
/* iova structure */
struct iova {
struct rb_node node;
@@ -31,6 +28,8 @@ struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
struct rb_node *cached32_node; /* Save last alloced node */
+ unsigned long granule; /* pfn granularity for this domain */
+ unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
};
@@ -39,6 +38,39 @@ static inline unsigned long iova_size(struct iova *iova)
return iova->pfn_hi - iova->pfn_lo + 1;
}
+static inline unsigned long iova_shift(struct iova_domain *iovad)
+{
+ return __ffs(iovad->granule);
+}
+
+static inline unsigned long iova_mask(struct iova_domain *iovad)
+{
+ return iovad->granule - 1;
+}
+
+static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
+{
+ return iova & iova_mask(iovad);
+}
+
+static inline size_t iova_align(struct iova_domain *iovad, size_t size)
+{
+ return ALIGN(size, iovad->granule);
+}
+
+static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
+{
+ return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
+}
+
+static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
+{
+ return iova >> iova_shift(iovad);
+}
+
+int iommu_iova_cache_init(void);
+void iommu_iova_cache_destroy(void);
+
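A hedged sketch of how a caller might combine the helpers above with the extended init_iova_domain() and alloc_iova() prototypes declared further down in this header; the 4K granule, the start pfn of 1, and the 32-bit limit are illustrative values only, and both foo_* functions are invented.

#include <linux/iova.h>
#include <linux/sizes.h>

static void foo_iova_setup(struct iova_domain *iovad)
{
	/* 4K granule, start at pfn 1 so DMA address 0 is never handed out,
	 * and cache allocations below the 32-bit boundary
	 */
	init_iova_domain(iovad, SZ_4K, 1, DMA_BIT_MASK(32) >> PAGE_SHIFT);
}

static dma_addr_t foo_alloc_dma(struct iova_domain *iovad, size_t size)
{
	unsigned long npages = iova_align(iovad, size) >> iova_shift(iovad);
	struct iova *iova = alloc_iova(iovad, npages,
				       DMA_BIT_MASK(32) >> iova_shift(iovad),
				       true);

	return iova ? iova_dma_addr(iovad, iova) : 0;
}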
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -49,7 +81,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
+void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ unsigned long start_pfn, unsigned long pfn_32bit);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c694e7b..82806c6 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -52,6 +52,11 @@ struct ipv6_devconf {
__s32 force_tllao;
__s32 ndisc_notify;
__s32 suppress_frag_ndisc;
+ __s32 accept_ra_mtu;
+ struct ipv6_stable_secret {
+ bool initialized;
+ struct in6_addr secret;
+ } stable_secret;
void *sysctl;
};
@@ -124,6 +129,12 @@ struct ipv6_mc_socklist;
struct ipv6_ac_socklist;
struct ipv6_fl_socklist;
+struct inet6_cork {
+ struct ipv6_txoptions *opt;
+ u8 hop_limit;
+ u8 tclass;
+};
+
/**
* struct ipv6_pinfo - ipv6 private area
*
@@ -216,11 +227,7 @@ struct ipv6_pinfo {
struct ipv6_txoptions *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
- struct {
- struct ipv6_txoptions *opt;
- u8 hop_limit;
- u8 tclass;
- } cork;
+ struct inet6_cork cork;
};
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d09ec7a..92188b0 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -30,6 +30,7 @@
struct seq_file;
struct module;
struct msi_msg;
+enum irqchip_irq_state;
/*
* IRQ line status.
@@ -125,13 +126,21 @@ struct msi_desc;
struct irq_domain;
/**
- * struct irq_data - per irq and irq chip data passed down to chip functions
+ * struct irq_common_data - per irq data shared by all irqchips
+ * @state_use_accessors: status information for irq chip functions.
+ * Use accessor functions to deal with it
+ */
+struct irq_common_data {
+ unsigned int state_use_accessors;
+};
+
+/**
+ * struct irq_data - per irq chip data passed down to chip functions
* @mask: precomputed bitmask for accessing the chip registers
* @irq: interrupt number
* @hwirq: hardware interrupt number, local to the interrupt domain
* @node: node index useful for balancing
- * @state_use_accessors: status information for irq chip functions.
- * Use accessor functions to deal with it
+ * @common: points to data shared by all irqchips
* @chip: low level interrupt hardware access
* @domain: Interrupt translation domain; responsible for mapping
* between hwirq number and linux irq number.
@@ -152,7 +161,7 @@ struct irq_data {
unsigned int irq;
unsigned long hwirq;
unsigned int node;
- unsigned int state_use_accessors;
+ struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -165,7 +174,7 @@ struct irq_data {
};
/*
- * Bit masks for irq_data.state
+ * Bit masks for irq_common_data.state_use_accessors
*
* IRQD_TRIGGER_MASK - Mask for the trigger type bits
* IRQD_SETAFFINITY_PENDING - Affinity setting is pending
@@ -197,34 +206,36 @@ enum {
IRQD_WAKEUP_ARMED = (1 << 19),
};
+#define __irqd_to_state(d) ((d)->common->state_use_accessors)
+
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+ return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}
static inline bool irqd_is_per_cpu(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_PER_CPU;
+ return __irqd_to_state(d) & IRQD_PER_CPU;
}
static inline bool irqd_can_balance(struct irq_data *d)
{
- return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+ return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}
static inline bool irqd_affinity_was_set(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_AFFINITY_SET;
+ return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}
static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
- d->state_use_accessors |= IRQD_AFFINITY_SET;
+ __irqd_to_state(d) |= IRQD_AFFINITY_SET;
}
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_TRIGGER_MASK;
+ return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
/*
@@ -232,43 +243,43 @@ static inline u32 irqd_get_trigger_type(struct irq_data *d)
*/
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
- d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
- d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
}
static inline bool irqd_is_level_type(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_LEVEL;
+ return __irqd_to_state(d) & IRQD_LEVEL;
}
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_WAKEUP_STATE;
+ return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}
static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+ return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}
static inline bool irqd_irq_disabled(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_DISABLED;
+ return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}
static inline bool irqd_irq_masked(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_MASKED;
+ return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}
static inline bool irqd_irq_inprogress(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
+ return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}
static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
- return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+ return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}
@@ -279,12 +290,12 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
*/
static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
{
- d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
+ __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS;
}
static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
{
- d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
+ __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS;
}
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -324,6 +335,9 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* irq_request_resources
* @irq_compose_msi_msg: optional to compose message content for MSI
* @irq_write_msi_msg: optional to write message content for MSI
+ * @irq_get_irqchip_state: return the internal state of an interrupt
+ * @irq_set_irqchip_state: set the internal state of an interrupt
+ * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
* @flags: chip specific flags
*/
struct irq_chip {
@@ -363,6 +377,11 @@ struct irq_chip {
void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
+ int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
+ int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
+
+ int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
+
unsigned long flags;
};
@@ -388,7 +407,6 @@ enum {
IRQCHIP_EOI_THREADED = (1 << 6),
};
-/* This include will go away once we isolated irq_desc usage to core code */
#include <linux/irqdesc.h>
/*
@@ -416,6 +434,7 @@ extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
+extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
@@ -452,6 +471,8 @@ extern void handle_nested_irq(unsigned int irq);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void irq_chip_enable_parent(struct irq_data *data);
+extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
@@ -460,6 +481,9 @@ extern void irq_chip_eoi_parent(struct irq_data *data);
extern int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest,
bool force);
+extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
+extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
+ void *vcpu_info);
#endif
/* Handling of unhandled and spurious interrupts: */
@@ -510,6 +534,15 @@ irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
__irq_set_handler(irq, handle, 1, NULL);
}
+/*
+ * Set a highlevel chained flow handler and its data for a given IRQ.
+ * (a chained handler is automatically enabled and set to
+ * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
+ */
+void
+irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
+ void *data);
+
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -617,6 +650,23 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
+static inline int irq_data_get_node(struct irq_data *d)
+{
+ return d->node;
+}
+
+static inline struct cpumask *irq_get_affinity_mask(int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? d->affinity : NULL;
+}
+
+static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+{
+ return d->affinity;
+}
+
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
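As a rough usage sketch (not taken from this patch), a driver could consume the new
irq_data helpers added above instead of dereferencing irq_data fields directly;
foo_report_irq() is hypothetical:

static void foo_report_irq(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	if (!d)
		return;

	pr_info("irq %u: hwirq %lu node %d trigger %#x\n",
		irq, irqd_to_hwirq(d), irq_data_get_node(d),
		irqd_get_trigger_type(d));
}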
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index bf3fe71..47b9ebd 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -38,16 +38,17 @@ bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
#endif
-void irq_work_run(void);
void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);
#ifdef CONFIG_IRQ_WORK
#include <asm/irq_work.h>
+void irq_work_run(void);
bool irq_work_needs_cpu(void);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
+static inline void irq_work_run(void) { }
#endif
#endif /* _LINUX_IRQ_WORK_H */
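A minimal irq_work sketch (not from this patch; foo_irq_work_fn(), foo_setup() and
foo_poke() are hypothetical). With the stub added above, irq_work_run() may be called
unconditionally and becomes a no-op when CONFIG_IRQ_WORK is off:

static struct irq_work foo_work;

static void foo_irq_work_fn(struct irq_work *work)
{
	pr_info("running after the hard irq context\n");
}

static void foo_setup(void)
{
	init_irq_work(&foo_work, foo_irq_work_fn);
}

static void foo_poke(void)
{
	/* safe to call from NMI or hard interrupt context */
	irq_work_queue(&foo_work);
}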
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 14d7913..6388873 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -11,6 +11,20 @@
#ifndef _LINUX_IRQCHIP_H
#define _LINUX_IRQCHIP_H
+#include <linux/of.h>
+
+/*
+ * This macro must be used by the different irqchip drivers to declare
+ * the association between their DT compatible string and their
+ * initialization function.
+ *
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
+ * same file.
+ * @compat: compatible string of the irqchip driver
+ * @fn: initialization function
+ */
+#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+
#ifdef CONFIG_IRQCHIP
void irqchip_init(void);
#else
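A sketch of how an irqchip driver would use the IRQCHIP_DECLARE() macro documented
above (not part of this patch); the "foo,intc" compatible and foo_of_init() are made up:

static int __init foo_of_init(struct device_node *node,
			      struct device_node *parent)
{
	/* map registers, create the irq domain, set flow handlers, ... */
	return 0;
}
IRQCHIP_DECLARE(foo_intc, "foo,intc", foo_of_init);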
diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h
new file mode 100644
index 0000000..de3419e
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-acpi.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014, Linaro Ltd.
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARM_GIC_ACPI_H_
+#define ARM_GIC_ACPI_H_
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Hardcoded sizes: unlike FDT, the MADT does not provide the memory
+ * region sizes.  That is fine, because the sizes can be inferred from
+ * the GIC architecture specification.
+ */
+#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
+#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
+
+struct acpi_table_header;
+
+int gic_v2_acpi_init(struct acpi_table_header *table);
+void acpi_gic_init(void);
+#else
+static inline void acpi_gic_init(void) { }
+#endif
+
+#endif /* ARM_GIC_ACPI_H_ */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1e8b0cf..ffbc034 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -33,6 +33,7 @@
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_SEIR 0x0068
+#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
@@ -41,14 +42,37 @@
#define GICD_ICACTIVER 0x0380
#define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
+#define GICD_IGRPMODR 0x0D00
+#define GICD_NSACR 0x0E00
#define GICD_IROUTER 0x6000
+#define GICD_IDREGS 0xFFD0
#define GICD_PIDR2 0xFFE8
+/*
+ * Those registers are actually from GICv2, but the spec demands that they
+ * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3).
+ */
+#define GICD_ITARGETSR 0x0800
+#define GICD_SGIR 0x0F00
+#define GICD_CPENDSGIR 0x0F10
+#define GICD_SPENDSGIR 0x0F20
+
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+/*
+ * In systems with a single security state (what we emulate in KVM)
+ * the meaning of the interrupt group enable bits is slightly different
+ */
+#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
+#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
+
+#define GICD_TYPER_LPIS (1U << 17)
+#define GICD_TYPER_MBIS (1U << 16)
+
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
#define GICD_TYPER_LPIS (1U << 17)
@@ -60,6 +84,8 @@
#define GIC_PIDR2_ARCH_GICv3 0x30
#define GIC_PIDR2_ARCH_GICv4 0x40
+#define GIC_V3_DIST_SIZE 0x10000
+
/*
* Re-Distributor registers, offsets from RD_base
*/
@@ -78,6 +104,7 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
+#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
@@ -99,11 +126,27 @@
#define GICR_PROPBASER_WaWb (5U << 7)
#define GICR_PROPBASER_RaWaWt (6U << 7)
#define GICR_PROPBASER_RaWaWb (7U << 7)
+#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+#define GICR_PENDBASER_NonShareable (0U << 10)
+#define GICR_PENDBASER_InnerShareable (1U << 10)
+#define GICR_PENDBASER_OuterShareable (2U << 10)
+#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PENDBASER_nCnB (0U << 7)
+#define GICR_PENDBASER_nC (1U << 7)
+#define GICR_PENDBASER_RaWt (2U << 7)
+#define GICR_PENDBASER_RaWb (3U << 7)
+#define GICR_PENDBASER_WaWt (4U << 7)
+#define GICR_PENDBASER_WaWb (5U << 7)
+#define GICR_PENDBASER_RaWaWt (6U << 7)
+#define GICR_PENDBASER_RaWaWb (7U << 7)
+#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+
/*
* Re-Distributor registers, offsets from SGI_base
*/
+#define GICR_IGROUPR0 GICD_IGROUPR
#define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
@@ -112,11 +155,15 @@
#define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_IGRPMODR0 GICD_IGRPMODR
+#define GICR_NSACR GICD_NSACR
#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_LAST (1U << 4)
+#define GIC_V3_REDIST_SIZE 0x20000
+
#define LPI_PROP_GROUP1 (1 << 1)
#define LPI_PROP_ENABLED (1 << 0)
@@ -134,6 +181,11 @@
#define GITS_TRANSLATER 0x10040
+#define GITS_CTLR_ENABLE (1U << 0)
+#define GITS_CTLR_QUIESCENT (1U << 31)
+
+#define GITS_TYPER_DEVBITS_SHIFT 13
+#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_CBASER_VALID (1UL << 63)
@@ -145,6 +197,7 @@
#define GITS_CBASER_WaWb (5UL << 59)
#define GITS_CBASER_RaWaWt (6UL << 59)
#define GITS_CBASER_RaWaWb (7UL << 59)
+#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
#define GITS_CBASER_NonShareable (0UL << 10)
#define GITS_CBASER_InnerShareable (1UL << 10)
#define GITS_CBASER_OuterShareable (2UL << 10)
@@ -161,6 +214,7 @@
#define GITS_BASER_WaWb (5UL << 59)
#define GITS_BASER_RaWaWt (6UL << 59)
#define GITS_BASER_RaWaWb (7UL << 59)
+#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
@@ -248,6 +302,18 @@
#define ICC_SRE_EL2_SRE (1 << 0)
#define ICC_SRE_EL2_ENABLE (1 << 3)
+#define ICC_SGI1R_TARGET_LIST_SHIFT 0
+#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
+#define ICC_SGI1R_AFFINITY_1_SHIFT 16
+#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_SGI_ID_SHIFT 24
+#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_SHIFT 32
+#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
+#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
+#define ICC_SGI1R_AFFINITY_3_SHIFT 48
+#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
+
/*
* System register definitions
*/
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 71d706d..9de976b 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -95,8 +95,7 @@
struct device_node;
-extern struct irq_chip gic_arch_extn;
-
+void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
@@ -115,11 +114,5 @@ int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
unsigned long gic_get_sgir_physaddr(void);
-extern const struct irq_domain_ops *gic_routable_irq_domain_ops;
-static inline void __init register_routable_domain_ops
- (const struct irq_domain_ops *ops)
-{
- gic_routable_irq_domain_ops = ops;
-}
#endif /* __ASSEMBLY */
#endif
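With gic_arch_extn removed, platform code is expected to pass chip flags instead.
A hedged sketch (not from this patch) of what a caller might look like; foo_init_irq()
is hypothetical:

static void __init foo_init_irq(void)
{
	gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND);
	irqchip_init();
}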
diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h
new file mode 100644
index 0000000..0ee319a
--- /dev/null
+++ b/include/linux/irqchip/ingenic.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_IRQCHIP_INGENIC_H__
+#define __LINUX_IRQCHIP_INGENIC_H__
+
+#include <linux/irq.h>
+
+extern void ingenic_intc_irq_suspend(struct irq_data *data);
+extern void ingenic_intc_irq_resume(struct irq_data *data);
+
+#endif
diff --git a/include/linux/irqchip/irq-crossbar.h b/include/linux/irqchip/irq-crossbar.h
deleted file mode 100644
index e5537b8..0000000
--- a/include/linux/irqchip/irq-crossbar.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * drivers/irqchip/irq-crossbar.h
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-int irqcrossbar_init(void);
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index e06b370..2e3d1af 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -18,9 +18,7 @@
#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
-void omap2_init_irq(void);
void omap3_init_irq(void);
-void ti81xx_init_irq(void);
int omap_irq_pending(void);
void omap_intc_save_context(void);
diff --git a/include/linux/irqchip/irq-sa11x0.h b/include/linux/irqchip/irq-sa11x0.h
new file mode 100644
index 0000000..15db682
--- /dev/null
+++ b/include/linux/irqchip/irq-sa11x0.h
@@ -0,0 +1,17 @@
+/*
+ * Generic IRQ handling for the SA11x0.
+ *
+ * Copyright (C) 2015 Dmitry Eremin-Solenikov
+ * Copyright (C) 1999-2001 Nicolas Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H
+#define __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H
+
+void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start);
+
+#endif
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 420f77b..9b1ad37 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -165,6 +165,8 @@
#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
#define GIC_VPE_PEND_SWINT1_SHF 5
#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
+#define GIC_VPE_PEND_FDC_SHF 6
+#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF)
/* GIC_VPE_RMASK Masks */
#define GIC_VPE_RMASK_WD_SHF 0
@@ -179,6 +181,8 @@
#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
#define GIC_VPE_RMASK_SWINT1_SHF 5
#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
+#define GIC_VPE_RMASK_FDC_SHF 6
+#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF)
/* GIC_VPE_SMASK Masks */
#define GIC_VPE_SMASK_WD_SHF 0
@@ -193,6 +197,8 @@
#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
#define GIC_VPE_SMASK_SWINT1_SHF 5
#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
+#define GIC_VPE_SMASK_FDC_SHF 6
+#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
@@ -240,10 +246,12 @@ extern unsigned int gic_get_count_width(void);
extern cycle_t gic_read_compare(void);
extern void gic_write_compare(cycle_t cnt);
extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
+extern void gic_start_count(void);
+extern void gic_stop_count(void);
extern void gic_send_ipi(unsigned int intr);
extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
-extern unsigned int gic_get_timer_pending(void);
extern int gic_get_c0_compare_int(void);
extern int gic_get_c0_perfcount_int(void);
+extern int gic_get_c0_fdc_int(void);
#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index faf433a..fcea4e4 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -3,9 +3,6 @@
/*
* Core internal functions to deal with irq descriptors
- *
- * This include will move to kernel/irq once we cleaned up the tree.
- * For now it's included from <linux/irq.h>
*/
struct irq_affinity_notify;
@@ -17,7 +14,7 @@ struct pt_regs;
/**
* struct irq_desc - interrupt descriptor
- * @irq_data: per irq and chip data passed down to chip functions
+ * @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler
* @preflow_handler: handler called before the flow handler (currently used by sparc)
@@ -47,6 +44,7 @@ struct pt_regs;
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
+ struct irq_common_data irq_common_data;
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
@@ -78,6 +76,7 @@ struct irq_desc {
#ifdef CONFIG_PM_SLEEP
unsigned int nr_actions;
unsigned int no_suspend_depth;
+ unsigned int cond_suspend_depth;
unsigned int force_resume_depth;
#endif
#ifdef CONFIG_PROC_FS
@@ -88,10 +87,29 @@ struct irq_desc {
const char *name;
} ____cacheline_internodealigned_in_smp;
-#ifndef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_SPARSE_IRQ
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
+#else
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif
+static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
+{
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ return irq_to_desc(data->irq);
+#else
+ return container_of(data, struct irq_desc, irq_data);
+#endif
+}
+
+static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
+{
+ return desc->irq_data.irq;
+}
+
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
return &desc->irq_data;
@@ -177,6 +195,47 @@ __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
desc->name = name;
}
+/**
+ * irq_set_handler_locked - Set irq handler from a locked region
+ * @data: Pointer to the irq_data structure which identifies the irq
+ * @handler: Flow control handler function for this interrupt
+ *
+ * Sets the handler in the irq descriptor associated to @data.
+ *
+ * Must be called with irq_desc locked and valid parameters. Typical
+ * call site is the irq_set_type() callback.
+ */
+static inline void irq_set_handler_locked(struct irq_data *data,
+ irq_flow_handler_t handler)
+{
+ struct irq_desc *desc = irq_data_to_desc(data);
+
+ desc->handle_irq = handler;
+}
+
+/**
+ * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
+ * @data: Pointer to the irq_data structure for which the chip is set
+ * @chip: Pointer to the new irq chip
+ * @handler: Flow control handler function for this interrupt
+ * @name: Name of the interrupt
+ *
+ * Replaces the irq chip at the proper hierarchy level in @data and
+ * sets the handler and name in the associated irq descriptor.
+ *
+ * Must be called with irq_desc locked and valid parameters.
+ */
+static inline void
+irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
+ irq_flow_handler_t handler, const char *name)
+{
+ struct irq_desc *desc = irq_data_to_desc(data);
+
+ desc->handle_irq = handler;
+ desc->name = name;
+ data->chip = chip;
+}
+
static inline int irq_balancing_disabled(unsigned int irq)
{
struct irq_desc *desc;
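The kernel-doc above names irq_set_type() as the typical call site for the new locked
helpers; a minimal sketch of such a callback (not from this patch, foo_irq_set_type()
is hypothetical):

static int foo_irq_set_type(struct irq_data *d, unsigned int type)
{
	irqd_set_trigger_type(d, type);

	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return 0;
}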
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 676d730..744ac0e 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -258,6 +258,10 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, struct irq_chip *chip,
+ void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
unsigned int flags, unsigned int size,
@@ -281,10 +285,6 @@ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_chip *chip,
void *chip_data);
-extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
- void *handler_data, const char *handler_name);
extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
extern void irq_domain_free_irqs_common(struct irq_domain *domain,
unsigned int virq,
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index d176d65..5dd1272 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -85,7 +85,7 @@
* The local_irq_*() APIs are equal to the raw_local_irq*()
* if !TRACE_IRQFLAGS.
*/
-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+#ifdef CONFIG_TRACE_IRQFLAGS
#define local_irq_enable() \
do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
#define local_irq_disable() \
@@ -107,22 +107,6 @@
raw_local_irq_restore(flags); \
} \
} while (0)
-#define local_save_flags(flags) \
- do { \
- raw_local_save_flags(flags); \
- } while (0)
-
-#define irqs_disabled_flags(flags) \
- ({ \
- raw_irqs_disabled_flags(flags); \
- })
-
-#define irqs_disabled() \
- ({ \
- unsigned long _flags; \
- raw_local_save_flags(_flags); \
- raw_irqs_disabled_flags(_flags); \
- })
#define safe_halt() \
do { \
@@ -131,7 +115,7 @@
} while (0)
-#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
+#else /* !CONFIG_TRACE_IRQFLAGS */
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
@@ -140,11 +124,28 @@
raw_local_irq_save(flags); \
} while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
-#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0)
-#define irqs_disabled() (raw_irqs_disabled())
-#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags))
#define safe_halt() do { raw_safe_halt(); } while (0)
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+#define local_save_flags(flags) raw_local_save_flags(flags)
+
+/*
+ * Some architectures don't define arch_irqs_disabled(), so although either
+ * definition would be fine, we need to keep using different ones for the
+ * time being to avoid build issues.
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+#define irqs_disabled() \
+ ({ \
+ unsigned long _flags; \
+ raw_local_save_flags(_flags); \
+ raw_irqs_disabled_flags(_flags); \
+ })
+#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
+#define irqs_disabled() raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+
#endif
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index fdd5cc1..9669bf9 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -23,12 +23,6 @@ unsigned int irq_get_next_irq(unsigned int offset);
; \
else
-#ifdef CONFIG_SMP
-#define irq_node(irq) (irq_get_irq_data(irq)->node)
-#else
-#define irq_node(irq) 0
-#endif
-
# define for_each_active_irq(irq) \
for (irq = irq_get_next_irq(0); irq < nr_irqs; \
irq = irq_get_next_irq(irq + 1))
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 31229e0..d326152 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -956,15 +956,6 @@ void __log_wait_for_space(journal_t *journal);
extern void __journal_drop_transaction(journal_t *, transaction_t *);
extern int cleanup_journal_tail(journal_t *);
-/* Debugging code only: */
-
-#define jbd_ENOSYS() \
-do { \
- printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
- current->state = TASK_UNINTERRUPTIBLE; \
- schedule(); \
-} while (1)
-
/*
* is_journal_abort
*
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 704b9a5..edb640a 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
unsigned long *block);
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
/* Commit management */
@@ -1157,7 +1157,7 @@ extern int jbd2_journal_recover (journal_t *journal);
extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
-extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
+extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
unsigned long, int);
extern void __jbd2_journal_abort_hard (journal_t *);
extern void jbd2_journal_abort (journal_t *, int);
@@ -1251,15 +1251,6 @@ void __jbd2_log_wait_for_space(journal_t *journal);
extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);
-/* Debugging code only: */
-
-#define jbd_ENOSYS() \
-do { \
- printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
- current->state = TASK_UNINTERRUPTIBLE; \
- schedule(); \
-} while (1)
-
/*
* is_journal_abort
*
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 47cb09e..348c6f4 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
}
-/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
-static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
- a += JHASH_INITVAL;
- b += JHASH_INITVAL;
+ a += initval;
+ b += initval;
c += initval;
__jhash_final(a, b, c);
@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
return c;
}
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+ return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
- return jhash_3words(a, b, 0, initval);
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
static inline u32 jhash_1word(u32 a, u32 initval)
{
- return jhash_3words(a, 0, 0, initval);
+ return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}
#endif /* _LINUX_JHASH_H */
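A usage sketch (not from this patch) of jhash_3words() hashing an IPv4 flow key; the
helper and field names are illustrative:

static inline u32 foo_flow_hash(__be32 saddr, __be32 daddr,
				__be16 sport, __be16 dport, u32 seed)
{
	return jhash_3words((__force u32)saddr, (__force u32)daddr,
			    ((__force u32)sport << 16) | (__force u32)dport,
			    seed);
}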
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c367cbd..535fd3b 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -7,6 +7,7 @@
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/param.h> /* for HZ */
+#include <generated/timeconst.h>
/*
* The following defines establish the engineering parameters of the PLL
@@ -288,8 +289,133 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
-extern unsigned long msecs_to_jiffies(const unsigned int m);
-extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long __msecs_to_jiffies(const unsigned int m);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+/*
+ * HZ is equal to or smaller than 1000, and 1000 is a nice round
+ * multiple of HZ, divide with the factor between them, but round
+ * upwards:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+}
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+/*
+ * HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
+ * simply multiply with the factor between them.
+ *
+ * But first make sure the multiplication result cannot overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+ return m * (HZ / MSEC_PER_SEC);
+}
+#else
+/*
+ * Generic case - multiply, round and divide. But first check that, if
+ * we are doing a net multiplication, we wouldn't overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+ if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+
+ return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
+}
+#endif
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m: time in milliseconds
+ *
+ * conversion is done as follows:
+ *
+ * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
+ *
+ * - 'too large' values [that would result in larger than
+ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ * the input value by a factor or dividing it by a factor and
+ * handling any 32-bit overflows.
+ * for the details see __msecs_to_jiffies()
+ *
+ * msecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * directly here and from __msecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+ if (__builtin_constant_p(m)) {
+ if ((int)m < 0)
+ return MAX_JIFFY_OFFSET;
+ return _msecs_to_jiffies(m);
+ } else {
+ return __msecs_to_jiffies(m);
+ }
+}
+
+extern unsigned long __usecs_to_jiffies(const unsigned int u);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+}
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return u * (HZ / USEC_PER_SEC);
+}
+#else
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+ return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
+ >> USEC_TO_HZ_SHR32;
+}
+#endif
+
+/**
+ * usecs_to_jiffies: - convert microseconds to jiffies
+ * @u: time in microseconds
+ *
+ * conversion is done as follows:
+ *
+ * - 'too large' values [that would result in larger than
+ * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ * the input value by a factor or dividing it by a factor and
+ * handling any 32-bit overflows as for msecs_to_jiffies.
+ *
+ * usecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __usecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * directly here and from __usecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long usecs_to_jiffies(const unsigned int u)
+{
+ if (__builtin_constant_p(u)) {
+ if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+ return _usecs_to_jiffies(u);
+ } else {
+ return __usecs_to_jiffies(u);
+ }
+}
+
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value);
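A small sketch (not from this patch) showing why the constant check matters: a literal
argument is folded at compile time through _msecs_to_jiffies(), while a variable falls
back to the out-of-line __msecs_to_jiffies(); foo_wait() and foo_poll_ms are hypothetical:

static void foo_wait(unsigned int foo_poll_ms)
{
	/* constant: converted at compile time */
	schedule_timeout_uninterruptible(msecs_to_jiffies(100));

	/* not constant: converted at runtime by __msecs_to_jiffies() */
	schedule_timeout_uninterruptible(msecs_to_jiffies(foo_poll_ms));
}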
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 98f923b6..f4de473 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -45,6 +45,12 @@
* same as using STATIC_KEY_INIT_FALSE.
*/
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+# define HAVE_JUMP_LABEL
+#endif
+
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/bug.h>
@@ -55,7 +61,7 @@ extern bool static_key_initialized;
"%s used before call to jump_label_init", \
__func__)
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
struct static_key {
atomic_t enabled;
@@ -66,13 +72,18 @@ struct static_key {
#endif
};
-# include <asm/jump_label.h>
-# define HAVE_JUMP_LABEL
#else
struct static_key {
atomic_t enabled;
};
-#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
+#endif /* HAVE_JUMP_LABEL */
+#endif /* __ASSEMBLY__ */
+
+#ifdef HAVE_JUMP_LABEL
+#include <asm/jump_label.h>
+#endif
+
+#ifndef __ASSEMBLY__
enum jump_label_type {
JUMP_LABEL_DISABLE = 0,
@@ -203,3 +214,5 @@ static inline bool static_key_enabled(struct static_key *key)
}
#endif /* _LINUX_JUMP_LABEL_H */
+
+#endif /* __ASSEMBLY__ */
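For context, a minimal static key sketch of the C API these __ASSEMBLY__ guards protect
(not part of this patch; the names are hypothetical and assume the pre-static_branch
API of this kernel generation):

static struct static_key foo_key = STATIC_KEY_INIT_FALSE;

static void foo_hot_path(void)
{
	if (static_key_false(&foo_key))	/* patched nop until enabled */
		pr_debug("foo slow path\n");
}

static void foo_enable(void)
{
	static_key_slow_inc(&foo_key);
}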
diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h
new file mode 100644
index 0000000..e7f1cc7
--- /dev/null
+++ b/include/linux/jz4780-nemc.h
@@ -0,0 +1,43 @@
+/*
+ * JZ4780 NAND/external memory controller (NEMC)
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex@alex-smith.me.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_JZ4780_NEMC_H__
+#define __LINUX_JZ4780_NEMC_H__
+
+#include <linux/types.h>
+
+struct device;
+
+/*
+ * Number of NEMC banks. Note that there are actually 6, but they are numbered
+ * from 1.
+ */
+#define JZ4780_NEMC_NUM_BANKS 7
+
+/**
+ * enum jz4780_nemc_bank_type - device types which can be connected to a bank
+ * @JZ4780_NEMC_BANK_SRAM: SRAM
+ * @JZ4780_NEMC_BANK_NAND: NAND
+ */
+enum jz4780_nemc_bank_type {
+ JZ4780_NEMC_BANK_SRAM,
+ JZ4780_NEMC_BANK_NAND,
+};
+
+extern unsigned int jz4780_nemc_num_banks(struct device *dev);
+
+extern void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
+ enum jz4780_nemc_bank_type type);
+extern void jz4780_nemc_assert(struct device *dev, unsigned int bank,
+ bool assert);
+
+#endif /* __LINUX_JZ4780_NEMC_H__ */
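A hypothetical consumer of the NEMC interface above (not part of this patch), e.g. a
NAND driver claiming bank 1:

static int foo_nand_attach(struct device *dev)
{
	if (jz4780_nemc_num_banks(dev) == 0)
		return -ENODEV;

	/* banks are numbered from 1, see JZ4780_NEMC_NUM_BANKS above */
	jz4780_nemc_set_type(dev, 1, JZ4780_NEMC_BANK_NAND);
	return 0;
}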
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
new file mode 100644
index 0000000..5486d77
--- /dev/null
+++ b/include/linux/kasan.h
@@ -0,0 +1,88 @@
+#ifndef _LINUX_KASAN_H
+#define _LINUX_KASAN_H
+
+#include <linux/types.h>
+
+struct kmem_cache;
+struct page;
+struct vm_struct;
+
+#ifdef CONFIG_KASAN
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+#include <asm/kasan.h>
+#include <linux/sched.h>
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ + KASAN_SHADOW_OFFSET;
+}
+
+/* Enable reporting bugs after kasan_disable_current() */
+static inline void kasan_enable_current(void)
+{
+ current->kasan_depth++;
+}
+
+/* Disable reporting bugs for current task */
+static inline void kasan_disable_current(void)
+{
+ current->kasan_depth--;
+}
+
+void kasan_unpoison_shadow(const void *address, size_t size);
+
+void kasan_alloc_pages(struct page *page, unsigned int order);
+void kasan_free_pages(struct page *page, unsigned int order);
+
+void kasan_poison_slab(struct page *page);
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+
+void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kfree_large(const void *ptr);
+void kasan_kfree(void *ptr);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
+void kasan_krealloc(const void *object, size_t new_size);
+
+void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_free(struct kmem_cache *s, void *object);
+
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_free_shadow(const struct vm_struct *vm);
+
+#else /* CONFIG_KASAN */
+
+static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
+
+static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+
+static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+ void *object) {}
+static inline void kasan_poison_object_data(struct kmem_cache *cache,
+ void *object) {}
+
+static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size) {}
+static inline void kasan_krealloc(const void *object, size_t new_size) {}
+
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+
+#endif /* CONFIG_KASAN */
+
+#endif /* LINUX_KASAN_H */
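A worked reading of kasan_mem_to_shadow() (not from this patch): with
KASAN_SHADOW_SCALE_SHIFT == 3 every shadow byte covers 8 bytes of memory, so
shadow = (addr >> 3) + KASAN_SHADOW_OFFSET, and addr through addr + 7 all map to the
same shadow byte. A hypothetical helper making that explicit:

static inline bool foo_same_shadow_byte(const void *a, const void *b)
{
	return kasan_mem_to_shadow(a) == kasan_mem_to_shadow(b);
}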
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index be342b9..b33c779 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -23,14 +23,6 @@
#define ___config_enabled(__ignored, val, ...) val
/*
- * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
- *
- */
-#define IS_ENABLED(option) \
- (config_enabled(option) || config_enabled(option##_MODULE))
-
-/*
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
@@ -43,4 +35,20 @@
*/
#define IS_MODULE(option) config_enabled(option##_MODULE)
+/*
+ * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
+ * code can call a function defined in code compiled based on CONFIG_FOO.
+ * This is similar to IS_ENABLED(), but returns false when invoked from
+ * built-in code when CONFIG_FOO is set to 'm'.
+ */
+#define IS_REACHABLE(option) (config_enabled(option) || \
+ (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+
+/*
+ * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
+ * 0 otherwise.
+ */
+#define IS_ENABLED(option) \
+ (IS_BUILTIN(option) || IS_MODULE(option))
+
#endif /* __LINUX_KCONFIG_H */
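A sketch of the IS_ENABLED()/IS_REACHABLE() distinction (not from this patch): with
CONFIG_FOO=m, built-in code sees IS_ENABLED(CONFIG_FOO) as 1 but IS_REACHABLE(CONFIG_FOO)
as 0, so the call below is compiled out there; foo_module_hook() is hypothetical and
assumed to be declared elsewhere:

static inline void foo_maybe_notify(void)
{
	if (IS_REACHABLE(CONFIG_FOO))
		foo_module_hook();	/* provided by the foo module */
}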
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 75ae2e2..a19bcf9 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -156,8 +156,14 @@ typedef enum {
KDB_REASON_SYSTEM_NMI, /* In NMI due to SYSTEM cmd; regs valid */
} kdb_reason_t;
+enum kdb_msgsrc {
+ KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */
+ KDB_MSGSRC_PRINTK, /* trapped from printk() */
+};
+
extern int kdb_trap_printk;
-extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
+extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
+ va_list args);
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5449d2f..5582410 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -103,6 +103,18 @@
(((__x) - ((__d) / 2)) / (__d)); \
} \
)
+/*
+ * Same as above but for u64 dividends. The divisor must be a 32-bit
+ * number.
+ */
+#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
+{ \
+ typeof(divisor) __d = divisor; \
+ unsigned long long _tmp = (x) + (__d) / 2; \
+ do_div(_tmp, __d); \
+ _tmp; \
+} \
+)
/*
* Multiplies an integer by a fraction, while avoiding unnecessary
@@ -176,7 +188,7 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
-# define sched_annotate_sleep() __set_current_state(TASK_RUNNING)
+# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
@@ -232,7 +244,8 @@ static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
-void might_fault(void);
+#define might_fault() __might_fault(__FILE__, __LINE__)
+void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
@@ -398,7 +411,8 @@ extern __printf(3, 0)
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+extern __printf(2, 0)
+char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
@@ -426,6 +440,9 @@ extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
/*
* Only to be used by arch init code. If the user over-wrote the default
* CONFIG_PANIC_TIMEOUT, honor it.
@@ -471,6 +488,7 @@ extern enum system_states {
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
+#define TAINT_LIVEPATCH 15
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
@@ -519,12 +537,6 @@ bool mac_pton(const char *s, u8 *mac);
*
* Most likely, you want to use tracing_on/tracing_off.
*/
-#ifdef CONFIG_RING_BUFFER
-/* trace_off_permanent stops recording with no way to bring it back */
-void tracing_off_permanent(void);
-#else
-static inline void tracing_off_permanent(void) { }
-#endif
enum ftrace_dump_mode {
DUMP_NONE,
@@ -668,10 +680,10 @@ do { \
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
-extern int
+extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
-extern int
+extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
@@ -691,7 +703,7 @@ int trace_printk(const char *fmt, ...)
{
return 0;
}
-static inline int
+static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
@@ -799,22 +811,21 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
-/* Trap pasters of __FUNCTION__ at compile-time */
-#define __FUNCTION__ (__func__)
-
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
-#define VERIFY_OCTAL_PERMISSIONS(perms) \
- (BUILD_BUG_ON_ZERO((perms) < 0) + \
- BUILD_BUG_ON_ZERO((perms) > 0777) + \
- /* User perms >= group perms >= other perms */ \
- BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
- BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
- /* Other writable? Generally considered a bad idea. */ \
- BUILD_BUG_ON_ZERO((perms) & 2) + \
+#define VERIFY_OCTAL_PERMISSIONS(perms) \
+ (BUILD_BUG_ON_ZERO((perms) < 0) + \
+ BUILD_BUG_ON_ZERO((perms) > 0777) + \
+ /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
+ BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
+ BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
+ /* USER_WRITABLE >= GROUP_WRITABLE */ \
+ BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
+ /* OTHER_WRITABLE? Generally considered a bad idea. */ \
+ BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
#endif
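A worked example of DIV_ROUND_CLOSEST_ULL() (not from this patch): the macro computes
(x + divisor/2) / divisor via do_div(), so DIV_ROUND_CLOSEST_ULL(1000ULL, 300) == 3,
DIV_ROUND_CLOSEST_ULL(1000ULL, 400) == 3 and DIV_ROUND_CLOSEST_ULL(7ULL, 2) == 4.
A hypothetical use converting a 64-bit rate to kHz:

static u32 foo_rate_to_khz(u64 rate_hz)
{
	return DIV_ROUND_CLOSEST_ULL(rate_hz, 1000);
}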
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index d4e01b3..123be25 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -43,9 +43,9 @@ enum kernfs_node_flag {
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
- KERNFS_STATIC_NAME = 0x0200,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
+ KERNFS_EMPTY_DIR = 0x1000,
};
/* @flags for kernfs_create_root() */
@@ -278,6 +278,7 @@ void kernfs_put(struct kernfs_node *kn);
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
+struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn);
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
@@ -286,12 +287,13 @@ void kernfs_destroy_root(struct kernfs_root *root);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
void *priv, const void *ns);
+struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
+ const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
const char *name,
umode_t mode, loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
- bool name_is_static,
struct lock_class_key *key);
struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
@@ -354,6 +356,10 @@ static inline struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
{ return NULL; }
+static inline struct inode *
+kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
+{ return NULL; }
+
static inline struct kernfs_root *
kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
void *priv)
@@ -369,8 +375,7 @@ kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
umode_t mode, loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns, bool name_is_static,
- struct lock_class_key *key)
+ void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
@@ -439,7 +444,7 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
key = (struct lock_class_key *)&ops->lockdep_key;
#endif
return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
- false, key);
+ key);
}
static inline struct kernfs_node *
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9d957b7..e804306 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -1,6 +1,19 @@
#ifndef LINUX_KEXEC_H
#define LINUX_KEXEC_H
+#define IND_DESTINATION_BIT 0
+#define IND_INDIRECTION_BIT 1
+#define IND_DONE_BIT 2
+#define IND_SOURCE_BIT 3
+
+#define IND_DESTINATION (1 << IND_DESTINATION_BIT)
+#define IND_INDIRECTION (1 << IND_INDIRECTION_BIT)
+#define IND_DONE (1 << IND_DONE_BIT)
+#define IND_SOURCE (1 << IND_SOURCE_BIT)
+#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
+
+#if !defined(__ASSEMBLY__)
+
#include <uapi/linux/kexec.h>
#ifdef CONFIG_KEXEC
@@ -27,6 +40,10 @@
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
#endif
+#ifndef KEXEC_CONTROL_MEMORY_GFP
+#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#endif
+
#ifndef KEXEC_CONTROL_PAGE_SIZE
#error KEXEC_CONTROL_PAGE_SIZE not defined
#endif
@@ -64,10 +81,6 @@
*/
typedef unsigned long kimage_entry_t;
-#define IND_DESTINATION 0x1
-#define IND_INDIRECTION 0x2
-#define IND_DONE 0x4
-#define IND_SOURCE 0x8
struct kexec_segment {
/*
@@ -122,8 +135,6 @@ struct kimage {
kimage_entry_t *entry;
kimage_entry_t *last_entry;
- unsigned long destination;
-
unsigned long start;
struct page *control_code_page;
struct page *swap_page;
@@ -313,4 +324,7 @@ struct task_struct;
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
#endif /* CONFIG_KEXEC */
+
+#endif /* !defined(__ASSEMBLY__) */
+
#endif /* LINUX_KEXEC_H */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index e705467..d0a1f99 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -28,7 +28,8 @@
extern void kmemleak_init(void) __ref;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
-extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
+extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp) __ref;
extern void kmemleak_free(const void *ptr) __ref;
extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
@@ -71,7 +72,8 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
gfp_t gfp)
{
}
-static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp)
{
}
static inline void kmemleak_free(const void *ptr)
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 2d61b90..637f670 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -80,8 +80,9 @@ struct kobject {
extern __printf(2, 3)
int kobject_set_name(struct kobject *kobj, const char *name, ...);
-extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
- va_list vargs);
+extern __printf(2, 0)
+int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+ va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 5297f9f..1ab5475 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -308,7 +308,8 @@ struct optimized_kprobe {
/* Architecture dependent functions for direct jump optimization */
extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
-extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
+ struct kprobe *orig);
extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
extern void arch_optimize_kprobes(struct list_head *oplist);
extern void arch_unoptimize_kprobes(struct list_head *oplist,
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 3be6bb1..7ae216a 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -35,18 +35,6 @@ static inline void ksm_exit(struct mm_struct *mm)
__ksm_exit(mm);
}
-/*
- * A KSM page is one of those write-protected "shared pages" or "merged pages"
- * which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
- * anon_vma, but to that page's node of the stable tree.
- */
-static inline int PageKsm(struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
-}
-
static inline struct stable_node *page_stable_node(struct page *page)
{
return PageKsm(page) ? page_rmapping(page) : NULL;
@@ -87,11 +75,6 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
-static inline int PageKsm(struct page *page)
-{
- return 0;
-}
-
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index c9d645a..2b6a204 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -166,9 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
}
#if BITS_PER_LONG < 64
-extern u64 ktime_divns(const ktime_t kt, s64 div);
+extern s64 __ktime_divns(const ktime_t kt, s64 div);
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
+{
+ /*
+ * Negative divisors could cause an infinite loop,
+ * so bug out here.
+ */
+ BUG_ON(div < 0);
+ if (__builtin_constant_p(div) && !(div >> 32)) {
+ s64 ns = kt.tv64;
+ u64 tmp = ns < 0 ? -ns : ns;
+
+ do_div(tmp, div);
+ return ns < 0 ? -tmp : tmp;
+ } else {
+ return __ktime_divns(kt, div);
+ }
+}
#else /* BITS_PER_LONG < 64 */
-# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
+{
+ /*
+ * The 32-bit implementation cannot handle negative divisors,
+ * so catch them on 64-bit as well.
+ */
+ WARN_ON(div < 0);
+ return kt.tv64 / div;
+}
#endif
static inline s64 ktime_to_us(const ktime_t kt)
@@ -186,6 +211,11 @@ static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
return ktime_to_us(ktime_sub(later, earlier));
}
+static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
+{
+ return ktime_to_ms(ktime_sub(later, earlier));
+}
+
static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
return ktime_add_ns(kt, usec * NSEC_PER_USEC);
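A small sketch (not from this patch) of the new ktime_ms_delta() helper timing an
operation; foo_do_work() is hypothetical:

static void foo_timed_work(void)
{
	ktime_t start = ktime_get();

	foo_do_work();
	pr_info("foo work took %lld ms\n",
		ktime_ms_delta(ktime_get(), start));
}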
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 26f1060..05e99b8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -33,10 +33,6 @@
#include <asm/kvm_host.h>
-#ifndef KVM_MMIO_SIZE
-#define KVM_MMIO_SIZE 8
-#endif
-
/*
* The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
* in kvm, other bits are visible for userspace which are defined in
@@ -48,6 +44,10 @@
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
+#ifndef KVM_ADDRESS_SPACE_NUM
+#define KVM_ADDRESS_SPACE_NUM 1
+#endif
+
/*
* For the normal pfn, the highest 12 bits should be zero,
* so we can mask bit 62 ~ bit 52 to indicate the error pfn,
@@ -138,6 +138,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_ENABLE_IBS 23
#define KVM_REQ_DISABLE_IBS 24
#define KVM_REQ_APIC_PAGE_RELOAD 25
+#define KVM_REQ_SMI 26
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -169,12 +170,12 @@ enum kvm_bus {
KVM_NR_BUSES
};
-int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val);
-int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
- int len, const void *val, long cookie);
-int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
- void *val);
+int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
+ gpa_t addr, int len, const void *val, long cookie);
+int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -200,17 +201,6 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
-/*
- * Carry out a gup that requires IO. Allow the mm to relinquish the mmap
- * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL
- * controls whether we retry the gup one more time to completion in that case.
- * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp
- * handler.
- */
-int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, bool write_fault,
- struct page **pagep);
-
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
@@ -245,6 +235,7 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
+ unsigned char fpu_counter;
wait_queue_head_t wq;
struct pid *pid;
int sigset_active;
@@ -344,6 +335,13 @@ struct kvm_kernel_irq_routing_entry {
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
+#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif
+
/*
* Note:
* memslots are not sorted by id anymore, please use id_to_memslot()
@@ -362,7 +360,7 @@ struct kvm {
spinlock_t mmu_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots *memslots;
+ struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
@@ -477,13 +475,25 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
-static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- return rcu_dereference_check(kvm->memslots,
+ return rcu_dereference_check(kvm->memslots[as_id],
srcu_read_lock_held(&kvm->srcu)
|| lockdep_is_held(&kvm->slots_lock));
}
+static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+{
+ return __kvm_memslots(kvm, 0);
+}
+
+static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
+{
+ int as_id = kvm_arch_vcpu_memslots_id(vcpu);
+
+ return __kvm_memslots(vcpu->kvm, as_id);
+}
+
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
@@ -515,21 +525,22 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm);
+void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
@@ -539,8 +550,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
- int nr_pages);
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -553,13 +564,13 @@ void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
- bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
+ bool *async, bool write_fault, bool *writable);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
@@ -588,6 +599,25 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
+int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+ int len);
+int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
+ int offset, int len);
+int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+ unsigned long len);
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
@@ -611,6 +641,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty);
+
+int kvm_get_dirty_log_protect(struct kvm *kvm,
+ struct kvm_dirty_log *log, bool *is_dirty);
+
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset,
+ unsigned long mask);
+
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log);
@@ -652,7 +691,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void);
@@ -664,7 +703,6 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
void *kvm_kvzalloc(unsigned long size);
-void kvm_kvfree(const void *addr);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
@@ -696,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false;
}
#endif
+#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+void kvm_arch_start_assignment(struct kvm *kvm);
+void kvm_arch_end_assignment(struct kvm *kvm);
+bool kvm_arch_has_assigned_device(struct kvm *kvm);
+#else
+static inline void kvm_arch_start_assignment(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_end_assignment(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+ return false;
+}
+#endif
static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
@@ -706,6 +762,20 @@ static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
#endif
}
+#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
+/*
+ * returns true if the virtual interrupt controller is initialized and
+ * ready to accept virtual IRQ. On some architectures the virtual interrupt
+ * controller is dynamically instantiated and this is not always true.
+ */
+bool kvm_arch_intc_initialized(struct kvm *kvm);
+#else
+static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+ return true;
+}
+#endif
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
@@ -755,16 +825,10 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
}
#endif
-static inline void kvm_guest_enter(void)
+/* must be called with irqs disabled */
+static inline void __kvm_guest_enter(void)
{
- unsigned long flags;
-
- BUG_ON(preemptible());
-
- local_irq_save(flags);
guest_enter();
- local_irq_restore(flags);
-
 /* KVM does not hold any references to RCU-protected data when it
 * switches the CPU into guest mode. In fact switching to guest mode
 * is very similar to exiting to userspace from an RCU point of view. In
@@ -772,7 +836,23 @@ static inline void kvm_guest_enter(void)
 * one time slice). Let's treat guest mode as a quiescent state, just like
* we do with user-mode execution.
*/
- rcu_virt_note_context_switch(smp_processor_id());
+ if (!context_tracking_cpu_is_enabled())
+ rcu_virt_note_context_switch(smp_processor_id());
+}
+
+/* must be called with irqs disabled */
+static inline void __kvm_guest_exit(void)
+{
+ guest_exit();
+}
+
+static inline void kvm_guest_enter(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __kvm_guest_enter();
+ local_irq_restore(flags);
}
static inline void kvm_guest_exit(void)
@@ -780,7 +860,7 @@ static inline void kvm_guest_exit(void)
unsigned long flags;
local_irq_save(flags);
- guest_exit();
+ __kvm_guest_exit();
local_irq_restore(flags);
}
@@ -975,11 +1055,16 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
+static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
+}
+
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
#else
@@ -1042,6 +1127,8 @@ void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
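Turning kvm->memslots into an array indexed by an address-space id, with kvm_vcpu_memslots() selecting the set via kvm_arch_vcpu_memslots_id(), is what lets a vCPU see more than one guest memory map (KVM_ADDRESS_SPACE_NUM stays 1 unless the architecture overrides it). A simplified, stand-alone model of that lookup; all names below are illustrative, not the kernel structures:

#include <stdio.h>

#define ADDRESS_SPACE_NUM 2             /* stands in for KVM_ADDRESS_SPACE_NUM */

struct memslots { const char *tag; };

struct vm {
        struct memslots *memslots[ADDRESS_SPACE_NUM];
};

struct vcpu {
        struct vm *vm;
        int as_id;                      /* stands in for kvm_arch_vcpu_memslots_id() */
};

static struct memslots *vm_memslots(struct vm *vm, int as_id)
{
        return vm->memslots[as_id];     /* mirrors __kvm_memslots() */
}

static struct memslots *vcpu_memslots(struct vcpu *vcpu)
{
        return vm_memslots(vcpu->vm, vcpu->as_id);
}

int main(void)
{
        struct memslots normal = { "normal" }, alternate = { "alternate" };
        struct vm vm = { { &normal, &alternate } };
        struct vcpu vcpu = { &vm, 1 };

        printf("vcpu sees the %s memslots\n", vcpu_memslots(&vcpu)->tag);
        return 0;
}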
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 931da7e..1b47a18 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -28,6 +28,7 @@ struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
+struct kvm_memslots;
enum kvm_mr_change;
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
index 7bf01d7..1ce79a7 100644
--- a/include/linux/lcm.h
+++ b/include/linux/lcm.h
@@ -4,5 +4,6 @@
#include <linux/compiler.h>
unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
+unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
#endif /* _LCM_H */
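lcm_not_zero() is aimed at callers for which a zero operand means "no constraint", where a plain lcm() of zero would wipe out the other value; the expected behaviour is to fall back to whichever argument is non-zero. A stand-alone sketch of that behaviour using the classic gcd-based lcm (the *_sketch helpers are illustrative, not the kernel implementation):

#include <stdio.h>

static unsigned long gcd_sketch(unsigned long a, unsigned long b)
{
        while (b) {
                unsigned long t = a % b;

                a = b;
                b = t;
        }
        return a;
}

static unsigned long lcm_sketch(unsigned long a, unsigned long b)
{
        if (!a || !b)
                return 0;
        return (a / gcd_sketch(a, b)) * b;
}

static unsigned long lcm_not_zero_sketch(unsigned long a, unsigned long b)
{
        unsigned long l = lcm_sketch(a, b);

        return l ? l : (b ? b : a);     /* never turn a non-zero input into 0 */
}

int main(void)
{
        /* prints "12 6": a zero operand no longer zeroes the combined result */
        printf("%lu %lu\n", lcm_not_zero_sketch(4, 6), lcm_not_zero_sketch(0, 6));
        return 0;
}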
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
new file mode 100644
index 0000000..e97966d
--- /dev/null
+++ b/include/linux/led-class-flash.h
@@ -0,0 +1,192 @@
+/*
+ * LED Flash class interface
+ *
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_FLASH_LEDS_H_INCLUDED
+#define __LINUX_FLASH_LEDS_H_INCLUDED
+
+#include <linux/leds.h>
+
+struct device_node;
+struct led_classdev_flash;
+
+/*
+ * Supported LED fault bits - must be kept in sync
+ * with V4L2_FLASH_FAULT bits.
+ */
+#define LED_FAULT_OVER_VOLTAGE (1 << 0)
+#define LED_FAULT_TIMEOUT (1 << 1)
+#define LED_FAULT_OVER_TEMPERATURE (1 << 2)
+#define LED_FAULT_SHORT_CIRCUIT (1 << 3)
+#define LED_FAULT_OVER_CURRENT (1 << 4)
+#define LED_FAULT_INDICATOR (1 << 5)
+#define LED_FAULT_UNDER_VOLTAGE (1 << 6)
+#define LED_FAULT_INPUT_VOLTAGE (1 << 7)
+#define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8)
+#define LED_NUM_FLASH_FAULTS 9
+
+#define LED_FLASH_SYSFS_GROUPS_SIZE 5
+
+struct led_flash_ops {
+ /* set flash brightness */
+ int (*flash_brightness_set)(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
+ /* get flash brightness */
+ int (*flash_brightness_get)(struct led_classdev_flash *fled_cdev,
+ u32 *brightness);
+ /* set flash strobe state */
+ int (*strobe_set)(struct led_classdev_flash *fled_cdev, bool state);
+ /* get flash strobe state */
+ int (*strobe_get)(struct led_classdev_flash *fled_cdev, bool *state);
+ /* set flash timeout */
+ int (*timeout_set)(struct led_classdev_flash *fled_cdev, u32 timeout);
+ /* get the flash LED fault */
+ int (*fault_get)(struct led_classdev_flash *fled_cdev, u32 *fault);
+};
+
+/*
+ * Current value of a flash setting along
+ * with its constraints.
+ */
+struct led_flash_setting {
+ /* maximum allowed value */
+ u32 min;
+ /* maximum allowed value */
+ u32 max;
+ /* step value */
+ u32 step;
+ /* current value */
+ u32 val;
+};
+
+struct led_classdev_flash {
+ /* led class device */
+ struct led_classdev led_cdev;
+
+ /* flash led specific ops */
+ const struct led_flash_ops *ops;
+
+ /* flash brightness value in microamperes along with its constraints */
+ struct led_flash_setting brightness;
+
+ /* flash timeout value in microseconds along with its constraints */
+ struct led_flash_setting timeout;
+
+ /* LED Flash class sysfs groups */
+ const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE];
+};
+
+static inline struct led_classdev_flash *lcdev_to_flcdev(
+ struct led_classdev *lcdev)
+{
+ return container_of(lcdev, struct led_classdev_flash, led_cdev);
+}
+
+/**
+ * led_classdev_flash_register - register a new object of led_classdev class
+ * with support for flash LEDs
+ * @parent: the flash LED to register
+ * @fled_cdev: the led_classdev_flash structure for this device
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_classdev_flash_unregister - unregisters an object of led_classdev class
+ * with support for flash LEDs
+ * @fled_cdev: the flash LED to unregister
+ *
+ * Unregister an object previously registered via led_classdev_flash_register.
+ */
+extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_set_flash_strobe - setup flash strobe
+ * @fled_cdev: the flash LED to set strobe on
+ * @state: 1 - strobe flash, 0 - stop flash strobe
+ *
+ * Strobe the flash LED.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
+ bool state)
+{
+ return fled_cdev->ops->strobe_set(fled_cdev, state);
+}
+
+/**
+ * led_get_flash_strobe - get flash strobe status
+ * @fled_cdev: the flash LED to query
+ * @state: 1 - flash is strobing, 0 - flash is off
+ *
+ * Check whether the flash is strobing at the moment.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
+ bool *state)
+{
+ if (fled_cdev->ops->strobe_get)
+ return fled_cdev->ops->strobe_get(fled_cdev, state);
+
+ return -EINVAL;
+}
+
+/**
+ * led_set_flash_brightness - set flash LED brightness
+ * @fled_cdev: the flash LED to set
+ * @brightness: the brightness to set it to
+ *
+ * Set a flash LED's brightness.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
+
+/**
+ * led_update_flash_brightness - update flash LED brightness
+ * @fled_cdev: the flash LED to query
+ *
+ * Get a flash LED's current brightness and update the fled_cdev->brightness
+ * member with the obtained value.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_set_flash_timeout - set flash LED timeout
+ * @fled_cdev: the flash LED to set
+ * @timeout: the flash timeout to set it to
+ *
+ * Set the flash strobe duration.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
+ u32 timeout);
+
+/**
+ * led_get_flash_fault - get the flash LED fault
+ * @fled_cdev: the flash LED to query
+ * @fault: bitmask containing flash faults
+ *
+ * Get the flash LED fault.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev,
+ u32 *fault);
+
+#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
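A sketch of how a driver-level consumer of this interface might arm and strobe a flash LED and then decode the fault bitmask. flash_fire_and_check is a hypothetical helper, the fled_cdev is assumed to come from driver data, and error handling is trimmed:

/* Sketch only: strobe for up to 100 ms and report serious faults. */
static int flash_fire_and_check(struct led_classdev_flash *fled_cdev)
{
        u32 fault = 0;
        int ret;

        ret = led_set_flash_timeout(fled_cdev, 100000); /* timeout is in microseconds */
        if (ret)
                return ret;

        ret = led_set_flash_strobe(fled_cdev, true);    /* start the strobe */
        if (ret)
                return ret;

        ret = led_get_flash_fault(fled_cdev, &fault);   /* read the fault bitmask */
        if (ret)
                return ret;

        if (fault & (LED_FAULT_OVER_TEMPERATURE | LED_FAULT_SHORT_CIRCUIT))
                return -EIO;

        return 0;
}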
diff --git a/include/linux/leds.h b/include/linux/leds.h
index cfceef3..b122eea 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -12,6 +12,7 @@
#ifndef __LINUX_LEDS_H_INCLUDED
#define __LINUX_LEDS_H_INCLUDED
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
@@ -46,6 +47,7 @@ struct led_classdev {
#define LED_SYSFS_DISABLE (1 << 20)
#define SET_BRIGHTNESS_ASYNC (1 << 21)
#define SET_BRIGHTNESS_SYNC (1 << 22)
+#define LED_DEV_CAP_FLASH (1 << 23)
/* Set LED brightness level */
/* Must not sleep, use a workqueue if needed */
@@ -81,6 +83,7 @@ struct led_classdev {
unsigned long blink_delay_on, blink_delay_off;
struct timer_list blink_timer;
int blink_brightness;
+ void (*flash_resume)(struct led_classdev *led_cdev);
struct work_struct set_brightness_work;
int delayed_set_value;
@@ -102,7 +105,11 @@ struct led_classdev {
extern int led_classdev_register(struct device *parent,
struct led_classdev *led_cdev);
+extern int devm_led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev);
extern void led_classdev_unregister(struct led_classdev *led_cdev);
+extern void devm_led_classdev_unregister(struct device *parent,
+ struct led_classdev *led_cdev);
extern void led_classdev_suspend(struct led_classdev *led_cdev);
extern void led_classdev_resume(struct led_classdev *led_cdev);
@@ -216,6 +223,11 @@ struct led_trigger {
struct list_head next_trig;
};
+ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+
/* Registration functions for complex triggers */
extern int led_trigger_register(struct led_trigger *trigger);
extern void led_trigger_unregister(struct led_trigger *trigger);
@@ -232,6 +244,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_on,
unsigned long *delay_off,
int invert);
+extern void led_trigger_set_default(struct led_classdev *led_cdev);
+extern void led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger);
+extern void led_trigger_remove(struct led_classdev *led_cdev);
+
+static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+{
+ return led_cdev->trigger_data;
+}
+
/**
* led_trigger_rename_static - rename a trigger
* @name: the new trigger name
@@ -261,6 +283,15 @@ static inline void led_trigger_register_simple(const char *name,
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
+static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
+static inline void led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger) {}
+static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_LEDS_TRIGGERS */
/* Trigger specific functions */
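The new devm_ variants tie the class device's lifetime to the parent device, so a probe routine can register the LED and rely on the teardown happening automatically. A minimal sketch (the my_led_* names are illustrative; per the comment above, the brightness callback must not sleep):

static void my_led_set(struct led_classdev *led_cdev, enum led_brightness value)
{
        /* push @value to the hardware here; must not sleep */
}

static struct led_classdev my_led = {
        .name           = "my:green:status",
        .brightness_set = my_led_set,
        .max_brightness = LED_FULL,
};

static int my_led_setup(struct device *parent)
{
        /* devm_: no led_classdev_unregister() needed on the teardown path */
        return devm_led_classdev_register(parent, &my_led);
}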
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0081f00..c92ebd1 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -52,10 +52,15 @@ struct lglock {
static struct lglock name = { .lock = &name ## _lock }
void lg_lock_init(struct lglock *lg, char *name);
+
void lg_local_lock(struct lglock *lg);
void lg_local_unlock(struct lglock *lg);
void lg_local_lock_cpu(struct lglock *lg, int cpu);
void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);
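lg_double_lock()/lg_double_unlock() take the per-CPU locks of two specific CPUs at once, for the cases where one CPU needs to operate on another CPU's protected per-cpu data. Hypothetical usage sketch (my_lglock and the surrounding code are illustrative):

lg_double_lock(&my_lglock, this_cpu, remote_cpu);
/* ... both CPUs' per-cpu data protected by my_lglock may be touched here ... */
lg_double_unlock(&my_lglock, this_cpu, remote_cpu);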
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 9962c6b..6db19f3 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -61,8 +61,8 @@ struct lguest_data {
u32 tsc_khz;
/* Fields initialized by the Guest at boot: */
- /* Instruction range to suppress interrupts even if enabled */
- unsigned long noirq_start, noirq_end;
+ /* Instruction to suppress interrupts even if enabled */
+ unsigned long noirq_iret;
/* Address above which page tables are all identical. */
unsigned long kernel_address;
/* The vector to try to use for system calls (0x40 or 0x80). */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index 495203f..acd5b12 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -8,52 +8,13 @@
*
* The Guest needs devices to do anything useful. Since we don't let it touch
* real devices (think of the damage it could do!) we provide virtual devices.
- * We could emulate a PCI bus with various devices on it, but that is a fairly
- * complex burden for the Host and suboptimal for the Guest, so we have our own
- * simple lguest bus and we use "virtio" drivers. These drivers need a set of
- * routines from us which will actually do the virtual I/O, but they handle all
- * the net/block/console stuff themselves. This means that if we want to add
- * a new device, we simply need to write a new virtio driver and create support
- * for it in the Launcher: this code won't need to change.
+ * We emulate a PCI bus with virtio devices on it; we used to have our own
+ * lguest bus which was far simpler, but this tests the virtio 1.0 standard.
*
* Virtio devices are also used by kvm, so we can simply reuse their optimized
* device drivers. And one day when everyone uses virtio, my plan will be
* complete. Bwahahahah!
- *
- * Devices are described by a simplified ID, a status byte, and some "config"
- * bytes which describe this device's configuration. This is placed by the
- * Launcher just above the top of physical memory:
- */
-struct lguest_device_desc {
- /* The device type: console, network, disk etc. Type 0 terminates. */
- __u8 type;
- /* The number of virtqueues (first in config array) */
- __u8 num_vq;
- /*
- * The number of bytes of feature bits. Multiply by 2: one for host
- * features and one for Guest acknowledgements.
- */
- __u8 feature_len;
- /* The number of bytes of the config array after virtqueues. */
- __u8 config_len;
- /* A status byte, written by the Guest. */
- __u8 status;
- __u8 config[0];
-};
-
-/*D:135
- * This is how we expect the device configuration field for a virtqueue
- * to be laid out in config space.
*/
-struct lguest_vqconfig {
- /* The number of entries in the virtio_ring */
- __u16 num;
- /* The interrupt we get when something happens. */
- __u16 irq;
- /* The page number of the virtio ring for this device. */
- __u32 pfn;
-};
-/*:*/
/* Write command first word is a request. */
enum lguest_req
@@ -62,12 +23,22 @@ enum lguest_req
LHREQ_GETDMA, /* No longer used */
LHREQ_IRQ, /* + irq */
LHREQ_BREAK, /* No longer used */
- LHREQ_EVENTFD, /* + address, fd. */
+ LHREQ_EVENTFD, /* No longer used. */
+ LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */
+ LHREQ_SETREG, /* + offset within struct pt_regs, value. */
+ LHREQ_TRAP, /* + trap number to deliver to guest. */
};
/*
- * The alignment to use between consumer and producer parts of vring.
- * x86 pagesize for historical reasons.
+ * This is what read() of the lguest fd populates. trap ==
+ * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the
+ * argument), 14 for a page fault in the MMIO region (addr is
+ * the trap address, insn is the instruction), or 13 for a GPF
+ * (insn is the instruction).
*/
-#define LGUEST_VRING_ALIGN 4096
+struct lguest_pending {
+ __u8 trap;
+ __u8 insn[7];
+ __u32 addr;
+};
#endif /* _LINUX_LGUEST_LAUNCHER */
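Per the comment above, the Launcher learns why the Guest stopped by read()ing a struct lguest_pending from the lguest fd. A hedged sketch of the dispatch a Launcher might do (service_pending and the handle_* helpers are hypothetical; LGUEST_TRAP_ENTRY comes from the lguest hypercall definitions):

/* Illustrative Launcher fragment: read the pending trap and act on it. */
static void service_pending(int lguest_fd)
{
        struct lguest_pending pend;

        if (read(lguest_fd, &pend, sizeof(pend)) != sizeof(pend))
                err(1, "Reading pending trap");

        switch (pend.trap) {
        case 13:        /* general protection fault: emulate pend.insn */
                handle_gpf(&pend);
                break;
        case 14:        /* page fault in the MMIO region at pend.addr */
                handle_mmio(&pend);
                break;
        default:        /* LGUEST_TRAP_ENTRY: an LHCALL_NOTIFY, addr is the argument */
                handle_notify(pend.addr);
                break;
        }
}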
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 91f705d..36ce37b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -134,7 +134,6 @@ enum {
ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
ATA_SHT_EMULATED = 1,
- ATA_SHT_CMD_PER_LUN = 1,
ATA_SHT_THIS_ID = -1,
ATA_SHT_USE_CLUSTERING = 1,
@@ -205,6 +204,7 @@ enum {
ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
+ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -231,7 +231,7 @@ enum {
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
* led */
ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
- ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
+ ATA_FLAG_SAS_HOST = (1 << 24), /* SAS host */
/* bits 24:31 of ap->flags are reserved for LLD specific flags */
@@ -309,6 +309,12 @@ enum {
*/
ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
+ * be a spurious PHY event, so ignore the first PHY event that
+ * occurs within 10s after the policy change.
+ */
+ ATA_TMOUT_SPURIOUS_PHY = 10000,
+
/* ATA bus states */
BUS_UNKNOWN = 0,
BUS_DMA = 1,
@@ -424,6 +430,7 @@ enum {
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
+ ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -788,6 +795,8 @@ struct ata_link {
struct ata_eh_context eh_context;
struct ata_device device[ATA_MAX_DEVICES];
+
+ unsigned long last_lpm_change; /* when last LPM change happened */
};
#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
@@ -823,10 +832,10 @@ struct ata_port {
unsigned int cbl; /* cable type; ATA_CBL_xxx */
struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
- unsigned long qc_allocated;
+ unsigned long sas_tag_allocated; /* for sas tag allocation only */
unsigned int qc_active;
int nr_active_links; /* #links with active qcs */
- unsigned int last_tag; /* track next tag hw expects */
+ unsigned int sas_last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -1201,6 +1210,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
@@ -1340,14 +1350,20 @@ extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
extern struct device_attribute *ata_common_sdev_attrs[];
+/*
+ * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
+ * by the edge drivers, because the 'module' field of the sht must be
+ * the edge driver's module reference; otherwise the driver can be
+ * unloaded even while the scsi_device is still being accessed.
+ */
#define ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
.name = drv_name, \
.ioctl = ata_scsi_ioctl, \
.queuecommand = ata_scsi_queuecmd, \
.can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
.emulated = ATA_SHT_EMULATED, \
.use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
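ATA_LFLAG_CHANGED, the new last_lpm_change timestamp and ATA_TMOUT_SPURIOUS_PHY together describe a suppression window: a PHY event that arrives shortly after an LPM policy change is presumed spurious. A sketch of the kind of check sata_lpm_ignore_phy_events() can make (illustrative, not the libata implementation itself):

static bool lpm_event_in_suppress_window(struct ata_link *link)
{
        /* Ignore the event if the LPM state changed within the last 10 s. */
        return (link->flags & ATA_LFLAG_CHANGED) &&
               time_before(jiffies, link->last_lpm_change +
                                    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY));
}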
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 01508c7..2a663c6 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -5,6 +5,10 @@
#include <asm/byteorder.h>
+typedef __be16 fdt16_t;
+typedef __be32 fdt32_t;
+typedef __be64 fdt64_t;
+
#define fdt32_to_cpu(x) be32_to_cpu(x)
#define cpu_to_fdt32(x) cpu_to_be32(x)
#define fdt64_to_cpu(x) be64_to_cpu(x)
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
new file mode 100644
index 0000000..75e3af0
--- /dev/null
+++ b/include/linux/libnvdimm.h
@@ -0,0 +1,151 @@
+/*
+ * libnvdimm - Non-volatile-memory Devices Subsystem
+ *
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __LIBNVDIMM_H__
+#define __LIBNVDIMM_H__
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+enum {
+ /* when a dimm supports both PMEM and BLK access a label is required */
+ NDD_ALIASING = 1 << 0,
+ /* unarmed memory devices may not persist writes */
+ NDD_UNARMED = 1 << 1,
+
+ /* need to set a limit somewhere, but yes, this is likely overkill */
+ ND_IOCTL_MAX_BUFLEN = SZ_4M,
+ ND_CMD_MAX_ELEM = 4,
+ ND_CMD_MAX_ENVELOPE = 16,
+ ND_CMD_ARS_STATUS_MAX = SZ_4K,
+ ND_MAX_MAPPINGS = 32,
+
+ /* mark newly adjusted resources as requiring a label update */
+ DPA_RESOURCE_ADJUSTED = 1 << 0,
+};
+
+extern struct attribute_group nvdimm_bus_attribute_group;
+extern struct attribute_group nvdimm_attribute_group;
+extern struct attribute_group nd_device_attribute_group;
+extern struct attribute_group nd_numa_attribute_group;
+extern struct attribute_group nd_region_attribute_group;
+extern struct attribute_group nd_mapping_attribute_group;
+
+struct nvdimm;
+struct nvdimm_bus_descriptor;
+typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len);
+
+struct nd_namespace_label;
+struct nvdimm_drvdata;
+struct nd_mapping {
+ struct nvdimm *nvdimm;
+ struct nd_namespace_label **labels;
+ u64 start;
+ u64 size;
+ /*
+ * @ndd is for private use at region enable / disable time for
+ * get_ndd() + put_ndd(); all other nd_mapping-to-ndd
+ * conversions use to_ndd(), which respects the enabled state of
+ * the nvdimm.
+ */
+ struct nvdimm_drvdata *ndd;
+};
+
+struct nvdimm_bus_descriptor {
+ const struct attribute_group **attr_groups;
+ unsigned long dsm_mask;
+ char *provider_name;
+ ndctl_fn ndctl;
+};
+
+struct nd_cmd_desc {
+ int in_num;
+ int out_num;
+ u32 in_sizes[ND_CMD_MAX_ELEM];
+ int out_sizes[ND_CMD_MAX_ELEM];
+};
+
+struct nd_interleave_set {
+ u64 cookie;
+};
+
+struct nd_region_desc {
+ struct resource *res;
+ struct nd_mapping *nd_mapping;
+ u16 num_mappings;
+ const struct attribute_group **attr_groups;
+ struct nd_interleave_set *nd_set;
+ void *provider_data;
+ int num_lanes;
+ int numa_node;
+};
+
+struct nvdimm_bus;
+struct module;
+struct device;
+struct nd_blk_region;
+struct nd_blk_region_desc {
+ int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
+ void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
+ int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
+ void *iobuf, u64 len, int rw);
+ struct nd_region_desc ndr_desc;
+};
+
+static inline struct nd_blk_region_desc *to_blk_region_desc(
+ struct nd_region_desc *ndr_desc)
+{
+ return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
+
+}
+
+struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
+ struct nvdimm_bus_descriptor *nfit_desc, struct module *module);
+#define nvdimm_bus_register(parent, desc) \
+ __nvdimm_bus_register(parent, desc, THIS_MODULE)
+void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
+struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
+struct nvdimm *to_nvdimm(struct device *dev);
+struct nd_region *to_nd_region(struct device *dev);
+struct nd_blk_region *to_nd_blk_region(struct device *dev);
+struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
+const char *nvdimm_name(struct nvdimm *nvdimm);
+void *nvdimm_provider_data(struct nvdimm *nvdimm);
+struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
+ const struct attribute_group **groups, unsigned long flags,
+ unsigned long *dsm_mask);
+const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
+const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
+u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
+ const struct nd_cmd_desc *desc, int idx, void *buf);
+u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
+ const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
+ const u32 *out_field);
+int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
+struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
+ struct nd_region_desc *ndr_desc);
+struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
+ struct nd_region_desc *ndr_desc);
+struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
+ struct nd_region_desc *ndr_desc);
+void *nd_region_provider_data(struct nd_region *nd_region);
+void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
+void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
+struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
+unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
+void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
+u64 nd_fletcher64(void *addr, size_t len, bool le);
+#endif /* __LIBNVDIMM_H__ */
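For orientation, a provider-side sketch of the registration flow this header exports: set up a bus descriptor, register the bus, then create a DIMM on it. The my_* names are hypothetical and error handling is trimmed:

static int my_ndctl(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                unsigned int buf_len)
{
        return -ENOTTY;                 /* no command passthrough in this sketch */
}

static struct nvdimm_bus_descriptor my_desc = {
        .provider_name  = "my_provider",
        .ndctl          = my_ndctl,
};

static int my_register(struct device *dev)
{
        struct nvdimm_bus *bus;
        struct nvdimm *dimm;

        bus = nvdimm_bus_register(dev, &my_desc);
        if (!bus)
                return -ENOMEM;

        /* flags = 0: not aliased, armed; no extra sysfs groups or DSM mask */
        dimm = nvdimm_create(bus, NULL, NULL, 0, NULL);
        if (!dimm) {
                nvdimm_bus_unregister(bus);
                return -ENXIO;
        }
        return 0;
}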
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index f343453..2a6b994 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -9,6 +9,9 @@
#include <linux/list.h>
#include <linux/nodemask.h>
+#include <linux/shrinker.h>
+
+struct mem_cgroup;
/* list_lru_walk_cb has to always return one of those */
enum lru_status {
@@ -21,24 +24,45 @@ enum lru_status {
internally, but has to return locked. */
};
-struct list_lru_node {
- spinlock_t lock;
+struct list_lru_one {
struct list_head list;
- /* kept as signed so we can catch imbalance bugs */
+ /* may become negative during memcg reparenting */
long nr_items;
+};
+
+struct list_lru_memcg {
+ /* array of per cgroup lists, indexed by memcg_cache_id */
+ struct list_lru_one *lru[0];
+};
+
+struct list_lru_node {
+ /* protects all lists on the node, including per cgroup */
+ spinlock_t lock;
+ /* global list, used for the root cgroup in cgroup aware lrus */
+ struct list_lru_one lru;
+#ifdef CONFIG_MEMCG_KMEM
+ /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
+ struct list_lru_memcg *memcg_lrus;
+#endif
} ____cacheline_aligned_in_smp;
struct list_lru {
struct list_lru_node *node;
- nodemask_t active_nodes;
+#ifdef CONFIG_MEMCG_KMEM
+ struct list_head list;
+#endif
};
void list_lru_destroy(struct list_lru *lru);
-int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key);
-static inline int list_lru_init(struct list_lru *lru)
-{
- return list_lru_init_key(lru, NULL);
-}
+int __list_lru_init(struct list_lru *lru, bool memcg_aware,
+ struct lock_class_key *key);
+
+#define list_lru_init(lru) __list_lru_init((lru), false, NULL)
+#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key))
+#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL)
+
+int memcg_update_all_list_lrus(int num_memcgs);
+void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
/**
* list_lru_add: add an element to the lru list's tail
@@ -72,32 +96,48 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item);
bool list_lru_del(struct list_lru *lru, struct list_head *item);
/**
- * list_lru_count_node: return the number of objects currently held by @lru
+ * list_lru_count_one: return the number of objects currently held by @lru
* @lru: the lru pointer.
* @nid: the node id to count from.
+ * @memcg: the cgroup to count from.
*
* Always return a non-negative number, 0 for empty lists. There is no
* guarantee that the list is not updated while the count is being computed.
* Callers that want such a guarantee need to provide an outer lock.
*/
+unsigned long list_lru_count_one(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
+
+static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
+ struct shrink_control *sc)
+{
+ return list_lru_count_one(lru, sc->nid, sc->memcg);
+}
+
static inline unsigned long list_lru_count(struct list_lru *lru)
{
long count = 0;
int nid;
- for_each_node_mask(nid, lru->active_nodes)
+ for_each_node_state(nid, N_NORMAL_MEMORY)
count += list_lru_count_node(lru, nid);
return count;
}
-typedef enum lru_status
-(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
+void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
+void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
+ struct list_head *head);
+
+typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
+ struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+
/**
- * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
@@ -115,18 +155,30 @@ typedef enum lru_status
*
* Return value: the number of objects effectively removed from the LRU.
*/
+unsigned long list_lru_walk_one(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
static inline unsigned long
+list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
+ list_lru_walk_cb isolate, void *cb_arg)
+{
+ return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
+ &sc->nr_to_scan);
+}
+
+static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
void *cb_arg, unsigned long nr_to_walk)
{
long isolated = 0;
int nid;
- for_each_node_mask(nid, lru->active_nodes) {
+ for_each_node_state(nid, N_NORMAL_MEMORY) {
isolated += list_lru_walk_node(lru, nid, isolate,
cb_arg, &nr_to_walk);
if (nr_to_walk <= 0)
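The nid + memcg pair carried in shrink_control is exactly what the new list_lru_shrink_count()/list_lru_shrink_walk() helpers key on, so a memcg-aware cache only has to forward its shrink_control. A sketch of that wiring (my_lru and the my_* callbacks are illustrative; LRU_REMOVED is one of the lru_status values from earlier in this header, and the shrinker itself would be registered as memcg aware):

static struct list_lru my_lru;          /* set up with list_lru_init_memcg() */

static enum lru_status my_isolate(struct list_head *item,
                struct list_lru_one *list, spinlock_t *lock, void *cb_arg)
{
        /* Move the victim from its per-memcg LRU onto the caller's dispose list. */
        list_lru_isolate_move(list, item, cb_arg);
        return LRU_REMOVED;
}

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
        return list_lru_shrink_count(&my_lru, sc);
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        unsigned long freed;

        freed = list_lru_shrink_walk(&my_lru, sc, my_isolate, &dispose);
        /* ... free everything collected on @dispose ... */
        return freed;
}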
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 5d10ae3..f266661 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -1,6 +1,9 @@
#ifndef _LINUX_LIST_NULLS_H
#define _LINUX_LIST_NULLS_H
+#include <linux/poison.h>
+#include <linux/const.h>
+
/*
* Special version of lists, where end of list is not a NULL pointer,
* but a 'nulls' marker, which can have many different values.
@@ -21,8 +24,9 @@ struct hlist_nulls_head {
struct hlist_nulls_node {
struct hlist_nulls_node *next, **pprev;
};
+#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
- ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1)))
+ ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
/**
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
new file mode 100644
index 0000000..31db7a0
--- /dev/null
+++ b/include/linux/livepatch.h
@@ -0,0 +1,139 @@
+/*
+ * livepatch.h - Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_LIVEPATCH_H_
+#define _LINUX_LIVEPATCH_H_
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#if IS_ENABLED(CONFIG_LIVEPATCH)
+
+#include <asm/livepatch.h>
+
+enum klp_state {
+ KLP_DISABLED,
+ KLP_ENABLED
+};
+
+/**
+ * struct klp_func - function structure for live patching
+ * @old_name: name of the function to be patched
+ * @new_func: pointer to the patched function code
+ * @old_addr: a hint conveying at what address the old function
+ * can be found (optional, vmlinux patches only)
+ * @kobj: kobject for sysfs resources
+ * @state: tracks function-level patch application state
+ * @stack_node: list node for klp_ops func_stack list
+ */
+struct klp_func {
+ /* external */
+ const char *old_name;
+ void *new_func;
+ /*
+ * The old_addr field is optional and can be used to resolve
+ * duplicate symbol names in the vmlinux object. If this
+ * information is not present, the symbol is located by name
+ * with kallsyms. If the name is not unique and old_addr is
+ * not provided, the patch application fails as there is no
+ * way to resolve the ambiguity.
+ */
+ unsigned long old_addr;
+
+ /* internal */
+ struct kobject kobj;
+ enum klp_state state;
+ struct list_head stack_node;
+};
+
+/**
+ * struct klp_reloc - relocation structure for live patching
+ * @loc: address where the relocation will be written
+ * @val: address of the referenced symbol (optional,
+ * vmlinux patches only)
+ * @type: ELF relocation type
+ * @name: name of the referenced symbol (for lookup/verification)
+ * @addend: offset from the referenced symbol
+ * @external: symbol is either exported or within the live patch module itself
+ */
+struct klp_reloc {
+ unsigned long loc;
+ unsigned long val;
+ unsigned long type;
+ const char *name;
+ int addend;
+ int external;
+};
+
+/**
+ * struct klp_object - kernel object structure for live patching
+ * @name: module name (or NULL for vmlinux)
+ * @relocs: relocation entries to be applied at load time
+ * @funcs: function entries for functions to be patched in the object
+ * @kobj: kobject for sysfs resources
+ * @mod: kernel module associated with the patched object
+ * (NULL for vmlinux)
+ * @state: tracks object-level patch application state
+ */
+struct klp_object {
+ /* external */
+ const char *name;
+ struct klp_reloc *relocs;
+ struct klp_func *funcs;
+
+ /* internal */
+ struct kobject kobj;
+ struct module *mod;
+ enum klp_state state;
+};
+
+/**
+ * struct klp_patch - patch structure for live patching
+ * @mod: reference to the live patch module
+ * @objs: object entries for kernel objects to be patched
+ * @list: list node for global list of registered patches
+ * @kobj: kobject for sysfs resources
+ * @state: tracks patch-level application state
+ */
+struct klp_patch {
+ /* external */
+ struct module *mod;
+ struct klp_object *objs;
+
+ /* internal */
+ struct list_head list;
+ struct kobject kobj;
+ enum klp_state state;
+};
+
+#define klp_for_each_object(patch, obj) \
+ for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+ for (func = obj->funcs; func->old_name; func++)
+
+int klp_register_patch(struct klp_patch *);
+int klp_unregister_patch(struct klp_patch *);
+int klp_enable_patch(struct klp_patch *);
+int klp_disable_patch(struct klp_patch *);
+
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _LINUX_LIVEPATCH_H_ */
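A minimal consumer follows the shape of the in-tree livepatch sample: a terminated array of klp_func entries per object, a NULL object name for vmlinux, then register and enable from module init. Sketch only; the replacement function and the module boilerplate are illustrative:

static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s\n", "patched cmdline");
        return 0;
}

static struct klp_func funcs[] = {
        {
                .old_name = "cmdline_proc_show",
                .new_func = livepatch_cmdline_proc_show,
        }, { }
};

static struct klp_object objs[] = {
        {
                /* .name == NULL means the patched function lives in vmlinux */
                .funcs = funcs,
        }, { }
};

static struct klp_patch patch = {
        .mod = THIS_MODULE,
        .objs = objs,
};

static int patch_init(void)
{
        int ret;

        ret = klp_register_patch(&patch);
        if (ret)
                return ret;
        ret = klp_enable_patch(&patch);
        if (ret)
                klp_unregister_patch(&patch);
        return ret;
}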
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 74ab231..70400dc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -130,8 +130,8 @@ enum bounce_type {
};
struct lock_class_stats {
- unsigned long contention_point[4];
- unsigned long contending_point[4];
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
@@ -255,6 +255,7 @@ struct held_lock {
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
+ unsigned int pin_count;
};
/*
@@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
+extern void lock_pin_lock(struct lockdep_map *lock);
+extern void lock_unpin_lock(struct lockdep_map *lock);
+
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
+#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
+#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
+
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_off(void)
@@ -420,6 +427,9 @@ struct lock_class_key { };
#define lockdep_recursing(tsk) (0)
+#define lockdep_pin_lock(l) do { (void)(l); } while (0)
+#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
+
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
@@ -531,8 +541,13 @@ do { \
# define might_lock_read(lock) do { } while (0)
#endif
-#ifdef CONFIG_PROVE_RCU
+#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
+#else
+static inline void
+lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+{
+}
#endif
#endif /* __LINUX_LOCKDEP_H */
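lockdep_pin_lock()/lockdep_unpin_lock() let a code path assert that a held lock is not dropped behind its back, e.g. across a call-out that must return with the lock still held. Hypothetical sketch (my_lock and do_callout are illustrative):

spin_lock(&my_lock);
lockdep_pin_lock(&my_lock);     /* lockdep should complain if my_lock is released here */
do_callout();                   /* must not drop my_lock */
lockdep_unpin_lock(&my_lock);
spin_unlock(&my_lock);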
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 4bfde0e..b10b122 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -28,12 +28,13 @@ struct lockref {
#endif
struct {
spinlock_t lock;
- unsigned int count;
+ int count;
};
};
};
extern void lockref_get(struct lockref *);
+extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
new file mode 100644
index 0000000..9429f05
--- /dev/null
+++ b/include/linux/lsm_hooks.h
@@ -0,0 +1,1888 @@
+/*
+ * Linux Security Module interfaces
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
+ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ * Copyright (C) 2015 Intel Corporation.
+ * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Due to this file being licensed under the GPL there is controversy over
+ * whether this permits you to write a module that #includes this file
+ * without placing your module under the GPL. Please consult a lawyer for
+ * advice before doing this.
+ *
+ */
+
+#ifndef __LINUX_LSM_HOOKS_H
+#define __LINUX_LSM_HOOKS_H
+
+#include <linux/security.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+
+/**
+ * Security hooks for program execution operations.
+ *
+ * @bprm_set_creds:
+ * Save security information in the bprm->security field, typically based
+ * on information about the bprm->file, for later use by the apply_creds
+ * hook. This hook may also optionally check permissions (e.g. for
+ * transitions between security domains).
+ * This hook may be called multiple times during a single execve, e.g. for
+ * interpreters. The hook can tell whether it has already been called by
+ * checking to see if @bprm->security is non-NULL. If so, then the hook
+ * may decide either to retain the security information saved earlier or
+ * to replace it.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_check_security:
+ * This hook mediates the point when a search for a binary handler will
+ * begin. It allows a check of the @bprm->security value which is set in the
+ * preceding set_creds call. The primary difference from set_creds is
+ * that the argv list and envp list are reliably available in @bprm. This
+ * hook may be called multiple times during a single execve; and in each
+ * pass set_creds is called first.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_committing_creds:
+ * Prepare to install the new security attributes of a process being
+ * transformed by an execve operation, based on the old credentials
+ * pointed to by @current->cred and the information set in @bprm->cred by
+ * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
+ * This hook is a good place to perform state changes on the process such
+ * as closing open file descriptors to which access will no longer be
+ * granted when the attributes are changed. This is called immediately
+ * before commit_creds().
+ * @bprm_committed_creds:
+ * Tidy up after the installation of the new security attributes of a
+ * process being transformed by an execve operation. The new credentials
+ * have, by this point, been set to @current->cred. @bprm points to the
+ * linux_binprm structure. This hook is a good place to perform state
+ * changes on the process such as clearing out non-inheritable signal
+ * state. This is called immediately after commit_creds().
+ * @bprm_secureexec:
+ * Return a boolean value (0 or 1) indicating whether a "secure exec"
+ * is required. The flag is passed in the auxiliary table
+ * on the initial stack to the ELF interpreter to indicate whether libc
+ * should enable secure mode.
+ * @bprm contains the linux_binprm structure.
+ *
+ * Security hooks for filesystem operations.
+ *
+ * @sb_alloc_security:
+ * Allocate and attach a security structure to the sb->s_security field.
+ * The s_security field is initialized to NULL when the structure is
+ * allocated.
+ * @sb contains the super_block structure to be modified.
+ * Return 0 if operation was successful.
+ * @sb_free_security:
+ * Deallocate and clear the sb->s_security field.
+ * @sb contains the super_block structure to be modified.
+ * @sb_statfs:
+ * Check permission before obtaining filesystem statistics for the @mnt
+ * mountpoint.
+ * @dentry is a handle on the superblock for the filesystem.
+ * Return 0 if permission is granted.
+ * @sb_mount:
+ * Check permission before an object specified by @dev_name is mounted on
+ * the mount point named by @nd. For an ordinary mount, @dev_name
+ * identifies a device if the file system type requires a device. For a
+ * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
+ * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
+ * pathname of the object being mounted.
+ * @dev_name contains the name for object being mounted.
+ * @path contains the path for mount point object.
+ * @type contains the filesystem type.
+ * @flags contains the mount flags.
+ * @data contains the filesystem-specific data.
+ * Return 0 if permission is granted.
+ * @sb_copy_data:
+ * Allow mount option data to be copied prior to parsing by the filesystem,
+ * so that the security module can extract security-specific mount
+ * options cleanly (a filesystem may modify the data e.g. with strsep()).
+ * This also allows the original mount data to be stripped of security-
+ * specific options to avoid having to make filesystems aware of them.
+ * @type the type of filesystem being mounted.
+ * @orig the original mount data copied from userspace.
+ * @copy copied data which will be passed to the security module.
+ * Returns 0 if the copy was successful.
+ * @sb_remount:
+ * Extracts security system specific mount options and verifies no changes
+ * are being made to those options.
+ * @sb superblock being remounted
+ * @data contains the filesystem-specific data.
+ * Return 0 if permission is granted.
+ * @sb_umount:
+ * Check permission before the @mnt file system is unmounted.
+ * @mnt contains the mounted file system.
+ * @flags contains the unmount flags, e.g. MNT_FORCE.
+ * Return 0 if permission is granted.
+ * @sb_pivotroot:
+ * Check permission before pivoting the root filesystem.
+ * @old_path contains the path for the new location of the
+ * current root (put_old).
+ * @new_path contains the path for the new root (new_root).
+ * Return 0 if permission is granted.
+ * @sb_set_mnt_opts:
+ * Set the security relevant mount options used for a superblock
+ * @sb the superblock to set security mount options for
+ * @opts binary data structure containing all lsm mount data
+ * @sb_clone_mnt_opts:
+ * Copy all security options from a given superblock to another
+ * @oldsb old superblock which contain information to clone
+ * @newsb new superblock which needs filled in
+ * @sb_parse_opts_str:
+ * Parse a string of security data filling in the opts structure
+ * @options string containing all mount options known by the LSM
+ * @opts binary data structure usable by the LSM
+ * @dentry_init_security:
+ * Compute a context for a dentry as the inode is not yet available
+ * since NFSv4 has no label backed by an EA anyway.
+ * @dentry dentry to use in calculating the context.
+ * @mode mode used to determine resource type.
+ * @name name of the last path component used to create file
+ * @ctx pointer to place the pointer to the resulting context in.
+ * @ctxlen point to place the length of the resulting context.
+ *
+ *
+ * Security hooks for inode operations.
+ *
+ * @inode_alloc_security:
+ * Allocate and attach a security structure to @inode->i_security. The
+ * i_security field is initialized to NULL when the inode structure is
+ * allocated.
+ * @inode contains the inode structure.
+ * Return 0 if operation was successful.
+ * @inode_free_security:
+ * @inode contains the inode structure.
+ * Deallocate the inode security structure and set @inode->i_security to
+ * NULL.
+ * @inode_init_security:
+ * Obtain the security attribute name suffix and value to set on a newly
+ * created inode and set up the incore security field for the new inode.
+ * This hook is called by the fs code as part of the inode creation
+ * transaction and provides for atomic labeling of the inode, unlike
+ * the post_create/mkdir/... hooks called by the VFS. The hook function
+ * is expected to allocate the name and value via kmalloc, with the caller
+ * being responsible for calling kfree after using them.
+ * If the security module does not use security attributes or does
+ * not wish to put a security attribute on this particular inode,
+ * then it should return -EOPNOTSUPP to skip this processing.
+ * @inode contains the inode structure of the newly created inode.
+ * @dir contains the inode structure of the parent directory.
+ * @qstr contains the last path component of the new object
+ * @name will be set to the allocated name suffix (e.g. selinux).
+ * @value will be set to the allocated attribute value.
+ * @len will be set to the length of the value.
+ * Returns 0 if @name and @value have been successfully set,
+ * -EOPNOTSUPP if no security attribute is needed, or
+ * -ENOMEM on memory allocation failure.
+ * @inode_create:
+ * Check permission to create a regular file.
+ * @dir contains inode structure of the parent of the new file.
+ * @dentry contains the dentry structure for the file to be created.
+ * @mode contains the file mode of the file to be created.
+ * Return 0 if permission is granted.
+ * @inode_link:
+ * Check permission before creating a new hard link to a file.
+ * @old_dentry contains the dentry structure for an existing
+ * link to the file.
+ * @dir contains the inode structure of the parent directory
+ * of the new link.
+ * @new_dentry contains the dentry structure for the new link.
+ * Return 0 if permission is granted.
+ * @path_link:
+ * Check permission before creating a new hard link to a file.
+ * @old_dentry contains the dentry structure for an existing link
+ * to the file.
+ * @new_dir contains the path structure of the parent directory of
+ * the new link.
+ * @new_dentry contains the dentry structure for the new link.
+ * Return 0 if permission is granted.
+ * @inode_unlink:
+ * Check the permission to remove a hard link to a file.
+ * @dir contains the inode structure of parent directory of the file.
+ * @dentry contains the dentry structure for file to be unlinked.
+ * Return 0 if permission is granted.
+ * @path_unlink:
+ * Check the permission to remove a hard link to a file.
+ * @dir contains the path structure of parent directory of the file.
+ * @dentry contains the dentry structure for file to be unlinked.
+ * Return 0 if permission is granted.
+ * @inode_symlink:
+ * Check the permission to create a symbolic link to a file.
+ * @dir contains the inode structure of parent directory of
+ * the symbolic link.
+ * @dentry contains the dentry structure of the symbolic link.
+ * @old_name contains the pathname of file.
+ * Return 0 if permission is granted.
+ * @path_symlink:
+ * Check the permission to create a symbolic link to a file.
+ * @dir contains the path structure of parent directory of
+ * the symbolic link.
+ * @dentry contains the dentry structure of the symbolic link.
+ * @old_name contains the pathname of file.
+ * Return 0 if permission is granted.
+ * @inode_mkdir:
+ * Check permissions to create a new directory in the existing directory
+ * associated with inode structure @dir.
+ * @dir contains the inode structure of parent of the directory
+ * to be created.
+ * @dentry contains the dentry structure of new directory.
+ * @mode contains the mode of new directory.
+ * Return 0 if permission is granted.
+ * @path_mkdir:
+ * Check permissions to create a new directory in the existing directory
+ * associated with path structure @path.
+ * @dir contains the path structure of parent of the directory
+ * to be created.
+ * @dentry contains the dentry structure of new directory.
+ * @mode contains the mode of new directory.
+ * Return 0 if permission is granted.
+ * @inode_rmdir:
+ * Check the permission to remove a directory.
+ * @dir contains the inode structure of parent of the directory
+ * to be removed.
+ * @dentry contains the dentry structure of directory to be removed.
+ * Return 0 if permission is granted.
+ * @path_rmdir:
+ * Check the permission to remove a directory.
+ * @dir contains the path structure of parent of the directory to be
+ * removed.
+ * @dentry contains the dentry structure of directory to be removed.
+ * Return 0 if permission is granted.
+ * @inode_mknod:
+ * Check permissions when creating a special file (or a socket or a fifo
+ * file created via the mknod system call). Note that if the mknod
+ * operation is being done for a regular file, then the create hook will
+ * be called and not this hook.
+ * @dir contains the inode structure of parent of the new file.
+ * @dentry contains the dentry structure of the new file.
+ * @mode contains the mode of the new file.
+ * @dev contains the device number.
+ * Return 0 if permission is granted.
+ * @path_mknod:
+ * Check permissions when creating a file. Note that this hook is called
+ * even if the mknod operation is being done for a regular file.
+ * @dir contains the path structure of parent of the new file.
+ * @dentry contains the dentry structure of the new file.
+ * @mode contains the mode of the new file.
+ * @dev contains the undecoded device number. Use new_decode_dev() to get
+ * the decoded device number.
+ * Return 0 if permission is granted.
+ * @inode_rename:
+ * Check for permission to rename a file or directory.
+ * @old_dir contains the inode structure for parent of the old link.
+ * @old_dentry contains the dentry structure of the old link.
+ * @new_dir contains the inode structure for parent of the new link.
+ * @new_dentry contains the dentry structure of the new link.
+ * Return 0 if permission is granted.
+ * @path_rename:
+ * Check for permission to rename a file or directory.
+ * @old_dir contains the path structure for parent of the old link.
+ * @old_dentry contains the dentry structure of the old link.
+ * @new_dir contains the path structure for parent of the new link.
+ * @new_dentry contains the dentry structure of the new link.
+ * Return 0 if permission is granted.
+ * @path_chmod:
+ * Check for permission to change a file or directory's DAC permissions
+ * (mode).
+ * @path contains the path structure of the file or directory.
+ * @mode contains the new DAC mode (permission bits).
+ * Return 0 if permission is granted.
+ * @path_chown:
+ * Check for permission to change owner/group of a file or directory.
+ * @path contains the path structure.
+ * @uid contains new owner's ID.
+ * @gid contains new group's ID.
+ * Return 0 if permission is granted.
+ * @path_chroot:
+ * Check for permission to change root directory.
+ * @path contains the path structure.
+ * Return 0 if permission is granted.
+ * @inode_readlink:
+ * Check the permission to read the symbolic link.
+ * @dentry contains the dentry structure for the file link.
+ * Return 0 if permission is granted.
+ * @inode_follow_link:
+ * Check permission to follow a symbolic link when looking up a pathname.
+ * @dentry contains the dentry structure for the link.
+ * @inode contains the inode, which itself is not stable in RCU-walk
+ * @rcu indicates whether we are in RCU-walk mode.
+ * Return 0 if permission is granted.
+ * @inode_permission:
+ * Check permission before accessing an inode. This hook is called by the
+ * existing Linux permission function, so a security module can use it to
+ * provide additional checking for existing Linux permission checks.
+ * Notice that this hook is called when a file is opened (as well as many
+ * other operations), whereas the file_security_ops permission hook is
+ * called when the actual read/write operations are performed.
+ * @inode contains the inode structure to check.
+ * @mask contains the permission mask.
+ * Return 0 if permission is granted.
+ * @inode_setattr:
+ * Check permission before setting file attributes. Note that the kernel
+ * call to notify_change is performed from several locations, whenever
+ * file attributes change (such as when a file is truncated, chown/chmod
+ * operations, transferring disk quotas, etc).
+ * @dentry contains the dentry structure for the file.
+ * @attr is the iattr structure containing the new file attributes.
+ * Return 0 if permission is granted.
+ * @path_truncate:
+ * Check permission before truncating a file.
+ * @path contains the path structure for the file.
+ * Return 0 if permission is granted.
+ * @inode_getattr:
+ * Check permission before obtaining file attributes.
+ * @path contains the path structure for the file.
+ * Return 0 if permission is granted.
+ * @inode_setxattr:
+ * Check permission before setting the extended attribute
+ * @value identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_post_setxattr:
+ * Update inode security field after successful setxattr operation.
+ * The @value identified by @name has been set on @dentry.
+ * @inode_getxattr:
+ * Check permission before obtaining the extended attributes
+ * identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_listxattr:
+ * Check permission before obtaining the list of extended attribute
+ * names for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_removexattr:
+ * Check permission before removing the extended attribute
+ * identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_getsecurity:
+ * Retrieve a copy of the extended attribute representation of the
+ * security label associated with @name for @inode via @buffer. Note that
+ * @name is the remainder of the attribute name after the security prefix
+ * has been removed. @alloc is used to specify if the call should return a
+ * value via the buffer or just the value length.
+ * Return size of buffer on success.
+ * @inode_setsecurity:
+ * Set the security label associated with @name for @inode from the
+ * extended attribute value @value. @size indicates the size of the
+ * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
+ * Note that @name is the remainder of the attribute name after the
+ * security. prefix has been removed.
+ * Return 0 on success.
+ * @inode_listsecurity:
+ * Copy the extended attribute names for the security labels
+ * associated with @inode into @buffer. The maximum size of @buffer
+ * is specified by @buffer_size. @buffer may be NULL to request
+ * the size of the buffer required.
+ * Returns number of bytes used/required on success.
+ * @inode_need_killpriv:
+ * Called when an inode has been changed.
+ * @dentry is the dentry being changed.
+ * Return <0 on error to abort the inode change operation.
+ * Return 0 if inode_killpriv does not need to be called.
+ * Return >0 if inode_killpriv does need to be called.
+ * @inode_killpriv:
+ * The setuid bit is being removed. Remove similar security labels.
+ * Called with the dentry->d_inode->i_mutex held.
+ * @dentry is the dentry being changed.
+ * Return 0 on success. If error is returned, then the operation
+ * causing setuid bit removal is failed.
+ * @inode_getsecid:
+ * Get the secid associated with the node.
+ * @inode contains a pointer to the inode.
+ * @secid contains a pointer to the location where result will be saved.
+ * In case of failure, @secid will be set to zero.
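To make the inode_init_security contract above concrete, here is a minimal sketch. It is hypothetical and not part of this patch: the xattr suffix "example" and the fixed label are invented, but the kmalloc'ed @name/@value ownership and the -EOPNOTSUPP/-ENOMEM returns follow the description above.

#include <linux/fs.h>		/* struct inode, struct qstr */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_inode_init_security(struct inode *inode, struct inode *dir,
				       const struct qstr *qstr,
				       const char **name, void **value,
				       size_t *len)
{
	static const char label[] = "example_label";

	/*
	 * A module that does not wish to label this particular inode
	 * would return -EOPNOTSUPP here instead.
	 */

	/* @name and @value must be kmalloc'ed; the caller kfree()s them. */
	*name = kstrdup("example", GFP_NOFS);
	*value = kmemdup(label, sizeof(label), GFP_NOFS);
	if (!*name || !*value) {
		kfree(*name);
		kfree(*value);
		return -ENOMEM;
	}
	*len = sizeof(label);
	return 0;
}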
+ *
+ * Security hooks for file operations
+ *
+ * @file_permission:
+ * Check file permissions before accessing an open file. This hook is
+ * called by various operations that read or write files. A security
+ * module can use this hook to perform additional checking on these
+ * operations, e.g. to revalidate permissions on use to support privilege
+ * bracketing or policy changes. Notice that this hook is used when the
+ * actual read/write operations are performed, whereas the
+ * inode_security_ops hook is called when a file is opened (as well as
+ * many other operations).
+ * Caveat: Although this hook can be used to revalidate permissions for
+ * various system call operations that read or write files, it does not
+ * address the revalidation of permissions for memory-mapped files.
+ * Security modules must handle this separately if they need such
+ * revalidation.
+ * @file contains the file structure being accessed.
+ * @mask contains the requested permissions.
+ * Return 0 if permission is granted.
+ * @file_alloc_security:
+ * Allocate and attach a security structure to the file->f_security field.
+ * The security field is initialized to NULL when the structure is first
+ * created.
+ * @file contains the file structure to secure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @file_free_security:
+ * Deallocate and free any security structures stored in file->f_security.
+ * @file contains the file structure being modified.
+ * @file_ioctl:
+ * @file contains the file structure.
+ * @cmd contains the operation to perform.
+ * @arg contains the operational arguments.
+ * Check permission for an ioctl operation on @file. Note that @arg
+ * sometimes represents a user space pointer; in other cases, it may be a
+ * simple integer value. When @arg represents a user space pointer, it
+ * should never be used by the security module.
+ * Return 0 if permission is granted.
+ * @mmap_addr:
+ * Check permissions for a mmap operation at @addr.
+ * @addr contains virtual address that will be used for the operation.
+ * Return 0 if permission is granted.
+ * @mmap_file:
+ * Check permissions for a mmap operation. The @file may be NULL, e.g.
+ * if mapping anonymous memory.
+ * @file contains the file structure for file to map (may be NULL).
+ * @reqprot contains the protection requested by the application.
+ * @prot contains the protection that will be applied by the kernel.
+ * @flags contains the operational flags.
+ * Return 0 if permission is granted.
+ * @file_mprotect:
+ * Check permissions before changing memory access permissions.
+ * @vma contains the memory region to modify.
+ * @reqprot contains the protection requested by the application.
+ * @prot contains the protection that will be applied by the kernel.
+ * Return 0 if permission is granted.
+ * @file_lock:
+ * Check permission before performing file locking operations.
+ * Note: this hook mediates both flock and fcntl style locks.
+ * @file contains the file structure.
+ * @cmd contains the posix-translated lock operation to perform
+ * (e.g. F_RDLCK, F_WRLCK).
+ * Return 0 if permission is granted.
+ * @file_fcntl:
+ * Check permission before allowing the file operation specified by @cmd
+ * from being performed on the file @file. Note that @arg sometimes
+ * represents a user space pointer; in other cases, it may be a simple
+ * integer value. When @arg represents a user space pointer, it should
+ * never be used by the security module.
+ * @file contains the file structure.
+ * @cmd contains the operation to be performed.
+ * @arg contains the operational arguments.
+ * Return 0 if permission is granted.
+ * @file_set_fowner:
+ * Save owner security information (typically from current->security) in
+ * file->f_security for later use by the send_sigiotask hook.
+ * @file contains the file structure to update.
+ * Return 0 on success.
+ * @file_send_sigiotask:
+ * Check permission for the file owner @fown to send SIGIO or SIGURG to the
+ * process @tsk. Note that this hook is sometimes called from interrupt context.
+ * Note that the fown_struct, @fown, is never outside the context of a
+ * struct file, so the file structure (and associated security information)
+ * can always be obtained:
+ * container_of(fown, struct file, f_owner)
+ * @tsk contains the structure of task receiving signal.
+ * @fown contains the file owner information.
+ * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
+ * Return 0 if permission is granted.
+ * @file_receive:
+ * This hook allows security modules to control the ability of a process
+ * to receive an open file descriptor via socket IPC.
+ * @file contains the file structure being received.
+ * Return 0 if permission is granted.
+ * @file_open:
+ * Save open-time permission checking state for later use upon
+ * file_permission, and recheck access if anything has changed
+ * since inode_permission.
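A sketch of a file_ioctl hook is shown below; it is hypothetical and not part of this patch. It follows the guidance above by looking only at @cmd and never dereferencing @arg. FIBMAP and CAP_SYS_RAWIO are real kernel constants; the policy itself is invented.

#include <linux/fs.h>		/* FIBMAP, struct file */
#include <linux/capability.h>
#include <linux/errno.h>

static int example_file_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	/* @arg may be a userspace pointer and is deliberately ignored. */
	if (cmd == FIBMAP && !capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}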
+ *
+ * Security hooks for task operations.
+ *
+ * @task_create:
+ * Check permission before creating a child process. See the clone(2)
+ * manual page for definitions of the @clone_flags.
+ * @clone_flags contains the flags indicating what should be shared.
+ * Return 0 if permission is granted.
+ * @task_free:
+ * @task task being freed
+ * Handle release of task-related resources. (Note that this can be called
+ * from interrupt context.)
+ * @cred_alloc_blank:
+ * @cred points to the credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Only allocate sufficient memory and attach to @cred such that
+ * cred_transfer() will not get ENOMEM.
+ * @cred_free:
+ * @cred points to the credentials.
+ * Deallocate and clear the cred->security field in a set of credentials.
+ * @cred_prepare:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Prepare a new set of credentials by copying the data from the old set.
+ * @cred_transfer:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * Transfer data from original creds to new creds
+ * @kernel_act_as:
+ * Set the credentials for a kernel service to act as (subjective context).
+ * @new points to the credentials to be modified.
+ * @secid specifies the security ID to be set
+ * The current task must be the one that nominated @secid.
+ * Return 0 if successful.
+ * @kernel_create_files_as:
+ * Set the file creation context in a set of credentials to be the same as
+ * the objective context of the specified inode.
+ * @new points to the credentials to be modified.
+ * @inode points to the inode to use as a reference.
+ * The current task must be the one that nominated @inode.
+ * Return 0 if successful.
+ * @kernel_fw_from_file:
+ * Load firmware from userspace (not called for built-in firmware).
+ * @file contains the file structure pointing to the file containing
+ * the firmware to load. This argument will be NULL if the firmware
+ * was loaded via the uevent-triggered blob-based interface exposed
+ * by CONFIG_FW_LOADER_USER_HELPER.
+ * @buf pointer to buffer containing firmware contents.
+ * @size length of the firmware contents.
+ * Return 0 if permission is granted.
+ * @kernel_module_request:
+ * Ability to trigger the kernel to automatically upcall to userspace for
+ * userspace to load a kernel module with the given name.
+ * @kmod_name name of the module requested by the kernel
+ * Return 0 if successful.
+ * @kernel_module_from_file:
+ * Load a kernel module from userspace.
+ * @file contains the file structure pointing to the file containing
+ * the kernel module to load. If the module is being loaded from a blob,
+ * this argument will be NULL.
+ * Return 0 if permission is granted.
+ * @task_fix_setuid:
+ * Update the module's state after setting one or more of the user
+ * identity attributes of the current process. The @flags parameter
+ * indicates which of the set*uid system calls invoked this hook.
+ * @new is the set of credentials that will be installed. Modifications
+ * should be made to this rather than to @current->cred.
+ * @old is the set of credentials that are being replaced.
+ * @flags contains one of the LSM_SETID_* values.
+ * Return 0 on success.
+ * @task_setpgid:
+ * Check permission before setting the process group identifier of the
+ * process @p to @pgid.
+ * @p contains the task_struct for process being modified.
+ * @pgid contains the new pgid.
+ * Return 0 if permission is granted.
+ * @task_getpgid:
+ * Check permission before getting the process group identifier of the
+ * process @p.
+ * @p contains the task_struct for the process.
+ * Return 0 if permission is granted.
+ * @task_getsid:
+ * Check permission before getting the session identifier of the process
+ * @p.
+ * @p contains the task_struct for the process.
+ * Return 0 if permission is granted.
+ * @task_getsecid:
+ * Retrieve the security identifier of the process @p.
+ * @p contains the task_struct for the process; the result is placed into @secid.
+ * In case of failure, @secid will be set to zero.
+ *
+ * @task_setnice:
+ * Check permission before setting the nice value of @p to @nice.
+ * @p contains the task_struct of process.
+ * @nice contains the new nice value.
+ * Return 0 if permission is granted.
+ * @task_setioprio:
+ * Check permission before setting the ioprio value of @p to @ioprio.
+ * @p contains the task_struct of process.
+ * @ioprio contains the new ioprio value
+ * Return 0 if permission is granted.
+ * @task_getioprio:
+ * Check permission before getting the ioprio value of @p.
+ * @p contains the task_struct of process.
+ * Return 0 if permission is granted.
+ * @task_setrlimit:
+ * Check permission before setting the resource limits of the current
+ * process for @resource to @new_rlim. The old resource limit values can
+ * be examined by dereferencing (current->signal->rlim + resource).
+ * @resource contains the resource whose limit is being set.
+ * @new_rlim contains the new limits for @resource.
+ * Return 0 if permission is granted.
+ * @task_setscheduler:
+ * Check permission before setting scheduling policy and/or parameters of
+ * process @p based on @policy and @lp.
+ * @p contains the task_struct for process.
+ * @policy contains the scheduling policy.
+ * @lp contains the scheduling parameters.
+ * Return 0 if permission is granted.
+ * @task_getscheduler:
+ * Check permission before obtaining scheduling information for process
+ * @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_movememory:
+ * Check permission before moving memory owned by process @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_kill:
+ * Check permission before sending signal @sig to @p. @info can be NULL,
+ * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
+ * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
+ * from the kernel and should typically be permitted.
+ * SIGIO signals are handled separately by the send_sigiotask hook in
+ * file_security_ops.
+ * @p contains the task_struct for process.
+ * @info contains the signal information.
+ * @sig contains the signal value.
+ * @secid contains the sid of the process where the signal originated
+ * Return 0 if permission is granted.
+ * @task_wait:
+ * Check permission before allowing a process to reap a child process @p
+ * and collect its status information.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_prctl:
+ * Check permission before performing a process control operation on the
+ * current process.
+ * @option contains the operation.
+ * @arg2 contains an argument.
+ * @arg3 contains an argument.
+ * @arg4 contains an argument.
+ * @arg5 contains an argument.
+ * Return -ENOSYS if no-one wanted to handle this op, any other value to
+ * cause prctl() to return immediately with that value.
+ * @task_to_inode:
+ * Set the security attributes for an inode based on an associated task's
+ * security attributes, e.g. for /proc/pid inodes.
+ * @p contains the task_struct for the task.
+ * @inode contains the inode structure for the inode.
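As a small worked example of the task hooks above, a hypothetical task_setnice implementation (not part of this patch) that lets any task lower its own priority but requires CAP_SYS_NICE to raise it:

#include <linux/sched.h>	/* task_nice() */
#include <linux/capability.h>
#include <linux/errno.h>

static int example_task_setnice(struct task_struct *p, int nice)
{
	/* Lowering priority (larger nice) is free; boosting needs CAP_SYS_NICE. */
	if (nice < task_nice(p) && !capable(CAP_SYS_NICE))
		return -EPERM;
	return 0;
}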
+ *
+ * Security hooks for Netlink messaging.
+ *
+ * @netlink_send:
+ * Save security information for a netlink message so that permission
+ * checking can be performed when the message is processed. The security
+ * information can be saved using the eff_cap field of the
+ * netlink_skb_parms structure. Also may be used to provide fine
+ * grained control over message transmission.
+ * @sk associated sock of task sending the message.
+ * @skb contains the sk_buff structure for the netlink message.
+ * Return 0 if the information was successfully saved and message
+ * is allowed to be transmitted.
+ *
+ * Security hooks for Unix domain networking.
+ *
+ * @unix_stream_connect:
+ * Check permissions before establishing a Unix domain stream connection
+ * between @sock and @other.
+ * @sock contains the sock structure.
+ * @other contains the peer sock structure.
+ * @newsk contains the new sock structure.
+ * Return 0 if permission is granted.
+ * @unix_may_send:
+ * Check permissions before connecting or sending datagrams from @sock to
+ * @other.
+ * @sock contains the socket structure.
+ * @other contains the peer socket structure.
+ * Return 0 if permission is granted.
+ *
+ * The @unix_stream_connect and @unix_may_send hooks were necessary because
+ * Linux provides an alternative to the conventional file name space for Unix
+ * domain sockets. Whereas binding and connecting to sockets in the file name
+ * space is mediated by the typical file permissions (and caught by the mknod
+ * and permission hooks in inode_security_ops), binding and connecting to
+ * sockets in the abstract name space is completely unmediated. Sufficient
+ * control of Unix domain sockets in the abstract name space isn't possible
+ * using only the socket layer hooks, since we need to know the actual target
+ * socket, which is not looked up until we are inside the af_unix code.
+ *
+ * Security hooks for socket operations.
+ *
+ * @socket_create:
+ * Check permissions prior to creating a new socket.
+ * @family contains the requested protocol family.
+ * @type contains the requested communications type.
+ * @protocol contains the requested protocol.
+ * @kern set to 1 if a kernel socket.
+ * Return 0 if permission is granted.
+ * @socket_post_create:
+ * This hook allows a module to update or allocate a per-socket security
+ * structure. Note that the security field was not added directly to the
+ * socket structure, but rather, the socket security information is stored
+ * in the associated inode. Typically, the inode alloc_security hook will
+ * allocate and attach security information to
+ * sock->inode->i_security. This hook may be used to update the
+ * sock->inode->i_security field with additional information that wasn't
+ * available when the inode was allocated.
+ * @sock contains the newly created socket structure.
+ * @family contains the requested protocol family.
+ * @type contains the requested communications type.
+ * @protocol contains the requested protocol.
+ * @kern set to 1 if a kernel socket.
+ * @socket_bind:
+ * Check permission before socket protocol layer bind operation is
+ * performed and the socket @sock is bound to the address specified in the
+ * @address parameter.
+ * @sock contains the socket structure.
+ * @address contains the address to bind to.
+ * @addrlen contains the length of address.
+ * Return 0 if permission is granted.
+ * @socket_connect:
+ * Check permission before socket protocol layer connect operation
+ * attempts to connect socket @sock to a remote address, @address.
+ * @sock contains the socket structure.
+ * @address contains the address of remote endpoint.
+ * @addrlen contains the length of address.
+ * Return 0 if permission is granted.
+ * @socket_listen:
+ * Check permission before socket protocol layer listen operation.
+ * @sock contains the socket structure.
+ * @backlog contains the maximum length for the pending connection queue.
+ * Return 0 if permission is granted.
+ * @socket_accept:
+ * Check permission before accepting a new connection. Note that the new
+ * socket, @newsock, has been created and some information copied to it,
+ * but the accept operation has not actually been performed.
+ * @sock contains the listening socket structure.
+ * @newsock contains the newly created server socket for connection.
+ * Return 0 if permission is granted.
+ * @socket_sendmsg:
+ * Check permission before transmitting a message to another socket.
+ * @sock contains the socket structure.
+ * @msg contains the message to be transmitted.
+ * @size contains the size of message.
+ * Return 0 if permission is granted.
+ * @socket_recvmsg:
+ * Check permission before receiving a message from a socket.
+ * @sock contains the socket structure.
+ * @msg contains the message structure.
+ * @size contains the size of message structure.
+ * @flags contains the operational flags.
+ * Return 0 if permission is granted.
+ * @socket_getsockname:
+ * Check permission before the local address (name) of the socket object
+ * @sock is retrieved.
+ * @sock contains the socket structure.
+ * Return 0 if permission is granted.
+ * @socket_getpeername:
+ * Check permission before the remote address (name) of a socket object
+ * @sock is retrieved.
+ * @sock contains the socket structure.
+ * Return 0 if permission is granted.
+ * @socket_getsockopt:
+ * Check permissions before retrieving the options associated with socket
+ * @sock.
+ * @sock contains the socket structure.
+ * @level contains the protocol level to retrieve option from.
+ * @optname contains the name of option to retrieve.
+ * Return 0 if permission is granted.
+ * @socket_setsockopt:
+ * Check permissions before setting the options associated with socket
+ * @sock.
+ * @sock contains the socket structure.
+ * @level contains the protocol level to set options for.
+ * @optname contains the name of the option to set.
+ * Return 0 if permission is granted.
+ * @socket_shutdown:
+ * Checks permission before all or part of a connection on the socket
+ * @sock is shut down.
+ * @sock contains the socket structure.
+ * @how contains the flag indicating how future sends and receives
+ * are handled.
+ * Return 0 if permission is granted.
+ * @socket_sock_rcv_skb:
+ * Check permissions on incoming network packets. This hook is distinct
+ * from Netfilter's IP input hooks since it is the first time that the
+ * incoming sk_buff @skb has been associated with a particular socket, @sk.
+ * Must not sleep inside this hook because some callers hold spinlocks.
+ * @sk contains the sock (not socket) associated with the incoming sk_buff.
+ * @skb contains the incoming network data.
+ * @socket_getpeersec_stream:
+ * This hook allows the security module to provide peer socket security
+ * state for unix or connected tcp sockets to userspace via getsockopt
+ * SO_PEERSEC. For tcp sockets this can be meaningful if the
+ * socket is associated with an ipsec SA.
+ * @sock is the local socket.
+ * @optval userspace memory where the security state is to be copied.
+ * @optlen userspace int where the module should copy the actual length
+ * of the security state.
+ * @len as input is the maximum length to copy to userspace provided
+ * by the caller.
+ * Return 0 if all is well, otherwise, typical getsockopt return
+ * values.
+ * @socket_getpeersec_dgram:
+ * This hook allows the security module to provide peer socket security
+ * state for udp sockets on a per-packet basis to userspace via
+ * getsockopt SO_PEERSEC. The application must first have indicated
+ * the IP_PASSSEC option via setsockopt. It can then retrieve the
+ * security state returned by this hook for a packet via the SCM_SECURITY
+ * ancillary message type.
+ * @skb is the skbuff for the packet being queried
+ * @secdata is a pointer to a buffer in which to copy the security data
+ * @seclen is the maximum length for @secdata
+ * Return 0 on success, error on failure.
+ * @sk_alloc_security:
+ * Allocate and attach a security structure to the sk->sk_security field,
+ * which is used to copy security attributes between local stream sockets.
+ * @sk_free_security:
+ * Deallocate security structure.
+ * @sk_clone_security:
+ * Clone/copy security structure.
+ * @sk_getsecid:
+ * Retrieve the LSM-specific secid for the sock to enable caching
+ * of network authorizations.
+ * @sock_graft:
+ * Sets the socket's isec sid to the sock's sid.
+ * @inet_conn_request:
+ * Sets the openreq's sid to socket's sid with MLS portion taken
+ * from peer sid.
+ * @inet_csk_clone:
+ * Sets the new child socket's sid to the openreq sid.
+ * @inet_conn_established:
+ * Sets the connection's peersid to the secmark on skb.
+ * @secmark_relabel_packet:
+ * Check if the process should be allowed to relabel packets to
+ * the given secid.
+ * @secmark_refcount_inc:
+ * Tells the LSM to increment the number of secmark labeling rules loaded.
+ * @secmark_refcount_dec:
+ * Tells the LSM to decrement the number of secmark labeling rules loaded.
+ * @req_classify_flow:
+ * Sets the flow's sid to the openreq sid.
+ * @tun_dev_alloc_security:
+ * This hook allows a module to allocate a security structure for a TUN
+ * device.
+ * @security pointer to a security structure pointer.
+ * Returns a zero on success, negative values on failure.
+ * @tun_dev_free_security:
+ * This hook allows a module to free the security structure for a TUN
+ * device.
+ * @security pointer to the TUN device's security structure
+ * @tun_dev_create:
+ * Check permissions prior to creating a new TUN device.
+ * @tun_dev_attach_queue:
+ * Check permissions prior to attaching to a TUN device queue.
+ * @security pointer to the TUN device's security structure.
+ * @tun_dev_attach:
+ * This hook can be used by the module to update any security state
+ * associated with the TUN device's sock structure.
+ * @sk contains the existing sock structure.
+ * @security pointer to the TUN device's security structure.
+ * @tun_dev_open:
+ * This hook can be used by the module to update any security state
+ * associated with the TUN device's security structure.
+ * @security pointer to the TUN device's security structure.
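To illustrate the socket hooks above, a hypothetical socket_connect implementation (not part of this patch); the signature follows the @sock/@address/@addrlen parameters documented above, and the AF_INET6/CAP_NET_ADMIN policy is invented.

#include <linux/net.h>		/* struct socket */
#include <linux/socket.h>	/* struct sockaddr, AF_INET6 */
#include <linux/capability.h>
#include <linux/errno.h>

static int example_socket_connect(struct socket *sock,
				  struct sockaddr *address, int addrlen)
{
	/* A real module would also validate @addrlen before use. */
	if (address->sa_family == AF_INET6 && !capable(CAP_NET_ADMIN))
		return -EACCES;
	return 0;
}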
+ *
+ * Security hooks for XFRM operations.
+ *
+ * @xfrm_policy_alloc_security:
+ * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
+ * Database used by the XFRM system.
+ * @sec_ctx contains the security context information being provided by
+ * the user-level policy update program (e.g., setkey).
+ * Allocate a security structure to the xp->security field; the security
+ * field is initialized to NULL when the xfrm_policy is allocated.
+ * Return 0 if operation was successful (memory to allocate, legal context)
+ * @gfp is to specify the context for the allocation
+ * @xfrm_policy_clone_security:
+ * @old_ctx contains an existing xfrm_sec_ctx.
+ * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
+ * Allocate a security structure in new_ctxp that contains the
+ * information from the old_ctx structure.
+ * Return 0 if operation was successful (memory to allocate).
+ * @xfrm_policy_free_security:
+ * @ctx contains the xfrm_sec_ctx
+ * Deallocate xp->security.
+ * @xfrm_policy_delete_security:
+ * @ctx contains the xfrm_sec_ctx.
+ * Authorize deletion of xp->security.
+ * @xfrm_state_alloc:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @sec_ctx contains the security context information being provided by
+ * the user-level SA generation program (e.g., setkey or racoon).
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to sec_ctx. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_alloc_acquire:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @polsec contains the policy's security context.
+ * @secid contains the secid from which to take the mls portion of the
+ * context.
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to secid. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_free_security:
+ * @x contains the xfrm_state.
+ * Deallocate x->security.
+ * @xfrm_state_delete_security:
+ * @x contains the xfrm_state.
+ * Authorize deletion of x->security.
+ * @xfrm_policy_lookup:
+ * @ctx contains the xfrm_sec_ctx for which the access control is being
+ * checked.
+ * @fl_secid contains the flow security label that is used to authorize
+ * access to the policy xp.
+ * @dir contains the direction of the flow (input or output).
+ * Check permission when a flow selects a xfrm_policy for processing
+ * XFRMs on a packet. The hook is called when selecting either a
+ * per-socket policy or a generic xfrm policy.
+ * Return 0 if permission is granted, -ESRCH otherwise, or -errno
+ * on other errors.
+ * @xfrm_state_pol_flow_match:
+ * @x contains the state to match.
+ * @xp contains the policy to check for a match.
+ * @fl contains the flow to check for a match.
+ * Return 1 if there is a match.
+ * @xfrm_decode_session:
+ * @skb points to skb to decode.
+ * @secid points to the flow key secid to set.
+ * @ckall says if all xfrms used should be checked for same secid.
+ * Return 0 if ckall is zero or all xfrms used have the same secid.
+ *
+ * Security hooks affecting all Key Management operations
+ *
+ * @key_alloc:
+ * Permit allocation of a key and assign security data. Note that key does
+ * not have a serial number assigned at this point.
+ * @key points to the key.
+ * @flags is the allocation flags
+ * Return 0 if permission is granted, -ve error otherwise.
+ * @key_free:
+ * Notification of destruction; free security data.
+ * @key points to the key.
+ * No return value.
+ * @key_permission:
+ * See whether a specific operational right is granted to a process on a
+ * key.
+ * @key_ref refers to the key (key pointer + possession attribute bit).
+ * @cred points to the credentials to provide the context against which to
+ * evaluate the security data on the key.
+ * @perm describes the combination of permissions required of this key.
+ * Return 0 if permission is granted, -ve error otherwise.
+ * @key_getsecurity:
+ * Get a textual representation of the security context attached to a key
+ * for the purposes of honouring KEYCTL_GETSECURITY. This function
+ * allocates the storage for the NUL-terminated string and the caller
+ * should free it.
+ * @key points to the key to be queried.
+ * @_buffer points to a pointer that should be set to point to the
+ * resulting string (unless there is no label or an error occurs).
+ * Return the length of the string (including terminating NUL) or -ve if
+ * an error.
+ * May also return 0 (and a NULL buffer pointer) if there is no label.
+ *
+ * Security hooks affecting all System V IPC operations.
+ *
+ * @ipc_permission:
+ * Check permissions for access to IPC
+ * @ipcp contains the kernel IPC permission structure
+ * @flag contains the desired (requested) permission set
+ * Return 0 if permission is granted.
+ * @ipc_getsecid:
+ * Get the secid associated with the ipc object.
+ * @ipcp contains the kernel IPC permission structure.
+ * @secid contains a pointer to the location where result will be saved.
+ * In case of failure, @secid will be set to zero.
+ *
+ * Security hooks for individual messages held in System V IPC message queues
+ * @msg_msg_alloc_security:
+ * Allocate and attach a security structure to the msg->security field.
+ * The security field is initialized to NULL when the structure is first
+ * created.
+ * @msg contains the message structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @msg_msg_free_security:
+ * Deallocate the security structure for this message.
+ * @msg contains the message structure to be modified.
+ *
+ * Security hooks for System V IPC Message Queues
+ *
+ * @msg_queue_alloc_security:
+ * Allocate and attach a security structure to the
+ * msq->q_perm.security field. The security field is initialized to
+ * NULL when the structure is first created.
+ * @msq contains the message queue structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @msg_queue_free_security:
+ * Deallocate security structure for this message queue.
+ * @msq contains the message queue structure to be modified.
+ * @msg_queue_associate:
+ * Check permission when a message queue is requested through the
+ * msgget system call. This hook is only called when returning the
+ * message queue identifier for an existing message queue, not when a
+ * new message queue is created.
+ * @msq contains the message queue to act upon.
+ * @msqflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgctl:
+ * Check permission when a message control operation specified by @cmd
+ * is to be performed on the message queue @msq.
+ * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
+ * @msq contains the message queue to act upon. May be NULL.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgsnd:
+ * Check permission before a message, @msg, is enqueued on the message
+ * queue, @msq.
+ * @msq contains the message queue to send message to.
+ * @msg contains the message to be enqueued.
+ * @msqflg contains operational flags.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgrcv:
+ * Check permission before a message, @msg, is removed from the message
+ * queue, @msq. The @target task structure contains a pointer to the
+ * process that will be receiving the message (not equal to the current
+ * process when inline receives are being performed).
+ * @msq contains the message queue to retrieve message from.
+ * @msg contains the message destination.
+ * @target contains the task structure for recipient process.
+ * @type contains the type of message requested.
+ * @mode contains the operational flags.
+ * Return 0 if permission is granted.
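A sketch of the msg_queue_msgctl hook described above (hypothetical, not part of this patch); it honours the note that @msq may be NULL by inspecting only @cmd.

#include <linux/ipc.h>		/* IPC_RMID */
#include <linux/capability.h>
#include <linux/errno.h>

struct msg_queue;		/* used only as an opaque pointer here */

static int example_msg_queue_msgctl(struct msg_queue *msq, int cmd)
{
	/* @msq may be NULL (e.g. IPC_INFO), so only @cmd is inspected. */
	if (cmd == IPC_RMID && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}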
+ *
+ * Security hooks for System V Shared Memory Segments
+ *
+ * @shm_alloc_security:
+ * Allocate and attach a security structure to the shp->shm_perm.security
+ * field. The security field is initialized to NULL when the structure is
+ * first created.
+ * @shp contains the shared memory structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @shm_free_security:
+ * Deallocate the security struct for this memory segment.
+ * @shp contains the shared memory structure to be modified.
+ * @shm_associate:
+ * Check permission when a shared memory region is requested through the
+ * shmget system call. This hook is only called when returning the shared
+ * memory region identifier for an existing region, not when a new shared
+ * memory region is created.
+ * @shp contains the shared memory structure to be modified.
+ * @shmflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @shm_shmctl:
+ * Check permission when a shared memory control operation specified by
+ * @cmd is to be performed on the shared memory region @shp.
+ * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
+ * @shp contains shared memory structure to be modified.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @shm_shmat:
+ * Check permissions prior to allowing the shmat system call to attach the
+ * shared memory segment @shp to the data segment of the calling process.
+ * The attaching address is specified by @shmaddr.
+ * @shp contains the shared memory structure to be modified.
+ * @shmaddr contains the address to attach memory region to.
+ * @shmflg contains the operational flags.
+ * Return 0 if permission is granted.
+ *
+ * Security hooks for System V Semaphores
+ *
+ * @sem_alloc_security:
+ * Allocate and attach a security structure to the sma->sem_perm.security
+ * field. The security field is initialized to NULL when the structure is
+ * first created.
+ * @sma contains the semaphore structure
+ * Return 0 if operation was successful and permission is granted.
+ * @sem_free_security:
+ * deallocate security struct for this semaphore
+ * @sma contains the semaphore structure.
+ * @sem_associate:
+ * Check permission when a semaphore is requested through the semget
+ * system call. This hook is only called when returning the semaphore
+ * identifier for an existing semaphore, not when a new one must be
+ * created.
+ * @sma contains the semaphore structure.
+ * @semflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @sem_semctl:
+ * Check permission when a semaphore operation specified by @cmd is to be
+ * performed on the semaphore @sma. The @sma may be NULL, e.g. for
+ * IPC_INFO or SEM_INFO.
+ * @sma contains the semaphore structure. May be NULL.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @sem_semop:
+ * Check permissions before performing operations on members of the
+ * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
+ * may be modified.
+ * @sma contains the semaphore structure.
+ * @sops contains the operations to perform.
+ * @nsops contains the number of operations to perform.
+ * @alter contains the flag indicating whether changes are to be made.
+ * Return 0 if permission is granted.
+ *
+ * @binder_set_context_mgr:
+ * Check whether @mgr is allowed to be the binder context manager.
+ * @mgr contains the task_struct for the task being registered.
+ * Return 0 if permission is granted.
+ * @binder_transaction:
+ * Check whether @from is allowed to invoke a binder transaction call
+ * to @to.
+ * @from contains the task_struct for the sending task.
+ * @to contains the task_struct for the receiving task.
+ * @binder_transfer_binder:
+ * Check whether @from is allowed to transfer a binder reference to @to.
+ * @from contains the task_struct for the sending task.
+ * @to contains the task_struct for the receiving task.
+ * @binder_transfer_file:
+ * Check whether @from is allowed to transfer @file to @to.
+ * @from contains the task_struct for the sending task.
+ * @file contains the struct file being transferred.
+ * @to contains the task_struct for the receiving task.
+ *
+ * @ptrace_access_check:
+ * Check permission before allowing the current process to trace the
+ * @child process.
+ * Security modules may also want to perform a process tracing check
+ * during an execve in the bprm_set_creds hook of binprm_security_ops
+ * if the process is being traced and its security attributes would
+ * be changed by the execve.
+ * @child contains the task_struct structure for the target process.
+ * @mode contains the PTRACE_MODE flags indicating the form of access.
+ * Return 0 if permission is granted.
+ * @ptrace_traceme:
+ * Check that the @parent process has sufficient permission to trace the
+ * current process before allowing the current process to present itself
+ * to the @parent process for tracing.
+ * @parent contains the task_struct structure for debugger process.
+ * Return 0 if permission is granted.
+ * @capget:
+ * Get the @effective, @inheritable, and @permitted capability sets for
+ * the @target process. The hook may also perform permission checking to
+ * determine if the current process is allowed to see the capability sets
+ * of the @target process.
+ * @target contains the task_struct structure for target process.
+ * @effective contains the effective capability set.
+ * @inheritable contains the inheritable capability set.
+ * @permitted contains the permitted capability set.
+ * Return 0 if the capability sets were successfully obtained.
+ * @capset:
+ * Set the @effective, @inheritable, and @permitted capability sets for
+ * the current process.
+ * @new contains the new credentials structure for target process.
+ * @old contains the current credentials structure for target process.
+ * @effective contains the effective capability set.
+ * @inheritable contains the inheritable capability set.
+ * @permitted contains the permitted capability set.
+ * Return 0 and update @new if permission is granted.
+ * @capable:
+ * Check whether the credentials @cred provide the @cap capability in the
+ * user namespace @ns.
+ * @cred contains the credentials to use.
+ * @ns contains the user namespace we want the capability in.
+ * @cap contains the capability <include/linux/capability.h>.
+ * @audit indicates whether to write an audit message or not.
+ * Return 0 if the capability is granted.
+ * @syslog:
+ * Check permission before accessing the kernel message ring or changing
+ * logging to the console.
+ * See the syslog(2) manual page for an explanation of the @type values.
+ * @type contains the type of action.
+ * Return 0 if permission is granted.
+ * @settime:
+ * Check permission to change the system time.
+ * struct timespec and timezone are defined in include/linux/time.h
+ * @ts contains new time
+ * @tz contains new timezone
+ * Return 0 if permission is granted.
+ * @vm_enough_memory:
+ * Check permissions for allocating a new virtual mapping.
+ * @mm contains the mm struct it is being added to.
+ * @pages contains the number of pages.
+ * Return 0 if permission is granted.
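For the settime hook above, a minimal sketch (hypothetical, not part of this patch) that mirrors the usual capability-based check; the signature matches the union member shown further down.

#include <linux/time.h>		/* struct timespec, struct timezone */
#include <linux/capability.h>
#include <linux/errno.h>

static int example_settime(const struct timespec *ts,
			   const struct timezone *tz)
{
	/* Only tasks with CAP_SYS_TIME may set the clock or timezone. */
	if (!capable(CAP_SYS_TIME))
		return -EPERM;
	return 0;
}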
+ *
+ * @ismaclabel:
+ * Check if the extended attribute specified by @name
+ * represents a MAC label. Returns 1 if @name is a MAC
+ * attribute, otherwise returns 0.
+ * @name full extended attribute name to check against
+ * LSM as a MAC label.
+ *
+ * @secid_to_secctx:
+ * Convert secid to security context. If secdata is NULL the length of
+ * the result will be returned in seclen, but no secdata will be returned.
+ * This does mean that the length could change between calls to check the
+ * length and the next call which actually allocates and returns the
+ * secdata.
+ * @secid contains the security ID.
+ * @secdata contains the pointer that stores the converted security
+ * context.
+ * @seclen pointer which contains the length of the data
+ * @secctx_to_secid:
+ * Convert security context to secid.
+ * @secid contains the pointer to the generated security ID.
+ * @secdata contains the security context.
+ *
+ * @release_secctx:
+ * Release the security context.
+ * @secdata contains the security context.
+ * @seclen contains the length of the security context.
+ *
+ * Security hooks for Audit
+ *
+ * @audit_rule_init:
+ * Allocate and initialize an LSM audit rule structure.
+ * @field contains the required Audit action.
+ * Field flags are defined in include/linux/audit.h
+ * @op contains the operator the rule uses.
+ * @rulestr contains the context string to which the rule will be applied.
+ * @lsmrule contains a pointer to receive the result.
+ * Return 0 if @lsmrule has been successfully set,
+ * -EINVAL in case of an invalid rule.
+ *
+ * @audit_rule_known:
+ * Specifies whether given @rule contains any fields related to
+ * current LSM.
+ * @rule contains the audit rule of interest.
+ * Return 1 in case of relation found, 0 otherwise.
+ *
+ * @audit_rule_match:
+ * Determine if given @secid matches a rule previously approved
+ * by @audit_rule_known.
+ * @secid contains the security id in question.
+ * @field contains the field which relates to current LSM.
+ * @op contains the operator that will be used for matching.
+ * @rule points to the audit rule that will be checked against.
+ * @actx points to the audit context associated with the check.
+ * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
+ *
+ * @audit_rule_free:
+ * Deallocate the LSM audit rule structure previously allocated by
+ * audit_rule_init.
+ * @rule contains the allocated rule
+ *
+ * @inode_notifysecctx:
+ * Notify the security module of what the security context of an inode
+ * should be. Initializes the incore security context managed by the
+ * security module for this inode. Example usage: NFS client invokes
+ * this hook to initialize the security context in its incore inode to the
+ * value provided by the server for the file when the server returned the
+ * file's attributes to the client.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_setsecctx:
+ * Change the security context of an inode. Updates the
+ * incore security context managed by the security module and invokes the
+ * fs code as needed (via __vfs_setxattr_noperm) to update any backing
+ * xattrs that represent the context. Example usage: NFS server invokes
+ * this hook to change the security context in its incore inode and on the
+ * backing filesystem to a value provided by the client on a SETATTR
+ * operation.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @dentry contains the dentry of the inode we wish to set the security
+ * context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_getsecctx:
+ * On success, returns 0 and fills out @ctx and @ctxlen with the security
+ * context for the given @inode.
+ *
+ * @inode we wish to get the security context of.
+ * @ctx is a pointer in which to place the allocated security context.
+ * @ctxlen points to the place to put the length of @ctx.
+ * This is the main security structure.
+ */
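For orientation only, a sketch of how a module might wire a few of the hooks documented above into the new hook lists. It assumes the LSM_HOOK_INIT() and security_add_hooks() helpers that this header introduces further down (not visible in this hunk) and reuses the hypothetical example_* functions from the sketches above; it is not part of the patch.

#include <linux/init.h>
#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/lsm_hooks.h>	/* assumed: the header declaring these types */

static struct security_hook_list example_hooks[] = {
	LSM_HOOK_INIT(sb_umount, example_sb_umount),
	LSM_HOOK_INIT(inode_init_security, example_inode_init_security),
	LSM_HOOK_INIT(file_ioctl, example_file_ioctl),
	LSM_HOOK_INIT(task_setnice, example_task_setnice),
};

static int __init example_lsm_init(void)
{
	/* Hooks are walked in registration order at each call site. */
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks));
	return 0;
}
security_initcall(example_lsm_init);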
+
+union security_list_options {
+ int (*binder_set_context_mgr)(struct task_struct *mgr);
+ int (*binder_transaction)(struct task_struct *from,
+ struct task_struct *to);
+ int (*binder_transfer_binder)(struct task_struct *from,
+ struct task_struct *to);
+ int (*binder_transfer_file)(struct task_struct *from,
+ struct task_struct *to,
+ struct file *file);
+
+ int (*ptrace_access_check)(struct task_struct *child,
+ unsigned int mode);
+ int (*ptrace_traceme)(struct task_struct *parent);
+ int (*capget)(struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted);
+ int (*capset)(struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted);
+ int (*capable)(const struct cred *cred, struct user_namespace *ns,
+ int cap, int audit);
+ int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
+ int (*quota_on)(struct dentry *dentry);
+ int (*syslog)(int type);
+ int (*settime)(const struct timespec *ts, const struct timezone *tz);
+ int (*vm_enough_memory)(struct mm_struct *mm, long pages);
+
+ int (*bprm_set_creds)(struct linux_binprm *bprm);
+ int (*bprm_check_security)(struct linux_binprm *bprm);
+ int (*bprm_secureexec)(struct linux_binprm *bprm);
+ void (*bprm_committing_creds)(struct linux_binprm *bprm);
+ void (*bprm_committed_creds)(struct linux_binprm *bprm);
+
+ int (*sb_alloc_security)(struct super_block *sb);
+ void (*sb_free_security)(struct super_block *sb);
+ int (*sb_copy_data)(char *orig, char *copy);
+ int (*sb_remount)(struct super_block *sb, void *data);
+ int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
+ int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
+ int (*sb_statfs)(struct dentry *dentry);
+ int (*sb_mount)(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data);
+ int (*sb_umount)(struct vfsmount *mnt, int flags);
+ int (*sb_pivotroot)(struct path *old_path, struct path *new_path);
+ int (*sb_set_mnt_opts)(struct super_block *sb,
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags);
+ int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
+ struct super_block *newsb);
+ int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
+ int (*dentry_init_security)(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen);
+
+
+#ifdef CONFIG_SECURITY_PATH
+ int (*path_unlink)(struct path *dir, struct dentry *dentry);
+ int (*path_mkdir)(struct path *dir, struct dentry *dentry,
+ umode_t mode);
+ int (*path_rmdir)(struct path *dir, struct dentry *dentry);
+ int (*path_mknod)(struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev);
+ int (*path_truncate)(struct path *path);
+ int (*path_symlink)(struct path *dir, struct dentry *dentry,
+ const char *old_name);
+ int (*path_link)(struct dentry *old_dentry, struct path *new_dir,
+ struct dentry *new_dentry);
+ int (*path_rename)(struct path *old_dir, struct dentry *old_dentry,
+ struct path *new_dir,
+ struct dentry *new_dentry);
+ int (*path_chmod)(struct path *path, umode_t mode);
+ int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid);
+ int (*path_chroot)(struct path *path);
+#endif
+
+ int (*inode_alloc_security)(struct inode *inode);
+ void (*inode_free_security)(struct inode *inode);
+ int (*inode_init_security)(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ const char **name, void **value,
+ size_t *len);
+ int (*inode_create)(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
+ int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry);
+ int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
+ int (*inode_symlink)(struct inode *dir, struct dentry *dentry,
+ const char *old_name);
+ int (*inode_mkdir)(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
+ int (*inode_rmdir)(struct inode *dir, struct dentry *dentry);
+ int (*inode_mknod)(struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev);
+ int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry);
+ int (*inode_readlink)(struct dentry *dentry);
+ int (*inode_follow_link)(struct dentry *dentry, struct inode *inode,
+ bool rcu);
+ int (*inode_permission)(struct inode *inode, int mask);
+ int (*inode_setattr)(struct dentry *dentry, struct iattr *attr);
+ int (*inode_getattr)(const struct path *path);
+ int (*inode_setxattr)(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+ void (*inode_post_setxattr)(struct dentry *dentry, const char *name,
+ const void *value, size_t size,
+ int flags);
+ int (*inode_getxattr)(struct dentry *dentry, const char *name);
+ int (*inode_listxattr)(struct dentry *dentry);
+ int (*inode_removexattr)(struct dentry *dentry, const char *name);
+ int (*inode_need_killpriv)(struct dentry *dentry);
+ int (*inode_killpriv)(struct dentry *dentry);
+ int (*inode_getsecurity)(const struct inode *inode, const char *name,
+ void **buffer, bool alloc);
+ int (*inode_setsecurity)(struct inode *inode, const char *name,
+ const void *value, size_t size,
+ int flags);
+ int (*inode_listsecurity)(struct inode *inode, char *buffer,
+ size_t buffer_size);
+ void (*inode_getsecid)(const struct inode *inode, u32 *secid);
+
+ int (*file_permission)(struct file *file, int mask);
+ int (*file_alloc_security)(struct file *file);
+ void (*file_free_security)(struct file *file);
+ int (*file_ioctl)(struct file *file, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap_addr)(unsigned long addr);
+ int (*mmap_file)(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags);
+ int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot,
+ unsigned long prot);
+ int (*file_lock)(struct file *file, unsigned int cmd);
+ int (*file_fcntl)(struct file *file, unsigned int cmd,
+ unsigned long arg);
+ void (*file_set_fowner)(struct file *file);
+ int (*file_send_sigiotask)(struct task_struct *tsk,
+ struct fown_struct *fown, int sig);
+ int (*file_receive)(struct file *file);
+ int (*file_open)(struct file *file, const struct cred *cred);
+
+ int (*task_create)(unsigned long clone_flags);
+ void (*task_free)(struct task_struct *task);
+ int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
+ void (*cred_free)(struct cred *cred);
+ int (*cred_prepare)(struct cred *new, const struct cred *old,
+ gfp_t gfp);
+ void (*cred_transfer)(struct cred *new, const struct cred *old);
+ int (*kernel_act_as)(struct cred *new, u32 secid);
+ int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
+ int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
+ int (*kernel_module_request)(char *kmod_name);
+ int (*kernel_module_from_file)(struct file *file);
+ int (*task_fix_setuid)(struct cred *new, const struct cred *old,
+ int flags);
+ int (*task_setpgid)(struct task_struct *p, pid_t pgid);
+ int (*task_getpgid)(struct task_struct *p);
+ int (*task_getsid)(struct task_struct *p);
+ void (*task_getsecid)(struct task_struct *p, u32 *secid);
+ int (*task_setnice)(struct task_struct *p, int nice);
+ int (*task_setioprio)(struct task_struct *p, int ioprio);
+ int (*task_getioprio)(struct task_struct *p);
+ int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim);
+ int (*task_setscheduler)(struct task_struct *p);
+ int (*task_getscheduler)(struct task_struct *p);
+ int (*task_movememory)(struct task_struct *p);
+ int (*task_kill)(struct task_struct *p, struct siginfo *info,
+ int sig, u32 secid);
+ int (*task_wait)(struct task_struct *p);
+ int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+ void (*task_to_inode)(struct task_struct *p, struct inode *inode);
+
+ int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag);
+ void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid);
+
+ int (*msg_msg_alloc_security)(struct msg_msg *msg);
+ void (*msg_msg_free_security)(struct msg_msg *msg);
+
+ int (*msg_queue_alloc_security)(struct msg_queue *msq);
+ void (*msg_queue_free_security)(struct msg_queue *msq);
+ int (*msg_queue_associate)(struct msg_queue *msq, int msqflg);
+ int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd);
+ int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg,
+ int msqflg);
+ int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg,
+ struct task_struct *target, long type,
+ int mode);
+
+ int (*shm_alloc_security)(struct shmid_kernel *shp);
+ void (*shm_free_security)(struct shmid_kernel *shp);
+ int (*shm_associate)(struct shmid_kernel *shp, int shmflg);
+ int (*shm_shmctl)(struct shmid_kernel *shp, int cmd);
+ int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr,
+ int shmflg);
+
+ int (*sem_alloc_security)(struct sem_array *sma);
+ void (*sem_free_security)(struct sem_array *sma);
+ int (*sem_associate)(struct sem_array *sma, int semflg);
+ int (*sem_semctl)(struct sem_array *sma, int cmd);
+ int (*sem_semop)(struct sem_array *sma, struct sembuf *sops,
+ unsigned nsops, int alter);
+
+ int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
+
+ void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
+
+ int (*getprocattr)(struct task_struct *p, char *name, char **value);
+ int (*setprocattr)(struct task_struct *p, char *name, void *value,
+ size_t size);
+ int (*ismaclabel)(const char *name);
+ int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
+ int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
+ void (*release_secctx)(char *secdata, u32 seclen);
+
+ int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
+ int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
+ int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
+
+#ifdef CONFIG_SECURITY_NETWORK
+ int (*unix_stream_connect)(struct sock *sock, struct sock *other,
+ struct sock *newsk);
+ int (*unix_may_send)(struct socket *sock, struct socket *other);
+
+ int (*socket_create)(int family, int type, int protocol, int kern);
+ int (*socket_post_create)(struct socket *sock, int family, int type,
+ int protocol, int kern);
+ int (*socket_bind)(struct socket *sock, struct sockaddr *address,
+ int addrlen);
+ int (*socket_connect)(struct socket *sock, struct sockaddr *address,
+ int addrlen);
+ int (*socket_listen)(struct socket *sock, int backlog);
+ int (*socket_accept)(struct socket *sock, struct socket *newsock);
+ int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg,
+ int size);
+ int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg,
+ int size, int flags);
+ int (*socket_getsockname)(struct socket *sock);
+ int (*socket_getpeername)(struct socket *sock);
+ int (*socket_getsockopt)(struct socket *sock, int level, int optname);
+ int (*socket_setsockopt)(struct socket *sock, int level, int optname);
+ int (*socket_shutdown)(struct socket *sock, int how);
+ int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
+ int (*socket_getpeersec_stream)(struct socket *sock,
+ char __user *optval,
+ int __user *optlen, unsigned len);
+ int (*socket_getpeersec_dgram)(struct socket *sock,
+ struct sk_buff *skb, u32 *secid);
+ int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority);
+ void (*sk_free_security)(struct sock *sk);
+ void (*sk_clone_security)(const struct sock *sk, struct sock *newsk);
+ void (*sk_getsecid)(struct sock *sk, u32 *secid);
+ void (*sock_graft)(struct sock *sk, struct socket *parent);
+ int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req);
+ void (*inet_csk_clone)(struct sock *newsk,
+ const struct request_sock *req);
+ void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
+ int (*secmark_relabel_packet)(u32 secid);
+ void (*secmark_refcount_inc)(void);
+ void (*secmark_refcount_dec)(void);
+ void (*req_classify_flow)(const struct request_sock *req,
+ struct flowi *fl);
+ int (*tun_dev_alloc_security)(void **security);
+ void (*tun_dev_free_security)(void *security);
+ int (*tun_dev_create)(void);
+ int (*tun_dev_attach_queue)(void *security);
+ int (*tun_dev_attach)(struct sock *sk, void *security);
+ int (*tun_dev_open)(void *security);
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp);
+ int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctx);
+ void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx);
+ int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx);
+ int (*xfrm_state_alloc)(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx);
+ int (*xfrm_state_alloc_acquire)(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid);
+ void (*xfrm_state_free_security)(struct xfrm_state *x);
+ int (*xfrm_state_delete_security)(struct xfrm_state *x);
+ int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid,
+ u8 dir);
+ int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
+ int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+ /* key management security hooks */
+#ifdef CONFIG_KEYS
+ int (*key_alloc)(struct key *key, const struct cred *cred,
+ unsigned long flags);
+ void (*key_free)(struct key *key);
+ int (*key_permission)(key_ref_t key_ref, const struct cred *cred,
+ unsigned perm);
+ int (*key_getsecurity)(struct key *key, char **_buffer);
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+ int (*audit_rule_init)(u32 field, u32 op, char *rulestr,
+ void **lsmrule);
+ int (*audit_rule_known)(struct audit_krule *krule);
+ int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule,
+ struct audit_context *actx);
+ void (*audit_rule_free)(void *lsmrule);
+#endif /* CONFIG_AUDIT */
+};
+
+struct security_hook_heads {
+ struct list_head binder_set_context_mgr;
+ struct list_head binder_transaction;
+ struct list_head binder_transfer_binder;
+ struct list_head binder_transfer_file;
+ struct list_head ptrace_access_check;
+ struct list_head ptrace_traceme;
+ struct list_head capget;
+ struct list_head capset;
+ struct list_head capable;
+ struct list_head quotactl;
+ struct list_head quota_on;
+ struct list_head syslog;
+ struct list_head settime;
+ struct list_head vm_enough_memory;
+ struct list_head bprm_set_creds;
+ struct list_head bprm_check_security;
+ struct list_head bprm_secureexec;
+ struct list_head bprm_committing_creds;
+ struct list_head bprm_committed_creds;
+ struct list_head sb_alloc_security;
+ struct list_head sb_free_security;
+ struct list_head sb_copy_data;
+ struct list_head sb_remount;
+ struct list_head sb_kern_mount;
+ struct list_head sb_show_options;
+ struct list_head sb_statfs;
+ struct list_head sb_mount;
+ struct list_head sb_umount;
+ struct list_head sb_pivotroot;
+ struct list_head sb_set_mnt_opts;
+ struct list_head sb_clone_mnt_opts;
+ struct list_head sb_parse_opts_str;
+ struct list_head dentry_init_security;
+#ifdef CONFIG_SECURITY_PATH
+ struct list_head path_unlink;
+ struct list_head path_mkdir;
+ struct list_head path_rmdir;
+ struct list_head path_mknod;
+ struct list_head path_truncate;
+ struct list_head path_symlink;
+ struct list_head path_link;
+ struct list_head path_rename;
+ struct list_head path_chmod;
+ struct list_head path_chown;
+ struct list_head path_chroot;
+#endif
+ struct list_head inode_alloc_security;
+ struct list_head inode_free_security;
+ struct list_head inode_init_security;
+ struct list_head inode_create;
+ struct list_head inode_link;
+ struct list_head inode_unlink;
+ struct list_head inode_symlink;
+ struct list_head inode_mkdir;
+ struct list_head inode_rmdir;
+ struct list_head inode_mknod;
+ struct list_head inode_rename;
+ struct list_head inode_readlink;
+ struct list_head inode_follow_link;
+ struct list_head inode_permission;
+ struct list_head inode_setattr;
+ struct list_head inode_getattr;
+ struct list_head inode_setxattr;
+ struct list_head inode_post_setxattr;
+ struct list_head inode_getxattr;
+ struct list_head inode_listxattr;
+ struct list_head inode_removexattr;
+ struct list_head inode_need_killpriv;
+ struct list_head inode_killpriv;
+ struct list_head inode_getsecurity;
+ struct list_head inode_setsecurity;
+ struct list_head inode_listsecurity;
+ struct list_head inode_getsecid;
+ struct list_head file_permission;
+ struct list_head file_alloc_security;
+ struct list_head file_free_security;
+ struct list_head file_ioctl;
+ struct list_head mmap_addr;
+ struct list_head mmap_file;
+ struct list_head file_mprotect;
+ struct list_head file_lock;
+ struct list_head file_fcntl;
+ struct list_head file_set_fowner;
+ struct list_head file_send_sigiotask;
+ struct list_head file_receive;
+ struct list_head file_open;
+ struct list_head task_create;
+ struct list_head task_free;
+ struct list_head cred_alloc_blank;
+ struct list_head cred_free;
+ struct list_head cred_prepare;
+ struct list_head cred_transfer;
+ struct list_head kernel_act_as;
+ struct list_head kernel_create_files_as;
+ struct list_head kernel_fw_from_file;
+ struct list_head kernel_module_request;
+ struct list_head kernel_module_from_file;
+ struct list_head task_fix_setuid;
+ struct list_head task_setpgid;
+ struct list_head task_getpgid;
+ struct list_head task_getsid;
+ struct list_head task_getsecid;
+ struct list_head task_setnice;
+ struct list_head task_setioprio;
+ struct list_head task_getioprio;
+ struct list_head task_setrlimit;
+ struct list_head task_setscheduler;
+ struct list_head task_getscheduler;
+ struct list_head task_movememory;
+ struct list_head task_kill;
+ struct list_head task_wait;
+ struct list_head task_prctl;
+ struct list_head task_to_inode;
+ struct list_head ipc_permission;
+ struct list_head ipc_getsecid;
+ struct list_head msg_msg_alloc_security;
+ struct list_head msg_msg_free_security;
+ struct list_head msg_queue_alloc_security;
+ struct list_head msg_queue_free_security;
+ struct list_head msg_queue_associate;
+ struct list_head msg_queue_msgctl;
+ struct list_head msg_queue_msgsnd;
+ struct list_head msg_queue_msgrcv;
+ struct list_head shm_alloc_security;
+ struct list_head shm_free_security;
+ struct list_head shm_associate;
+ struct list_head shm_shmctl;
+ struct list_head shm_shmat;
+ struct list_head sem_alloc_security;
+ struct list_head sem_free_security;
+ struct list_head sem_associate;
+ struct list_head sem_semctl;
+ struct list_head sem_semop;
+ struct list_head netlink_send;
+ struct list_head d_instantiate;
+ struct list_head getprocattr;
+ struct list_head setprocattr;
+ struct list_head ismaclabel;
+ struct list_head secid_to_secctx;
+ struct list_head secctx_to_secid;
+ struct list_head release_secctx;
+ struct list_head inode_notifysecctx;
+ struct list_head inode_setsecctx;
+ struct list_head inode_getsecctx;
+#ifdef CONFIG_SECURITY_NETWORK
+ struct list_head unix_stream_connect;
+ struct list_head unix_may_send;
+ struct list_head socket_create;
+ struct list_head socket_post_create;
+ struct list_head socket_bind;
+ struct list_head socket_connect;
+ struct list_head socket_listen;
+ struct list_head socket_accept;
+ struct list_head socket_sendmsg;
+ struct list_head socket_recvmsg;
+ struct list_head socket_getsockname;
+ struct list_head socket_getpeername;
+ struct list_head socket_getsockopt;
+ struct list_head socket_setsockopt;
+ struct list_head socket_shutdown;
+ struct list_head socket_sock_rcv_skb;
+ struct list_head socket_getpeersec_stream;
+ struct list_head socket_getpeersec_dgram;
+ struct list_head sk_alloc_security;
+ struct list_head sk_free_security;
+ struct list_head sk_clone_security;
+ struct list_head sk_getsecid;
+ struct list_head sock_graft;
+ struct list_head inet_conn_request;
+ struct list_head inet_csk_clone;
+ struct list_head inet_conn_established;
+ struct list_head secmark_relabel_packet;
+ struct list_head secmark_refcount_inc;
+ struct list_head secmark_refcount_dec;
+ struct list_head req_classify_flow;
+ struct list_head tun_dev_alloc_security;
+ struct list_head tun_dev_free_security;
+ struct list_head tun_dev_create;
+ struct list_head tun_dev_attach_queue;
+ struct list_head tun_dev_attach;
+ struct list_head tun_dev_open;
+ struct list_head skb_owned_by;
+#endif /* CONFIG_SECURITY_NETWORK */
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ struct list_head xfrm_policy_alloc_security;
+ struct list_head xfrm_policy_clone_security;
+ struct list_head xfrm_policy_free_security;
+ struct list_head xfrm_policy_delete_security;
+ struct list_head xfrm_state_alloc;
+ struct list_head xfrm_state_alloc_acquire;
+ struct list_head xfrm_state_free_security;
+ struct list_head xfrm_state_delete_security;
+ struct list_head xfrm_policy_lookup;
+ struct list_head xfrm_state_pol_flow_match;
+ struct list_head xfrm_decode_session;
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+#ifdef CONFIG_KEYS
+ struct list_head key_alloc;
+ struct list_head key_free;
+ struct list_head key_permission;
+ struct list_head key_getsecurity;
+#endif /* CONFIG_KEYS */
+#ifdef CONFIG_AUDIT
+ struct list_head audit_rule_init;
+ struct list_head audit_rule_known;
+ struct list_head audit_rule_match;
+ struct list_head audit_rule_free;
+#endif /* CONFIG_AUDIT */
+};
+
+/*
+ * Security module hook list structure.
+ * For use with generic list macros for common operations.
+ */
+struct security_hook_list {
+ struct list_head list;
+ struct list_head *head;
+ union security_list_options hook;
+};
+
+/*
+ * Initializing a security_hook_list structure takes
+ * up a lot of space in a source file. This macro takes
+ * care of the common case and reduces the amount of
+ * text involved.
+ */
+#define LSM_HOOK_INIT(HEAD, HOOK) \
+ { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
+
+extern struct security_hook_heads security_hook_heads;
+
+static inline void security_add_hooks(struct security_hook_list *hooks,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ list_add_tail_rcu(&hooks[i].list, hooks[i].head);
+}
+
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+/*
+ * Assuring the safety of deleting a security module is up to
+ * the security module involved. This may entail ordering the
+ * module's hook list in a particular way, refusing to disable
+ * the module once a policy is loaded or any number of other
+ * actions better imagined than described.
+ *
+ * The name of the configuration option reflects the only module
+ * that currently uses the mechanism. Any developer who thinks
+ * disabling their module is a good idea needs to be at least as
+ * careful as the SELinux team.
+ */
+static inline void security_delete_hooks(struct security_hook_list *hooks,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ list_del_rcu(&hooks[i].list);
+}
+#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
+
+extern int __init security_module_enable(const char *module);
+extern void __init capability_add_hooks(void);
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+void __init yama_add_hooks(void);
+#endif
+
+#endif /* ! __LINUX_LSM_HOOKS_H */
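A minimal sketch of how a security module might use LSM_HOOK_INIT() and security_add_hooks() from the header above. Only the macro, the hook prototypes and security_module_enable() come from this header; the "example" module name, the example_* functions and the security_initcall() wiring are assumptions for illustration.

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/lsm_hooks.h>

    /* All example_* names are invented for illustration. */
    static int example_file_open(struct file *file, const struct cred *cred)
    {
            /* A real module would apply its policy here. */
            return 0;
    }

    static int example_inode_permission(struct inode *inode, int mask)
    {
            return 0;
    }

    static struct security_hook_list example_hooks[] = {
            LSM_HOOK_INIT(file_open, example_file_open),
            LSM_HOOK_INIT(inode_permission, example_inode_permission),
    };

    static int __init example_lsm_init(void)
    {
            /* Only register if this module is the one selected. */
            if (!security_module_enable("example"))
                    return 0;

            security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks));
            return 0;
    }
    security_initcall(example_lsm_init);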
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 1726ccb..4434871 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -40,6 +40,8 @@ struct mbox_client {
void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
};
+struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+ const char *name);
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
int mbox_send_message(struct mbox_chan *chan, void *mssg);
void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
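A rough sketch of the new by-name lookup next to the existing send path. Only mbox_request_channel_byname(), mbox_send_message() and the tx_done prototype come from this header; the other client fields, the "tx" mbox-names entry and the demo_* names are assumptions.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/mailbox_client.h>

    static void demo_tx_done(struct mbox_client *cl, void *mssg, int r)
    {
            dev_info(cl->dev, "message sent, status %d\n", r);
    }

    /* Client fields other than tx_done are assumptions about the framework. */
    static struct mbox_client demo_client = {
            .tx_done  = demo_tx_done,
            .tx_block = true,
            .tx_tout  = 500,        /* ms */
    };

    static int demo_send(struct device *dev, void *msg)
    {
            struct mbox_chan *chan;
            int ret;

            demo_client.dev = dev;
            /* "tx" is a made-up mbox-names entry for this sketch. */
            chan = mbox_request_channel_byname(&demo_client, "tx");
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            ret = mbox_send_message(chan, msg);     /* blocks, tx_block is set */
            mbox_free_channel(chan);
            return ret < 0 ? ret : 0;
    }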
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index d4cf96f..68c4245 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -72,7 +72,7 @@ struct mbox_chan_ops {
*/
struct mbox_controller {
struct device *dev;
- struct mbox_chan_ops *ops;
+ const struct mbox_chan_ops *ops;
struct mbox_chan *chans;
int num_chans;
bool txdone_irq;
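The point of constifying the ops pointer is that a controller driver's table can now live in read-only data. A sketch, with the field names taken from the mainline struct mbox_chan_ops (treat them as assumptions, since the diff only shows the controller side):

    #include <linux/mailbox_controller.h>

    static int demo_send_data(struct mbox_chan *chan, void *data)
    {
            /* Push 'data' to the hardware FIFO; hardware details omitted. */
            return 0;
    }

    static int demo_startup(struct mbox_chan *chan)
    {
            return 0;
    }

    static void demo_shutdown(struct mbox_chan *chan)
    {
    }

    /* With the const-qualified member this table can be shared and
     * placed in .rodata. */
    static const struct mbox_chan_ops demo_chan_ops = {
            .send_data = demo_send_data,
            .startup   = demo_startup,
            .shutdown  = demo_shutdown,
    };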
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 611b69f..1f7bc63 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -54,11 +54,16 @@ struct mbus_dram_target_info
*/
#ifdef CONFIG_PLAT_ORION
extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
+extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void);
#else
static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
{
return NULL;
}
+static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
+{
+ return NULL;
+}
#endif
int mvebu_mbus_save_cpu_target(u32 *store_addr);
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index 66c30a7..11f00cd 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -23,7 +23,8 @@ struct mdio_gpio_platform_data {
bool mdio_active_low;
bool mdo_active_low;
- unsigned int phy_mask;
+ u32 phy_mask;
+ u32 phy_ignore_ta_mask;
int irqs[PHY_MAX_ADDR];
/* reset callback */
int (*reset)(struct mii_bus *bus);
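A board-file sketch of the widened phy_mask and the new phy_ignore_ta_mask; all values are illustrative.

    #include <linux/bitops.h>
    #include <linux/mdio-gpio.h>

    static struct mdio_gpio_platform_data demo_mdio_pdata = {
            .phy_mask           = (u32)~BIT(1), /* probe only PHY address 1 */
            .phy_ignore_ta_mask = BIT(1),       /* PHY 1 never drives turnaround */
    };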
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 164aad1..a16b1f9 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -7,6 +7,42 @@
struct mei_cl_device;
+typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
+ u32 events, void *context);
+
+/**
+ * struct mei_cl_device - MEI device handle
+ * An mei_cl_device pointer is returned from mei_add_device()
+ * and links MEI bus clients to their actual ME host client pointer.
+ * Drivers for MEI devices will get an mei_cl_device pointer
+ * when being probed and shall use it for doing ME bus I/O.
+ *
+ * @dev: linux driver model device pointer
+ * @me_cl: me client
+ * @cl: mei client
+ * @name: device name
+ * @event_work: async work to execute event callback
+ * @event_cb: Drivers register this callback to get asynchronous
+ *	notifications of ME events (e.g. Rx buffer pending).
+ * @event_context: event callback run context
+ * @events: Events bitmask sent to the driver.
+ * @priv_data: client private data
+ */
+struct mei_cl_device {
+ struct device dev;
+
+ struct mei_me_client *me_cl;
+ struct mei_cl *cl;
+ char name[MEI_CL_NAME_SIZE];
+
+ struct work_struct event_work;
+ mei_cl_event_cb_t event_cb;
+ void *event_context;
+ unsigned long events;
+
+ void *priv_data;
+};
+
struct mei_cl_driver {
struct device_driver driver;
const char *name;
@@ -25,11 +61,9 @@ int __mei_cl_driver_register(struct mei_cl_driver *driver,
void mei_cl_driver_unregister(struct mei_cl_driver *driver);
-int mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
-int mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
+ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
+ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
-typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
- u32 events, void *context);
int mei_cl_register_event_cb(struct mei_cl_device *device,
mei_cl_event_cb_t read_cb, void *context);
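A sketch of a bus client using the ssize_t-returning I/O together with the event callback registration above; the buffer size and the demo_* names are assumptions.

    #include <linux/device.h>
    #include <linux/mei_cl_bus.h>

    static void demo_event_cb(struct mei_cl_device *device, u32 events,
                              void *context)
    {
            u8 buf[128];
            ssize_t len;

            /* mei_cl_recv() now returns ssize_t: length or negative errno. */
            len = mei_cl_recv(device, buf, sizeof(buf));
            if (len < 0)
                    dev_err(&device->dev, "read failed: %zd\n", len);
    }

    static int demo_setup(struct mei_cl_device *device)
    {
            return mei_cl_register_event_cb(device, demo_event_cb, NULL);
    }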
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e8cc453..cc4b019 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -21,7 +21,11 @@
#define INIT_PHYSMEM_REGIONS 4
/* Definition of memblock flags. */
-#define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */
+enum {
+ MEMBLOCK_NONE = 0x0, /* No special request */
+ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
+ MEMBLOCK_MIRROR = 0x2, /* mirrored region */
+};
struct memblock_region {
phys_addr_t base;
@@ -61,7 +65,7 @@ extern bool movable_node_enabled;
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- int nid);
+ int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
@@ -75,6 +79,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
+int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
+ulong choose_memblock_flags(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
@@ -85,14 +91,19 @@ int memblock_remove_range(struct memblock_type *type,
phys_addr_t base,
phys_addr_t size);
-void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range(u64 *idx, int nid, ulong flags,
+ struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+ struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
+void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+ phys_addr_t *out_end);
+
/**
* for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
@@ -100,16 +111,17 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
* @type_a: ptr to memblock_type to iterate
* @type_b: ptr to memblock_type which excludes from the iteration
* @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range(i, type_a, type_b, nid, \
+#define for_each_mem_range(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
- for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \
+ for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
- __next_mem_range(&i, nid, type_a, type_b, \
+ __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
/**
@@ -119,19 +131,35 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
* @type_a: ptr to memblock_type to iterate
* @type_b: ptr to memblock_type which excludes from the iteration
* @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range_rev(i, type_a, type_b, nid, \
+#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \
- __next_mem_range_rev(&i, nid, type_a, type_b, \
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
- __next_mem_range_rev(&i, nid, type_a, type_b, \
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
+/**
+ * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over reserved areas of memblock. Available as soon as memblock
+ * is initialized.
+ */
+#define for_each_reserved_mem_region(i, p_start, p_end) \
+ for (i = 0UL, \
+ __next_reserved_mem_region(&i, p_start, p_end); \
+ i != (u64)ULLONG_MAX; \
+ __next_reserved_mem_region(&i, p_start, p_end))
+
#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
@@ -153,6 +181,11 @@ static inline bool movable_node_is_enabled(void)
}
#endif
+static inline bool memblock_is_mirror(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_MIRROR;
+}
+
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
@@ -181,13 +214,14 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
*
* Walks over free (memory && !reserved) areas of memblock. Available as
* soon as memblock is initialized.
*/
-#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
+#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
- nid, p_start, p_end, p_nid)
+ nid, flags, p_start, p_end, p_nid)
/**
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -196,13 +230,15 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
*
* Walks over free (memory && !reserved) areas of memblock in reverse
* order. Available as soon as memblock is initialized.
*/
-#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
+#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
+ p_nid) \
for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
- nid, p_start, p_end, p_nid)
+ nid, flags, p_start, p_end, p_nid)
static inline void memblock_set_region_flags(struct memblock_region *r,
unsigned long flags)
@@ -273,7 +309,8 @@ static inline bool memblock_bottom_up(void) { return false; }
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end);
+ phys_addr_t start, phys_addr_t end,
+ ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -365,6 +402,14 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
#define __initdata_memblock
#endif
+#ifdef CONFIG_MEMTEST
+extern void early_memtest(phys_addr_t start, phys_addr_t end);
+#else
+static inline void early_memtest(phys_addr_t start, phys_addr_t end)
+{
+}
+#endif
+
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
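An illustrative early-boot walk using the flags-aware iterator added above, preferring mirrored memory when the platform reports it. choose_memblock_flags(), MEMBLOCK_MIRROR and for_each_free_mem_range() come from this header; the size-only policy and the demo_* name are assumptions.

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/numa.h>

    static phys_addr_t __init demo_find_range(phys_addr_t size)
    {
            ulong flags = choose_memblock_flags();  /* MEMBLOCK_MIRROR or MEMBLOCK_NONE */
            phys_addr_t start, end;
            u64 i;

            for_each_free_mem_range(i, NUMA_NO_NODE, flags, &start, &end, NULL)
                    if (end - start >= size)
                            return start;

            return 0;
    }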
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7c95af8..73b02b0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -41,6 +41,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
@@ -52,7 +53,29 @@ struct mem_cgroup_reclaim_cookie {
unsigned int generation;
};
+enum mem_cgroup_events_index {
+ MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
+ MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
+ MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
+ MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
+ MEM_CGROUP_EVENTS_NSTATS,
+ /* default hierarchy events */
+ MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
+ MEMCG_HIGH,
+ MEMCG_MAX,
+ MEMCG_OOM,
+ MEMCG_NR_EVENTS,
+};
+
#ifdef CONFIG_MEMCG
+extern struct cgroup_subsys_state *mem_cgroup_root_css;
+
+void mem_cgroup_events(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_index idx,
+ unsigned int nr);
+
+bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
@@ -92,6 +115,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
+extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
@@ -102,6 +126,7 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
* For memory reclaim.
*/
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
+bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
@@ -138,12 +163,10 @@ static inline bool mem_cgroup_disabled(void)
return false;
}
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
- unsigned long *flags);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
- unsigned long *flags);
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx, int val);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
enum mem_cgroup_stat_index idx)
@@ -176,6 +199,20 @@ void mem_cgroup_split_huge_fixup(struct page *head);
#else /* CONFIG_MEMCG */
struct mem_cgroup;
+#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+
+static inline void mem_cgroup_events(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_index idx,
+ unsigned int nr)
+{
+}
+
+static inline bool mem_cgroup_low(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
+{
+ return false;
+}
+
static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask,
struct mem_cgroup **memcgp)
@@ -268,6 +305,11 @@ mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
return 1;
}
+static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+ return true;
+}
+
static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
@@ -285,14 +327,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
- bool *locked, unsigned long *flags)
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
return NULL;
}
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
- bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
}
@@ -348,6 +388,29 @@ enum {
OVER_LIMIT,
};
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
+struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
+void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
+ unsigned long *pdirty, unsigned long *pwriteback);
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
+ unsigned long *pavail,
+ unsigned long *pdirty,
+ unsigned long *pwriteback)
+{
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
@@ -364,7 +427,9 @@ static inline void sock_release_memcg(struct sock *sk)
#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;
-extern int memcg_limited_groups_array_size;
+extern int memcg_nr_cache_ids;
+extern void memcg_get_cache_ids(void);
+extern void memcg_put_cache_ids(void);
/*
* Helper macro to loop through all memcg-specific caches. Callers must still
@@ -372,13 +437,15 @@ extern int memcg_limited_groups_array_size;
* the slab_mutex must be held when looping through those caches
*/
#define for_each_memcg_cache_index(_idx) \
- for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
+ for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
static inline bool memcg_kmem_enabled(void)
{
return static_key_false(&memcg_kmem_enabled_key);
}
+bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+
/*
* In general, we'll do everything in our power to not incur in any overhead
* for non-memcg users for the kmem functions. Not even a function call, if we
@@ -398,15 +465,14 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);
-void memcg_update_array_size(int num_groups);
-
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);
-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
+struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
-int __memcg_cleanup_cache_params(struct kmem_cache *s);
+int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
+ unsigned long nr_pages);
+void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
/**
* memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
@@ -426,6 +492,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
if (!memcg_kmem_enabled())
return true;
+ if (gfp & __GFP_NOACCOUNT)
+ return true;
/*
* __GFP_NOFAIL allocations will move on even if charging is not
* possible. Therefore we don't even try, and have this allocation
@@ -485,6 +553,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
if (!memcg_kmem_enabled())
return cachep;
+ if (gfp & __GFP_NOACCOUNT)
+ return cachep;
if (gfp & __GFP_NOFAIL)
return cachep;
if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
@@ -500,6 +570,13 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
if (memcg_kmem_enabled())
__memcg_kmem_put_cache(cachep);
}
+
+static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+ if (!memcg_kmem_enabled())
+ return NULL;
+ return __mem_cgroup_from_kmem(ptr);
+}
#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -509,6 +586,11 @@ static inline bool memcg_kmem_enabled(void)
return false;
}
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+ return false;
+}
+
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
@@ -529,6 +611,14 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return -1;
}
+static inline void memcg_get_cache_ids(void)
+{
+}
+
+static inline void memcg_put_cache_ids(void)
+{
+}
+
static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
@@ -538,6 +628,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
+
+static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+ return NULL;
+}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
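A sketch of the simplified begin/end page-stat protocol, which no longer threads locked/flags out-parameters through the caller. The helpers and MEM_CGROUP_STAT_DIRTY come from the header above; the caller context is hypothetical, and a real caller holds the page in a state where its memcg binding cannot change.

    #include <linux/memcontrol.h>
    #include <linux/mm_types.h>

    static void demo_account_dirty(struct page *page)
    {
            struct mem_cgroup *memcg;

            memcg = mem_cgroup_begin_page_stat(page);
            mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
            mem_cgroup_end_page_stat(memcg);
    }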
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8f1a419..6ffa0ac 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -192,6 +192,9 @@ extern void get_page_bootmem(unsigned long ingo, struct page *page,
void get_online_mems(void);
void put_online_mems(void);
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -231,6 +234,9 @@ static inline int try_online_node(int nid)
static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}
+static inline void mem_hotplug_begin(void) {}
+static inline void mem_hotplug_done(void) {}
+
#endif /* ! CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 39ed62a..69b6951 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -29,14 +29,15 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int nid);
-extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
extern void mempool_free(void *element, mempool_t *pool);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
- * a slab that is passed in through pool_data.
+ * a slab cache that is passed in through pool_data.
+ * Note: the slab cache may not have a ctor function.
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
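A sketch of wiring a mempool to a slab cache via mempool_alloc_slab()/mempool_free_slab(), honoring the no-constructor note above; the cache name, object type and sizes are made up.

    #include <linux/mempool.h>
    #include <linux/numa.h>
    #include <linux/slab.h>

    struct demo_obj { int id; };            /* made-up object type */

    static mempool_t *demo_pool;

    static int demo_pool_init(void)
    {
            /* The backing cache must not have a ctor, per the note above. */
            struct kmem_cache *cache = kmem_cache_create("demo_obj",
                                    sizeof(struct demo_obj), 0, 0, NULL);
            if (!cache)
                    return -ENOMEM;

            demo_pool = mempool_create_node(16, mempool_alloc_slab,
                                            mempool_free_slab, cache,
                                            GFP_KERNEL, NUMA_NO_NODE);
            return demo_pool ? 0 : -ENOMEM;
    }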
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
index cc892a8..12a5b39 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -461,7 +461,6 @@ struct ab8500_fg;
#ifdef CONFIG_AB8500_BM
extern struct abx500_bm_data ab8500_bm_data;
-void ab8500_fg_reinit(void);
void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
struct ab8500_btemp *ab8500_btemp_get(void);
int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
index 234c991..67703f2 100644
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -9,8 +9,13 @@
#include <linux/power_supply.h>
-#define psy_to_ux500_charger(x) container_of((x), \
- struct ux500_charger, psy)
+/*
+ * Valid only for supplies of type:
+ * - POWER_SUPPLY_TYPE_MAINS,
+ * - POWER_SUPPLY_TYPE_USB,
+ * because only they store a pointer to struct ux500_charger as drv_data.
+ */
+#define psy_to_ux500_charger(x) power_supply_get_drvdata(x)
/* Forward declaration */
struct ux500_charger;
@@ -35,7 +40,7 @@ struct ux500_charger_ops {
* @power_path USB power path support
*/
struct ux500_charger {
- struct power_supply psy;
+ struct power_supply *psy;
struct ux500_charger_ops ops;
int max_out_volt;
int max_out_curr;
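With struct ux500_charger now holding a power_supply pointer rather than embedding the object, the macro recovers the driver data from the registered supply. A sketch of a property callback using it; the callback shape is the standard power-supply one and is not part of this diff.

    #include <linux/power_supply.h>
    #include <linux/mfd/abx500/ux500_chargalg.h>

    static int demo_get_property(struct power_supply *psy,
                                 enum power_supply_property psp,
                                 union power_supply_propval *val)
    {
            struct ux500_charger *di = psy_to_ux500_charger(psy);

            val->intval = di->max_out_curr;  /* field from the struct above */
            return 0;
    }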
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 910e3aa..2f434f4 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -24,6 +24,7 @@ enum arizona_type {
WM5102 = 1,
WM5110 = 2,
WM8997 = 3,
+ WM8280 = 4,
};
#define ARIZONA_IRQ_GP1 0
@@ -116,6 +117,7 @@ struct arizona {
int num_core_supplies;
struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES];
struct regulator *dcvdd;
+ bool has_fully_powered_off;
struct arizona_pdata pdata;
@@ -126,7 +128,7 @@ struct arizona {
struct regmap_irq_chip_data *aod_irq_chip;
struct regmap_irq_chip_data *irq_chip;
- bool hpdet_magic;
+ bool hpdet_clamp;
unsigned int hp_ena;
struct mutex clk_lock;
@@ -152,7 +154,15 @@ int arizona_request_irq(struct arizona *arizona, int irq, char *name,
void arizona_free_irq(struct arizona *arizona, int irq, void *data);
int arizona_set_irq_wake(struct arizona *arizona, int irq, int on);
+#ifdef CONFIG_MFD_WM5102
int wm5102_patch(struct arizona *arizona);
+#else
+static inline int wm5102_patch(struct arizona *arizona)
+{
+ return 0;
+}
+#endif
+
int wm5110_patch(struct arizona *arizona);
int wm8997_patch(struct arizona *arizona);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 4578c72..43db4fa 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -11,31 +11,26 @@
#ifndef _ARIZONA_PDATA_H
#define _ARIZONA_PDATA_H
-#define ARIZONA_GPN_DIR 0x8000 /* GPN_DIR */
+#include <dt-bindings/mfd/arizona.h>
+
#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
-#define ARIZONA_GPN_PU 0x4000 /* GPN_PU */
#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
-#define ARIZONA_GPN_PD 0x2000 /* GPN_PD */
#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
-#define ARIZONA_GPN_LVL 0x0800 /* GPN_LVL */
#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
-#define ARIZONA_GPN_POL 0x0400 /* GPN_POL */
#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
-#define ARIZONA_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
-#define ARIZONA_GPN_DB 0x0100 /* GPN_DB */
#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
@@ -45,23 +40,10 @@
#define ARIZONA_MAX_GPIO 5
-#define ARIZONA_32KZ_MCLK1 1
-#define ARIZONA_32KZ_MCLK2 2
-#define ARIZONA_32KZ_NONE 3
-
#define ARIZONA_MAX_INPUT 4
-#define ARIZONA_DMIC_MICVDD 0
-#define ARIZONA_DMIC_MICBIAS1 1
-#define ARIZONA_DMIC_MICBIAS2 2
-#define ARIZONA_DMIC_MICBIAS3 3
-
#define ARIZONA_MAX_MICBIAS 3
-#define ARIZONA_INMODE_DIFF 0
-#define ARIZONA_INMODE_SE 1
-#define ARIZONA_INMODE_DMIC 2
-
#define ARIZONA_MAX_OUTPUT 6
#define ARIZONA_MAX_AIF 3
@@ -112,7 +94,7 @@ struct arizona_pdata {
int gpio_base;
/** Pin state for GPIO pins */
- int gpio_defaults[ARIZONA_MAX_GPIO];
+ unsigned int gpio_defaults[ARIZONA_MAX_GPIO];
/**
* Maximum number of channels clocks will be generated for,
@@ -139,6 +121,9 @@ struct arizona_pdata {
/** GPIO used for mic isolation with HPDET */
int hpdet_id_gpio;
+ /** Channel to use for headphone detection */
+ unsigned int hpdet_channel;
+
/** Extra debounce timeout used during initial mic detection (ms) */
int micd_detect_debounce;
@@ -174,7 +159,10 @@ struct arizona_pdata {
/** MICBIAS configurations */
struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS];
- /** Mode of input structures */
+ /**
+ * Mode of input structures
+ * One of the ARIZONA_INMODE_xxx values
+ */
int inmode[ARIZONA_MAX_INPUT];
/** Mode for outputs */
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index aacc10d..3499d36 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -2515,9 +2515,12 @@
#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */
#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */
#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */
-#define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */
-#define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */
-#define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1_MODE_MASK 0x0400 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_MODE_SHIFT 10 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_MODE_WIDTH 1 /* IN1_MODE - [10] */
+#define ARIZONA_IN1_SINGLE_ENDED_MASK 0x0200 /* IN1_MODE - [9] */
+#define ARIZONA_IN1_SINGLE_ENDED_SHIFT 9 /* IN1_MODE - [9] */
+#define ARIZONA_IN1_SINGLE_ENDED_WIDTH 1 /* IN1_MODE - [9] */
#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */
#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */
#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */
@@ -2588,9 +2591,12 @@
#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */
#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */
#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */
-#define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */
-#define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */
-#define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2_MODE_MASK 0x0400 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_MODE_SHIFT 10 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_MODE_WIDTH 1 /* IN2_MODE - [10] */
+#define ARIZONA_IN2_SINGLE_ENDED_MASK 0x0200 /* IN2_MODE - [9] */
+#define ARIZONA_IN2_SINGLE_ENDED_SHIFT 9 /* IN2_MODE - [9] */
+#define ARIZONA_IN2_SINGLE_ENDED_WIDTH 1 /* IN2_MODE - [9] */
#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */
#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */
#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */
@@ -2661,9 +2667,12 @@
#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */
#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */
#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */
-#define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */
-#define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */
-#define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3_MODE_MASK 0x0400 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_MODE_SHIFT 10 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_MODE_WIDTH 1 /* IN3_MODE - [10] */
+#define ARIZONA_IN3_SINGLE_ENDED_MASK 0x0200 /* IN3_MODE - [9] */
+#define ARIZONA_IN3_SINGLE_ENDED_SHIFT 9 /* IN3_MODE - [9] */
+#define ARIZONA_IN3_SINGLE_ENDED_WIDTH 1 /* IN3_MODE - [9] */
#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */
#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */
#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 81589d1..c2aa853 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -14,6 +14,7 @@
enum {
AXP202_ID = 0,
AXP209_ID,
+ AXP221_ID,
AXP288_ID,
NR_AXP20X_VARIANTS,
};
@@ -45,6 +46,28 @@ enum {
#define AXP20X_V_LTF_DISCHRG 0x3c
#define AXP20X_V_HTF_DISCHRG 0x3d
+#define AXP22X_PWR_OUT_CTRL1 0x10
+#define AXP22X_PWR_OUT_CTRL2 0x12
+#define AXP22X_PWR_OUT_CTRL3 0x13
+#define AXP22X_DLDO1_V_OUT 0x15
+#define AXP22X_DLDO2_V_OUT 0x16
+#define AXP22X_DLDO3_V_OUT 0x17
+#define AXP22X_DLDO4_V_OUT 0x18
+#define AXP22X_ELDO1_V_OUT 0x19
+#define AXP22X_ELDO2_V_OUT 0x1a
+#define AXP22X_ELDO3_V_OUT 0x1b
+#define AXP22X_DC5LDO_V_OUT 0x1c
+#define AXP22X_DCDC1_V_OUT 0x21
+#define AXP22X_DCDC2_V_OUT 0x22
+#define AXP22X_DCDC3_V_OUT 0x23
+#define AXP22X_DCDC4_V_OUT 0x24
+#define AXP22X_DCDC5_V_OUT 0x25
+#define AXP22X_DCDC23_V_RAMP_CTRL 0x27
+#define AXP22X_ALDO1_V_OUT 0x28
+#define AXP22X_ALDO2_V_OUT 0x29
+#define AXP22X_ALDO3_V_OUT 0x2a
+#define AXP22X_CHRG_CTRL3 0x35
+
/* Interrupt */
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
@@ -100,6 +123,9 @@ enum {
#define AXP20X_VBUS_MON 0x8b
#define AXP20X_OVER_TMP 0x8f
+#define AXP22X_PWREN_CTRL1 0x8c
+#define AXP22X_PWREN_CTRL2 0x8d
+
/* GPIO */
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
@@ -108,6 +134,11 @@ enum {
#define AXP20X_GPIO20_SS 0x94
#define AXP20X_GPIO3_CTRL 0x95
+#define AXP22X_LDO_IO0_V_OUT 0x91
+#define AXP22X_LDO_IO1_V_OUT 0x93
+#define AXP22X_GPIO_STATE 0x94
+#define AXP22X_GPIO_PULL_DOWN 0x95
+
/* Battery */
#define AXP20X_CHRG_CC_31_24 0xb0
#define AXP20X_CHRG_CC_23_16 0xb1
@@ -120,14 +151,34 @@ enum {
#define AXP20X_CC_CTRL 0xb8
#define AXP20X_FG_RES 0xb9
+/* AXP22X specific registers */
+#define AXP22X_BATLOW_THRES1 0xe6
+
/* AXP288 specific registers */
#define AXP288_PMIC_ADC_H 0x56
#define AXP288_PMIC_ADC_L 0x57
#define AXP288_ADC_TS_PIN_CTRL 0x84
-
#define AXP288_PMIC_ADC_EN 0x84
-#define AXP288_FG_TUNE5 0xed
+/* Fuel Gauge */
+#define AXP288_FG_RDC1_REG 0xba
+#define AXP288_FG_RDC0_REG 0xbb
+#define AXP288_FG_OCVH_REG 0xbc
+#define AXP288_FG_OCVL_REG 0xbd
+#define AXP288_FG_OCV_CURVE_REG 0xc0
+#define AXP288_FG_DES_CAP1_REG 0xe0
+#define AXP288_FG_DES_CAP0_REG 0xe1
+#define AXP288_FG_CC_MTR1_REG 0xe2
+#define AXP288_FG_CC_MTR0_REG 0xe3
+#define AXP288_FG_OCV_CAP_REG 0xe4
+#define AXP288_FG_CC_CAP_REG 0xe5
+#define AXP288_FG_LOW_CAP_REG 0xe6
+#define AXP288_FG_TUNE0 0xe8
+#define AXP288_FG_TUNE1 0xe9
+#define AXP288_FG_TUNE2 0xea
+#define AXP288_FG_TUNE3 0xeb
+#define AXP288_FG_TUNE4 0xec
+#define AXP288_FG_TUNE5 0xed
/* Regulators IDs */
enum {
@@ -141,6 +192,30 @@ enum {
AXP20X_REG_ID_MAX,
};
+enum {
+ AXP22X_DCDC1 = 0,
+ AXP22X_DCDC2,
+ AXP22X_DCDC3,
+ AXP22X_DCDC4,
+ AXP22X_DCDC5,
+ AXP22X_DC1SW,
+ AXP22X_DC5LDO,
+ AXP22X_ALDO1,
+ AXP22X_ALDO2,
+ AXP22X_ALDO3,
+ AXP22X_ELDO1,
+ AXP22X_ELDO2,
+ AXP22X_ELDO3,
+ AXP22X_DLDO1,
+ AXP22X_DLDO2,
+ AXP22X_DLDO3,
+ AXP22X_DLDO4,
+ AXP22X_RTC_LDO,
+ AXP22X_LDO_IO0,
+ AXP22X_LDO_IO1,
+ AXP22X_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
@@ -182,6 +257,34 @@ enum {
AXP20X_IRQ_GPIO0_INPUT,
};
+enum axp22x_irqs {
+ AXP22X_IRQ_ACIN_OVER_V = 1,
+ AXP22X_IRQ_ACIN_PLUGIN,
+ AXP22X_IRQ_ACIN_REMOVAL,
+ AXP22X_IRQ_VBUS_OVER_V,
+ AXP22X_IRQ_VBUS_PLUGIN,
+ AXP22X_IRQ_VBUS_REMOVAL,
+ AXP22X_IRQ_VBUS_V_LOW,
+ AXP22X_IRQ_BATT_PLUGIN,
+ AXP22X_IRQ_BATT_REMOVAL,
+ AXP22X_IRQ_BATT_ENT_ACT_MODE,
+ AXP22X_IRQ_BATT_EXIT_ACT_MODE,
+ AXP22X_IRQ_CHARG,
+ AXP22X_IRQ_CHARG_DONE,
+ AXP22X_IRQ_BATT_TEMP_HIGH,
+ AXP22X_IRQ_BATT_TEMP_LOW,
+ AXP22X_IRQ_DIE_TEMP_HIGH,
+ AXP22X_IRQ_PEK_SHORT,
+ AXP22X_IRQ_PEK_LONG,
+ AXP22X_IRQ_LOW_PWR_LVL1,
+ AXP22X_IRQ_LOW_PWR_LVL2,
+ AXP22X_IRQ_TIMER,
+ AXP22X_IRQ_PEK_RIS_EDGE,
+ AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_GPIO1_INPUT,
+ AXP22X_IRQ_GPIO0_INPUT,
+};
+
enum axp288_irqs {
AXP288_IRQ_VBUS_FALL = 2,
AXP288_IRQ_VBUS_RISE,
@@ -236,4 +339,38 @@ struct axp20x_dev {
const struct regmap_irq_chip *regmap_irq_chip;
};
+#define BATTID_LEN 64
+#define OCV_CURVE_SIZE 32
+#define MAX_THERM_CURVE_SIZE 25
+#define PD_DEF_MIN_TEMP 0
+#define PD_DEF_MAX_TEMP 55
+
+struct axp20x_fg_pdata {
+ char battid[BATTID_LEN + 1];
+ int design_cap;
+ int min_volt;
+ int max_volt;
+ int max_temp;
+ int min_temp;
+ int cap1;
+ int cap0;
+ int rdc1;
+ int rdc0;
+ int ocv_curve[OCV_CURVE_SIZE];
+ int tcsz;
+ int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
+};
+
+struct axp20x_chrg_pdata {
+ int max_cc;
+ int max_cv;
+ int def_cc;
+ int def_cv;
+};
+
+struct axp288_extcon_pdata {
+ /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
+ struct gpio_desc *gpio_mux_cntl;
+};
+
#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 0e166b9..da72671 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -16,10 +16,30 @@
#ifndef __LINUX_MFD_CROS_EC_H
#define __LINUX_MFD_CROS_EC_H
+#include <linux/cdev.h>
+#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/mutex.h>
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_PD_NAME "cros_pd"
+
+/*
+ * The EC is unresponsive for a time after a reboot command. Add a
+ * simple delay to make sure that the bus stays locked.
+ */
+#define EC_REBOOT_DELAY_MS 50
+
+/*
+ * Max bus-specific overhead incurred by request/responses.
+ * I2C requires 1 additional byte for requests.
+ * I2C requires 2 additional bytes for responses.
+ */
+#define EC_PROTO_VERSION_UNKNOWN 0
+#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_RESPONSE_OVERHEAD 2
+
/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
*/
@@ -38,33 +58,37 @@ enum {
/*
* @version: Command version number (often 0)
* @command: Command to send (EC_CMD_...)
- * @outdata: Outgoing data to EC
* @outsize: Outgoing length in bytes
- * @indata: Where to put the incoming data from EC
* @insize: Max number of bytes to accept from EC
* @result: EC's response to the command (separate from communication failure)
+ * @data: Where to put the incoming data from EC and outgoing data to EC
*/
struct cros_ec_command {
uint32_t version;
uint32_t command;
- uint8_t *outdata;
uint32_t outsize;
- uint8_t *indata;
uint32_t insize;
uint32_t result;
+ uint8_t data[0];
};
/**
* struct cros_ec_device - Information about a ChromeOS EC device
*
- * @ec_name: name of EC device (e.g. 'chromeos-ec')
* @phys_name: name of physical comms layer (e.g. 'i2c-4')
- * @dev: Device pointer
+ * @dev: Device pointer for physical comms device
* @was_wake_device: true if this device was set to wake the system from
* sleep at the last suspend
+ * @cmd_readmem: direct read of the EC memory-mapped region, if supported
+ * @offset is within EC_LPC_ADDR_MEMMAP region.
+ * @bytes: number of bytes to read. zero means "read a string" (including
+ * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read.
+ * Caller must ensure that the buffer is large enough for the result when
+ * reading a string.
*
* @priv: Private data
* @irq: Interrupt to use
+ * @id: Device id
* @din: input buffer (for data from EC)
* @dout: output buffer (for data to EC)
* \note
@@ -76,37 +100,72 @@ struct cros_ec_command {
* to using dword.
* @din_size: size of din buffer to allocate (zero to use static din)
* @dout_size: size of dout buffer to allocate (zero to use static dout)
- * @parent: pointer to parent device (e.g. i2c or spi device)
* @wake_enabled: true if this device can wake the system from sleep
* @cmd_xfer: send command to EC and get response
* Returns the number of bytes received if the communication succeeded, but
* that doesn't mean the EC was happy with the command. The caller
* should check msg.result for the EC's result code.
+ * @pkt_xfer: send packet to EC and get response
* @lock: one transaction at a time
*/
struct cros_ec_device {
/* These are used by other drivers that want to talk to the EC */
- const char *ec_name;
const char *phys_name;
struct device *dev;
bool was_wake_device;
struct class *cros_class;
+ int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest);
/* These are used to implement the platform-specific interface */
+ u16 max_request;
+ u16 max_response;
+ u16 max_passthru;
+ u16 proto_version;
void *priv;
int irq;
- uint8_t *din;
- uint8_t *dout;
+ u8 *din;
+ u8 *dout;
int din_size;
int dout_size;
- struct device *parent;
bool wake_enabled;
int (*cmd_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
+ int (*pkt_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
struct mutex lock;
};
+/* struct cros_ec_platform - ChromeOS EC platform information
+ *
+ * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
+ * used in /dev/ and sysfs.
+ * @cmd_offset: offset to apply for each command. Set when
+ *     registering a device behind another one.
+ */
+struct cros_ec_platform {
+ const char *ec_name;
+ u16 cmd_offset;
+};
+
+/*
+ * struct cros_ec_dev - ChromeOS EC device entry point
+ *
+ * @class_dev: Device structure used in sysfs
+ * @cdev: Character device structure in /dev
+ * @ec_dev: cros_ec_device structure to talk to the physical device
+ * @dev: pointer to the platform device
+ * @cmd_offset: offset to apply for each command.
+ */
+struct cros_ec_dev {
+ struct device class_dev;
+ struct cdev cdev;
+ struct cros_ec_device *ec_dev;
+ struct device *dev;
+ u16 cmd_offset;
+};
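An editorial sketch of how the two structures above might be wired together: a transport driver could register one instance for the EC itself and another for a PD MCU behind it, giving the second a cmd_offset so its commands are rewritten into the passthru range (see EC_CMD_PASSTHRU_OFFSET in cros_ec_commands.h). The tables below are illustrative only, not the driver's actual registration code.

static const struct cros_ec_platform ec_platform_sketch = {
	.ec_name	= CROS_EC_DEV_NAME,		/* "cros_ec" in /dev and sysfs */
	.cmd_offset	= 0,				/* talks to the EC directly */
};

static const struct cros_ec_platform pd_platform_sketch = {
	.ec_name	= CROS_EC_DEV_PD_NAME,		/* "cros_pd" */
	.cmd_offset	= EC_CMD_PASSTHRU_OFFSET(1),	/* route via sub-device 1 */
};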
+
/**
* cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
*
@@ -185,4 +244,16 @@ int cros_ec_remove(struct cros_ec_device *ec_dev);
*/
int cros_ec_register(struct cros_ec_device *ec_dev);
+/**
+ * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
+ *
+ * @ec_dev: Device to query
+ * @return 0 if ok, -ve on error
+ */
+int cros_ec_query_all(struct cros_ec_device *ec_dev);
+
+/* sysfs attribute groups */
+extern struct attribute_group cros_ec_attr_group;
+extern struct attribute_group cros_ec_lightbar_attr_group;
+
#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index a49cd41..13b630c 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -515,7 +515,7 @@ struct ec_host_response {
/*
* Notes on commands:
*
- * Each command is an 8-byte command value. Commands which take params or
+ * Each command is a 16-bit command value. Commands which take params or
* return response data specify structs for that data. If no struct is
* specified, the command does not input or output data, respectively.
* Parameter/response length is implicit in the structs. Some underlying
@@ -966,7 +966,7 @@ struct rgb_s {
/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
* host command, but the alignment is the same regardless. Keep it that way.
*/
-struct lightbar_params {
+struct lightbar_params_v0 {
/* Timing */
int32_t google_ramp_up;
int32_t google_ramp_down;
@@ -1000,32 +1000,81 @@ struct lightbar_params {
struct rgb_s color[8]; /* 0-3 are Google colors */
} __packed;
+struct lightbar_params_v1 {
+ /* Timing */
+ int32_t google_ramp_up;
+ int32_t google_ramp_down;
+ int32_t s3s0_ramp_up;
+ int32_t s0_tick_delay[2]; /* AC=0/1 */
+ int32_t s0a_tick_delay[2]; /* AC=0/1 */
+ int32_t s0s3_ramp_down;
+ int32_t s3_sleep_for;
+ int32_t s3_ramp_up;
+ int32_t s3_ramp_down;
+ int32_t tap_tick_delay;
+ int32_t tap_display_time;
+
+ /* Tap-for-battery params */
+ uint8_t tap_pct_red;
+ uint8_t tap_pct_green;
+ uint8_t tap_seg_min_on;
+ uint8_t tap_seg_max_on;
+ uint8_t tap_seg_osc;
+ uint8_t tap_idx[3];
+
+ /* Oscillation */
+ uint8_t osc_min[2]; /* AC=0/1 */
+ uint8_t osc_max[2]; /* AC=0/1 */
+ uint8_t w_ofs[2]; /* AC=0/1 */
+
+ /* Brightness limits based on the backlight and AC. */
+ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_min[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_max[2]; /* AC=0/1 */
+
+ /* Battery level thresholds */
+ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
+
+ /* Map [AC][battery_level] to color index */
+ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
+ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
+
+ /* Color palette */
+ struct rgb_s color[8]; /* 0-3 are Google colors */
+} __packed;
+
struct ec_params_lightbar {
uint8_t cmd; /* Command (see enum lightbar_command) */
union {
struct {
/* no args */
- } dump, off, on, init, get_seq, get_params, version;
+ } dump, off, on, init, get_seq, get_params_v0, get_params_v1,
+ version, get_brightness, get_demo;
- struct num {
+ struct {
uint8_t num;
- } brightness, seq, demo;
+ } set_brightness, seq, demo;
- struct reg {
+ struct {
uint8_t ctrl, reg, value;
} reg;
- struct rgb {
+ struct {
uint8_t led, red, green, blue;
- } rgb;
+ } set_rgb;
+
+ struct {
+ uint8_t led;
+ } get_rgb;
- struct lightbar_params set_params;
+ struct lightbar_params_v0 set_params_v0;
+ struct lightbar_params_v1 set_params_v1;
};
} __packed;
struct ec_response_lightbar {
union {
- struct dump {
+ struct {
struct {
uint8_t reg;
uint8_t ic0;
@@ -1033,20 +1082,26 @@ struct ec_response_lightbar {
} vals[23];
} dump;
- struct get_seq {
+ struct {
uint8_t num;
- } get_seq;
+ } get_seq, get_brightness, get_demo;
- struct lightbar_params get_params;
+ struct lightbar_params_v0 get_params_v0;
+ struct lightbar_params_v1 get_params_v1;
- struct version {
+ struct {
uint32_t num;
uint32_t flags;
} version;
struct {
+ uint8_t red, green, blue;
+ } get_rgb;
+
+ struct {
/* no return params */
- } off, on, init, brightness, seq, reg, rgb, demo, set_params;
+ } off, on, init, set_brightness, seq, reg, set_rgb,
+ demo, set_params_v0, set_params_v1;
};
} __packed;
@@ -1056,15 +1111,20 @@ enum lightbar_command {
LIGHTBAR_CMD_OFF = 1,
LIGHTBAR_CMD_ON = 2,
LIGHTBAR_CMD_INIT = 3,
- LIGHTBAR_CMD_BRIGHTNESS = 4,
+ LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
LIGHTBAR_CMD_SEQ = 5,
LIGHTBAR_CMD_REG = 6,
- LIGHTBAR_CMD_RGB = 7,
+ LIGHTBAR_CMD_SET_RGB = 7,
LIGHTBAR_CMD_GET_SEQ = 8,
LIGHTBAR_CMD_DEMO = 9,
- LIGHTBAR_CMD_GET_PARAMS = 10,
- LIGHTBAR_CMD_SET_PARAMS = 11,
+ LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
+ LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
LIGHTBAR_CMD_VERSION = 12,
+ LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
+ LIGHTBAR_CMD_GET_RGB = 14,
+ LIGHTBAR_CMD_GET_DEMO = 15,
+ LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
+ LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
LIGHTBAR_NUM_CMDS
};
@@ -1421,8 +1481,40 @@ struct ec_response_rtc {
/*****************************************************************************/
/* Port80 log access */
+/* Maximum entries that can be read/written in a single command */
+#define EC_PORT80_SIZE_MAX 32
+
/* Get last port80 code from previous boot */
#define EC_CMD_PORT80_LAST_BOOT 0x48
+#define EC_CMD_PORT80_READ 0x48
+
+enum ec_port80_subcmd {
+ EC_PORT80_GET_INFO = 0,
+ EC_PORT80_READ_BUFFER,
+};
+
+struct ec_params_port80_read {
+ uint16_t subcmd;
+ union {
+ struct {
+ uint32_t offset;
+ uint32_t num_entries;
+ } read_buffer;
+ };
+} __packed;
+
+struct ec_response_port80_read {
+ union {
+ struct {
+ uint32_t writes;
+ uint32_t history_size;
+ uint32_t last_boot;
+ } get_info;
+ struct {
+ uint16_t codes[EC_PORT80_SIZE_MAX];
+ } data;
+ };
+} __packed;
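A brief editorial sketch of filling the read-buffer request above; the surrounding cros_ec_command plumbing is omitted, and the exact interpretation of offset is left to the EC implementation.

static void port80_read_buffer_sketch(struct ec_params_port80_read *p,
				      uint32_t offset, uint32_t count)
{
	p->subcmd = EC_PORT80_READ_BUFFER;
	p->read_buffer.offset = offset;
	/* at most EC_PORT80_SIZE_MAX entries fit in one response */
	p->read_buffer.num_entries = min_t(uint32_t, count, EC_PORT80_SIZE_MAX);
}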
struct ec_response_port80_last_boot {
uint16_t code;
@@ -1782,6 +1874,7 @@ struct ec_params_gpio_set {
/* Get GPIO value */
#define EC_CMD_GPIO_GET 0x93
+/* Version 0 of input params and response */
struct ec_params_gpio_get {
char name[32];
} __packed;
@@ -1789,6 +1882,38 @@ struct ec_response_gpio_get {
uint8_t val;
} __packed;
+/* Version 1 of input params and response */
+struct ec_params_gpio_get_v1 {
+ uint8_t subcmd;
+ union {
+ struct {
+ char name[32];
+ } get_value_by_name;
+ struct {
+ uint8_t index;
+ } get_info;
+ };
+} __packed;
+
+struct ec_response_gpio_get_v1 {
+ union {
+ struct {
+ uint8_t val;
+ } get_value_by_name, get_count;
+ struct {
+ uint8_t val;
+ char name[32];
+ uint32_t flags;
+ } get_info;
+ };
+} __packed;
+
+enum gpio_get_subcmd {
+ EC_GPIO_GET_BY_NAME = 0,
+ EC_GPIO_GET_COUNT = 1,
+ EC_GPIO_GET_INFO = 2,
+};
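For illustration only, a sketch of building the two flavours of v1 request defined above; the response is then decoded through the matching union member of ec_response_gpio_get_v1.

static void gpio_get_info_sketch(struct ec_params_gpio_get_v1 *p, uint8_t index)
{
	p->subcmd = EC_GPIO_GET_INFO;
	p->get_info.index = index;		/* decode reply via get_info */
}

static void gpio_get_by_name_sketch(struct ec_params_gpio_get_v1 *p,
				    const char *name)
{
	p->subcmd = EC_GPIO_GET_BY_NAME;
	strlcpy(p->get_value_by_name.name, name,
		sizeof(p->get_value_by_name.name));
}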
+
/*****************************************************************************/
/* I2C commands. Only available when flash write protect is unlocked. */
@@ -1857,13 +1982,21 @@ struct ec_params_charge_control {
/*****************************************************************************/
/*
- * Cut off battery power output if the battery supports.
+ * Cut off battery power immediately or after the host has shut down.
*
- * For unsupported battery, just don't implement this command and lets EC
- * return EC_RES_INVALID_COMMAND.
+ * Returns EC_RES_INVALID_COMMAND if unsupported by a board/battery,
+ * EC_RES_SUCCESS if the command was successful, or
+ * EC_RES_ERROR if the cut off command failed.
*/
+
#define EC_CMD_BATTERY_CUT_OFF 0x99
+#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
+
+struct ec_params_battery_cutoff {
+ uint8_t flags;
+} __packed;
+
/*****************************************************************************/
/* USB port mux control. */
@@ -2142,6 +2275,32 @@ struct ec_params_sb_wr_block {
} __packed;
/*****************************************************************************/
+/* Battery vendor parameters
+ *
+ * Get or set vendor-specific parameters in the battery. Implementations may
+ * differ between boards or batteries. On a set operation, the response
+ * contains the actual value set, which may be rounded or clipped from the
+ * requested value.
+ */
+
+#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4
+
+enum ec_battery_vendor_param_mode {
+ BATTERY_VENDOR_PARAM_MODE_GET = 0,
+ BATTERY_VENDOR_PARAM_MODE_SET,
+};
+
+struct ec_params_battery_vendor_param {
+ uint32_t param;
+ uint32_t value;
+ uint8_t mode;
+} __packed;
+
+struct ec_response_battery_vendor_param {
+ uint32_t value;
+} __packed;
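An editorial sketch of a get request built from the structs above; the parameter id is a placeholder since, as noted, the ids are board/battery specific.

static void battery_vendor_param_get_sketch(struct ec_params_battery_vendor_param *p,
					    uint32_t param)
{
	p->mode = BATTERY_VENDOR_PARAM_MODE_GET;
	p->param = param;	/* board-specific id, e.g. from firmware docs */
	p->value = 0;		/* only meaningful for a SET */
}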
+
+/*****************************************************************************/
/* System commands */
/*
@@ -2338,6 +2497,80 @@ struct ec_params_reboot_ec {
/*****************************************************************************/
/*
+ * PD commands
+ *
+ * These commands are for PD MCU communication.
+ */
+
+/* EC to PD MCU exchange status command */
+#define EC_CMD_PD_EXCHANGE_STATUS 0x100
+
+/* Status of EC being sent to PD */
+struct ec_params_pd_status {
+ int8_t batt_soc; /* battery state of charge */
+} __packed;
+
+/* Status of PD being sent back to EC */
+struct ec_response_pd_status {
+ int8_t status; /* PD MCU status */
+ uint32_t curr_lim_ma; /* input current limit */
+} __packed;
+
+/* Set USB type-C port role and muxes */
+#define EC_CMD_USB_PD_CONTROL 0x101
+
+enum usb_pd_control_role {
+ USB_PD_CTRL_ROLE_NO_CHANGE = 0,
+ USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
+ USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
+ USB_PD_CTRL_ROLE_FORCE_SINK = 3,
+ USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
+};
+
+enum usb_pd_control_mux {
+ USB_PD_CTRL_MUX_NO_CHANGE = 0,
+ USB_PD_CTRL_MUX_NONE = 1,
+ USB_PD_CTRL_MUX_USB = 2,
+ USB_PD_CTRL_MUX_DP = 3,
+ USB_PD_CTRL_MUX_DOCK = 4,
+ USB_PD_CTRL_MUX_AUTO = 5,
+};
+
+struct ec_params_usb_pd_control {
+ uint8_t port;
+ uint8_t role;
+ uint8_t mux;
+} __packed;
+
+/*****************************************************************************/
+/*
+ * Passthru commands
+ *
+ * Some platforms have sub-processors chained to each other. For example:
+ *
+ * AP <--> EC <--> PD MCU
+ *
+ * The top 2 bits of the command number are used to indicate which device the
+ * command is intended for. Device 0 is always the device receiving the
+ * command; other device mapping is board-specific.
+ *
+ * When a device receives a command to be passed to a sub-processor, it passes
+ * it on with the device number set back to 0. This allows the sub-processor
+ * to remain blissfully unaware of whether the command originated on the next
+ * device up the chain, or was passed through from the AP.
+ *
+ * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
+ * AP sends command 0x4002 to the EC
+ * EC sends command 0x0002 to the PD MCU
+ * EC forwards PD MCU response back to the AP
+ */
+
+/* Offset and max command number for sub-device n */
+#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
+#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
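An editorial illustration of the numbering described above: a logical command for sub-device n is offset into that device's passthru window, so command 0x0002 for the PD MCU (sub-device 1) leaves the AP as 0x4002.

static inline uint16_t cros_ec_passthru_cmd(unsigned int device, uint16_t cmd)
{
	/* cros_ec_passthru_cmd(1, 0x0002) == 0x4002 */
	return EC_CMD_PASSTHRU_OFFSET(device) + cmd;
}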
+
+/*****************************************************************************/
+/*
* Deprecated constants. These constants have been renamed for clarity. The
* meaning and size has not changed. Programs that use the old names should
* switch to the new names soon, as the old names may not be carried forward
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
index 956afa4..5dc743f 100644
--- a/include/linux/mfd/da9055/core.h
+++ b/include/linux/mfd/da9055/core.h
@@ -89,6 +89,6 @@ static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg,
int da9055_device_init(struct da9055 *da9055);
void da9055_device_exit(struct da9055 *da9055);
-extern struct regmap_config da9055_regmap_config;
+extern const struct regmap_config da9055_regmap_config;
#endif /* __DA9055_CORE_H */
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index b92a326..79f4d82 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -36,6 +36,7 @@ enum da9063_models {
enum da9063_variant_codes {
PMIC_DA9063_AD = 0x3,
PMIC_DA9063_BB = 0x5,
+ PMIC_DA9063_CA = 0x6,
};
/* Interrupts */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 95c8742..612383b 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -103,6 +103,7 @@ struct da9063;
struct da9063_pdata {
int (*init)(struct da9063 *da9063);
int irq_base;
+ bool key_power;
unsigned flags;
struct da9063_regulators_pdata *regulators_pdata;
struct led_platform_data *leds_pdata;
diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h
new file mode 100644
index 0000000..76e6689
--- /dev/null
+++ b/include/linux/mfd/da9150/core.h
@@ -0,0 +1,68 @@
+/*
+ * DA9150 MFD Driver - Core Data
+ *
+ * Copyright (c) 2014 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __DA9150_CORE_H
+#define __DA9150_CORE_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+
+/* I2C address paging */
+#define DA9150_REG_PAGE_SHIFT 8
+#define DA9150_REG_PAGE_MASK 0xFF
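A sketch of how these paging defines appear to split a 16-bit register constant (an editorial assumption about their intended use, not code from the driver): the upper byte selects the I2C page and the lower byte is the offset within it.

static inline u8 da9150_reg_page(u16 reg)
{
	return (reg >> DA9150_REG_PAGE_SHIFT) & DA9150_REG_PAGE_MASK;
}

static inline u8 da9150_reg_offset(u16 reg)
{
	/* e.g. DA9150_STATUS_A (0x068) -> page 0, offset 0x68 */
	return reg & DA9150_REG_PAGE_MASK;
}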
+
+/* IRQs */
+#define DA9150_NUM_IRQ_REGS 4
+#define DA9150_IRQ_VBUS 0
+#define DA9150_IRQ_CHG 1
+#define DA9150_IRQ_TCLASS 2
+#define DA9150_IRQ_TJUNC 3
+#define DA9150_IRQ_VFAULT 4
+#define DA9150_IRQ_CONF 5
+#define DA9150_IRQ_DAT 6
+#define DA9150_IRQ_DTYPE 7
+#define DA9150_IRQ_ID 8
+#define DA9150_IRQ_ADP 9
+#define DA9150_IRQ_SESS_END 10
+#define DA9150_IRQ_SESS_VLD 11
+#define DA9150_IRQ_FG 12
+#define DA9150_IRQ_GP 13
+#define DA9150_IRQ_TBAT 14
+#define DA9150_IRQ_GPIOA 15
+#define DA9150_IRQ_GPIOB 16
+#define DA9150_IRQ_GPIOC 17
+#define DA9150_IRQ_GPIOD 18
+#define DA9150_IRQ_GPADC 19
+#define DA9150_IRQ_WKUP 20
+
+struct da9150_pdata {
+ int irq_base;
+};
+
+struct da9150 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irq_data;
+ int irq;
+ int irq_base;
+};
+
+/* Device I/O */
+u8 da9150_reg_read(struct da9150 *da9150, u16 reg);
+void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val);
+void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val);
+
+void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf);
+void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf);
+#endif /* __DA9150_CORE_H */
diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h
new file mode 100644
index 0000000..27ca6ee
--- /dev/null
+++ b/include/linux/mfd/da9150/registers.h
@@ -0,0 +1,1155 @@
+/*
+ * DA9150 MFD Driver - Registers
+ *
+ * Copyright (c) 2014 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __DA9150_REGISTERS_H
+#define __DA9150_REGISTERS_H
+
+#include <linux/bitops.h>
+
+/* Registers */
+#define DA9150_PAGE_CON 0x000
+#define DA9150_STATUS_A 0x068
+#define DA9150_STATUS_B 0x069
+#define DA9150_STATUS_C 0x06A
+#define DA9150_STATUS_D 0x06B
+#define DA9150_STATUS_E 0x06C
+#define DA9150_STATUS_F 0x06D
+#define DA9150_STATUS_G 0x06E
+#define DA9150_STATUS_H 0x06F
+#define DA9150_STATUS_I 0x070
+#define DA9150_STATUS_J 0x071
+#define DA9150_STATUS_K 0x072
+#define DA9150_STATUS_L 0x073
+#define DA9150_STATUS_N 0x074
+#define DA9150_FAULT_LOG_A 0x076
+#define DA9150_FAULT_LOG_B 0x077
+#define DA9150_EVENT_E 0x078
+#define DA9150_EVENT_F 0x079
+#define DA9150_EVENT_G 0x07A
+#define DA9150_EVENT_H 0x07B
+#define DA9150_IRQ_MASK_E 0x07C
+#define DA9150_IRQ_MASK_F 0x07D
+#define DA9150_IRQ_MASK_G 0x07E
+#define DA9150_IRQ_MASK_H 0x07F
+#define DA9150_PAGE_CON_1 0x080
+#define DA9150_CONFIG_A 0x0E0
+#define DA9150_CONFIG_B 0x0E1
+#define DA9150_CONFIG_C 0x0E2
+#define DA9150_CONFIG_D 0x0E3
+#define DA9150_CONFIG_E 0x0E4
+#define DA9150_CONTROL_A 0x0E5
+#define DA9150_CONTROL_B 0x0E6
+#define DA9150_CONTROL_C 0x0E7
+#define DA9150_GPIO_A_B 0x0E8
+#define DA9150_GPIO_C_D 0x0E9
+#define DA9150_GPIO_MODE_CONT 0x0EA
+#define DA9150_GPIO_CTRL_B 0x0EB
+#define DA9150_GPIO_CTRL_A 0x0EC
+#define DA9150_GPIO_CTRL_C 0x0ED
+#define DA9150_GPIO_CFG_A 0x0EE
+#define DA9150_GPIO_CFG_B 0x0EF
+#define DA9150_GPIO_CFG_C 0x0F0
+#define DA9150_GPADC_MAN 0x0F2
+#define DA9150_GPADC_RES_A 0x0F4
+#define DA9150_GPADC_RES_B 0x0F5
+#define DA9150_PAGE_CON_2 0x100
+#define DA9150_OTP_CONT_SHARED 0x101
+#define DA9150_INTERFACE_SHARED 0x105
+#define DA9150_CONFIG_A_SHARED 0x106
+#define DA9150_CONFIG_D_SHARED 0x109
+#define DA9150_ADETVB_CFG_C 0x150
+#define DA9150_ADETD_STAT 0x151
+#define DA9150_ADET_CMPSTAT 0x152
+#define DA9150_ADET_CTRL_A 0x153
+#define DA9150_ADETVB_CFG_B 0x154
+#define DA9150_ADETVB_CFG_A 0x155
+#define DA9150_ADETAC_CFG_A 0x156
+#define DA9150_ADDETAC_CFG_B 0x157
+#define DA9150_ADETAC_CFG_C 0x158
+#define DA9150_ADETAC_CFG_D 0x159
+#define DA9150_ADETVB_CFG_D 0x15A
+#define DA9150_ADETID_CFG_A 0x15B
+#define DA9150_ADET_RID_PT_CHG_H 0x15C
+#define DA9150_ADET_RID_PT_CHG_L 0x15D
+#define DA9150_PPR_TCTR_B 0x160
+#define DA9150_PPR_BKCTRL_A 0x163
+#define DA9150_PPR_BKCFG_A 0x164
+#define DA9150_PPR_BKCFG_B 0x165
+#define DA9150_PPR_CHGCTRL_A 0x166
+#define DA9150_PPR_CHGCTRL_B 0x167
+#define DA9150_PPR_CHGCTRL_C 0x168
+#define DA9150_PPR_TCTR_A 0x169
+#define DA9150_PPR_CHGCTRL_D 0x16A
+#define DA9150_PPR_CHGCTRL_E 0x16B
+#define DA9150_PPR_CHGCTRL_F 0x16C
+#define DA9150_PPR_CHGCTRL_G 0x16D
+#define DA9150_PPR_CHGCTRL_H 0x16E
+#define DA9150_PPR_CHGCTRL_I 0x16F
+#define DA9150_PPR_CHGCTRL_J 0x170
+#define DA9150_PPR_CHGCTRL_K 0x171
+#define DA9150_PPR_CHGCTRL_L 0x172
+#define DA9150_PPR_CHGCTRL_M 0x173
+#define DA9150_PPR_THYST_A 0x174
+#define DA9150_PPR_THYST_B 0x175
+#define DA9150_PPR_THYST_C 0x176
+#define DA9150_PPR_THYST_D 0x177
+#define DA9150_PPR_THYST_E 0x178
+#define DA9150_PPR_THYST_F 0x179
+#define DA9150_PPR_THYST_G 0x17A
+#define DA9150_PAGE_CON_3 0x180
+#define DA9150_PAGE_CON_4 0x200
+#define DA9150_PAGE_CON_5 0x280
+#define DA9150_PAGE_CON_6 0x300
+#define DA9150_COREBTLD_STAT_A 0x302
+#define DA9150_COREBTLD_CTRL_A 0x303
+#define DA9150_CORE_CONFIG_A 0x304
+#define DA9150_CORE_CONFIG_C 0x305
+#define DA9150_CORE_CONFIG_B 0x306
+#define DA9150_CORE_CFG_DATA_A 0x307
+#define DA9150_CORE_CFG_DATA_B 0x308
+#define DA9150_CORE_CMD_A 0x309
+#define DA9150_CORE_DATA_A 0x30A
+#define DA9150_CORE_DATA_B 0x30B
+#define DA9150_CORE_DATA_C 0x30C
+#define DA9150_CORE_DATA_D 0x30D
+#define DA9150_CORE2WIRE_STAT_A 0x310
+#define DA9150_CORE2WIRE_CTRL_A 0x311
+#define DA9150_FW_CTRL_A 0x312
+#define DA9150_FW_CTRL_C 0x313
+#define DA9150_FW_CTRL_D 0x314
+#define DA9150_FG_CTRL_A 0x315
+#define DA9150_FG_CTRL_B 0x316
+#define DA9150_FW_CTRL_E 0x317
+#define DA9150_FW_CTRL_B 0x318
+#define DA9150_GPADC_CMAN 0x320
+#define DA9150_GPADC_CRES_A 0x322
+#define DA9150_GPADC_CRES_B 0x323
+#define DA9150_CC_CFG_A 0x328
+#define DA9150_CC_CFG_B 0x329
+#define DA9150_CC_ICHG_RES_A 0x32A
+#define DA9150_CC_ICHG_RES_B 0x32B
+#define DA9150_CC_IAVG_RES_A 0x32C
+#define DA9150_CC_IAVG_RES_B 0x32D
+#define DA9150_TAUX_CTRL_A 0x330
+#define DA9150_TAUX_RELOAD_H 0x332
+#define DA9150_TAUX_RELOAD_L 0x333
+#define DA9150_TAUX_VALUE_H 0x334
+#define DA9150_TAUX_VALUE_L 0x335
+#define DA9150_AUX_DATA_0 0x338
+#define DA9150_AUX_DATA_1 0x339
+#define DA9150_AUX_DATA_2 0x33A
+#define DA9150_AUX_DATA_3 0x33B
+#define DA9150_BIF_CTRL 0x340
+#define DA9150_TBAT_CTRL_A 0x342
+#define DA9150_TBAT_CTRL_B 0x343
+#define DA9150_TBAT_RES_A 0x344
+#define DA9150_TBAT_RES_B 0x345
+
+/* DA9150_PAGE_CON = 0x000 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_I2C_PAGE_SHIFT 1
+#define DA9150_I2C_PAGE_MASK (0x1f << 1)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_STATUS_A = 0x068 */
+#define DA9150_WKUP_STAT_SHIFT 2
+#define DA9150_WKUP_STAT_MASK (0x0f << 2)
+#define DA9150_SLEEP_STAT_SHIFT 6
+#define DA9150_SLEEP_STAT_MASK (0x03 << 6)
+
+/* DA9150_STATUS_B = 0x069 */
+#define DA9150_VFAULT_STAT_SHIFT 0
+#define DA9150_VFAULT_STAT_MASK BIT(0)
+#define DA9150_TFAULT_STAT_SHIFT 1
+#define DA9150_TFAULT_STAT_MASK BIT(1)
+
+/* DA9150_STATUS_C = 0x06A */
+#define DA9150_VDD33_STAT_SHIFT 0
+#define DA9150_VDD33_STAT_MASK BIT(0)
+#define DA9150_VDD33_SLEEP_SHIFT 1
+#define DA9150_VDD33_SLEEP_MASK BIT(1)
+#define DA9150_LFOSC_STAT_SHIFT 7
+#define DA9150_LFOSC_STAT_MASK BIT(7)
+
+/* DA9150_STATUS_D = 0x06B */
+#define DA9150_GPIOA_STAT_SHIFT 0
+#define DA9150_GPIOA_STAT_MASK BIT(0)
+#define DA9150_GPIOB_STAT_SHIFT 1
+#define DA9150_GPIOB_STAT_MASK BIT(1)
+#define DA9150_GPIOC_STAT_SHIFT 2
+#define DA9150_GPIOC_STAT_MASK BIT(2)
+#define DA9150_GPIOD_STAT_SHIFT 3
+#define DA9150_GPIOD_STAT_MASK BIT(3)
+
+/* DA9150_STATUS_E = 0x06C */
+#define DA9150_DTYPE_SHIFT 0
+#define DA9150_DTYPE_MASK (0x1f << 0)
+#define DA9150_DTYPE_DT_NIL (0x00 << 0)
+#define DA9150_DTYPE_DT_USB_OTG BIT(0)
+#define DA9150_DTYPE_DT_USB_STD (0x02 << 0)
+#define DA9150_DTYPE_DT_USB_CHG (0x03 << 0)
+#define DA9150_DTYPE_DT_ACA_CHG (0x04 << 0)
+#define DA9150_DTYPE_DT_ACA_OTG (0x05 << 0)
+#define DA9150_DTYPE_DT_ACA_DOC (0x06 << 0)
+#define DA9150_DTYPE_DT_DED_CHG (0x07 << 0)
+#define DA9150_DTYPE_DT_CR5_CHG (0x08 << 0)
+#define DA9150_DTYPE_DT_CR4_CHG (0x0c << 0)
+#define DA9150_DTYPE_DT_PT_CHG (0x11 << 0)
+#define DA9150_DTYPE_DT_NN_ACC (0x16 << 0)
+#define DA9150_DTYPE_DT_NN_CHG (0x17 << 0)
+
+/* DA9150_STATUS_F = 0x06D */
+#define DA9150_SESS_VLD_SHIFT 0
+#define DA9150_SESS_VLD_MASK BIT(0)
+#define DA9150_ID_ERR_SHIFT 1
+#define DA9150_ID_ERR_MASK BIT(1)
+#define DA9150_PT_CHG_SHIFT 2
+#define DA9150_PT_CHG_MASK BIT(2)
+
+/* DA9150_STATUS_G = 0x06E */
+#define DA9150_RID_SHIFT 0
+#define DA9150_RID_MASK (0xff << 0)
+
+/* DA9150_STATUS_H = 0x06F */
+#define DA9150_VBUS_STAT_SHIFT 0
+#define DA9150_VBUS_STAT_MASK (0x07 << 0)
+#define DA9150_VBUS_STAT_OFF (0x00 << 0)
+#define DA9150_VBUS_STAT_WAIT BIT(0)
+#define DA9150_VBUS_STAT_CHG (0x02 << 0)
+#define DA9150_VBUS_TRED_SHIFT 3
+#define DA9150_VBUS_TRED_MASK BIT(3)
+#define DA9150_VBUS_DROP_STAT_SHIFT 4
+#define DA9150_VBUS_DROP_STAT_MASK (0x0f << 4)
+
+/* DA9150_STATUS_I = 0x070 */
+#define DA9150_VBUS_ISET_STAT_SHIFT 0
+#define DA9150_VBUS_ISET_STAT_MASK (0x1f << 0)
+#define DA9150_VBUS_OT_SHIFT 7
+#define DA9150_VBUS_OT_MASK BIT(7)
+
+/* DA9150_STATUS_J = 0x071 */
+#define DA9150_CHG_STAT_SHIFT 0
+#define DA9150_CHG_STAT_MASK (0x0f << 0)
+#define DA9150_CHG_STAT_OFF (0x00 << 0)
+#define DA9150_CHG_STAT_SUSP BIT(0)
+#define DA9150_CHG_STAT_ACT (0x02 << 0)
+#define DA9150_CHG_STAT_PRE (0x03 << 0)
+#define DA9150_CHG_STAT_CC (0x04 << 0)
+#define DA9150_CHG_STAT_CV (0x05 << 0)
+#define DA9150_CHG_STAT_FULL (0x06 << 0)
+#define DA9150_CHG_STAT_TEMP (0x07 << 0)
+#define DA9150_CHG_STAT_TIME (0x08 << 0)
+#define DA9150_CHG_STAT_BAT (0x09 << 0)
+#define DA9150_CHG_TEMP_SHIFT 4
+#define DA9150_CHG_TEMP_MASK (0x07 << 4)
+#define DA9150_CHG_TEMP_UNDER (0x06 << 4)
+#define DA9150_CHG_TEMP_OVER (0x07 << 4)
+#define DA9150_CHG_IEND_STAT_SHIFT 7
+#define DA9150_CHG_IEND_STAT_MASK BIT(7)
+
+/* DA9150_STATUS_K = 0x072 */
+#define DA9150_CHG_IAV_H_SHIFT 0
+#define DA9150_CHG_IAV_H_MASK (0xff << 0)
+
+/* DA9150_STATUS_L = 0x073 */
+#define DA9150_CHG_IAV_L_SHIFT 5
+#define DA9150_CHG_IAV_L_MASK (0x07 << 5)
+
+/* DA9150_STATUS_N = 0x074 */
+#define DA9150_CHG_TIME_SHIFT 1
+#define DA9150_CHG_TIME_MASK BIT(1)
+#define DA9150_CHG_TRED_SHIFT 2
+#define DA9150_CHG_TRED_MASK BIT(2)
+#define DA9150_CHG_TJUNC_CLASS_SHIFT 3
+#define DA9150_CHG_TJUNC_CLASS_MASK (0x07 << 3)
+#define DA9150_CHG_TJUNC_CLASS_6 (0x06 << 3)
+#define DA9150_EBS_STAT_SHIFT 6
+#define DA9150_EBS_STAT_MASK BIT(6)
+#define DA9150_CHG_BAT_REMOVED_SHIFT 7
+#define DA9150_CHG_BAT_REMOVED_MASK BIT(7)
+
+/* DA9150_FAULT_LOG_A = 0x076 */
+#define DA9150_TEMP_FAULT_SHIFT 0
+#define DA9150_TEMP_FAULT_MASK BIT(0)
+#define DA9150_VSYS_FAULT_SHIFT 1
+#define DA9150_VSYS_FAULT_MASK BIT(1)
+#define DA9150_START_FAULT_SHIFT 2
+#define DA9150_START_FAULT_MASK BIT(2)
+#define DA9150_EXT_FAULT_SHIFT 3
+#define DA9150_EXT_FAULT_MASK BIT(3)
+#define DA9150_POR_FAULT_SHIFT 4
+#define DA9150_POR_FAULT_MASK BIT(4)
+
+/* DA9150_FAULT_LOG_B = 0x077 */
+#define DA9150_VBUS_FAULT_SHIFT 0
+#define DA9150_VBUS_FAULT_MASK BIT(0)
+#define DA9150_OTG_FAULT_SHIFT 1
+#define DA9150_OTG_FAULT_MASK BIT(1)
+
+/* DA9150_EVENT_E = 0x078 */
+#define DA9150_E_VBUS_SHIFT 0
+#define DA9150_E_VBUS_MASK BIT(0)
+#define DA9150_E_CHG_SHIFT 1
+#define DA9150_E_CHG_MASK BIT(1)
+#define DA9150_E_TCLASS_SHIFT 2
+#define DA9150_E_TCLASS_MASK BIT(2)
+#define DA9150_E_TJUNC_SHIFT 3
+#define DA9150_E_TJUNC_MASK BIT(3)
+#define DA9150_E_VFAULT_SHIFT 4
+#define DA9150_E_VFAULT_MASK BIT(4)
+#define DA9150_EVENTS_H_SHIFT 5
+#define DA9150_EVENTS_H_MASK BIT(5)
+#define DA9150_EVENTS_G_SHIFT 6
+#define DA9150_EVENTS_G_MASK BIT(6)
+#define DA9150_EVENTS_F_SHIFT 7
+#define DA9150_EVENTS_F_MASK BIT(7)
+
+/* DA9150_EVENT_F = 0x079 */
+#define DA9150_E_CONF_SHIFT 0
+#define DA9150_E_CONF_MASK BIT(0)
+#define DA9150_E_DAT_SHIFT 1
+#define DA9150_E_DAT_MASK BIT(1)
+#define DA9150_E_DTYPE_SHIFT 3
+#define DA9150_E_DTYPE_MASK BIT(3)
+#define DA9150_E_ID_SHIFT 4
+#define DA9150_E_ID_MASK BIT(4)
+#define DA9150_E_ADP_SHIFT 5
+#define DA9150_E_ADP_MASK BIT(5)
+#define DA9150_E_SESS_END_SHIFT 6
+#define DA9150_E_SESS_END_MASK BIT(6)
+#define DA9150_E_SESS_VLD_SHIFT 7
+#define DA9150_E_SESS_VLD_MASK BIT(7)
+
+/* DA9150_EVENT_G = 0x07A */
+#define DA9150_E_FG_SHIFT 0
+#define DA9150_E_FG_MASK BIT(0)
+#define DA9150_E_GP_SHIFT 1
+#define DA9150_E_GP_MASK BIT(1)
+#define DA9150_E_TBAT_SHIFT 2
+#define DA9150_E_TBAT_MASK BIT(2)
+#define DA9150_E_GPIOA_SHIFT 3
+#define DA9150_E_GPIOA_MASK BIT(3)
+#define DA9150_E_GPIOB_SHIFT 4
+#define DA9150_E_GPIOB_MASK BIT(4)
+#define DA9150_E_GPIOC_SHIFT 5
+#define DA9150_E_GPIOC_MASK BIT(5)
+#define DA9150_E_GPIOD_SHIFT 6
+#define DA9150_E_GPIOD_MASK BIT(6)
+#define DA9150_E_GPADC_SHIFT 7
+#define DA9150_E_GPADC_MASK BIT(7)
+
+/* DA9150_EVENT_H = 0x07B */
+#define DA9150_E_WKUP_SHIFT 0
+#define DA9150_E_WKUP_MASK BIT(0)
+
+/* DA9150_IRQ_MASK_E = 0x07C */
+#define DA9150_M_VBUS_SHIFT 0
+#define DA9150_M_VBUS_MASK BIT(0)
+#define DA9150_M_CHG_SHIFT 1
+#define DA9150_M_CHG_MASK BIT(1)
+#define DA9150_M_TJUNC_SHIFT 3
+#define DA9150_M_TJUNC_MASK BIT(3)
+#define DA9150_M_VFAULT_SHIFT 4
+#define DA9150_M_VFAULT_MASK BIT(4)
+
+/* DA9150_IRQ_MASK_F = 0x07D */
+#define DA9150_M_CONF_SHIFT 0
+#define DA9150_M_CONF_MASK BIT(0)
+#define DA9150_M_DAT_SHIFT 1
+#define DA9150_M_DAT_MASK BIT(1)
+#define DA9150_M_DTYPE_SHIFT 3
+#define DA9150_M_DTYPE_MASK BIT(3)
+#define DA9150_M_ID_SHIFT 4
+#define DA9150_M_ID_MASK BIT(4)
+#define DA9150_M_ADP_SHIFT 5
+#define DA9150_M_ADP_MASK BIT(5)
+#define DA9150_M_SESS_END_SHIFT 6
+#define DA9150_M_SESS_END_MASK BIT(6)
+#define DA9150_M_SESS_VLD_SHIFT 7
+#define DA9150_M_SESS_VLD_MASK BIT(7)
+
+/* DA9150_IRQ_MASK_G = 0x07E */
+#define DA9150_M_FG_SHIFT 0
+#define DA9150_M_FG_MASK BIT(0)
+#define DA9150_M_GP_SHIFT 1
+#define DA9150_M_GP_MASK BIT(1)
+#define DA9150_M_TBAT_SHIFT 2
+#define DA9150_M_TBAT_MASK BIT(2)
+#define DA9150_M_GPIOA_SHIFT 3
+#define DA9150_M_GPIOA_MASK BIT(3)
+#define DA9150_M_GPIOB_SHIFT 4
+#define DA9150_M_GPIOB_MASK BIT(4)
+#define DA9150_M_GPIOC_SHIFT 5
+#define DA9150_M_GPIOC_MASK BIT(5)
+#define DA9150_M_GPIOD_SHIFT 6
+#define DA9150_M_GPIOD_MASK BIT(6)
+#define DA9150_M_GPADC_SHIFT 7
+#define DA9150_M_GPADC_MASK BIT(7)
+
+/* DA9150_IRQ_MASK_H = 0x07F */
+#define DA9150_M_WKUP_SHIFT 0
+#define DA9150_M_WKUP_MASK BIT(0)
+
+/* DA9150_PAGE_CON_1 = 0x080 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_CONFIG_A = 0x0E0 */
+#define DA9150_RESET_DUR_SHIFT 0
+#define DA9150_RESET_DUR_MASK (0x03 << 0)
+#define DA9150_RESET_EXT_SHIFT 2
+#define DA9150_RESET_EXT_MASK (0x03 << 2)
+#define DA9150_START_MAX_SHIFT 4
+#define DA9150_START_MAX_MASK (0x03 << 4)
+#define DA9150_PS_WAIT_EN_SHIFT 6
+#define DA9150_PS_WAIT_EN_MASK BIT(6)
+#define DA9150_PS_DISABLE_DIRECT_SHIFT 7
+#define DA9150_PS_DISABLE_DIRECT_MASK BIT(7)
+
+/* DA9150_CONFIG_B = 0x0E1 */
+#define DA9150_VFAULT_ADJ_SHIFT 0
+#define DA9150_VFAULT_ADJ_MASK (0x0f << 0)
+#define DA9150_VFAULT_HYST_SHIFT 4
+#define DA9150_VFAULT_HYST_MASK (0x07 << 4)
+#define DA9150_VFAULT_EN_SHIFT 7
+#define DA9150_VFAULT_EN_MASK BIT(7)
+
+/* DA9150_CONFIG_C = 0x0E2 */
+#define DA9150_VSYS_MIN_SHIFT 3
+#define DA9150_VSYS_MIN_MASK (0x1f << 3)
+
+/* DA9150_CONFIG_D = 0x0E3 */
+#define DA9150_LFOSC_EXT_SHIFT 0
+#define DA9150_LFOSC_EXT_MASK BIT(0)
+#define DA9150_VDD33_DWN_SHIFT 1
+#define DA9150_VDD33_DWN_MASK BIT(1)
+#define DA9150_WKUP_PM_EN_SHIFT 2
+#define DA9150_WKUP_PM_EN_MASK BIT(2)
+#define DA9150_WKUP_CE_SEL_SHIFT 3
+#define DA9150_WKUP_CE_SEL_MASK (0x03 << 3)
+#define DA9150_WKUP_CLK32K_EN_SHIFT 5
+#define DA9150_WKUP_CLK32K_EN_MASK BIT(5)
+#define DA9150_DISABLE_DEL_SHIFT 7
+#define DA9150_DISABLE_DEL_MASK BIT(7)
+
+/* DA9150_CONFIG_E = 0x0E4 */
+#define DA9150_PM_SPKSUP_DIS_SHIFT 0
+#define DA9150_PM_SPKSUP_DIS_MASK BIT(0)
+#define DA9150_PM_MERGE_SHIFT 1
+#define DA9150_PM_MERGE_MASK BIT(1)
+#define DA9150_PM_SR_OFF_SHIFT 2
+#define DA9150_PM_SR_OFF_MASK BIT(2)
+#define DA9150_PM_TIMEOUT_EN_SHIFT 3
+#define DA9150_PM_TIMEOUT_EN_MASK BIT(3)
+#define DA9150_PM_DLY_SEL_SHIFT 4
+#define DA9150_PM_DLY_SEL_MASK (0x07 << 4)
+#define DA9150_PM_OUT_DLY_SEL_SHIFT 7
+#define DA9150_PM_OUT_DLY_SEL_MASK BIT(7)
+
+/* DA9150_CONTROL_A = 0x0E5 */
+#define DA9150_VDD33_SL_SHIFT 0
+#define DA9150_VDD33_SL_MASK BIT(0)
+#define DA9150_VDD33_LPM_SHIFT 1
+#define DA9150_VDD33_LPM_MASK (0x03 << 1)
+#define DA9150_VDD33_EN_SHIFT 3
+#define DA9150_VDD33_EN_MASK BIT(3)
+#define DA9150_GPI_LPM_SHIFT 6
+#define DA9150_GPI_LPM_MASK BIT(6)
+#define DA9150_PM_IF_LPM_SHIFT 7
+#define DA9150_PM_IF_LPM_MASK BIT(7)
+
+/* DA9150_CONTROL_B = 0x0E6 */
+#define DA9150_LPM_SHIFT 0
+#define DA9150_LPM_MASK BIT(0)
+#define DA9150_RESET_SHIFT 1
+#define DA9150_RESET_MASK BIT(1)
+#define DA9150_RESET_USRCONF_EN_SHIFT 2
+#define DA9150_RESET_USRCONF_EN_MASK BIT(2)
+
+/* DA9150_CONTROL_C = 0x0E7 */
+#define DA9150_DISABLE_SHIFT 0
+#define DA9150_DISABLE_MASK BIT(0)
+
+/* DA9150_GPIO_A_B = 0x0E8 */
+#define DA9150_GPIOA_PIN_SHIFT 0
+#define DA9150_GPIOA_PIN_MASK (0x07 << 0)
+#define DA9150_GPIOA_PIN_GPI (0x00 << 0)
+#define DA9150_GPIOA_PIN_GPO_OD BIT(0)
+#define DA9150_GPIOA_TYPE_SHIFT 3
+#define DA9150_GPIOA_TYPE_MASK BIT(3)
+#define DA9150_GPIOB_PIN_SHIFT 4
+#define DA9150_GPIOB_PIN_MASK (0x07 << 4)
+#define DA9150_GPIOB_PIN_GPI (0x00 << 4)
+#define DA9150_GPIOB_PIN_GPO_OD BIT(4)
+#define DA9150_GPIOB_TYPE_SHIFT 7
+#define DA9150_GPIOB_TYPE_MASK BIT(7)
+
+/* DA9150_GPIO_C_D = 0x0E9 */
+#define DA9150_GPIOC_PIN_SHIFT 0
+#define DA9150_GPIOC_PIN_MASK (0x07 << 0)
+#define DA9150_GPIOC_PIN_GPI (0x00 << 0)
+#define DA9150_GPIOC_PIN_GPO_OD BIT(0)
+#define DA9150_GPIOC_TYPE_SHIFT 3
+#define DA9150_GPIOC_TYPE_MASK BIT(3)
+#define DA9150_GPIOD_PIN_SHIFT 4
+#define DA9150_GPIOD_PIN_MASK (0x07 << 4)
+#define DA9150_GPIOD_PIN_GPI (0x00 << 4)
+#define DA9150_GPIOD_PIN_GPO_OD BIT(4)
+#define DA9150_GPIOD_TYPE_SHIFT 7
+#define DA9150_GPIOD_TYPE_MASK BIT(7)
+
+/* DA9150_GPIO_MODE_CONT = 0x0EA */
+#define DA9150_GPIOA_MODE_SHIFT 0
+#define DA9150_GPIOA_MODE_MASK BIT(0)
+#define DA9150_GPIOB_MODE_SHIFT 1
+#define DA9150_GPIOB_MODE_MASK BIT(1)
+#define DA9150_GPIOC_MODE_SHIFT 2
+#define DA9150_GPIOC_MODE_MASK BIT(2)
+#define DA9150_GPIOD_MODE_SHIFT 3
+#define DA9150_GPIOD_MODE_MASK BIT(3)
+#define DA9150_GPIOA_CONT_SHIFT 4
+#define DA9150_GPIOA_CONT_MASK BIT(4)
+#define DA9150_GPIOB_CONT_SHIFT 5
+#define DA9150_GPIOB_CONT_MASK BIT(5)
+#define DA9150_GPIOC_CONT_SHIFT 6
+#define DA9150_GPIOC_CONT_MASK BIT(6)
+#define DA9150_GPIOD_CONT_SHIFT 7
+#define DA9150_GPIOD_CONT_MASK BIT(7)
+
+/* DA9150_GPIO_CTRL_B = 0x0EB */
+#define DA9150_WAKE_PIN_SHIFT 0
+#define DA9150_WAKE_PIN_MASK (0x03 << 0)
+#define DA9150_WAKE_MODE_SHIFT 2
+#define DA9150_WAKE_MODE_MASK BIT(2)
+#define DA9150_WAKE_CONT_SHIFT 3
+#define DA9150_WAKE_CONT_MASK BIT(3)
+#define DA9150_WAKE_DLY_SHIFT 4
+#define DA9150_WAKE_DLY_MASK BIT(4)
+
+/* DA9150_GPIO_CTRL_A = 0x0EC */
+#define DA9150_GPIOA_ANAEN_SHIFT 0
+#define DA9150_GPIOA_ANAEN_MASK BIT(0)
+#define DA9150_GPIOB_ANAEN_SHIFT 1
+#define DA9150_GPIOB_ANAEN_MASK BIT(1)
+#define DA9150_GPIOC_ANAEN_SHIFT 2
+#define DA9150_GPIOC_ANAEN_MASK BIT(2)
+#define DA9150_GPIOD_ANAEN_SHIFT 3
+#define DA9150_GPIOD_ANAEN_MASK BIT(3)
+#define DA9150_GPIO_ANAEN 0x01
+#define DA9150_GPIO_ANAEN_MASK 0x0F
+#define DA9150_CHGLED_PIN_SHIFT 5
+#define DA9150_CHGLED_PIN_MASK (0x07 << 5)
+
+/* DA9150_GPIO_CTRL_C = 0x0ED */
+#define DA9150_CHGBL_DUR_SHIFT 0
+#define DA9150_CHGBL_DUR_MASK (0x03 << 0)
+#define DA9150_CHGBL_DBL_SHIFT 2
+#define DA9150_CHGBL_DBL_MASK BIT(2)
+#define DA9150_CHGBL_FRQ_SHIFT 3
+#define DA9150_CHGBL_FRQ_MASK (0x03 << 3)
+#define DA9150_CHGBL_FLKR_SHIFT 5
+#define DA9150_CHGBL_FLKR_MASK BIT(5)
+
+/* DA9150_GPIO_CFG_A = 0x0EE */
+#define DA9150_CE_LPM_DEB_SHIFT 0
+#define DA9150_CE_LPM_DEB_MASK (0x07 << 0)
+
+/* DA9150_GPIO_CFG_B = 0x0EF */
+#define DA9150_GPIOA_PUPD_SHIFT 0
+#define DA9150_GPIOA_PUPD_MASK BIT(0)
+#define DA9150_GPIOB_PUPD_SHIFT 1
+#define DA9150_GPIOB_PUPD_MASK BIT(1)
+#define DA9150_GPIOC_PUPD_SHIFT 2
+#define DA9150_GPIOC_PUPD_MASK BIT(2)
+#define DA9150_GPIOD_PUPD_SHIFT 3
+#define DA9150_GPIOD_PUPD_MASK BIT(3)
+#define DA9150_GPIO_PUPD_MASK (0xF << 0)
+#define DA9150_GPI_DEB_SHIFT 4
+#define DA9150_GPI_DEB_MASK (0x07 << 4)
+#define DA9150_LPM_EN_SHIFT 7
+#define DA9150_LPM_EN_MASK BIT(7)
+
+/* DA9150_GPIO_CFG_C = 0x0F0 */
+#define DA9150_GPI_V_SHIFT 0
+#define DA9150_GPI_V_MASK BIT(0)
+#define DA9150_VDDIO_INT_SHIFT 1
+#define DA9150_VDDIO_INT_MASK BIT(1)
+#define DA9150_FAULT_PIN_SHIFT 3
+#define DA9150_FAULT_PIN_MASK (0x07 << 3)
+#define DA9150_FAULT_TYPE_SHIFT 6
+#define DA9150_FAULT_TYPE_MASK BIT(6)
+#define DA9150_NIRQ_PUPD_SHIFT 7
+#define DA9150_NIRQ_PUPD_MASK BIT(7)
+
+/* DA9150_GPADC_MAN = 0x0F2 */
+#define DA9150_GPADC_EN_SHIFT 0
+#define DA9150_GPADC_EN_MASK BIT(0)
+#define DA9150_GPADC_MUX_SHIFT 1
+#define DA9150_GPADC_MUX_MASK (0x1f << 1)
+
+/* DA9150_GPADC_RES_A = 0x0F4 */
+#define DA9150_GPADC_RES_H_SHIFT 0
+#define DA9150_GPADC_RES_H_MASK (0xff << 0)
+
+/* DA9150_GPADC_RES_B = 0x0F5 */
+#define DA9150_GPADC_RUN_SHIFT 0
+#define DA9150_GPADC_RUN_MASK BIT(0)
+#define DA9150_GPADC_RES_L_SHIFT 6
+#define DA9150_GPADC_RES_L_MASK (0x03 << 6)
+#define DA9150_GPADC_RES_L_BITS 2
+
+/* DA9150_PAGE_CON_2 = 0x100 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_OTP_CONT_SHARED = 0x101 */
+#define DA9150_PC_DONE_SHIFT 3
+#define DA9150_PC_DONE_MASK BIT(3)
+
+/* DA9150_INTERFACE_SHARED = 0x105 */
+#define DA9150_IF_BASE_ADDR_SHIFT 4
+#define DA9150_IF_BASE_ADDR_MASK (0x0f << 4)
+
+/* DA9150_CONFIG_A_SHARED = 0x106 */
+#define DA9150_NIRQ_VDD_SHIFT 1
+#define DA9150_NIRQ_VDD_MASK BIT(1)
+#define DA9150_NIRQ_PIN_SHIFT 2
+#define DA9150_NIRQ_PIN_MASK BIT(2)
+#define DA9150_NIRQ_TYPE_SHIFT 3
+#define DA9150_NIRQ_TYPE_MASK BIT(3)
+#define DA9150_PM_IF_V_SHIFT 4
+#define DA9150_PM_IF_V_MASK BIT(4)
+#define DA9150_PM_IF_FMP_SHIFT 5
+#define DA9150_PM_IF_FMP_MASK BIT(5)
+#define DA9150_PM_IF_HSM_SHIFT 6
+#define DA9150_PM_IF_HSM_MASK BIT(6)
+
+/* DA9150_CONFIG_D_SHARED = 0x109 */
+#define DA9150_NIRQ_MODE_SHIFT 1
+#define DA9150_NIRQ_MODE_MASK BIT(1)
+
+/* DA9150_ADETVB_CFG_C = 0x150 */
+#define DA9150_TADP_RISE_SHIFT 0
+#define DA9150_TADP_RISE_MASK (0xff << 0)
+
+/* DA9150_ADETD_STAT = 0x151 */
+#define DA9150_DCD_STAT_SHIFT 0
+#define DA9150_DCD_STAT_MASK BIT(0)
+#define DA9150_PCD_STAT_SHIFT 1
+#define DA9150_PCD_STAT_MASK (0x03 << 1)
+#define DA9150_SCD_STAT_SHIFT 3
+#define DA9150_SCD_STAT_MASK (0x03 << 3)
+#define DA9150_DP_STAT_SHIFT 5
+#define DA9150_DP_STAT_MASK BIT(5)
+#define DA9150_DM_STAT_SHIFT 6
+#define DA9150_DM_STAT_MASK BIT(6)
+
+/* DA9150_ADET_CMPSTAT = 0x152 */
+#define DA9150_DP_COMP_SHIFT 1
+#define DA9150_DP_COMP_MASK BIT(1)
+#define DA9150_DM_COMP_SHIFT 2
+#define DA9150_DM_COMP_MASK BIT(2)
+#define DA9150_ADP_SNS_COMP_SHIFT 3
+#define DA9150_ADP_SNS_COMP_MASK BIT(3)
+#define DA9150_ADP_PRB_COMP_SHIFT 4
+#define DA9150_ADP_PRB_COMP_MASK BIT(4)
+#define DA9150_ID_COMP_SHIFT 5
+#define DA9150_ID_COMP_MASK BIT(5)
+
+/* DA9150_ADET_CTRL_A = 0x153 */
+#define DA9150_AID_DAT_SHIFT 0
+#define DA9150_AID_DAT_MASK BIT(0)
+#define DA9150_AID_ID_SHIFT 1
+#define DA9150_AID_ID_MASK BIT(1)
+#define DA9150_AID_TRIG_SHIFT 2
+#define DA9150_AID_TRIG_MASK BIT(2)
+
+/* DA9150_ADETVB_CFG_B = 0x154 */
+#define DA9150_VB_MODE_SHIFT 0
+#define DA9150_VB_MODE_MASK (0x03 << 0)
+#define DA9150_VB_MODE_VB_SESS BIT(0)
+
+#define DA9150_TADP_PRB_SHIFT 2
+#define DA9150_TADP_PRB_MASK BIT(2)
+#define DA9150_DAT_RPD_EXT_SHIFT 5
+#define DA9150_DAT_RPD_EXT_MASK BIT(5)
+#define DA9150_CONF_RPD_SHIFT 6
+#define DA9150_CONF_RPD_MASK BIT(6)
+#define DA9150_CONF_SRP_SHIFT 7
+#define DA9150_CONF_SRP_MASK BIT(7)
+
+/* DA9150_ADETVB_CFG_A = 0x155 */
+#define DA9150_AID_MODE_SHIFT 0
+#define DA9150_AID_MODE_MASK (0x03 << 0)
+#define DA9150_AID_EXT_POL_SHIFT 2
+#define DA9150_AID_EXT_POL_MASK BIT(2)
+
+/* DA9150_ADETAC_CFG_A = 0x156 */
+#define DA9150_ISET_CDP_SHIFT 0
+#define DA9150_ISET_CDP_MASK (0x1f << 0)
+#define DA9150_CONF_DBP_SHIFT 5
+#define DA9150_CONF_DBP_MASK BIT(5)
+
+/* DA9150_ADDETAC_CFG_B = 0x157 */
+#define DA9150_ISET_DCHG_SHIFT 0
+#define DA9150_ISET_DCHG_MASK (0x1f << 0)
+#define DA9150_CONF_GPIOA_SHIFT 5
+#define DA9150_CONF_GPIOA_MASK BIT(5)
+#define DA9150_CONF_GPIOB_SHIFT 6
+#define DA9150_CONF_GPIOB_MASK BIT(6)
+#define DA9150_AID_VB_SHIFT 7
+#define DA9150_AID_VB_MASK BIT(7)
+
+/* DA9150_ADETAC_CFG_C = 0x158 */
+#define DA9150_ISET_DEF_SHIFT 0
+#define DA9150_ISET_DEF_MASK (0x1f << 0)
+#define DA9150_CONF_MODE_SHIFT 5
+#define DA9150_CONF_MODE_MASK (0x03 << 5)
+#define DA9150_AID_CR_DIS_SHIFT 7
+#define DA9150_AID_CR_DIS_MASK BIT(7)
+
+/* DA9150_ADETAC_CFG_D = 0x159 */
+#define DA9150_ISET_UNIT_SHIFT 0
+#define DA9150_ISET_UNIT_MASK (0x1f << 0)
+#define DA9150_AID_UNCLAMP_SHIFT 5
+#define DA9150_AID_UNCLAMP_MASK BIT(5)
+
+/* DA9150_ADETVB_CFG_D = 0x15A */
+#define DA9150_ID_MODE_SHIFT 0
+#define DA9150_ID_MODE_MASK (0x03 << 0)
+#define DA9150_DAT_MODE_SHIFT 2
+#define DA9150_DAT_MODE_MASK (0x0f << 2)
+#define DA9150_DAT_SWP_SHIFT 6
+#define DA9150_DAT_SWP_MASK BIT(6)
+#define DA9150_DAT_CLAMP_EXT_SHIFT 7
+#define DA9150_DAT_CLAMP_EXT_MASK BIT(7)
+
+/* DA9150_ADETID_CFG_A = 0x15B */
+#define DA9150_TID_POLL_SHIFT 0
+#define DA9150_TID_POLL_MASK (0x07 << 0)
+#define DA9150_RID_CONV_SHIFT 3
+#define DA9150_RID_CONV_MASK BIT(3)
+
+/* DA9150_ADET_RID_PT_CHG_H = 0x15C */
+#define DA9150_RID_PT_CHG_H_SHIFT 0
+#define DA9150_RID_PT_CHG_H_MASK (0xff << 0)
+
+/* DA9150_ADET_RID_PT_CHG_L = 0x15D */
+#define DA9150_RID_PT_CHG_L_SHIFT 6
+#define DA9150_RID_PT_CHG_L_MASK (0x03 << 6)
+
+/* DA9150_PPR_TCTR_B = 0x160 */
+#define DA9150_CHG_TCTR_VAL_SHIFT 0
+#define DA9150_CHG_TCTR_VAL_MASK (0xff << 0)
+
+/* DA9150_PPR_BKCTRL_A = 0x163 */
+#define DA9150_VBUS_MODE_SHIFT 0
+#define DA9150_VBUS_MODE_MASK (0x03 << 0)
+#define DA9150_VBUS_MODE_CHG BIT(0)
+#define DA9150_VBUS_MODE_OTG (0x02 << 0)
+#define DA9150_VBUS_LPM_SHIFT 2
+#define DA9150_VBUS_LPM_MASK (0x03 << 2)
+#define DA9150_VBUS_SUSP_SHIFT 4
+#define DA9150_VBUS_SUSP_MASK BIT(4)
+#define DA9150_VBUS_PWM_SHIFT 5
+#define DA9150_VBUS_PWM_MASK BIT(5)
+#define DA9150_VBUS_ISO_SHIFT 6
+#define DA9150_VBUS_ISO_MASK BIT(6)
+#define DA9150_VBUS_LDO_SHIFT 7
+#define DA9150_VBUS_LDO_MASK BIT(7)
+
+/* DA9150_PPR_BKCFG_A = 0x164 */
+#define DA9150_VBUS_ISET_SHIFT 0
+#define DA9150_VBUS_ISET_MASK (0x1f << 0)
+#define DA9150_VBUS_IMAX_SHIFT 5
+#define DA9150_VBUS_IMAX_MASK BIT(5)
+#define DA9150_VBUS_IOTG_SHIFT 6
+#define DA9150_VBUS_IOTG_MASK (0x03 << 6)
+
+/* DA9150_PPR_BKCFG_B = 0x165 */
+#define DA9150_VBUS_DROP_SHIFT 0
+#define DA9150_VBUS_DROP_MASK (0x0f << 0)
+#define DA9150_VBUS_FAULT_DIS_SHIFT 6
+#define DA9150_VBUS_FAULT_DIS_MASK BIT(6)
+#define DA9150_OTG_FAULT_DIS_SHIFT 7
+#define DA9150_OTG_FAULT_DIS_MASK BIT(7)
+
+/* DA9150_PPR_CHGCTRL_A = 0x166 */
+#define DA9150_CHG_EN_SHIFT 0
+#define DA9150_CHG_EN_MASK BIT(0)
+
+/* DA9150_PPR_CHGCTRL_B = 0x167 */
+#define DA9150_CHG_VBAT_SHIFT 0
+#define DA9150_CHG_VBAT_MASK (0x1f << 0)
+#define DA9150_CHG_VDROP_SHIFT 6
+#define DA9150_CHG_VDROP_MASK (0x03 << 6)
+
+/* DA9150_PPR_CHGCTRL_C = 0x168 */
+#define DA9150_CHG_VFAULT_SHIFT 0
+#define DA9150_CHG_VFAULT_MASK (0x0f << 0)
+#define DA9150_CHG_IPRE_SHIFT 4
+#define DA9150_CHG_IPRE_MASK (0x03 << 4)
+
+/* DA9150_PPR_TCTR_A = 0x169 */
+#define DA9150_CHG_TCTR_SHIFT 0
+#define DA9150_CHG_TCTR_MASK (0x07 << 0)
+#define DA9150_CHG_TCTR_MODE_SHIFT 4
+#define DA9150_CHG_TCTR_MODE_MASK BIT(4)
+
+/* DA9150_PPR_CHGCTRL_D = 0x16A */
+#define DA9150_CHG_IBAT_SHIFT 0
+#define DA9150_CHG_IBAT_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_E = 0x16B */
+#define DA9150_CHG_IEND_SHIFT 0
+#define DA9150_CHG_IEND_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_F = 0x16C */
+#define DA9150_CHG_VCOLD_SHIFT 0
+#define DA9150_CHG_VCOLD_MASK (0x1f << 0)
+#define DA9150_TBAT_TQA_EN_SHIFT 6
+#define DA9150_TBAT_TQA_EN_MASK BIT(6)
+#define DA9150_TBAT_TDP_EN_SHIFT 7
+#define DA9150_TBAT_TDP_EN_MASK BIT(7)
+
+/* DA9150_PPR_CHGCTRL_G = 0x16D */
+#define DA9150_CHG_VWARM_SHIFT 0
+#define DA9150_CHG_VWARM_MASK (0x1f << 0)
+
+/* DA9150_PPR_CHGCTRL_H = 0x16E */
+#define DA9150_CHG_VHOT_SHIFT 0
+#define DA9150_CHG_VHOT_MASK (0x1f << 0)
+
+/* DA9150_PPR_CHGCTRL_I = 0x16F */
+#define DA9150_CHG_ICOLD_SHIFT 0
+#define DA9150_CHG_ICOLD_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_J = 0x170 */
+#define DA9150_CHG_IWARM_SHIFT 0
+#define DA9150_CHG_IWARM_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_K = 0x171 */
+#define DA9150_CHG_IHOT_SHIFT 0
+#define DA9150_CHG_IHOT_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_L = 0x172 */
+#define DA9150_CHG_IBAT_TRED_SHIFT 0
+#define DA9150_CHG_IBAT_TRED_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_M = 0x173 */
+#define DA9150_CHG_VFLOAT_SHIFT 0
+#define DA9150_CHG_VFLOAT_MASK (0x0f << 0)
+#define DA9150_CHG_LPM_SHIFT 5
+#define DA9150_CHG_LPM_MASK BIT(5)
+#define DA9150_CHG_NBLO_SHIFT 6
+#define DA9150_CHG_NBLO_MASK BIT(6)
+#define DA9150_EBS_EN_SHIFT 7
+#define DA9150_EBS_EN_MASK BIT(7)
+
+/* DA9150_PPR_THYST_A = 0x174 */
+#define DA9150_TBAT_T1_SHIFT 0
+#define DA9150_TBAT_T1_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_B = 0x175 */
+#define DA9150_TBAT_T2_SHIFT 0
+#define DA9150_TBAT_T2_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_C = 0x176 */
+#define DA9150_TBAT_T3_SHIFT 0
+#define DA9150_TBAT_T3_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_D = 0x177 */
+#define DA9150_TBAT_T4_SHIFT 0
+#define DA9150_TBAT_T4_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_E = 0x178 */
+#define DA9150_TBAT_T5_SHIFT 0
+#define DA9150_TBAT_T5_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_F = 0x179 */
+#define DA9150_TBAT_H1_SHIFT 0
+#define DA9150_TBAT_H1_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_G = 0x17A */
+#define DA9150_TBAT_H5_SHIFT 0
+#define DA9150_TBAT_H5_MASK (0xff << 0)
+
+/* DA9150_PAGE_CON_3 = 0x180 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_PAGE_CON_4 = 0x200 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_PAGE_CON_5 = 0x280 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_PAGE_CON_6 = 0x300 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_COREBTLD_STAT_A = 0x302 */
+#define DA9150_BOOTLD_STAT_SHIFT 0
+#define DA9150_BOOTLD_STAT_MASK (0x03 << 0)
+#define DA9150_CORE_LOCKUP_SHIFT 2
+#define DA9150_CORE_LOCKUP_MASK BIT(2)
+
+/* DA9150_COREBTLD_CTRL_A = 0x303 */
+#define DA9150_CORE_RESET_SHIFT 0
+#define DA9150_CORE_RESET_MASK BIT(0)
+#define DA9150_CORE_STOP_SHIFT 1
+#define DA9150_CORE_STOP_MASK BIT(1)
+
+/* DA9150_CORE_CONFIG_A = 0x304 */
+#define DA9150_CORE_MEMMUX_SHIFT 0
+#define DA9150_CORE_MEMMUX_MASK (0x03 << 0)
+#define DA9150_WDT_AUTO_START_SHIFT 2
+#define DA9150_WDT_AUTO_START_MASK BIT(2)
+#define DA9150_WDT_AUTO_LOCK_SHIFT 3
+#define DA9150_WDT_AUTO_LOCK_MASK BIT(3)
+#define DA9150_WDT_HLT_NO_CLK_SHIFT 4
+#define DA9150_WDT_HLT_NO_CLK_MASK BIT(4)
+
+/* DA9150_CORE_CONFIG_C = 0x305 */
+#define DA9150_CORE_SW_SIZE_SHIFT 0
+#define DA9150_CORE_SW_SIZE_MASK (0xff << 0)
+
+/* DA9150_CORE_CONFIG_B = 0x306 */
+#define DA9150_BOOTLD_EN_SHIFT 0
+#define DA9150_BOOTLD_EN_MASK BIT(0)
+#define DA9150_CORE_EN_SHIFT 2
+#define DA9150_CORE_EN_MASK BIT(2)
+#define DA9150_CORE_SW_SRC_SHIFT 3
+#define DA9150_CORE_SW_SRC_MASK (0x07 << 3)
+#define DA9150_DEEP_SLEEP_EN_SHIFT 7
+#define DA9150_DEEP_SLEEP_EN_MASK BIT(7)
+
+/* DA9150_CORE_CFG_DATA_A = 0x307 */
+#define DA9150_CORE_CFG_DT_A_SHIFT 0
+#define DA9150_CORE_CFG_DT_A_MASK (0xff << 0)
+
+/* DA9150_CORE_CFG_DATA_B = 0x308 */
+#define DA9150_CORE_CFG_DT_B_SHIFT 0
+#define DA9150_CORE_CFG_DT_B_MASK (0xff << 0)
+
+/* DA9150_CORE_CMD_A = 0x309 */
+#define DA9150_CORE_CMD_SHIFT 0
+#define DA9150_CORE_CMD_MASK (0xff << 0)
+
+/* DA9150_CORE_DATA_A = 0x30A */
+#define DA9150_CORE_DATA_0_SHIFT 0
+#define DA9150_CORE_DATA_0_MASK (0xff << 0)
+
+/* DA9150_CORE_DATA_B = 0x30B */
+#define DA9150_CORE_DATA_1_SHIFT 0
+#define DA9150_CORE_DATA_1_MASK (0xff << 0)
+
+/* DA9150_CORE_DATA_C = 0x30C */
+#define DA9150_CORE_DATA_2_SHIFT 0
+#define DA9150_CORE_DATA_2_MASK (0xff << 0)
+
+/* DA9150_CORE_DATA_D = 0x30D */
+#define DA9150_CORE_DATA_3_SHIFT 0
+#define DA9150_CORE_DATA_3_MASK (0xff << 0)
+
+/* DA9150_CORE2WIRE_STAT_A = 0x310 */
+#define DA9150_FW_FWDL_ERR_SHIFT 7
+#define DA9150_FW_FWDL_ERR_MASK BIT(7)
+
+/* DA9150_CORE2WIRE_CTRL_A = 0x311 */
+#define DA9150_FW_FWDL_EN_SHIFT 0
+#define DA9150_FW_FWDL_EN_MASK BIT(0)
+#define DA9150_FG_QIF_EN_SHIFT 1
+#define DA9150_FG_QIF_EN_MASK BIT(1)
+#define DA9150_CORE_BASE_ADDR_SHIFT 4
+#define DA9150_CORE_BASE_ADDR_MASK (0x0f << 4)
+
+/* DA9150_FW_CTRL_A = 0x312 */
+#define DA9150_FW_SEAL_SHIFT 0
+#define DA9150_FW_SEAL_MASK (0xff << 0)
+
+/* DA9150_FW_CTRL_C = 0x313 */
+#define DA9150_FW_FWDL_CRC_SHIFT 0
+#define DA9150_FW_FWDL_CRC_MASK (0xff << 0)
+
+/* DA9150_FW_CTRL_D = 0x314 */
+#define DA9150_FW_FWDL_BASE_SHIFT 0
+#define DA9150_FW_FWDL_BASE_MASK (0x0f << 0)
+
+/* DA9150_FG_CTRL_A = 0x315 */
+#define DA9150_FG_QIF_CODE_SHIFT 0
+#define DA9150_FG_QIF_CODE_MASK (0xff << 0)
+
+/* DA9150_FG_CTRL_B = 0x316 */
+#define DA9150_FG_QIF_VALUE_SHIFT 0
+#define DA9150_FG_QIF_VALUE_MASK (0xff << 0)
+
+/* DA9150_FW_CTRL_E = 0x317 */
+#define DA9150_FW_FWDL_SEG_SHIFT 0
+#define DA9150_FW_FWDL_SEG_MASK (0xff << 0)
+
+/* DA9150_FW_CTRL_B = 0x318 */
+#define DA9150_FW_FWDL_VALUE_SHIFT 0
+#define DA9150_FW_FWDL_VALUE_MASK (0xff << 0)
+
+/* DA9150_GPADC_CMAN = 0x320 */
+#define DA9150_GPADC_CEN_SHIFT 0
+#define DA9150_GPADC_CEN_MASK BIT(0)
+#define DA9150_GPADC_CMUX_SHIFT 1
+#define DA9150_GPADC_CMUX_MASK (0x1f << 1)
+
+/* DA9150_GPADC_CRES_A = 0x322 */
+#define DA9150_GPADC_CRES_H_SHIFT 0
+#define DA9150_GPADC_CRES_H_MASK (0xff << 0)
+
+/* DA9150_GPADC_CRES_B = 0x323 */
+#define DA9150_GPADC_CRUN_SHIFT 0
+#define DA9150_GPADC_CRUN_MASK BIT(0)
+#define DA9150_GPADC_CRES_L_SHIFT 6
+#define DA9150_GPADC_CRES_L_MASK (0x03 << 6)
+
+/* DA9150_CC_CFG_A = 0x328 */
+#define DA9150_CC_EN_SHIFT 0
+#define DA9150_CC_EN_MASK BIT(0)
+#define DA9150_CC_TIMEBASE_SHIFT 1
+#define DA9150_CC_TIMEBASE_MASK (0x03 << 1)
+#define DA9150_CC_CFG_SHIFT 5
+#define DA9150_CC_CFG_MASK (0x03 << 5)
+#define DA9150_CC_ENDLESS_MODE_SHIFT 7
+#define DA9150_CC_ENDLESS_MODE_MASK BIT(7)
+
+/* DA9150_CC_CFG_B = 0x329 */
+#define DA9150_CC_OPT_SHIFT 0
+#define DA9150_CC_OPT_MASK (0x03 << 0)
+#define DA9150_CC_PREAMP_SHIFT 2
+#define DA9150_CC_PREAMP_MASK (0x03 << 2)
+
+/* DA9150_CC_ICHG_RES_A = 0x32A */
+#define DA9150_CC_ICHG_RES_H_SHIFT 0
+#define DA9150_CC_ICHG_RES_H_MASK (0xff << 0)
+
+/* DA9150_CC_ICHG_RES_B = 0x32B */
+#define DA9150_CC_ICHG_RES_L_SHIFT 3
+#define DA9150_CC_ICHG_RES_L_MASK (0x1f << 3)
+
+/* DA9150_CC_IAVG_RES_A = 0x32C */
+#define DA9150_CC_IAVG_RES_H_SHIFT 0
+#define DA9150_CC_IAVG_RES_H_MASK (0xff << 0)
+
+/* DA9150_CC_IAVG_RES_B = 0x32D */
+#define DA9150_CC_IAVG_RES_L_SHIFT 0
+#define DA9150_CC_IAVG_RES_L_MASK (0xff << 0)
+
+/* DA9150_TAUX_CTRL_A = 0x330 */
+#define DA9150_TAUX_EN_SHIFT 0
+#define DA9150_TAUX_EN_MASK BIT(0)
+#define DA9150_TAUX_MOD_SHIFT 1
+#define DA9150_TAUX_MOD_MASK BIT(1)
+#define DA9150_TAUX_UPDATE_SHIFT 2
+#define DA9150_TAUX_UPDATE_MASK BIT(2)
+
+/* DA9150_TAUX_RELOAD_H = 0x332 */
+#define DA9150_TAUX_RLD_H_SHIFT 0
+#define DA9150_TAUX_RLD_H_MASK (0xff << 0)
+
+/* DA9150_TAUX_RELOAD_L = 0x333 */
+#define DA9150_TAUX_RLD_L_SHIFT 3
+#define DA9150_TAUX_RLD_L_MASK (0x1f << 3)
+
+/* DA9150_TAUX_VALUE_H = 0x334 */
+#define DA9150_TAUX_VAL_H_SHIFT 0
+#define DA9150_TAUX_VAL_H_MASK (0xff << 0)
+
+/* DA9150_TAUX_VALUE_L = 0x335 */
+#define DA9150_TAUX_VAL_L_SHIFT 3
+#define DA9150_TAUX_VAL_L_MASK (0x1f << 3)
+
+/* DA9150_AUX_DATA_0 = 0x338 */
+#define DA9150_AUX_DAT_0_SHIFT 0
+#define DA9150_AUX_DAT_0_MASK (0xff << 0)
+
+/* DA9150_AUX_DATA_1 = 0x339 */
+#define DA9150_AUX_DAT_1_SHIFT 0
+#define DA9150_AUX_DAT_1_MASK (0xff << 0)
+
+/* DA9150_AUX_DATA_2 = 0x33A */
+#define DA9150_AUX_DAT_2_SHIFT 0
+#define DA9150_AUX_DAT_2_MASK (0xff << 0)
+
+/* DA9150_AUX_DATA_3 = 0x33B */
+#define DA9150_AUX_DAT_3_SHIFT 0
+#define DA9150_AUX_DAT_3_MASK (0xff << 0)
+
+/* DA9150_BIF_CTRL = 0x340 */
+#define DA9150_BIF_ISRC_EN_SHIFT 0
+#define DA9150_BIF_ISRC_EN_MASK BIT(0)
+
+/* DA9150_TBAT_CTRL_A = 0x342 */
+#define DA9150_TBAT_EN_SHIFT 0
+#define DA9150_TBAT_EN_MASK BIT(0)
+#define DA9150_TBAT_SW1_SHIFT 1
+#define DA9150_TBAT_SW1_MASK BIT(1)
+#define DA9150_TBAT_SW2_SHIFT 2
+#define DA9150_TBAT_SW2_MASK BIT(2)
+
+/* DA9150_TBAT_CTRL_B = 0x343 */
+#define DA9150_TBAT_SW_FRC_SHIFT 0
+#define DA9150_TBAT_SW_FRC_MASK BIT(0)
+#define DA9150_TBAT_STAT_SW1_SHIFT 1
+#define DA9150_TBAT_STAT_SW1_MASK BIT(1)
+#define DA9150_TBAT_STAT_SW2_SHIFT 2
+#define DA9150_TBAT_STAT_SW2_MASK BIT(2)
+#define DA9150_TBAT_HIGH_CURR_SHIFT 3
+#define DA9150_TBAT_HIGH_CURR_MASK BIT(3)
+
+/* DA9150_TBAT_RES_A = 0x344 */
+#define DA9150_TBAT_RES_H_SHIFT 0
+#define DA9150_TBAT_RES_H_MASK (0xff << 0)
+
+/* DA9150_TBAT_RES_B = 0x345 */
+#define DA9150_TBAT_RES_DIS_SHIFT 0
+#define DA9150_TBAT_RES_DIS_MASK BIT(0)
+#define DA9150_TBAT_RES_L_SHIFT 6
+#define DA9150_TBAT_RES_L_MASK (0x03 << 6)
+
+#endif /* __DA9150_REGISTERS_H */
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 960b92a..f504349 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -447,7 +447,6 @@ struct max77686_dev {
struct regmap_irq_chip_data *rtc_irq_data;
int irq;
- bool wakeup;
struct mutex irqlock;
int irq_masks_cur[MAX77686_IRQ_GROUP_NR];
int irq_masks_cache[MAX77686_IRQ_GROUP_NR];
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index 553f7d0..d4b72d5 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -119,43 +119,10 @@ enum max77802_regulators {
MAX77802_REG_MAX,
};
-struct max77686_regulator_data {
- int id;
- struct regulator_init_data *initdata;
- struct device_node *of_node;
-};
-
enum max77686_opmode {
MAX77686_OPMODE_NORMAL,
MAX77686_OPMODE_LP,
MAX77686_OPMODE_STANDBY,
};
-struct max77686_opmode_data {
- int id;
- int mode;
-};
-
-struct max77686_platform_data {
- int ono;
- int wakeup;
-
- /* ---- PMIC ---- */
- struct max77686_regulator_data *regulators;
- int num_regulators;
-
- struct max77686_opmode_data *opmode_data;
-
- /*
- * GPIO-DVS feature is not enabled with the current version of
- * MAX77686 driver. Buck2/3/4_voltages[0] is used as the default
- * voltage at probe. DVS/SELB gpios are set as OUTPUT-LOW.
- */
- int buck234_gpio_dvs[3]; /* GPIO of [0]DVS1, [1]DVS2, [2]DVS3 */
- int buck234_gpio_selb[3]; /* [0]SELB2, [1]SELB3, [2]SELB4 */
- unsigned int buck2_voltage[8]; /* buckx_voltage in uV */
- unsigned int buck3_voltage[8];
- unsigned int buck4_voltage[8];
-};
-
#endif /* __LINUX_MFD_MAX77686_H */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 08dae01..51633ea 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -87,6 +87,7 @@ enum max77693_pmic_reg {
/* MAX77693 ITORCH register */
#define TORCH_IOUT1_SHIFT 0
#define TORCH_IOUT2_SHIFT 4
+#define TORCH_IOUT_MASK(x) (0xf << (x))
#define TORCH_IOUT_MIN 15625
#define TORCH_IOUT_MAX 250000
#define TORCH_IOUT_STEP 15625
@@ -113,8 +114,8 @@ enum max77693_pmic_reg {
#define FLASH_EN_FLASH 0x1
#define FLASH_EN_TORCH 0x2
#define FLASH_EN_ON 0x3
-#define FLASH_EN_SHIFT(x) (6 - ((x) - 1) * 2)
-#define TORCH_EN_SHIFT(x) (2 - ((x) - 1) * 2)
+#define FLASH_EN_SHIFT(x) (6 - (x) * 2)
+#define TORCH_EN_SHIFT(x) (2 - (x) * 2)
/* MAX77693 MAX_FLASH1 register */
#define MAX_FLASH1_MAX_FL_EN 0x80
@@ -143,10 +144,118 @@ enum max77693_pmic_reg {
#define FLASH_INT_FLED1_SHORT BIT(3)
#define FLASH_INT_OVER_CURRENT BIT(4)
+/* Fast charge timer in hours */
+#define DEFAULT_FAST_CHARGE_TIMER 4
+/* microamps */
+#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000
+/* minutes */
+#define DEFAULT_TOP_OFF_TIMER 30
+/* microvolts */
+#define DEFAULT_CONSTANT_VOLT 4200000
+/* microvolts */
+#define DEFAULT_MIN_SYSTEM_VOLT 3600000
+/* celsius */
+#define DEFAULT_THERMAL_REGULATION_TEMP 100
+/* microamps */
+#define DEFAULT_BATTERY_OVERCURRENT 3500000
+/* microvolts */
+#define DEFAULT_CHARGER_INPUT_THRESHOLD_VOLT 4300000
+
+/* MAX77693_CHG_REG_CHG_INT_OK register */
+#define CHG_INT_OK_BYP_SHIFT 0
+#define CHG_INT_OK_BAT_SHIFT 3
+#define CHG_INT_OK_CHG_SHIFT 4
+#define CHG_INT_OK_CHGIN_SHIFT 6
+#define CHG_INT_OK_DETBAT_SHIFT 7
+#define CHG_INT_OK_BYP_MASK BIT(CHG_INT_OK_BYP_SHIFT)
+#define CHG_INT_OK_BAT_MASK BIT(CHG_INT_OK_BAT_SHIFT)
+#define CHG_INT_OK_CHG_MASK BIT(CHG_INT_OK_CHG_SHIFT)
+#define CHG_INT_OK_CHGIN_MASK BIT(CHG_INT_OK_CHGIN_SHIFT)
+#define CHG_INT_OK_DETBAT_MASK BIT(CHG_INT_OK_DETBAT_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_00 register */
+#define CHG_DETAILS_00_CHGIN_SHIFT 5
+#define CHG_DETAILS_00_CHGIN_MASK (0x3 << CHG_DETAILS_00_CHGIN_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01 register */
+#define CHG_DETAILS_01_CHG_SHIFT 0
+#define CHG_DETAILS_01_BAT_SHIFT 4
+#define CHG_DETAILS_01_TREG_SHIFT 7
+#define CHG_DETAILS_01_CHG_MASK (0xf << CHG_DETAILS_01_CHG_SHIFT)
+#define CHG_DETAILS_01_BAT_MASK (0x7 << CHG_DETAILS_01_BAT_SHIFT)
+#define CHG_DETAILS_01_TREG_MASK BIT(7)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01/CHG field */
+enum max77693_charger_charging_state {
+ MAX77693_CHARGING_PREQUALIFICATION = 0x0,
+ MAX77693_CHARGING_FAST_CONST_CURRENT,
+ MAX77693_CHARGING_FAST_CONST_VOLTAGE,
+ MAX77693_CHARGING_TOP_OFF,
+ MAX77693_CHARGING_DONE,
+ MAX77693_CHARGING_HIGH_TEMP,
+ MAX77693_CHARGING_TIMER_EXPIRED,
+ MAX77693_CHARGING_THERMISTOR_SUSPEND,
+ MAX77693_CHARGING_OFF,
+ MAX77693_CHARGING_RESERVED,
+ MAX77693_CHARGING_OVER_TEMP,
+ MAX77693_CHARGING_WATCHDOG_EXPIRED,
+};
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01/BAT field */
+enum max77693_charger_battery_state {
+ MAX77693_BATTERY_NOBAT = 0x0,
+ /* Dead-battery or low-battery prequalification */
+ MAX77693_BATTERY_PREQUALIFICATION,
+ MAX77693_BATTERY_TIMER_EXPIRED,
+ MAX77693_BATTERY_GOOD,
+ MAX77693_BATTERY_LOWVOLTAGE,
+ MAX77693_BATTERY_OVERVOLTAGE,
+ MAX77693_BATTERY_OVERCURRENT,
+ MAX77693_BATTERY_RESERVED,
+};
+
+/* MAX77693_CHG_REG_CHG_DETAILS_02 register */
+#define CHG_DETAILS_02_BYP_SHIFT 0
+#define CHG_DETAILS_02_BYP_MASK (0xf << CHG_DETAILS_02_BYP_SHIFT)
+
/* MAX77693 CHG_CNFG_00 register */
#define CHG_CNFG_00_CHG_MASK 0x1
#define CHG_CNFG_00_BUCK_MASK 0x4
+/* MAX77693_CHG_REG_CHG_CNFG_01 register */
+#define CHG_CNFG_01_FCHGTIME_SHIFT 0
+#define CHG_CNFG_01_CHGRSTRT_SHIFT 4
+#define CHG_CNFG_01_PQEN_SHIFT 7
+#define CHG_CNFG_01_FCHGTIME_MASK (0x7 << CHG_CNFG_01_FCHGTIME_SHIFT)
+#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
+#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_03 register */
+#define CHG_CNFG_03_TOITH_SHIFT 0
+#define CHG_CNFG_03_TOTIME_SHIFT 3
+#define CHG_CNFG_03_TOITH_MASK (0x7 << CHG_CNFG_03_TOITH_SHIFT)
+#define CHG_CNFG_03_TOTIME_MASK (0x7 << CHG_CNFG_03_TOTIME_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_04 register */
+#define CHG_CNFG_04_CHGCVPRM_SHIFT 0
+#define CHG_CNFG_04_MINVSYS_SHIFT 5
+#define CHG_CNFG_04_CHGCVPRM_MASK (0x1f << CHG_CNFG_04_CHGCVPRM_SHIFT)
+#define CHG_CNFG_04_MINVSYS_MASK (0x7 << CHG_CNFG_04_MINVSYS_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_06 register */
+#define CHG_CNFG_06_CHGPROT_SHIFT 2
+#define CHG_CNFG_06_CHGPROT_MASK (0x3 << CHG_CNFG_06_CHGPROT_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_07 register */
+#define CHG_CNFG_07_REGTEMP_SHIFT 5
+#define CHG_CNFG_07_REGTEMP_MASK (0x3 << CHG_CNFG_07_REGTEMP_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_12 register */
+#define CHG_CNFG_12_B2SOVRC_SHIFT 0
+#define CHG_CNFG_12_VCHGINREG_SHIFT 3
+#define CHG_CNFG_12_B2SOVRC_MASK (0x7 << CHG_CNFG_12_B2SOVRC_SHIFT)
+#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
+
/* MAX77693 CHG_CNFG_09 Register */
#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
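The new CHG_CNFG_* shift/mask pairs above are meant to be combined with regmap field updates. A minimal sketch follows, not the in-tree charger driver: it writes a raw 3-bit FCHGTIME field into CHG_CNFG_01 and assumes MAX77693_CHG_REG_CHG_CNFG_01 is the register index defined elsewhere in this header.

#include <linux/errno.h>
#include <linux/mfd/max77693-private.h>
#include <linux/regmap.h>

/* Sketch only: program the raw fast-charge timer field of CHG_CNFG_01. */
static int max77693_chg_set_fchgtime(struct regmap *rmap, unsigned int field)
{
	/* FCHGTIME is a 3-bit field; reject values that would not fit. */
	if (field > (CHG_CNFG_01_FCHGTIME_MASK >> CHG_CNFG_01_FCHGTIME_SHIFT))
		return -EINVAL;

	return regmap_update_bits(rmap, MAX77693_CHG_REG_CHG_CNFG_01,
				  CHG_CNFG_01_FCHGTIME_MASK,
				  field << CHG_CNFG_01_FCHGTIME_SHIFT);
}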
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index f0b6585..d450f68 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -30,7 +30,7 @@
#ifndef __LINUX_MFD_MAX77693_H
#define __LINUX_MFD_MAX77693_H
-/* MAX77686 regulator IDs */
+/* MAX77693 regulator IDs */
enum max77693_regulators {
MAX77693_ESAFEOUT1 = 0,
MAX77693_ESAFEOUT2,
@@ -38,12 +38,6 @@ enum max77693_regulators {
MAX77693_REG_MAX,
};
-struct max77693_regulator_data {
- int id;
- struct regulator_init_data *initdata;
- struct device_node *of_node;
-};
-
struct max77693_reg_data {
u8 addr;
u8 data;
@@ -87,26 +81,9 @@ enum max77693_led_boost_mode {
MAX77693_LED_BOOST_FIXED,
};
-struct max77693_led_platform_data {
- u32 fleds[2];
- u32 iout_torch[2];
- u32 iout_flash[2];
- u32 trigger[2];
- u32 trigger_type[2];
- u32 num_leds;
- u32 boost_mode;
- u32 flash_timeout;
- u32 boost_vout;
- u32 low_vsys;
-};
-
/* MAX77693 */
struct max77693_platform_data {
- /* regulator data */
- struct max77693_regulator_data *regulators;
- int num_regulators;
-
/* muic data */
struct max77693_muic_platform_data *muic_data;
struct max77693_led_platform_data *led_data;
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
new file mode 100644
index 0000000..7178ace
--- /dev/null
+++ b/include/linux/mfd/max77843-private.h
@@ -0,0 +1,454 @@
+/*
+ * Common variables for the Maxim MAX77843 driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MAX77843_PRIVATE_H_
+#define __MAX77843_PRIVATE_H_
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_TOPSYS (0xCC >> 1)
+#define I2C_ADDR_CHG (0xD2 >> 1)
+#define I2C_ADDR_FG (0x6C >> 1)
+#define I2C_ADDR_MUIC (0x4A >> 1)
+
+/* Topsys, Haptic and LED registers */
+enum max77843_sys_reg {
+ MAX77843_SYS_REG_PMICID = 0x00,
+ MAX77843_SYS_REG_PMICREV = 0x01,
+ MAX77843_SYS_REG_MAINCTRL1 = 0x02,
+ MAX77843_SYS_REG_INTSRC = 0x22,
+ MAX77843_SYS_REG_INTSRCMASK = 0x23,
+ MAX77843_SYS_REG_SYSINTSRC = 0x24,
+ MAX77843_SYS_REG_SYSINTMASK = 0x26,
+ MAX77843_SYS_REG_TOPSYS_STAT = 0x28,
+ MAX77843_SYS_REG_SAFEOUTCTRL = 0xC6,
+
+ MAX77843_SYS_REG_END,
+};
+
+enum max77843_haptic_reg {
+ MAX77843_HAP_REG_MCONFIG = 0x10,
+
+ MAX77843_HAP_REG_END,
+};
+
+enum max77843_led_reg {
+ MAX77843_LED_REG_LEDEN = 0x30,
+ MAX77843_LED_REG_LED0BRT = 0x31,
+ MAX77843_LED_REG_LED1BRT = 0x32,
+ MAX77843_LED_REG_LED2BRT = 0x33,
+ MAX77843_LED_REG_LED3BRT = 0x34,
+ MAX77843_LED_REG_LEDBLNK = 0x38,
+ MAX77843_LED_REG_LEDRAMP = 0x36,
+
+ MAX77843_LED_REG_END,
+};
+
+/* Charger registers */
+enum max77843_charger_reg {
+ MAX77843_CHG_REG_CHG_INT = 0xB0,
+ MAX77843_CHG_REG_CHG_INT_MASK = 0xB1,
+ MAX77843_CHG_REG_CHG_INT_OK = 0xB2,
+ MAX77843_CHG_REG_CHG_DTLS_00 = 0xB3,
+ MAX77843_CHG_REG_CHG_DTLS_01 = 0xB4,
+ MAX77843_CHG_REG_CHG_DTLS_02 = 0xB5,
+ MAX77843_CHG_REG_CHG_CNFG_00 = 0xB7,
+ MAX77843_CHG_REG_CHG_CNFG_01 = 0xB8,
+ MAX77843_CHG_REG_CHG_CNFG_02 = 0xB9,
+ MAX77843_CHG_REG_CHG_CNFG_03 = 0xBA,
+ MAX77843_CHG_REG_CHG_CNFG_04 = 0xBB,
+ MAX77843_CHG_REG_CHG_CNFG_06 = 0xBD,
+ MAX77843_CHG_REG_CHG_CNFG_07 = 0xBE,
+ MAX77843_CHG_REG_CHG_CNFG_09 = 0xC0,
+ MAX77843_CHG_REG_CHG_CNFG_10 = 0xC1,
+ MAX77843_CHG_REG_CHG_CNFG_11 = 0xC2,
+ MAX77843_CHG_REG_CHG_CNFG_12 = 0xC3,
+
+ MAX77843_CHG_REG_END,
+};
+
+/* Fuel gauge registers */
+enum max77843_fuelgauge {
+ MAX77843_FG_REG_STATUS = 0x00,
+ MAX77843_FG_REG_VALRT_TH = 0x01,
+ MAX77843_FG_REG_TALRT_TH = 0x02,
+ MAX77843_FG_REG_SALRT_TH = 0x03,
+ MAX77843_FG_RATE_AT_RATE = 0x04,
+ MAX77843_FG_REG_REMCAP_REP = 0x05,
+ MAX77843_FG_REG_SOCREP = 0x06,
+ MAX77843_FG_REG_AGE = 0x07,
+ MAX77843_FG_REG_TEMP = 0x08,
+ MAX77843_FG_REG_VCELL = 0x09,
+ MAX77843_FG_REG_CURRENT = 0x0A,
+ MAX77843_FG_REG_AVG_CURRENT = 0x0B,
+ MAX77843_FG_REG_SOCMIX = 0x0D,
+ MAX77843_FG_REG_SOCAV = 0x0E,
+ MAX77843_FG_REG_REMCAP_MIX = 0x0F,
+ MAX77843_FG_REG_FULLCAP = 0x10,
+ MAX77843_FG_REG_AVG_TEMP = 0x16,
+ MAX77843_FG_REG_CYCLES = 0x17,
+ MAX77843_FG_REG_AVG_VCELL = 0x19,
+ MAX77843_FG_REG_CONFIG = 0x1D,
+ MAX77843_FG_REG_REMCAP_AV = 0x1F,
+ MAX77843_FG_REG_FULLCAP_NOM = 0x23,
+ MAX77843_FG_REG_MISCCFG = 0x2B,
+ MAX77843_FG_REG_RCOMP = 0x38,
+ MAX77843_FG_REG_FSTAT = 0x3D,
+ MAX77843_FG_REG_DQACC = 0x45,
+ MAX77843_FG_REG_DPACC = 0x46,
+ MAX77843_FG_REG_OCV = 0xEE,
+ MAX77843_FG_REG_VFOCV = 0xFB,
+ MAX77843_FG_SOCVF = 0xFF,
+
+ MAX77843_FG_END,
+};
+
+/* MUIC registers */
+enum max77843_muic_reg {
+ MAX77843_MUIC_REG_ID = 0x00,
+ MAX77843_MUIC_REG_INT1 = 0x01,
+ MAX77843_MUIC_REG_INT2 = 0x02,
+ MAX77843_MUIC_REG_INT3 = 0x03,
+ MAX77843_MUIC_REG_STATUS1 = 0x04,
+ MAX77843_MUIC_REG_STATUS2 = 0x05,
+ MAX77843_MUIC_REG_STATUS3 = 0x06,
+ MAX77843_MUIC_REG_INTMASK1 = 0x07,
+ MAX77843_MUIC_REG_INTMASK2 = 0x08,
+ MAX77843_MUIC_REG_INTMASK3 = 0x09,
+ MAX77843_MUIC_REG_CDETCTRL1 = 0x0A,
+ MAX77843_MUIC_REG_CDETCTRL2 = 0x0B,
+ MAX77843_MUIC_REG_CONTROL1 = 0x0C,
+ MAX77843_MUIC_REG_CONTROL2 = 0x0D,
+ MAX77843_MUIC_REG_CONTROL3 = 0x0E,
+ MAX77843_MUIC_REG_CONTROL4 = 0x16,
+ MAX77843_MUIC_REG_HVCONTROL1 = 0x17,
+ MAX77843_MUIC_REG_HVCONTROL2 = 0x18,
+
+ MAX77843_MUIC_REG_END,
+};
+
+enum max77843_irq {
+ /* Topsys: SYSTEM */
+ MAX77843_SYS_IRQ_SYSINTSRC_SYSUVLO_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_SYSOVLO_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_TSHDN_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_TM_INT,
+
+ /* Charger: CHG_INT */
+ MAX77843_CHG_IRQ_CHG_INT_BYP_I,
+ MAX77843_CHG_IRQ_CHG_INT_BATP_I,
+ MAX77843_CHG_IRQ_CHG_INT_BAT_I,
+ MAX77843_CHG_IRQ_CHG_INT_CHG_I,
+ MAX77843_CHG_IRQ_CHG_INT_WCIN_I,
+ MAX77843_CHG_IRQ_CHG_INT_CHGIN_I,
+ MAX77843_CHG_IRQ_CHG_INT_AICL_I,
+
+ MAX77843_IRQ_NUM,
+};
+
+enum max77843_irq_muic {
+ /* MUIC: INT1 */
+ MAX77843_MUIC_IRQ_INT1_ADC,
+ MAX77843_MUIC_IRQ_INT1_ADCERROR,
+ MAX77843_MUIC_IRQ_INT1_ADC1K,
+
+ /* MUIC: INT2 */
+ MAX77843_MUIC_IRQ_INT2_CHGTYP,
+ MAX77843_MUIC_IRQ_INT2_CHGDETRUN,
+ MAX77843_MUIC_IRQ_INT2_DCDTMR,
+ MAX77843_MUIC_IRQ_INT2_DXOVP,
+ MAX77843_MUIC_IRQ_INT2_VBVOLT,
+
+ /* MUIC: INT3 */
+ MAX77843_MUIC_IRQ_INT3_VBADC,
+ MAX77843_MUIC_IRQ_INT3_VDNMON,
+ MAX77843_MUIC_IRQ_INT3_DNRES,
+ MAX77843_MUIC_IRQ_INT3_MPNACK,
+ MAX77843_MUIC_IRQ_INT3_MRXBUFOW,
+ MAX77843_MUIC_IRQ_INT3_MRXTRF,
+ MAX77843_MUIC_IRQ_INT3_MRXPERR,
+ MAX77843_MUIC_IRQ_INT3_MRXRDY,
+
+ MAX77843_MUIC_IRQ_NUM,
+};
+
+/* MAX77843 interrupts */
+#define MAX77843_SYS_IRQ_SYSUVLO_INT BIT(0)
+#define MAX77843_SYS_IRQ_SYSOVLO_INT BIT(1)
+#define MAX77843_SYS_IRQ_TSHDN_INT BIT(2)
+#define MAX77843_SYS_IRQ_TM_INT BIT(3)
+
+/* MAX77843 MAINCTRL1 register */
+#define MAINCTRL1_BIASEN_SHIFT 7
+#define MAX77843_MAINCTRL1_BIASEN_MASK BIT(MAINCTRL1_BIASEN_SHIFT)
+
+/* MAX77843 MCONFIG register */
+#define MCONFIG_MODE_SHIFT 7
+#define MCONFIG_MEN_SHIFT 6
+#define MCONFIG_PDIV_SHIFT 0
+
+#define MAX77843_MCONFIG_MODE_MASK BIT(MCONFIG_MODE_SHIFT)
+#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
+#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
+
+/* MAX77843 charger interrupts */
+#define MAX77843_CHG_BYP_I BIT(0)
+#define MAX77843_CHG_BATP_I BIT(2)
+#define MAX77843_CHG_BAT_I BIT(3)
+#define MAX77843_CHG_CHG_I BIT(4)
+#define MAX77843_CHG_WCIN_I BIT(5)
+#define MAX77843_CHG_CHGIN_I BIT(6)
+#define MAX77843_CHG_AICL_I BIT(7)
+
+/* MAX77843 CHG_INT_OK register */
+#define MAX77843_CHG_BYP_OK BIT(0)
+#define MAX77843_CHG_BATP_OK BIT(2)
+#define MAX77843_CHG_BAT_OK BIT(3)
+#define MAX77843_CHG_CHG_OK BIT(4)
+#define MAX77843_CHG_WCIN_OK BIT(5)
+#define MAX77843_CHG_CHGIN_OK BIT(6)
+#define MAX77843_CHG_AICL_OK BIT(7)
+
+/* MAX77843 CHG_DETAILS_00 register */
+#define MAX77843_CHG_BAT_DTLS BIT(0)
+
+/* MAX77843 CHG_DETAILS_01 register */
+#define MAX77843_CHG_DTLS_MASK 0x0f
+#define MAX77843_CHG_PQ_MODE 0x00
+#define MAX77843_CHG_CC_MODE 0x01
+#define MAX77843_CHG_CV_MODE 0x02
+#define MAX77843_CHG_TO_MODE 0x03
+#define MAX77843_CHG_DO_MODE 0x04
+#define MAX77843_CHG_HT_MODE 0x05
+#define MAX77843_CHG_TF_MODE 0x06
+#define MAX77843_CHG_TS_MODE 0x07
+#define MAX77843_CHG_OFF_MODE 0x08
+
+#define MAX77843_CHG_BAT_DTLS_MASK 0xf0
+#define MAX77843_CHG_NO_BAT (0x00 << 4)
+#define MAX77843_CHG_LOW_VOLT_BAT (0x01 << 4)
+#define MAX77843_CHG_LONG_BAT_TIME (0x02 << 4)
+#define MAX77843_CHG_OK_BAT (0x03 << 4)
+#define MAX77843_CHG_OK_LOW_VOLT_BAT (0x04 << 4)
+#define MAX77843_CHG_OVER_VOLT_BAT (0x05 << 4)
+#define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4)
+
+/* MAX77843 CHG_CNFG_00 register */
+#define MAX77843_CHG_DISABLE 0x00
+#define MAX77843_CHG_ENABLE 0x05
+#define MAX77843_CHG_MASK 0x01
+#define MAX77843_CHG_BUCK_MASK 0x04
+
+/* MAX77843 CHG_CNFG_01 register */
+#define MAX77843_CHG_RESTART_THRESHOLD_100 0x00
+#define MAX77843_CHG_RESTART_THRESHOLD_150 0x10
+#define MAX77843_CHG_RESTART_THRESHOLD_200 0x20
+#define MAX77843_CHG_RESTART_THRESHOLD_DISABLE 0x30
+
+/* MAX77843 CHG_CNFG_02 register */
+#define MAX77843_CHG_FAST_CHG_CURRENT_MIN 100000
+#define MAX77843_CHG_FAST_CHG_CURRENT_MAX 3150000
+#define MAX77843_CHG_FAST_CHG_CURRENT_STEP 50000
+#define MAX77843_CHG_FAST_CHG_CURRENT_MASK 0x3f
+#define MAX77843_CHG_OTG_ILIMIT_500 (0x00 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_900 (0x01 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_1200 (0x02 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_1500 (0x03 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_MASK 0xc0
+
+/* MAX77843 CHG_CNFG_03 register */
+#define MAX77843_CHG_TOP_OFF_CURRENT_MIN 125000
+#define MAX77843_CHG_TOP_OFF_CURRENT_MAX 650000
+#define MAX77843_CHG_TOP_OFF_CURRENT_STEP 75000
+#define MAX77843_CHG_TOP_OFF_CURRENT_MASK 0x07
+
+/* MAX77843 CHG_CNFG_06 register */
+#define MAX77843_CHG_WRITE_CAP_BLOCK 0x10
+#define MAX77843_CHG_WRITE_CAP_UNBLOCK 0x0C
+
+/* MAX77843 CHG_CNFG_09 register */
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MIN 100000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MAX 4000000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_REF 3367000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_STEP 33000
+
+#define MAX77843_MUIC_ADC BIT(0)
+#define MAX77843_MUIC_ADCERROR BIT(2)
+#define MAX77843_MUIC_ADC1K BIT(3)
+
+#define MAX77843_MUIC_CHGTYP BIT(0)
+#define MAX77843_MUIC_CHGDETRUN BIT(1)
+#define MAX77843_MUIC_DCDTMR BIT(2)
+#define MAX77843_MUIC_DXOVP BIT(3)
+#define MAX77843_MUIC_VBVOLT BIT(4)
+
+#define MAX77843_MUIC_VBADC BIT(0)
+#define MAX77843_MUIC_VDNMON BIT(1)
+#define MAX77843_MUIC_DNRES BIT(2)
+#define MAX77843_MUIC_MPNACK BIT(3)
+#define MAX77843_MUIC_MRXBUFOW BIT(4)
+#define MAX77843_MUIC_MRXTRF BIT(5)
+#define MAX77843_MUIC_MRXPERR BIT(6)
+#define MAX77843_MUIC_MRXRDY BIT(7)
+
+/* MAX77843 INTSRCMASK register */
+#define MAX77843_INTSRCMASK_CHGR 0
+#define MAX77843_INTSRCMASK_SYS 1
+#define MAX77843_INTSRCMASK_FG 2
+#define MAX77843_INTSRCMASK_MUIC 3
+
+#define MAX77843_INTSRCMASK_CHGR_MASK BIT(MAX77843_INTSRCMASK_CHGR)
+#define MAX77843_INTSRCMASK_SYS_MASK BIT(MAX77843_INTSRCMASK_SYS)
+#define MAX77843_INTSRCMASK_FG_MASK BIT(MAX77843_INTSRCMASK_FG)
+#define MAX77843_INTSRCMASK_MUIC_MASK BIT(MAX77843_INTSRCMASK_MUIC)
+
+#define MAX77843_INTSRC_MASK_MASK \
+ (MAX77843_INTSRCMASK_MUIC_MASK | MAX77843_INTSRCMASK_FG_MASK | \
+ MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
+
+/* MAX77843 STATUS registers */
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCERROR_SHIFT 6
+#define STATUS1_ADC1K_SHIFT 7
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define STATUS2_DXOVP_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define STATUS3_VBADC_SHIFT 0
+#define STATUS3_VDNMON_SHIFT 4
+#define STATUS3_DNRES_SHIFT 5
+#define STATUS3_MPNACK_SHIFT 6
+
+#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
+#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
+#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT)
+#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
+#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT)
+#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT)
+#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT)
+#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT)
+
+/* MAX77843 CONTROL register */
+#define CONTROL1_COMP1SW_SHIFT 0
+#define CONTROL1_COMP2SW_SHIFT 3
+#define CONTROL1_IDBEN_SHIFT 7
+#define CONTROL2_LOWPWR_SHIFT 0
+#define CONTROL2_ADCEN_SHIFT 1
+#define CONTROL2_CPEN_SHIFT 2
+#define CONTROL2_ACC_DET_SHIFT 5
+#define CONTROL2_USBCPINT_SHIFT 6
+#define CONTROL2_RCPS_SHIFT 7
+#define CONTROL3_JIGSET_SHIFT 0
+#define CONTROL4_ADCDBSET_SHIFT 0
+#define CONTROL4_USBAUTO_SHIFT 4
+#define CONTROL4_FCTAUTO_SHIFT 5
+#define CONTROL4_ADCMODE_SHIFT 6
+
+#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT)
+#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT)
+#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT)
+#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT)
+
+/* MAX77843 switch port */
+#define COM_OPEN 0
+#define COM_USB 1
+#define COM_AUDIO 2
+#define COM_UART 3
+#define COM_AUX_USB 4
+#define COM_AUX_UART 5
+
+#define CONTROL1_COM_SW \
+ ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
+ MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
+
+#define CONTROL1_SW_OPEN \
+ ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \
+ COM_OPEN << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_USB \
+ ((COM_USB << CONTROL1_COMP1SW_SHIFT | \
+ COM_USB << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUDIO \
+ ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUDIO << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_UART \
+ ((COM_UART << CONTROL1_COMP1SW_SHIFT | \
+ COM_UART << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUX_USB \
+ ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_USB << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUX_UART \
+ ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_UART << CONTROL1_COMP2SW_SHIFT))
+
+#define MAX77843_DISABLE 0
+#define MAX77843_ENABLE 1
+
+#define CONTROL4_AUTO_DISABLE \
+ ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT))
+#define CONTROL4_AUTO_ENABLE \
+ ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT))
+
+/* MAX77843 SAFEOUT LDO Control register */
+#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
+#define SAFEOUTCTRL_SAFEOUT2_SHIFT 2
+#define SAFEOUTCTRL_ENSAFEOUT1_SHIFT 6
+#define SAFEOUTCTRL_ENSAFEOUT2_SHIFT 7
+
+#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1 \
+ BIT(SAFEOUTCTRL_ENSAFEOUT1_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2 \
+ BIT(SAFEOUTCTRL_ENSAFEOUT2_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK \
+ (0x3 << SAFEOUTCTRL_SAFEOUT1_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
+ (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
+
+struct max77843 {
+ struct device *dev;
+
+ struct i2c_client *i2c;
+ struct i2c_client *i2c_chg;
+ struct i2c_client *i2c_fuel;
+ struct i2c_client *i2c_muic;
+
+ struct regmap *regmap;
+ struct regmap *regmap_chg;
+ struct regmap *regmap_fuel;
+ struct regmap *regmap_muic;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_data_chg;
+ struct regmap_irq_chip_data *irq_data_fuel;
+ struct regmap_irq_chip_data *irq_data_muic;
+
+ int irq;
+};
+#endif /* __MAX77843_PRIVATE_H_ */
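The CONTROL1 switch macros in this header compose a mask (CONTROL1_COM_SW) and per-path values (CONTROL1_SW_*). A short sketch below shows how a MUIC driver might route both COM switches to the USB path; it is illustrative only, not the in-tree extcon driver.

#include <linux/mfd/max77843-private.h>
#include <linux/regmap.h>

/* Sketch only: route both MUIC COM switches to the USB path. */
static int max77843_muic_set_usb_path(struct max77843 *max77843)
{
	return regmap_update_bits(max77843->regmap_muic,
				  MAX77843_MUIC_REG_CONTROL1,
				  CONTROL1_COM_SW, CONTROL1_SW_USB);
}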
diff --git a/include/linux/mfd/menelaus.h b/include/linux/mfd/menelaus.h
index f097e89..9e85ac0 100644
--- a/include/linux/mfd/menelaus.h
+++ b/include/linux/mfd/menelaus.h
@@ -24,7 +24,6 @@ extern int menelaus_set_vaux(unsigned int mV);
extern int menelaus_set_vdcdc(int dcdc, unsigned int mV);
extern int menelaus_set_slot_sel(int enable);
extern int menelaus_get_slot_pin_states(void);
-extern int menelaus_set_vcore_sw(unsigned int mV);
extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV);
#define EN_VPLL_SLEEP (1 << 7)
@@ -38,10 +37,4 @@ extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV);
extern int menelaus_set_regulator_sleep(int enable, u32 val);
-#if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_MENELAUS)
-#define omap_has_menelaus() 1
-#else
-#define omap_has_menelaus() 0
-#endif
-
#endif
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
new file mode 100644
index 0000000..cf5265b
--- /dev/null
+++ b/include/linux/mfd/mt6397/core.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MT6397_CORE_H__
+#define __MFD_MT6397_CORE_H__
+
+enum mt6397_irq_numbers {
+ MT6397_IRQ_SPKL_AB = 0,
+ MT6397_IRQ_SPKR_AB,
+ MT6397_IRQ_SPKL,
+ MT6397_IRQ_SPKR,
+ MT6397_IRQ_BAT_L,
+ MT6397_IRQ_BAT_H,
+ MT6397_IRQ_FG_BAT_L,
+ MT6397_IRQ_FG_BAT_H,
+ MT6397_IRQ_WATCHDOG,
+ MT6397_IRQ_PWRKEY,
+ MT6397_IRQ_THR_L,
+ MT6397_IRQ_THR_H,
+ MT6397_IRQ_VBATON_UNDET,
+ MT6397_IRQ_BVALID_DET,
+ MT6397_IRQ_CHRDET,
+ MT6397_IRQ_OV,
+ MT6397_IRQ_LDO,
+ MT6397_IRQ_HOMEKEY,
+ MT6397_IRQ_ACCDET,
+ MT6397_IRQ_AUDIO,
+ MT6397_IRQ_RTC,
+ MT6397_IRQ_PWRKEY_RSTB,
+ MT6397_IRQ_HDMI_SIFM,
+ MT6397_IRQ_HDMI_CEC,
+ MT6397_IRQ_VCA15,
+ MT6397_IRQ_VSRMCA15,
+ MT6397_IRQ_VCORE,
+ MT6397_IRQ_VGPU,
+ MT6397_IRQ_VIO18,
+ MT6397_IRQ_VPCA7,
+ MT6397_IRQ_VSRMCA7,
+ MT6397_IRQ_VDRM,
+ MT6397_IRQ_NR,
+};
+
+struct mt6397_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct irq_domain *irq_domain;
+ struct mutex irqlock;
+ u16 irq_masks_cur[2];
+ u16 irq_masks_cache[2];
+};
+
+#endif /* __MFD_MT6397_CORE_H__ */
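struct mt6397_chip caches the interrupt masks in two 16-bit words, which implies a bank/bit split of the hardware IRQ number. The sketch below illustrates that bookkeeping under the irqlock mutex; the "16 interrupts per bank" split is an assumption of this example, and the real mapping is up to the MT6397 interrupt driver.

#include <linux/bitops.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mutex.h>

/* Sketch: clear the cached mask bit for one hardware IRQ. */
static void mt6397_irq_mask_cached(struct mt6397_chip *chip, int hwirq)
{
	int bank = hwirq >> 4;		/* which of the two mask words */
	u16 bit = BIT(hwirq & 0xf);	/* bit inside that word */

	mutex_lock(&chip->irqlock);
	chip->irq_masks_cur[bank] &= ~bit;
	mutex_unlock(&chip->irqlock);
}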
diff --git a/include/linux/mfd/mt6397/registers.h b/include/linux/mfd/mt6397/registers.h
new file mode 100644
index 0000000..f23a0a6
--- /dev/null
+++ b/include/linux/mfd/mt6397/registers.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MT6397_REGISTERS_H__
+#define __MFD_MT6397_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6397_CID 0x0100
+#define MT6397_TOP_CKPDN 0x0102
+#define MT6397_TOP_CKPDN_SET 0x0104
+#define MT6397_TOP_CKPDN_CLR 0x0106
+#define MT6397_TOP_CKPDN2 0x0108
+#define MT6397_TOP_CKPDN2_SET 0x010A
+#define MT6397_TOP_CKPDN2_CLR 0x010C
+#define MT6397_TOP_GPIO_CKPDN 0x010E
+#define MT6397_TOP_RST_CON 0x0114
+#define MT6397_WRP_CKPDN 0x011A
+#define MT6397_WRP_RST_CON 0x0120
+#define MT6397_TOP_RST_MISC 0x0126
+#define MT6397_TOP_CKCON1 0x0128
+#define MT6397_TOP_CKCON2 0x012A
+#define MT6397_TOP_CKTST1 0x012C
+#define MT6397_TOP_CKTST2 0x012E
+#define MT6397_OC_DEG_EN 0x0130
+#define MT6397_OC_CTL0 0x0132
+#define MT6397_OC_CTL1 0x0134
+#define MT6397_OC_CTL2 0x0136
+#define MT6397_INT_RSV 0x0138
+#define MT6397_TEST_CON0 0x013A
+#define MT6397_TEST_CON1 0x013C
+#define MT6397_STATUS0 0x013E
+#define MT6397_STATUS1 0x0140
+#define MT6397_PGSTATUS 0x0142
+#define MT6397_CHRSTATUS 0x0144
+#define MT6397_OCSTATUS0 0x0146
+#define MT6397_OCSTATUS1 0x0148
+#define MT6397_OCSTATUS2 0x014A
+#define MT6397_HDMI_PAD_IE 0x014C
+#define MT6397_TEST_OUT_L 0x014E
+#define MT6397_TEST_OUT_H 0x0150
+#define MT6397_TDSEL_CON 0x0152
+#define MT6397_RDSEL_CON 0x0154
+#define MT6397_GPIO_SMT_CON0 0x0156
+#define MT6397_GPIO_SMT_CON1 0x0158
+#define MT6397_GPIO_SMT_CON2 0x015A
+#define MT6397_GPIO_SMT_CON3 0x015C
+#define MT6397_DRV_CON0 0x015E
+#define MT6397_DRV_CON1 0x0160
+#define MT6397_DRV_CON2 0x0162
+#define MT6397_DRV_CON3 0x0164
+#define MT6397_DRV_CON4 0x0166
+#define MT6397_DRV_CON5 0x0168
+#define MT6397_DRV_CON6 0x016A
+#define MT6397_DRV_CON7 0x016C
+#define MT6397_DRV_CON8 0x016E
+#define MT6397_DRV_CON9 0x0170
+#define MT6397_DRV_CON10 0x0172
+#define MT6397_DRV_CON11 0x0174
+#define MT6397_DRV_CON12 0x0176
+#define MT6397_INT_CON0 0x0178
+#define MT6397_INT_CON1 0x017E
+#define MT6397_INT_STATUS0 0x0184
+#define MT6397_INT_STATUS1 0x0186
+#define MT6397_FQMTR_CON0 0x0188
+#define MT6397_FQMTR_CON1 0x018A
+#define MT6397_FQMTR_CON2 0x018C
+#define MT6397_EFUSE_DOUT_0_15 0x01C4
+#define MT6397_EFUSE_DOUT_16_31 0x01C6
+#define MT6397_EFUSE_DOUT_32_47 0x01C8
+#define MT6397_EFUSE_DOUT_48_63 0x01CA
+#define MT6397_SPI_CON 0x01CC
+#define MT6397_TOP_CKPDN3 0x01CE
+#define MT6397_TOP_CKCON3 0x01D4
+#define MT6397_EFUSE_DOUT_64_79 0x01D6
+#define MT6397_EFUSE_DOUT_80_95 0x01D8
+#define MT6397_EFUSE_DOUT_96_111 0x01DA
+#define MT6397_EFUSE_DOUT_112_127 0x01DC
+#define MT6397_EFUSE_DOUT_128_143 0x01DE
+#define MT6397_EFUSE_DOUT_144_159 0x01E0
+#define MT6397_EFUSE_DOUT_160_175 0x01E2
+#define MT6397_EFUSE_DOUT_176_191 0x01E4
+#define MT6397_EFUSE_DOUT_192_207 0x01E6
+#define MT6397_EFUSE_DOUT_208_223 0x01E8
+#define MT6397_EFUSE_DOUT_224_239 0x01EA
+#define MT6397_EFUSE_DOUT_240_255 0x01EC
+#define MT6397_EFUSE_DOUT_256_271 0x01EE
+#define MT6397_EFUSE_DOUT_272_287 0x01F0
+#define MT6397_EFUSE_DOUT_288_300 0x01F2
+#define MT6397_EFUSE_DOUT_304_319 0x01F4
+#define MT6397_BUCK_CON0 0x0200
+#define MT6397_BUCK_CON1 0x0202
+#define MT6397_BUCK_CON2 0x0204
+#define MT6397_BUCK_CON3 0x0206
+#define MT6397_BUCK_CON4 0x0208
+#define MT6397_BUCK_CON5 0x020A
+#define MT6397_BUCK_CON6 0x020C
+#define MT6397_BUCK_CON7 0x020E
+#define MT6397_BUCK_CON8 0x0210
+#define MT6397_BUCK_CON9 0x0212
+#define MT6397_VCA15_CON0 0x0214
+#define MT6397_VCA15_CON1 0x0216
+#define MT6397_VCA15_CON2 0x0218
+#define MT6397_VCA15_CON3 0x021A
+#define MT6397_VCA15_CON4 0x021C
+#define MT6397_VCA15_CON5 0x021E
+#define MT6397_VCA15_CON6 0x0220
+#define MT6397_VCA15_CON7 0x0222
+#define MT6397_VCA15_CON8 0x0224
+#define MT6397_VCA15_CON9 0x0226
+#define MT6397_VCA15_CON10 0x0228
+#define MT6397_VCA15_CON11 0x022A
+#define MT6397_VCA15_CON12 0x022C
+#define MT6397_VCA15_CON13 0x022E
+#define MT6397_VCA15_CON14 0x0230
+#define MT6397_VCA15_CON15 0x0232
+#define MT6397_VCA15_CON16 0x0234
+#define MT6397_VCA15_CON17 0x0236
+#define MT6397_VCA15_CON18 0x0238
+#define MT6397_VSRMCA15_CON0 0x023A
+#define MT6397_VSRMCA15_CON1 0x023C
+#define MT6397_VSRMCA15_CON2 0x023E
+#define MT6397_VSRMCA15_CON3 0x0240
+#define MT6397_VSRMCA15_CON4 0x0242
+#define MT6397_VSRMCA15_CON5 0x0244
+#define MT6397_VSRMCA15_CON6 0x0246
+#define MT6397_VSRMCA15_CON7 0x0248
+#define MT6397_VSRMCA15_CON8 0x024A
+#define MT6397_VSRMCA15_CON9 0x024C
+#define MT6397_VSRMCA15_CON10 0x024E
+#define MT6397_VSRMCA15_CON11 0x0250
+#define MT6397_VSRMCA15_CON12 0x0252
+#define MT6397_VSRMCA15_CON13 0x0254
+#define MT6397_VSRMCA15_CON14 0x0256
+#define MT6397_VSRMCA15_CON15 0x0258
+#define MT6397_VSRMCA15_CON16 0x025A
+#define MT6397_VSRMCA15_CON17 0x025C
+#define MT6397_VSRMCA15_CON18 0x025E
+#define MT6397_VSRMCA15_CON19 0x0260
+#define MT6397_VSRMCA15_CON20 0x0262
+#define MT6397_VSRMCA15_CON21 0x0264
+#define MT6397_VCORE_CON0 0x0266
+#define MT6397_VCORE_CON1 0x0268
+#define MT6397_VCORE_CON2 0x026A
+#define MT6397_VCORE_CON3 0x026C
+#define MT6397_VCORE_CON4 0x026E
+#define MT6397_VCORE_CON5 0x0270
+#define MT6397_VCORE_CON6 0x0272
+#define MT6397_VCORE_CON7 0x0274
+#define MT6397_VCORE_CON8 0x0276
+#define MT6397_VCORE_CON9 0x0278
+#define MT6397_VCORE_CON10 0x027A
+#define MT6397_VCORE_CON11 0x027C
+#define MT6397_VCORE_CON12 0x027E
+#define MT6397_VCORE_CON13 0x0280
+#define MT6397_VCORE_CON14 0x0282
+#define MT6397_VCORE_CON15 0x0284
+#define MT6397_VCORE_CON16 0x0286
+#define MT6397_VCORE_CON17 0x0288
+#define MT6397_VCORE_CON18 0x028A
+#define MT6397_VGPU_CON0 0x028C
+#define MT6397_VGPU_CON1 0x028E
+#define MT6397_VGPU_CON2 0x0290
+#define MT6397_VGPU_CON3 0x0292
+#define MT6397_VGPU_CON4 0x0294
+#define MT6397_VGPU_CON5 0x0296
+#define MT6397_VGPU_CON6 0x0298
+#define MT6397_VGPU_CON7 0x029A
+#define MT6397_VGPU_CON8 0x029C
+#define MT6397_VGPU_CON9 0x029E
+#define MT6397_VGPU_CON10 0x02A0
+#define MT6397_VGPU_CON11 0x02A2
+#define MT6397_VGPU_CON12 0x02A4
+#define MT6397_VGPU_CON13 0x02A6
+#define MT6397_VGPU_CON14 0x02A8
+#define MT6397_VGPU_CON15 0x02AA
+#define MT6397_VGPU_CON16 0x02AC
+#define MT6397_VGPU_CON17 0x02AE
+#define MT6397_VGPU_CON18 0x02B0
+#define MT6397_VIO18_CON0 0x0300
+#define MT6397_VIO18_CON1 0x0302
+#define MT6397_VIO18_CON2 0x0304
+#define MT6397_VIO18_CON3 0x0306
+#define MT6397_VIO18_CON4 0x0308
+#define MT6397_VIO18_CON5 0x030A
+#define MT6397_VIO18_CON6 0x030C
+#define MT6397_VIO18_CON7 0x030E
+#define MT6397_VIO18_CON8 0x0310
+#define MT6397_VIO18_CON9 0x0312
+#define MT6397_VIO18_CON10 0x0314
+#define MT6397_VIO18_CON11 0x0316
+#define MT6397_VIO18_CON12 0x0318
+#define MT6397_VIO18_CON13 0x031A
+#define MT6397_VIO18_CON14 0x031C
+#define MT6397_VIO18_CON15 0x031E
+#define MT6397_VIO18_CON16 0x0320
+#define MT6397_VIO18_CON17 0x0322
+#define MT6397_VIO18_CON18 0x0324
+#define MT6397_VPCA7_CON0 0x0326
+#define MT6397_VPCA7_CON1 0x0328
+#define MT6397_VPCA7_CON2 0x032A
+#define MT6397_VPCA7_CON3 0x032C
+#define MT6397_VPCA7_CON4 0x032E
+#define MT6397_VPCA7_CON5 0x0330
+#define MT6397_VPCA7_CON6 0x0332
+#define MT6397_VPCA7_CON7 0x0334
+#define MT6397_VPCA7_CON8 0x0336
+#define MT6397_VPCA7_CON9 0x0338
+#define MT6397_VPCA7_CON10 0x033A
+#define MT6397_VPCA7_CON11 0x033C
+#define MT6397_VPCA7_CON12 0x033E
+#define MT6397_VPCA7_CON13 0x0340
+#define MT6397_VPCA7_CON14 0x0342
+#define MT6397_VPCA7_CON15 0x0344
+#define MT6397_VPCA7_CON16 0x0346
+#define MT6397_VPCA7_CON17 0x0348
+#define MT6397_VPCA7_CON18 0x034A
+#define MT6397_VSRMCA7_CON0 0x034C
+#define MT6397_VSRMCA7_CON1 0x034E
+#define MT6397_VSRMCA7_CON2 0x0350
+#define MT6397_VSRMCA7_CON3 0x0352
+#define MT6397_VSRMCA7_CON4 0x0354
+#define MT6397_VSRMCA7_CON5 0x0356
+#define MT6397_VSRMCA7_CON6 0x0358
+#define MT6397_VSRMCA7_CON7 0x035A
+#define MT6397_VSRMCA7_CON8 0x035C
+#define MT6397_VSRMCA7_CON9 0x035E
+#define MT6397_VSRMCA7_CON10 0x0360
+#define MT6397_VSRMCA7_CON11 0x0362
+#define MT6397_VSRMCA7_CON12 0x0364
+#define MT6397_VSRMCA7_CON13 0x0366
+#define MT6397_VSRMCA7_CON14 0x0368
+#define MT6397_VSRMCA7_CON15 0x036A
+#define MT6397_VSRMCA7_CON16 0x036C
+#define MT6397_VSRMCA7_CON17 0x036E
+#define MT6397_VSRMCA7_CON18 0x0370
+#define MT6397_VSRMCA7_CON19 0x0372
+#define MT6397_VSRMCA7_CON20 0x0374
+#define MT6397_VSRMCA7_CON21 0x0376
+#define MT6397_VDRM_CON0 0x0378
+#define MT6397_VDRM_CON1 0x037A
+#define MT6397_VDRM_CON2 0x037C
+#define MT6397_VDRM_CON3 0x037E
+#define MT6397_VDRM_CON4 0x0380
+#define MT6397_VDRM_CON5 0x0382
+#define MT6397_VDRM_CON6 0x0384
+#define MT6397_VDRM_CON7 0x0386
+#define MT6397_VDRM_CON8 0x0388
+#define MT6397_VDRM_CON9 0x038A
+#define MT6397_VDRM_CON10 0x038C
+#define MT6397_VDRM_CON11 0x038E
+#define MT6397_VDRM_CON12 0x0390
+#define MT6397_VDRM_CON13 0x0392
+#define MT6397_VDRM_CON14 0x0394
+#define MT6397_VDRM_CON15 0x0396
+#define MT6397_VDRM_CON16 0x0398
+#define MT6397_VDRM_CON17 0x039A
+#define MT6397_VDRM_CON18 0x039C
+#define MT6397_BUCK_K_CON0 0x039E
+#define MT6397_BUCK_K_CON1 0x03A0
+#define MT6397_ANALDO_CON0 0x0400
+#define MT6397_ANALDO_CON1 0x0402
+#define MT6397_ANALDO_CON2 0x0404
+#define MT6397_ANALDO_CON3 0x0406
+#define MT6397_ANALDO_CON4 0x0408
+#define MT6397_ANALDO_CON5 0x040A
+#define MT6397_ANALDO_CON6 0x040C
+#define MT6397_ANALDO_CON7 0x040E
+#define MT6397_DIGLDO_CON0 0x0410
+#define MT6397_DIGLDO_CON1 0x0412
+#define MT6397_DIGLDO_CON2 0x0414
+#define MT6397_DIGLDO_CON3 0x0416
+#define MT6397_DIGLDO_CON4 0x0418
+#define MT6397_DIGLDO_CON5 0x041A
+#define MT6397_DIGLDO_CON6 0x041C
+#define MT6397_DIGLDO_CON7 0x041E
+#define MT6397_DIGLDO_CON8 0x0420
+#define MT6397_DIGLDO_CON9 0x0422
+#define MT6397_DIGLDO_CON10 0x0424
+#define MT6397_DIGLDO_CON11 0x0426
+#define MT6397_DIGLDO_CON12 0x0428
+#define MT6397_DIGLDO_CON13 0x042A
+#define MT6397_DIGLDO_CON14 0x042C
+#define MT6397_DIGLDO_CON15 0x042E
+#define MT6397_DIGLDO_CON16 0x0430
+#define MT6397_DIGLDO_CON17 0x0432
+#define MT6397_DIGLDO_CON18 0x0434
+#define MT6397_DIGLDO_CON19 0x0436
+#define MT6397_DIGLDO_CON20 0x0438
+#define MT6397_DIGLDO_CON21 0x043A
+#define MT6397_DIGLDO_CON22 0x043C
+#define MT6397_DIGLDO_CON23 0x043E
+#define MT6397_DIGLDO_CON24 0x0440
+#define MT6397_DIGLDO_CON25 0x0442
+#define MT6397_DIGLDO_CON26 0x0444
+#define MT6397_DIGLDO_CON27 0x0446
+#define MT6397_DIGLDO_CON28 0x0448
+#define MT6397_DIGLDO_CON29 0x044A
+#define MT6397_DIGLDO_CON30 0x044C
+#define MT6397_DIGLDO_CON31 0x044E
+#define MT6397_DIGLDO_CON32 0x0450
+#define MT6397_DIGLDO_CON33 0x045A
+#define MT6397_SPK_CON0 0x0600
+#define MT6397_SPK_CON1 0x0602
+#define MT6397_SPK_CON2 0x0604
+#define MT6397_SPK_CON3 0x0606
+#define MT6397_SPK_CON4 0x0608
+#define MT6397_SPK_CON5 0x060A
+#define MT6397_SPK_CON6 0x060C
+#define MT6397_SPK_CON7 0x060E
+#define MT6397_SPK_CON8 0x0610
+#define MT6397_SPK_CON9 0x0612
+#define MT6397_SPK_CON10 0x0614
+#define MT6397_SPK_CON11 0x0616
+#define MT6397_AUDDAC_CON0 0x0700
+#define MT6397_AUDBUF_CFG0 0x0702
+#define MT6397_AUDBUF_CFG1 0x0704
+#define MT6397_AUDBUF_CFG2 0x0706
+#define MT6397_AUDBUF_CFG3 0x0708
+#define MT6397_AUDBUF_CFG4 0x070A
+#define MT6397_IBIASDIST_CFG0 0x070C
+#define MT6397_AUDACCDEPOP_CFG0 0x070E
+#define MT6397_AUD_IV_CFG0 0x0710
+#define MT6397_AUDCLKGEN_CFG0 0x0712
+#define MT6397_AUDLDO_CFG0 0x0714
+#define MT6397_AUDLDO_CFG1 0x0716
+#define MT6397_AUDNVREGGLB_CFG0 0x0718
+#define MT6397_AUD_NCP0 0x071A
+#define MT6397_AUDPREAMP_CON0 0x071C
+#define MT6397_AUDADC_CON0 0x071E
+#define MT6397_AUDADC_CON1 0x0720
+#define MT6397_AUDADC_CON2 0x0722
+#define MT6397_AUDADC_CON3 0x0724
+#define MT6397_AUDADC_CON4 0x0726
+#define MT6397_AUDADC_CON5 0x0728
+#define MT6397_AUDADC_CON6 0x072A
+#define MT6397_AUDDIGMI_CON0 0x072C
+#define MT6397_AUDLSBUF_CON0 0x072E
+#define MT6397_AUDLSBUF_CON1 0x0730
+#define MT6397_AUDENCSPARE_CON0 0x0732
+#define MT6397_AUDENCCLKSQ_CON0 0x0734
+#define MT6397_AUDPREAMPGAIN_CON0 0x0736
+#define MT6397_ZCD_CON0 0x0738
+#define MT6397_ZCD_CON1 0x073A
+#define MT6397_ZCD_CON2 0x073C
+#define MT6397_ZCD_CON3 0x073E
+#define MT6397_ZCD_CON4 0x0740
+#define MT6397_ZCD_CON5 0x0742
+#define MT6397_NCP_CLKDIV_CON0 0x0744
+#define MT6397_NCP_CLKDIV_CON1 0x0746
+
+#endif /* __MFD_MT6397_REGISTERS_H__ */
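These offsets are plain register addresses consumed through regmap. A minimal sketch, assuming a valid regmap handle for the PMIC, reads the chip ID register as a basic presence check.

#include <linux/mfd/mt6397/registers.h>
#include <linux/printk.h>
#include <linux/regmap.h>

/* Sketch: read the chip ID register as a sanity check. */
static int mt6397_check_cid(struct regmap *rmap)
{
	unsigned int cid;
	int ret;

	ret = regmap_read(rmap, MT6397_CID, &cid);
	if (ret)
		return ret;

	pr_debug("MT6397 CID: 0x%x\n", cid);
	return 0;
}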
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index fb0390a..bb270bd 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -117,6 +117,7 @@ struct palmas_pmic_driver_data {
int ldo_begin;
int ldo_end;
int max_reg;
+ bool has_regen3;
struct palmas_regs_info *palmas_regs_info;
struct of_regulator_match *palmas_matches;
struct palmas_sleep_requestor_info *sleep_req_info;
@@ -2999,6 +3000,9 @@ enum usb_irq_events {
#define PALMAS_GPADC_TRIM15 0x0E
#define PALMAS_GPADC_TRIM16 0x0F
+/* The TPS659038 regen2_ctrl offset is different from Palmas */
+#define TPS659038_REGEN2_CTRL 0x12
+
/* TPS65917 Interrupt registers */
/* Registers for function INTERRUPT */
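Because the TPS659038 uses a different REGEN2 control offset, a driver has to pick the offset per variant. The snippet below is a sketch only: the is_tps659038 flag and the stock PALMAS_REGEN2_CTRL offset are assumed to be provided by the surrounding driver and header.

#include <linux/mfd/palmas.h>

/* Sketch: select the REGEN2 control offset for TPS659038 vs. Palmas. */
static unsigned int palmas_regen2_ctrl_offset(bool is_tps659038)
{
	return is_tps659038 ? TPS659038_REGEN2_CTRL : PALMAS_REGEN2_CTRL;
}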
diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h
new file mode 100644
index 0000000..742ebf1
--- /dev/null
+++ b/include/linux/mfd/qcom_rpm.h
@@ -0,0 +1,13 @@
+#ifndef __QCOM_RPM_H__
+#define __QCOM_RPM_H__
+
+#include <linux/types.h>
+
+struct qcom_rpm;
+
+#define QCOM_RPM_ACTIVE_STATE 0
+#define QCOM_RPM_SLEEP_STATE 1
+
+int qcom_rpm_write(struct qcom_rpm *rpm, int state, int resource, u32 *buf, size_t count);
+
+#endif
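qcom_rpm_write() takes the target state, a resource index, and a buffer of words. A usage sketch follows; the resource index and value are purely illustrative placeholders, since real indices come from the RPM driver and DT bindings.

#include <linux/mfd/qcom_rpm.h>

/* Placeholder resource index for illustration only. */
#define EXAMPLE_RPM_RESOURCE 0

/* Sketch: request one word for a resource in the active state. */
static int example_rpm_request(struct qcom_rpm *rpm)
{
	u32 val = 1;

	return qcom_rpm_write(rpm, QCOM_RPM_ACTIVE_STATE,
			      EXAMPLE_RPM_RESOURCE, &val, 1);
}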
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index fb09312..441b6ee 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -156,6 +156,9 @@ enum rk808_reg {
#define BUCK2_RATE_MASK (3 << 3)
#define MASK_ALL 0xff
+#define BUCK_UV_ACT_MASK 0x0f
+#define BUCK_UV_ACT_DISABLE 0
+
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
new file mode 100644
index 0000000..1b63fc2
--- /dev/null
+++ b/include/linux/mfd/rt5033-private.h
@@ -0,0 +1,260 @@
+/*
+ * MFD core driver for Richtek RT5033
+ *
+ * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5033_PRIVATE_H__
+#define __RT5033_PRIVATE_H__
+
+enum rt5033_reg {
+ RT5033_REG_CHG_STAT = 0x00,
+ RT5033_REG_CHG_CTRL1 = 0x01,
+ RT5033_REG_CHG_CTRL2 = 0x02,
+ RT5033_REG_DEVICE_ID = 0x03,
+ RT5033_REG_CHG_CTRL3 = 0x04,
+ RT5033_REG_CHG_CTRL4 = 0x05,
+ RT5033_REG_CHG_CTRL5 = 0x06,
+ RT5033_REG_RT_CTRL0 = 0x07,
+ RT5033_REG_CHG_RESET = 0x08,
+ /* Reserved 0x09~0x18 */
+ RT5033_REG_RT_CTRL1 = 0x19,
+ /* Reserved 0x1A~0x20 */
+ RT5033_REG_FLED_FUNCTION1 = 0x21,
+ RT5033_REG_FLED_FUNCTION2 = 0x22,
+ RT5033_REG_FLED_STROBE_CTRL1 = 0x23,
+ RT5033_REG_FLED_STROBE_CTRL2 = 0x24,
+ RT5033_REG_FLED_CTRL1 = 0x25,
+ RT5033_REG_FLED_CTRL2 = 0x26,
+ RT5033_REG_FLED_CTRL3 = 0x27,
+ RT5033_REG_FLED_CTRL4 = 0x28,
+ RT5033_REG_FLED_CTRL5 = 0x29,
+ /* Reserved 0x2A~0x40 */
+ RT5033_REG_CTRL = 0x41,
+ RT5033_REG_BUCK_CTRL = 0x42,
+ RT5033_REG_LDO_CTRL = 0x43,
+ /* Reserved 0x44~0x46 */
+ RT5033_REG_MANUAL_RESET_CTRL = 0x47,
+ /* Reserved 0x48~0x5F */
+ RT5033_REG_CHG_IRQ1 = 0x60,
+ RT5033_REG_CHG_IRQ2 = 0x61,
+ RT5033_REG_CHG_IRQ3 = 0x62,
+ RT5033_REG_CHG_IRQ1_CTRL = 0x63,
+ RT5033_REG_CHG_IRQ2_CTRL = 0x64,
+ RT5033_REG_CHG_IRQ3_CTRL = 0x65,
+ RT5033_REG_LED_IRQ_STAT = 0x66,
+ RT5033_REG_LED_IRQ_CTRL = 0x67,
+ RT5033_REG_PMIC_IRQ_STAT = 0x68,
+ RT5033_REG_PMIC_IRQ_CTRL = 0x69,
+ RT5033_REG_SHDN_CTRL = 0x6A,
+ RT5033_REG_OFF_EVENT = 0x6B,
+
+ RT5033_REG_END,
+};
+
+/* RT5033 Charger state register */
+#define RT5033_CHG_STAT_MASK 0x20
+#define RT5033_CHG_STAT_DISCHARGING 0x00
+#define RT5033_CHG_STAT_FULL 0x10
+#define RT5033_CHG_STAT_CHARGING 0x20
+#define RT5033_CHG_STAT_NOT_CHARGING 0x30
+#define RT5033_CHG_STAT_TYPE_MASK 0x60
+#define RT5033_CHG_STAT_TYPE_PRE 0x20
+#define RT5033_CHG_STAT_TYPE_FAST 0x60
+
+/* RT5033 CHGCTRL1 register */
+#define RT5033_CHGCTRL1_IAICR_MASK 0xe0
+#define RT5033_CHGCTRL1_MODE_MASK 0x01
+
+/* RT5033 CHGCTRL2 register */
+#define RT5033_CHGCTRL2_CV_MASK 0xfc
+
+/* RT5033 CHGCTRL3 register */
+#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40
+#define RT5033_CHGCTRL3_TIMER_MASK 0x38
+#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01
+
+/* RT5033 CHGCTRL4 register */
+#define RT5033_CHGCTRL4_EOC_MASK 0x07
+#define RT5033_CHGCTRL4_IPREC_MASK 0x18
+
+/* RT5033 CHGCTRL5 register */
+#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
+#define RT5033_CHGCTRL5_ICHG_MASK 0xf0
+#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04
+#define RT5033_CHG_MAX_CURRENT 0x0d
+
+/* RT5033 RT CTRL1 register */
+#define RT5033_RT_CTRL1_UUG_MASK 0x02
+#define RT5033_RT_HZ_MASK 0x01
+
+/* RT5033 control register */
+#define RT5033_CTRL_FCCM_BUCK_MASK 0x00
+#define RT5033_CTRL_BUCKOMS_MASK 0x01
+#define RT5033_CTRL_LDOOMS_MASK 0x02
+#define RT5033_CTRL_SLDOOMS_MASK 0x03
+#define RT5033_CTRL_EN_BUCK_MASK 0x04
+#define RT5033_CTRL_EN_LDO_MASK 0x05
+#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06
+#define RT5033_CTRL_LDO_SLEEP_MASK 0x07
+
+/* RT5033 BUCK control register */
+#define RT5033_BUCK_CTRL_MASK 0x1f
+
+/* RT5033 LDO control register */
+#define RT5033_LDO_CTRL_MASK 0x1f
+
+/* RT5033 charger property - model, manufacturer */
+
+#define RT5033_CHARGER_MODEL "RT5033WSC Charger"
+#define RT5033_MANUFACTURER "Richtek Technology Corporation"
+
+/*
+ * RT5033 charger input current limits (AICR, as in the CHGCTRL1 register).
+ * AICR mode limits the input current; for example, AICR 100 mode limits
+ * the input current to 100 mA.
+ */
+#define RT5033_AICR_100_MODE 0x20
+#define RT5033_AICR_500_MODE 0x40
+#define RT5033_AICR_700_MODE 0x60
+#define RT5033_AICR_900_MODE 0x80
+#define RT5033_AICR_1500_MODE 0xc0
+#define RT5033_AICR_2000_MODE 0xe0
+#define RT5033_AICR_MODE_MASK 0xe0
+
+/* RT5033 uses an internal timer; the fast-charge time below must be set */
+#define RT5033_FAST_CHARGE_TIMER4 0x00
+#define RT5033_FAST_CHARGE_TIMER6 0x01
+#define RT5033_FAST_CHARGE_TIMER8 0x02
+#define RT5033_FAST_CHARGE_TIMER9 0x03
+#define RT5033_FAST_CHARGE_TIMER12 0x04
+#define RT5033_FAST_CHARGE_TIMER14 0x05
+#define RT5033_FAST_CHARGE_TIMER16 0x06
+
+#define RT5033_INT_TIMER_ENABLE 0x01
+
+/* RT5033 charger termination enable mask */
+#define RT5033_TE_ENABLE_MASK 0x08
+
+/*
+ * RT5033 charger OPA mode. The RT5033 has two OPA modes: charger mode
+ * and boost mode (for OTG).
+ */
+
+#define RT5033_CHARGER_MODE 0x00
+#define RT5033_BOOST_MODE 0x01
+
+/* RT5033 charger termination enable */
+#define RT5033_TE_ENABLE 0x08
+
+/* RT5033 charger CFO enable */
+#define RT5033_CFO_ENABLE 0x40
+
+/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
+#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
+#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U
+#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
+
+/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
+#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U
+#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U
+#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U
+
+/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
+#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U
+#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U
+#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U
+
+/*
+ * RT5033 charger constant-charge end-of-charge current
+ * (as in CHGCTRL4 register), uA
+ */
+#define RT5033_CHARGER_EOC_MIN 150000U
+#define RT5033_CHARGER_EOC_REF 300000U
+#define RT5033_CHARGER_EOC_STEP_NUM1 50000U
+#define RT5033_CHARGER_EOC_STEP_NUM2 100000U
+#define RT5033_CHARGER_EOC_MAX 600000U
+
+/*
+ * RT5033 charger pre-charge threshold voltage limits
+ * (as in CHGCTRL5 register), uV
+ */
+
+#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U
+#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U
+#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U
+
+/*
+ * RT5033 charger UUG enable. When UUG is enabled, the MOSFET is controlled
+ * automatically by the H/W charger circuit.
+ */
+#define RT5033_CHARGER_UUG_ENABLE 0x02
+
+/* RT5033 charger High impedance mode */
+#define RT5033_CHARGER_HZ_DISABLE 0x00
+#define RT5033_CHARGER_HZ_ENABLE 0x01
+
+/* RT5033 regulator BUCK output voltage uV */
+#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32
+
+/* RT5033 regulator LDO output voltage uV */
+#define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32
+
+/* RT5033 regulator SAFE LDO output voltage uV */
+#define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U
+
+enum rt5033_fuel_reg {
+ RT5033_FUEL_REG_OCV_H = 0x00,
+ RT5033_FUEL_REG_OCV_L = 0x01,
+ RT5033_FUEL_REG_VBAT_H = 0x02,
+ RT5033_FUEL_REG_VBAT_L = 0x03,
+ RT5033_FUEL_REG_SOC_H = 0x04,
+ RT5033_FUEL_REG_SOC_L = 0x05,
+ RT5033_FUEL_REG_CTRL_H = 0x06,
+ RT5033_FUEL_REG_CTRL_L = 0x07,
+ RT5033_FUEL_REG_CRATE = 0x08,
+ RT5033_FUEL_REG_DEVICE_ID = 0x09,
+ RT5033_FUEL_REG_AVG_VOLT_H = 0x0A,
+ RT5033_FUEL_REG_AVG_VOLT_L = 0x0B,
+ RT5033_FUEL_REG_CONFIG_H = 0x0C,
+ RT5033_FUEL_REG_CONFIG_L = 0x0D,
+ /* Reserved 0x0E~0x0F */
+ RT5033_FUEL_REG_IRQ_CTRL = 0x10,
+ RT5033_FUEL_REG_IRQ_FLAG = 0x11,
+ RT5033_FUEL_VMIN = 0x12,
+ RT5033_FUEL_SMIN = 0x13,
+ /* Reserved 0x14~0x1F */
+ RT5033_FUEL_VGCOMP1 = 0x20,
+ RT5033_FUEL_VGCOMP2 = 0x21,
+ RT5033_FUEL_VGCOMP3 = 0x22,
+ RT5033_FUEL_VGCOMP4 = 0x23,
+ /* Reserved 0x24~0xFD */
+ RT5033_FUEL_MFA_H = 0xFE,
+ RT5033_FUEL_MFA_L = 0xFF,
+
+ RT5033_FUEL_REG_END,
+};
+
+/* RT5033 fuel gauge battery present property */
+#define RT5033_FUEL_BAT_PRESENT 0x02
+
+/* RT5033 PMIC interrupts */
+#define RT5033_PMIC_IRQ_BUCKOCP 2
+#define RT5033_PMIC_IRQ_BUCKLV 3
+#define RT5033_PMIC_IRQ_SAFELDOLV 4
+#define RT5033_PMIC_IRQ_LDOLV 5
+#define RT5033_PMIC_IRQ_OT 6
+#define RT5033_PMIC_IRQ_VDDA_UV 7
+
+#endif /* __RT5033_PRIVATE_H__ */
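The MIN/STEP/MAX groups above are designed for linear register encodings. A worked sketch follows for the CHGCTRL2 constant-charge voltage: note that, despite its name, RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM holds the step size in uV, and placing the field in the upper bits of RT5033_CHGCTRL2_CV_MASK (a shift of 2) is an assumption of this example, not something stated in the header.

#include <linux/errno.h>
#include <linux/mfd/rt5033-private.h>
#include <linux/regmap.h>

/* Sketch: convert a target constant-charge voltage (uV) into CHGCTRL2. */
static int rt5033_set_const_voltage(struct regmap *rmap, unsigned int uvolt)
{
	unsigned int steps;

	if (uvolt < RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN ||
	    uvolt > RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX)
		return -EINVAL;

	steps = (uvolt - RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN) /
		RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM;

	return regmap_update_bits(rmap, RT5033_REG_CHG_CTRL2,
				  RT5033_CHGCTRL2_CV_MASK, steps << 2);
}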
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
new file mode 100644
index 0000000..6cff5cf
--- /dev/null
+++ b/include/linux/mfd/rt5033.h
@@ -0,0 +1,62 @@
+/*
+ * MFD core driver for the RT5033
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5033_H__
+#define __RT5033_H__
+
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+
+/* RT5033 regulator IDs */
+enum rt5033_regulators {
+ RT5033_BUCK = 0,
+ RT5033_LDO,
+ RT5033_SAFE_LDO,
+
+ RT5033_REGULATOR_NUM,
+};
+
+struct rt5033_dev {
+ struct device *dev;
+
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ int irq;
+ bool wakeup;
+};
+
+struct rt5033_battery {
+ struct i2c_client *client;
+ struct rt5033_dev *rt5033;
+ struct regmap *regmap;
+ struct power_supply *psy;
+};
+
+/* RT5033 charger platform data */
+struct rt5033_charger_data {
+ unsigned int pre_uamp;
+ unsigned int pre_uvolt;
+ unsigned int const_uvolt;
+ unsigned int eoc_uamp;
+ unsigned int fast_uamp;
+};
+
+struct rt5033_charger {
+ struct device *dev;
+ struct rt5033_dev *rt5033;
+ struct power_supply psy;
+
+ struct rt5033_charger_data *chg;
+};
+
+#endif /* __RT5033_H__ */
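struct rt5033_charger_data carries the board-specific charging parameters in microamps and microvolts. The initializer below is illustrative only, using values that fall inside the limits from rt5033-private.h; the numbers are examples, not real board data.

#include <linux/mfd/rt5033.h>

/* Illustrative charger platform data; values are examples only. */
static struct rt5033_charger_data example_rt5033_chg_data = {
	.pre_uamp	= 350000,	/* pre-charge current, uA */
	.pre_uvolt	= 3500000,	/* pre-charge threshold, uV */
	.const_uvolt	= 4350000,	/* constant-charge voltage, uV */
	.eoc_uamp	= 300000,	/* end-of-charge current, uA */
	.fast_uamp	= 1000000,	/* fast-charge current, uA */
};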
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 0c12628..ff843e7 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -28,74 +28,72 @@
#define MAX_RW_REG_CNT 1024
-/* PCI Operation Register Address */
#define RTSX_HCBAR 0x00
#define RTSX_HCBCTLR 0x04
+#define STOP_CMD (0x01 << 28)
+#define READ_REG_CMD 0
+#define WRITE_REG_CMD 1
+#define CHECK_REG_CMD 2
+
#define RTSX_HDBAR 0x08
+#define SG_INT 0x04
+#define SG_END 0x02
+#define SG_VALID 0x01
+#define SG_NO_OP 0x00
+#define SG_TRANS_DATA (0x02 << 4)
+#define SG_LINK_DESC (0x03 << 4)
#define RTSX_HDBCTLR 0x0C
+#define SDMA_MODE 0x00
+#define ADMA_MODE (0x02 << 26)
+#define STOP_DMA (0x01 << 28)
+#define TRIG_DMA (0x01 << 31)
+
#define RTSX_HAIMR 0x10
-#define RTSX_BIPR 0x14
-#define RTSX_BIER 0x18
+#define HAIMR_TRANS_START (0x01 << 31)
+#define HAIMR_READ 0x00
+#define HAIMR_WRITE (0x01 << 30)
+#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ)
+#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE)
+#define HAIMR_TRANS_END (HAIMR_TRANS_START)
-/* Host command buffer control register */
-#define STOP_CMD (0x01 << 28)
-
-/* Host data buffer control register */
-#define SDMA_MODE 0x00
-#define ADMA_MODE (0x02 << 26)
-#define STOP_DMA (0x01 << 28)
-#define TRIG_DMA (0x01 << 31)
-
-/* Host access internal memory register */
-#define HAIMR_TRANS_START (0x01 << 31)
-#define HAIMR_READ 0x00
-#define HAIMR_WRITE (0x01 << 30)
-#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ)
-#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE)
-#define HAIMR_TRANS_END (HAIMR_TRANS_START)
-
-/* Bus interrupt pending register */
-#define CMD_DONE_INT (1 << 31)
-#define DATA_DONE_INT (1 << 30)
-#define TRANS_OK_INT (1 << 29)
-#define TRANS_FAIL_INT (1 << 28)
-#define XD_INT (1 << 27)
-#define MS_INT (1 << 26)
-#define SD_INT (1 << 25)
-#define GPIO0_INT (1 << 24)
-#define OC_INT (1 << 23)
-#define SD_WRITE_PROTECT (1 << 19)
-#define XD_EXIST (1 << 18)
-#define MS_EXIST (1 << 17)
-#define SD_EXIST (1 << 16)
-#define DELINK_INT GPIO0_INT
-#define MS_OC_INT (1 << 23)
-#define SD_OC_INT (1 << 22)
+#define RTSX_BIPR 0x14
+#define CMD_DONE_INT (1 << 31)
+#define DATA_DONE_INT (1 << 30)
+#define TRANS_OK_INT (1 << 29)
+#define TRANS_FAIL_INT (1 << 28)
+#define XD_INT (1 << 27)
+#define MS_INT (1 << 26)
+#define SD_INT (1 << 25)
+#define GPIO0_INT (1 << 24)
+#define OC_INT (1 << 23)
+#define SD_WRITE_PROTECT (1 << 19)
+#define XD_EXIST (1 << 18)
+#define MS_EXIST (1 << 17)
+#define SD_EXIST (1 << 16)
+#define DELINK_INT GPIO0_INT
+#define MS_OC_INT (1 << 23)
+#define SD_OC_INT (1 << 22)
#define CARD_INT (XD_INT | MS_INT | SD_INT)
#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | \
CARD_INT | GPIO0_INT | OC_INT)
-
#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
-/* Bus interrupt enable register */
-#define CMD_DONE_INT_EN (1 << 31)
-#define DATA_DONE_INT_EN (1 << 30)
-#define TRANS_OK_INT_EN (1 << 29)
-#define TRANS_FAIL_INT_EN (1 << 28)
-#define XD_INT_EN (1 << 27)
-#define MS_INT_EN (1 << 26)
-#define SD_INT_EN (1 << 25)
-#define GPIO0_INT_EN (1 << 24)
-#define OC_INT_EN (1 << 23)
-#define DELINK_INT_EN GPIO0_INT_EN
-#define MS_OC_INT_EN (1 << 23)
-#define SD_OC_INT_EN (1 << 22)
-
-#define READ_REG_CMD 0
-#define WRITE_REG_CMD 1
-#define CHECK_REG_CMD 2
+#define RTSX_BIER 0x18
+#define CMD_DONE_INT_EN (1 << 31)
+#define DATA_DONE_INT_EN (1 << 30)
+#define TRANS_OK_INT_EN (1 << 29)
+#define TRANS_FAIL_INT_EN (1 << 28)
+#define XD_INT_EN (1 << 27)
+#define MS_INT_EN (1 << 26)
+#define SD_INT_EN (1 << 25)
+#define GPIO0_INT_EN (1 << 24)
+#define OC_INT_EN (1 << 23)
+#define DELINK_INT_EN GPIO0_INT_EN
+#define MS_OC_INT_EN (1 << 23)
+#define SD_OC_INT_EN (1 << 22)
+
/*
* macros for easy use
@@ -125,423 +123,68 @@
#define rtsx_pci_write_config_dword(pcr, where, val) \
pci_write_config_dword((pcr)->pci, where, val)
-#define STATE_TRANS_NONE 0
-#define STATE_TRANS_CMD 1
-#define STATE_TRANS_BUF 2
-#define STATE_TRANS_SG 3
-
-#define TRANS_NOT_READY 0
-#define TRANS_RESULT_OK 1
-#define TRANS_RESULT_FAIL 2
-#define TRANS_NO_DEVICE 3
-
-#define RTSX_RESV_BUF_LEN 4096
-#define HOST_CMDS_BUF_LEN 1024
-#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
-#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8)
-#define MAX_SG_ITEM_LEN 0x80000
-
-#define HOST_TO_DEVICE 0
-#define DEVICE_TO_HOST 1
-
-#define RTSX_PHASE_MAX 32
-#define RX_TUNING_CNT 3
-
-/* SG descriptor */
-#define SG_INT 0x04
-#define SG_END 0x02
-#define SG_VALID 0x01
-
-#define SG_NO_OP 0x00
-#define SG_TRANS_DATA (0x02 << 4)
-#define SG_LINK_DESC (0x03 << 4)
-
-/* Output voltage */
-#define OUTPUT_3V3 0
-#define OUTPUT_1V8 1
-
-/* Card Clock Enable Register */
-#define SD_CLK_EN 0x04
-#define MS_CLK_EN 0x08
-
-/* Card Select Register */
-#define SD_MOD_SEL 2
-#define MS_MOD_SEL 3
-
-/* Card Output Enable Register */
-#define SD_OUTPUT_EN 0x04
-#define MS_OUTPUT_EN 0x08
-
-/* CARD_SHARE_MODE */
-#define CARD_SHARE_MASK 0x0F
-#define CARD_SHARE_MULTI_LUN 0x00
-#define CARD_SHARE_NORMAL 0x00
-#define CARD_SHARE_48_SD 0x04
-#define CARD_SHARE_48_MS 0x08
-/* CARD_SHARE_MODE for barossa */
-#define CARD_SHARE_BAROSSA_SD 0x01
-#define CARD_SHARE_BAROSSA_MS 0x02
-
-/* CARD_DRIVE_SEL */
-#define MS_DRIVE_8mA (0x01 << 6)
-#define MMC_DRIVE_8mA (0x01 << 4)
-#define XD_DRIVE_8mA (0x01 << 2)
-#define GPIO_DRIVE_8mA 0x01
-#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
- XD_DRIVE_8mA | GPIO_DRIVE_8mA)
-#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
- XD_DRIVE_8mA)
-#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define STATE_TRANS_NONE 0
+#define STATE_TRANS_CMD 1
+#define STATE_TRANS_BUF 2
+#define STATE_TRANS_SG 3
-/* SD30_DRIVE_SEL */
-#define DRIVER_TYPE_A 0x05
-#define DRIVER_TYPE_B 0x03
-#define DRIVER_TYPE_C 0x02
-#define DRIVER_TYPE_D 0x01
-#define CFG_DRIVER_TYPE_A 0x02
-#define CFG_DRIVER_TYPE_B 0x03
-#define CFG_DRIVER_TYPE_C 0x01
-#define CFG_DRIVER_TYPE_D 0x00
-
-/* FPDCTL */
-#define SSC_POWER_DOWN 0x01
-#define SD_OC_POWER_DOWN 0x02
-#define ALL_POWER_DOWN 0x07
-#define OC_POWER_DOWN 0x06
-
-/* CLK_CTL */
-#define CHANGE_CLK 0x01
-
-/* LDO_CTL */
-#define BPP_ASIC_1V7 0x00
-#define BPP_ASIC_1V8 0x01
-#define BPP_ASIC_1V9 0x02
-#define BPP_ASIC_2V0 0x03
-#define BPP_ASIC_2V7 0x04
-#define BPP_ASIC_2V8 0x05
-#define BPP_ASIC_3V2 0x06
-#define BPP_ASIC_3V3 0x07
-#define BPP_REG_TUNED18 0x07
-#define BPP_TUNED18_SHIFT_8402 5
-#define BPP_TUNED18_SHIFT_8411 4
-#define BPP_PAD_MASK 0x04
-#define BPP_PAD_3V3 0x04
-#define BPP_PAD_1V8 0x00
-#define BPP_LDO_POWB 0x03
-#define BPP_LDO_ON 0x00
-#define BPP_LDO_SUSPEND 0x02
-#define BPP_LDO_OFF 0x03
-
-/* CD_PAD_CTL */
-#define CD_DISABLE_MASK 0x07
-#define MS_CD_DISABLE 0x04
-#define SD_CD_DISABLE 0x02
-#define XD_CD_DISABLE 0x01
-#define CD_DISABLE 0x07
-#define CD_ENABLE 0x00
-#define MS_CD_EN_ONLY 0x03
-#define SD_CD_EN_ONLY 0x05
-#define XD_CD_EN_ONLY 0x06
-#define FORCE_CD_LOW_MASK 0x38
-#define FORCE_CD_XD_LOW 0x08
-#define FORCE_CD_SD_LOW 0x10
-#define FORCE_CD_MS_LOW 0x20
-#define CD_AUTO_DISABLE 0x40
-
-/* SD_STAT1 */
-#define SD_CRC7_ERR 0x80
-#define SD_CRC16_ERR 0x40
-#define SD_CRC_WRITE_ERR 0x20
-#define SD_CRC_WRITE_ERR_MASK 0x1C
-#define GET_CRC_TIME_OUT 0x02
-#define SD_TUNING_COMPARE_ERR 0x01
-
-/* SD_STAT2 */
-#define SD_RSP_80CLK_TIMEOUT 0x01
-
-/* SD_BUS_STAT */
-#define SD_CLK_TOGGLE_EN 0x80
-#define SD_CLK_FORCE_STOP 0x40
-#define SD_DAT3_STATUS 0x10
-#define SD_DAT2_STATUS 0x08
-#define SD_DAT1_STATUS 0x04
-#define SD_DAT0_STATUS 0x02
-#define SD_CMD_STATUS 0x01
-
-/* SD_PAD_CTL */
-#define SD_IO_USING_1V8 0x80
-#define SD_IO_USING_3V3 0x7F
-#define TYPE_A_DRIVING 0x00
-#define TYPE_B_DRIVING 0x01
-#define TYPE_C_DRIVING 0x02
-#define TYPE_D_DRIVING 0x03
-
-/* SD_SAMPLE_POINT_CTL */
-#define DDR_FIX_RX_DAT 0x00
-#define DDR_VAR_RX_DAT 0x80
-#define DDR_FIX_RX_DAT_EDGE 0x00
-#define DDR_FIX_RX_DAT_14_DELAY 0x40
-#define DDR_FIX_RX_CMD 0x00
-#define DDR_VAR_RX_CMD 0x20
-#define DDR_FIX_RX_CMD_POS_EDGE 0x00
-#define DDR_FIX_RX_CMD_14_DELAY 0x10
-#define SD20_RX_POS_EDGE 0x00
-#define SD20_RX_14_DELAY 0x08
-#define SD20_RX_SEL_MASK 0x08
+#define TRANS_NOT_READY 0
+#define TRANS_RESULT_OK 1
+#define TRANS_RESULT_FAIL 2
+#define TRANS_NO_DEVICE 3
-/* SD_PUSH_POINT_CTL */
-#define DDR_FIX_TX_CMD_DAT 0x00
-#define DDR_VAR_TX_CMD_DAT 0x80
-#define DDR_FIX_TX_DAT_14_TSU 0x00
-#define DDR_FIX_TX_DAT_12_TSU 0x40
-#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
-#define DDR_FIX_TX_CMD_14_AHEAD 0x20
-#define SD20_TX_NEG_EDGE 0x00
-#define SD20_TX_14_AHEAD 0x10
-#define SD20_TX_SEL_MASK 0x10
-#define DDR_VAR_SDCLK_POL_SWAP 0x01
-
-/* SD_TRANSFER */
-#define SD_TRANSFER_START 0x80
-#define SD_TRANSFER_END 0x40
-#define SD_STAT_IDLE 0x20
-#define SD_TRANSFER_ERR 0x10
-/* SD Transfer Mode definition */
-#define SD_TM_NORMAL_WRITE 0x00
-#define SD_TM_AUTO_WRITE_3 0x01
-#define SD_TM_AUTO_WRITE_4 0x02
-#define SD_TM_AUTO_READ_3 0x05
-#define SD_TM_AUTO_READ_4 0x06
-#define SD_TM_CMD_RSP 0x08
-#define SD_TM_AUTO_WRITE_1 0x09
-#define SD_TM_AUTO_WRITE_2 0x0A
-#define SD_TM_NORMAL_READ 0x0C
-#define SD_TM_AUTO_READ_1 0x0D
-#define SD_TM_AUTO_READ_2 0x0E
-#define SD_TM_AUTO_TUNING 0x0F
-
-/* SD_VPTX_CTL / SD_VPRX_CTL */
-#define PHASE_CHANGE 0x80
-#define PHASE_NOT_RESET 0x40
-
-/* SD_DCMPS_TX_CTL / SD_DCMPS_RX_CTL */
-#define DCMPS_CHANGE 0x80
-#define DCMPS_CHANGE_DONE 0x40
-#define DCMPS_ERROR 0x20
-#define DCMPS_CURRENT_PHASE 0x1F
-
-/* SD Configure 1 Register */
-#define SD_CLK_DIVIDE_0 0x00
-#define SD_CLK_DIVIDE_256 0xC0
-#define SD_CLK_DIVIDE_128 0x80
-#define SD_BUS_WIDTH_1BIT 0x00
-#define SD_BUS_WIDTH_4BIT 0x01
-#define SD_BUS_WIDTH_8BIT 0x02
-#define SD_ASYNC_FIFO_NOT_RST 0x10
-#define SD_20_MODE 0x00
-#define SD_DDR_MODE 0x04
-#define SD_30_MODE 0x08
-
-#define SD_CLK_DIVIDE_MASK 0xC0
-
-/* SD_CMD_STATE */
-#define SD_CMD_IDLE 0x80
-
-/* SD_DATA_STATE */
-#define SD_DATA_IDLE 0x80
-
-/* DCM_DRP_CTL */
-#define DCM_RESET 0x08
-#define DCM_LOCKED 0x04
-#define DCM_208M 0x00
-#define DCM_TX 0x01
-#define DCM_RX 0x02
-
-/* DCM_DRP_TRIG */
-#define DRP_START 0x80
-#define DRP_DONE 0x40
-
-/* DCM_DRP_CFG */
-#define DRP_WRITE 0x80
-#define DRP_READ 0x00
-#define DCM_WRITE_ADDRESS_50 0x50
-#define DCM_WRITE_ADDRESS_51 0x51
-#define DCM_READ_ADDRESS_00 0x00
-#define DCM_READ_ADDRESS_51 0x51
-
-/* IRQSTAT0 */
-#define DMA_DONE_INT 0x80
-#define SUSPEND_INT 0x40
-#define LINK_RDY_INT 0x20
-#define LINK_DOWN_INT 0x10
-
-/* DMACTL */
-#define DMA_RST 0x80
-#define DMA_BUSY 0x04
-#define DMA_DIR_TO_CARD 0x00
-#define DMA_DIR_FROM_CARD 0x02
-#define DMA_EN 0x01
-#define DMA_128 (0 << 4)
-#define DMA_256 (1 << 4)
-#define DMA_512 (2 << 4)
-#define DMA_1024 (3 << 4)
-#define DMA_PACK_SIZE_MASK 0x30
-
-/* SSC_CTL1 */
-#define SSC_RSTB 0x80
-#define SSC_8X_EN 0x40
-#define SSC_FIX_FRAC 0x20
-#define SSC_SEL_1M 0x00
-#define SSC_SEL_2M 0x08
-#define SSC_SEL_4M 0x10
-#define SSC_SEL_8M 0x18
-
-/* SSC_CTL2 */
-#define SSC_DEPTH_MASK 0x07
-#define SSC_DEPTH_DISALBE 0x00
-#define SSC_DEPTH_4M 0x01
-#define SSC_DEPTH_2M 0x02
-#define SSC_DEPTH_1M 0x03
-#define SSC_DEPTH_500K 0x04
-#define SSC_DEPTH_250K 0x05
-
-/* System Clock Control Register */
-#define CLK_LOW_FREQ 0x01
-
-/* System Clock Divider Register */
-#define CLK_DIV_1 0x01
-#define CLK_DIV_2 0x02
-#define CLK_DIV_4 0x03
-#define CLK_DIV_8 0x04
-
-/* MS_CFG */
-#define SAMPLE_TIME_RISING 0x00
-#define SAMPLE_TIME_FALLING 0x80
-#define PUSH_TIME_DEFAULT 0x00
-#define PUSH_TIME_ODD 0x40
-#define NO_EXTEND_TOGGLE 0x00
-#define EXTEND_TOGGLE_CHK 0x20
-#define MS_BUS_WIDTH_1 0x00
-#define MS_BUS_WIDTH_4 0x10
-#define MS_BUS_WIDTH_8 0x18
-#define MS_2K_SECTOR_MODE 0x04
-#define MS_512_SECTOR_MODE 0x00
-#define MS_TOGGLE_TIMEOUT_EN 0x00
-#define MS_TOGGLE_TIMEOUT_DISEN 0x01
-#define MS_NO_CHECK_INT 0x02
+#define RTSX_RESV_BUF_LEN 4096
+#define HOST_CMDS_BUF_LEN 1024
+#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
+#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8)
+#define MAX_SG_ITEM_LEN 0x80000
+#define HOST_TO_DEVICE 0
+#define DEVICE_TO_HOST 1
-/* MS_TRANS_CFG */
-#define WAIT_INT 0x80
-#define NO_WAIT_INT 0x00
-#define NO_AUTO_READ_INT_REG 0x00
-#define AUTO_READ_INT_REG 0x40
-#define MS_CRC16_ERR 0x20
-#define MS_RDY_TIMEOUT 0x10
-#define MS_INT_CMDNK 0x08
-#define MS_INT_BREQ 0x04
-#define MS_INT_ERR 0x02
-#define MS_INT_CED 0x01
-
-/* MS_TRANSFER */
-#define MS_TRANSFER_START 0x80
-#define MS_TRANSFER_END 0x40
-#define MS_TRANSFER_ERR 0x20
-#define MS_BS_STATE 0x10
-#define MS_TM_READ_BYTES 0x00
-#define MS_TM_NORMAL_READ 0x01
-#define MS_TM_WRITE_BYTES 0x04
-#define MS_TM_NORMAL_WRITE 0x05
-#define MS_TM_AUTO_READ 0x08
-#define MS_TM_AUTO_WRITE 0x0C
-
-/* SD Configure 2 Register */
-#define SD_CALCULATE_CRC7 0x00
-#define SD_NO_CALCULATE_CRC7 0x80
-#define SD_CHECK_CRC16 0x00
-#define SD_NO_CHECK_CRC16 0x40
-#define SD_NO_CHECK_WAIT_CRC_TO 0x20
-#define SD_WAIT_BUSY_END 0x08
-#define SD_NO_WAIT_BUSY_END 0x00
-#define SD_CHECK_CRC7 0x00
-#define SD_NO_CHECK_CRC7 0x04
-#define SD_RSP_LEN_0 0x00
-#define SD_RSP_LEN_6 0x01
-#define SD_RSP_LEN_17 0x02
-/* SD/MMC Response Type Definition */
-#define SD_RSP_TYPE_R0 0x04
-#define SD_RSP_TYPE_R1 0x01
-#define SD_RSP_TYPE_R1b 0x09
-#define SD_RSP_TYPE_R2 0x02
-#define SD_RSP_TYPE_R3 0x05
-#define SD_RSP_TYPE_R4 0x05
-#define SD_RSP_TYPE_R5 0x01
-#define SD_RSP_TYPE_R6 0x01
-#define SD_RSP_TYPE_R7 0x01
-
-/* SD_CONFIGURE3 */
-#define SD_RSP_80CLK_TIMEOUT_EN 0x01
-
-/* Card Transfer Reset Register */
-#define SPI_STOP 0x01
-#define XD_STOP 0x02
-#define SD_STOP 0x04
-#define MS_STOP 0x08
-#define SPI_CLR_ERR 0x10
-#define XD_CLR_ERR 0x20
-#define SD_CLR_ERR 0x40
-#define MS_CLR_ERR 0x80
-
-/* Card Data Source Register */
-#define PINGPONG_BUFFER 0x01
-#define RING_BUFFER 0x00
-
-/* Card Power Control Register */
-#define PMOS_STRG_MASK 0x10
-#define PMOS_STRG_800mA 0x10
-#define PMOS_STRG_400mA 0x00
-#define SD_POWER_OFF 0x03
-#define SD_PARTIAL_POWER_ON 0x01
-#define SD_POWER_ON 0x00
-#define SD_POWER_MASK 0x03
-#define MS_POWER_OFF 0x0C
-#define MS_PARTIAL_POWER_ON 0x04
-#define MS_POWER_ON 0x00
-#define MS_POWER_MASK 0x0C
-#define BPP_POWER_OFF 0x0F
-#define BPP_POWER_5_PERCENT_ON 0x0E
-#define BPP_POWER_10_PERCENT_ON 0x0C
-#define BPP_POWER_15_PERCENT_ON 0x08
-#define BPP_POWER_ON 0x00
-#define BPP_POWER_MASK 0x0F
-#define SD_VCC_PARTIAL_POWER_ON 0x02
-#define SD_VCC_POWER_ON 0x00
-
-/* PWR_GATE_CTRL */
-#define PWR_GATE_EN 0x01
-#define LDO3318_PWR_MASK 0x06
-#define LDO_ON 0x00
-#define LDO_SUSPEND 0x04
-#define LDO_OFF 0x06
-
-/* CARD_CLK_SOURCE */
-#define CRC_FIX_CLK (0x00 << 0)
-#define CRC_VAR_CLK0 (0x01 << 0)
-#define CRC_VAR_CLK1 (0x02 << 0)
-#define SD30_FIX_CLK (0x00 << 2)
-#define SD30_VAR_CLK0 (0x01 << 2)
-#define SD30_VAR_CLK1 (0x02 << 2)
-#define SAMPLE_FIX_CLK (0x00 << 4)
-#define SAMPLE_VAR_CLK0 (0x01 << 4)
-#define SAMPLE_VAR_CLK1 (0x02 << 4)
-
-/* HOST_SLEEP_STATE */
-#define HOST_ENTER_S1 1
-#define HOST_ENTER_S3 2
+#define OUTPUT_3V3 0
+#define OUTPUT_1V8 1
+
+#define RTSX_PHASE_MAX 32
+#define RX_TUNING_CNT 3
#define MS_CFG 0xFD40
+#define SAMPLE_TIME_RISING 0x00
+#define SAMPLE_TIME_FALLING 0x80
+#define PUSH_TIME_DEFAULT 0x00
+#define PUSH_TIME_ODD 0x40
+#define NO_EXTEND_TOGGLE 0x00
+#define EXTEND_TOGGLE_CHK 0x20
+#define MS_BUS_WIDTH_1 0x00
+#define MS_BUS_WIDTH_4 0x10
+#define MS_BUS_WIDTH_8 0x18
+#define MS_2K_SECTOR_MODE 0x04
+#define MS_512_SECTOR_MODE 0x00
+#define MS_TOGGLE_TIMEOUT_EN 0x00
+#define MS_TOGGLE_TIMEOUT_DISEN 0x01
+#define MS_NO_CHECK_INT 0x02
#define MS_TPC 0xFD41
#define MS_TRANS_CFG 0xFD42
+#define WAIT_INT 0x80
+#define NO_WAIT_INT 0x00
+#define NO_AUTO_READ_INT_REG 0x00
+#define AUTO_READ_INT_REG 0x40
+#define MS_CRC16_ERR 0x20
+#define MS_RDY_TIMEOUT 0x10
+#define MS_INT_CMDNK 0x08
+#define MS_INT_BREQ 0x04
+#define MS_INT_ERR 0x02
+#define MS_INT_CED 0x01
#define MS_TRANSFER 0xFD43
+#define MS_TRANSFER_START 0x80
+#define MS_TRANSFER_END 0x40
+#define MS_TRANSFER_ERR 0x20
+#define MS_BS_STATE 0x10
+#define MS_TM_READ_BYTES 0x00
+#define MS_TM_NORMAL_READ 0x01
+#define MS_TM_WRITE_BYTES 0x04
+#define MS_TM_NORMAL_WRITE 0x05
+#define MS_TM_AUTO_READ 0x08
+#define MS_TM_AUTO_WRITE 0x0C
#define MS_INT_REG 0xFD44
#define MS_BYTE_CNT 0xFD45
#define MS_SECTOR_CNT_L 0xFD46
@@ -549,14 +192,90 @@
#define MS_DBUS_H 0xFD48
#define SD_CFG1 0xFDA0
+#define SD_CLK_DIVIDE_0 0x00
+#define SD_CLK_DIVIDE_256 0xC0
+#define SD_CLK_DIVIDE_128 0x80
+#define SD_BUS_WIDTH_1BIT 0x00
+#define SD_BUS_WIDTH_4BIT 0x01
+#define SD_BUS_WIDTH_8BIT 0x02
+#define SD_ASYNC_FIFO_NOT_RST 0x10
+#define SD_20_MODE 0x00
+#define SD_DDR_MODE 0x04
+#define SD_30_MODE 0x08
+#define SD_CLK_DIVIDE_MASK 0xC0
#define SD_CFG2 0xFDA1
+#define SD_CALCULATE_CRC7 0x00
+#define SD_NO_CALCULATE_CRC7 0x80
+#define SD_CHECK_CRC16 0x00
+#define SD_NO_CHECK_CRC16 0x40
+#define SD_NO_CHECK_WAIT_CRC_TO 0x20
+#define SD_WAIT_BUSY_END 0x08
+#define SD_NO_WAIT_BUSY_END 0x00
+#define SD_CHECK_CRC7 0x00
+#define SD_NO_CHECK_CRC7 0x04
+#define SD_RSP_LEN_0 0x00
+#define SD_RSP_LEN_6 0x01
+#define SD_RSP_LEN_17 0x02
+#define SD_RSP_TYPE_R0 0x04
+#define SD_RSP_TYPE_R1 0x01
+#define SD_RSP_TYPE_R1b 0x09
+#define SD_RSP_TYPE_R2 0x02
+#define SD_RSP_TYPE_R3 0x05
+#define SD_RSP_TYPE_R4 0x05
+#define SD_RSP_TYPE_R5 0x01
+#define SD_RSP_TYPE_R6 0x01
+#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
+#define SD_RSP_80CLK_TIMEOUT_EN 0x01
+
#define SD_STAT1 0xFDA3
+#define SD_CRC7_ERR 0x80
+#define SD_CRC16_ERR 0x40
+#define SD_CRC_WRITE_ERR 0x20
+#define SD_CRC_WRITE_ERR_MASK 0x1C
+#define GET_CRC_TIME_OUT 0x02
+#define SD_TUNING_COMPARE_ERR 0x01
#define SD_STAT2 0xFDA4
+#define SD_RSP_80CLK_TIMEOUT 0x01
+
#define SD_BUS_STAT 0xFDA5
+#define SD_CLK_TOGGLE_EN 0x80
+#define SD_CLK_FORCE_STOP 0x40
+#define SD_DAT3_STATUS 0x10
+#define SD_DAT2_STATUS 0x08
+#define SD_DAT1_STATUS 0x04
+#define SD_DAT0_STATUS 0x02
+#define SD_CMD_STATUS 0x01
#define SD_PAD_CTL 0xFDA6
+#define SD_IO_USING_1V8 0x80
+#define SD_IO_USING_3V3 0x7F
+#define TYPE_A_DRIVING 0x00
+#define TYPE_B_DRIVING 0x01
+#define TYPE_C_DRIVING 0x02
+#define TYPE_D_DRIVING 0x03
#define SD_SAMPLE_POINT_CTL 0xFDA7
+#define DDR_FIX_RX_DAT 0x00
+#define DDR_VAR_RX_DAT 0x80
+#define DDR_FIX_RX_DAT_EDGE 0x00
+#define DDR_FIX_RX_DAT_14_DELAY 0x40
+#define DDR_FIX_RX_CMD 0x00
+#define DDR_VAR_RX_CMD 0x20
+#define DDR_FIX_RX_CMD_POS_EDGE 0x00
+#define DDR_FIX_RX_CMD_14_DELAY 0x10
+#define SD20_RX_POS_EDGE 0x00
+#define SD20_RX_14_DELAY 0x08
+#define SD20_RX_SEL_MASK 0x08
#define SD_PUSH_POINT_CTL 0xFDA8
+#define DDR_FIX_TX_CMD_DAT 0x00
+#define DDR_VAR_TX_CMD_DAT 0x80
+#define DDR_FIX_TX_DAT_14_TSU 0x00
+#define DDR_FIX_TX_DAT_12_TSU 0x40
+#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
+#define DDR_FIX_TX_CMD_14_AHEAD 0x20
+#define SD20_TX_NEG_EDGE 0x00
+#define SD20_TX_14_AHEAD 0x10
+#define SD20_TX_SEL_MASK 0x10
+#define DDR_VAR_SDCLK_POL_SWAP 0x01
#define SD_CMD0 0xFDA9
#define SD_CMD_START 0x40
#define SD_CMD1 0xFDAA
@@ -569,60 +288,203 @@
#define SD_BLOCK_CNT_L 0xFDB1
#define SD_BLOCK_CNT_H 0xFDB2
#define SD_TRANSFER 0xFDB3
+#define SD_TRANSFER_START 0x80
+#define SD_TRANSFER_END 0x40
+#define SD_STAT_IDLE 0x20
+#define SD_TRANSFER_ERR 0x10
+#define SD_TM_NORMAL_WRITE 0x00
+#define SD_TM_AUTO_WRITE_3 0x01
+#define SD_TM_AUTO_WRITE_4 0x02
+#define SD_TM_AUTO_READ_3 0x05
+#define SD_TM_AUTO_READ_4 0x06
+#define SD_TM_CMD_RSP 0x08
+#define SD_TM_AUTO_WRITE_1 0x09
+#define SD_TM_AUTO_WRITE_2 0x0A
+#define SD_TM_NORMAL_READ 0x0C
+#define SD_TM_AUTO_READ_1 0x0D
+#define SD_TM_AUTO_READ_2 0x0E
+#define SD_TM_AUTO_TUNING 0x0F
#define SD_CMD_STATE 0xFDB5
+#define SD_CMD_IDLE 0x80
+
#define SD_DATA_STATE 0xFDB6
+#define SD_DATA_IDLE 0x80
#define SRCTL 0xFC13
-#define DCM_DRP_CTL 0xFC23
-#define DCM_DRP_TRIG 0xFC24
-#define DCM_DRP_CFG 0xFC25
-#define DCM_DRP_WR_DATA_L 0xFC26
-#define DCM_DRP_WR_DATA_H 0xFC27
-#define DCM_DRP_RD_DATA_L 0xFC28
-#define DCM_DRP_RD_DATA_H 0xFC29
+#define DCM_DRP_CTL 0xFC23
+#define DCM_RESET 0x08
+#define DCM_LOCKED 0x04
+#define DCM_208M 0x00
+#define DCM_TX 0x01
+#define DCM_RX 0x02
+#define DCM_DRP_TRIG 0xFC24
+#define DRP_START 0x80
+#define DRP_DONE 0x40
+#define DCM_DRP_CFG 0xFC25
+#define DRP_WRITE 0x80
+#define DRP_READ 0x00
+#define DCM_WRITE_ADDRESS_50 0x50
+#define DCM_WRITE_ADDRESS_51 0x51
+#define DCM_READ_ADDRESS_00 0x00
+#define DCM_READ_ADDRESS_51 0x51
+#define DCM_DRP_WR_DATA_L 0xFC26
+#define DCM_DRP_WR_DATA_H 0xFC27
+#define DCM_DRP_RD_DATA_L 0xFC28
+#define DCM_DRP_RD_DATA_H 0xFC29
#define SD_VPCLK0_CTL 0xFC2A
#define SD_VPCLK1_CTL 0xFC2B
#define SD_DCMPS0_CTL 0xFC2C
#define SD_DCMPS1_CTL 0xFC2D
#define SD_VPTX_CTL SD_VPCLK0_CTL
#define SD_VPRX_CTL SD_VPCLK1_CTL
+#define PHASE_CHANGE 0x80
+#define PHASE_NOT_RESET 0x40
#define SD_DCMPS_TX_CTL SD_DCMPS0_CTL
#define SD_DCMPS_RX_CTL SD_DCMPS1_CTL
+#define DCMPS_CHANGE 0x80
+#define DCMPS_CHANGE_DONE 0x40
+#define DCMPS_ERROR 0x20
+#define DCMPS_CURRENT_PHASE 0x1F
#define CARD_CLK_SOURCE 0xFC2E
-
+#define CRC_FIX_CLK (0x00 << 0)
+#define CRC_VAR_CLK0 (0x01 << 0)
+#define CRC_VAR_CLK1 (0x02 << 0)
+#define SD30_FIX_CLK (0x00 << 2)
+#define SD30_VAR_CLK0 (0x01 << 2)
+#define SD30_VAR_CLK1 (0x02 << 2)
+#define SAMPLE_FIX_CLK (0x00 << 4)
+#define SAMPLE_VAR_CLK0 (0x01 << 4)
+#define SAMPLE_VAR_CLK1 (0x02 << 4)
#define CARD_PWR_CTL 0xFD50
+#define PMOS_STRG_MASK 0x10
+#define PMOS_STRG_800mA 0x10
+#define PMOS_STRG_400mA 0x00
+#define SD_POWER_OFF 0x03
+#define SD_PARTIAL_POWER_ON 0x01
+#define SD_POWER_ON 0x00
+#define SD_POWER_MASK 0x03
+#define MS_POWER_OFF 0x0C
+#define MS_PARTIAL_POWER_ON 0x04
+#define MS_POWER_ON 0x00
+#define MS_POWER_MASK 0x0C
+#define BPP_POWER_OFF 0x0F
+#define BPP_POWER_5_PERCENT_ON 0x0E
+#define BPP_POWER_10_PERCENT_ON 0x0C
+#define BPP_POWER_15_PERCENT_ON 0x08
+#define BPP_POWER_ON 0x00
+#define BPP_POWER_MASK 0x0F
+#define SD_VCC_PARTIAL_POWER_ON 0x02
+#define SD_VCC_POWER_ON 0x00
#define CARD_CLK_SWITCH 0xFD51
#define RTL8411B_PACKAGE_MODE 0xFD51
#define CARD_SHARE_MODE 0xFD52
+#define CARD_SHARE_MASK 0x0F
+#define CARD_SHARE_MULTI_LUN 0x00
+#define CARD_SHARE_NORMAL 0x00
+#define CARD_SHARE_48_SD 0x04
+#define CARD_SHARE_48_MS 0x08
+#define CARD_SHARE_BAROSSA_SD 0x01
+#define CARD_SHARE_BAROSSA_MS 0x02
#define CARD_DRIVE_SEL 0xFD53
+#define MS_DRIVE_8mA (0x01 << 6)
+#define MMC_DRIVE_8mA (0x01 << 4)
+#define XD_DRIVE_8mA (0x01 << 2)
+#define GPIO_DRIVE_8mA 0x01
+#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA)
+#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+
#define CARD_STOP 0xFD54
+#define SPI_STOP 0x01
+#define XD_STOP 0x02
+#define SD_STOP 0x04
+#define MS_STOP 0x08
+#define SPI_CLR_ERR 0x10
+#define XD_CLR_ERR 0x20
+#define SD_CLR_ERR 0x40
+#define MS_CLR_ERR 0x80
#define CARD_OE 0xFD55
+#define SD_OUTPUT_EN 0x04
+#define MS_OUTPUT_EN 0x08
#define CARD_AUTO_BLINK 0xFD56
#define CARD_GPIO_DIR 0xFD57
#define CARD_GPIO 0xFD58
#define CARD_DATA_SOURCE 0xFD5B
+#define PINGPONG_BUFFER 0x01
+#define RING_BUFFER 0x00
#define SD30_CLK_DRIVE_SEL 0xFD5A
+#define DRIVER_TYPE_A 0x05
+#define DRIVER_TYPE_B 0x03
+#define DRIVER_TYPE_C 0x02
+#define DRIVER_TYPE_D 0x01
#define CARD_SELECT 0xFD5C
+#define SD_MOD_SEL 2
+#define MS_MOD_SEL 3
#define SD30_DRIVE_SEL 0xFD5E
+#define CFG_DRIVER_TYPE_A 0x02
+#define CFG_DRIVER_TYPE_B 0x03
+#define CFG_DRIVER_TYPE_C 0x01
+#define CFG_DRIVER_TYPE_D 0x00
#define SD30_CMD_DRIVE_SEL 0xFD5E
#define SD30_DAT_DRIVE_SEL 0xFD5F
#define CARD_CLK_EN 0xFD69
+#define SD_CLK_EN 0x04
+#define MS_CLK_EN 0x08
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
-
+#define CD_DISABLE_MASK 0x07
+#define MS_CD_DISABLE 0x04
+#define SD_CD_DISABLE 0x02
+#define XD_CD_DISABLE 0x01
+#define CD_DISABLE 0x07
+#define CD_ENABLE 0x00
+#define MS_CD_EN_ONLY 0x03
+#define SD_CD_EN_ONLY 0x05
+#define XD_CD_EN_ONLY 0x06
+#define FORCE_CD_LOW_MASK 0x38
+#define FORCE_CD_XD_LOW 0x08
+#define FORCE_CD_SD_LOW 0x10
+#define FORCE_CD_MS_LOW 0x20
+#define CD_AUTO_DISABLE 0x40
#define FPDCTL 0xFC00
+#define SSC_POWER_DOWN 0x01
+#define SD_OC_POWER_DOWN 0x02
+#define ALL_POWER_DOWN 0x07
+#define OC_POWER_DOWN 0x06
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
+#define CHANGE_CLK 0x01
+#define CLK_LOW_FREQ 0x01
+
#define CLK_DIV 0xFC03
+#define CLK_DIV_1 0x01
+#define CLK_DIV_2 0x02
+#define CLK_DIV_4 0x03
+#define CLK_DIV_8 0x04
#define CLK_SEL 0xFC04
#define SSC_DIV_N_0 0xFC0F
#define SSC_DIV_N_1 0xFC10
#define SSC_CTL1 0xFC11
+#define SSC_RSTB 0x80
+#define SSC_8X_EN 0x40
+#define SSC_FIX_FRAC 0x20
+#define SSC_SEL_1M 0x00
+#define SSC_SEL_2M 0x08
+#define SSC_SEL_4M 0x10
+#define SSC_SEL_8M 0x18
#define SSC_CTL2 0xFC12
-
+#define SSC_DEPTH_MASK 0x07
+#define SSC_DEPTH_DISALBE 0x00
+#define SSC_DEPTH_4M 0x01
+#define SSC_DEPTH_2M 0x02
+#define SSC_DEPTH_1M 0x03
+#define SSC_DEPTH_500K 0x04
+#define SSC_DEPTH_250K 0x05
#define RCCTL 0xFC14
#define FPGA_PULL_CTL 0xFC1D
@@ -630,6 +492,24 @@
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
+#define BPP_ASIC_1V7 0x00
+#define BPP_ASIC_1V8 0x01
+#define BPP_ASIC_1V9 0x02
+#define BPP_ASIC_2V0 0x03
+#define BPP_ASIC_2V7 0x04
+#define BPP_ASIC_2V8 0x05
+#define BPP_ASIC_3V2 0x06
+#define BPP_ASIC_3V3 0x07
+#define BPP_REG_TUNED18 0x07
+#define BPP_TUNED18_SHIFT_8402 5
+#define BPP_TUNED18_SHIFT_8411 4
+#define BPP_PAD_MASK 0x04
+#define BPP_PAD_3V3 0x04
+#define BPP_PAD_1V8 0x00
+#define BPP_LDO_POWB 0x03
+#define BPP_LDO_ON 0x00
+#define BPP_LDO_SUSPEND 0x02
+#define BPP_LDO_OFF 0x03
#define SYS_VER 0xFC32
#define CARD_PULL_CTL1 0xFD60
@@ -642,6 +522,10 @@
/* PCI Express Related Registers */
#define IRQEN0 0xFE20
#define IRQSTAT0 0xFE21
+#define DMA_DONE_INT 0x80
+#define SUSPEND_INT 0x40
+#define LINK_RDY_INT 0x20
+#define LINK_DOWN_INT 0x10
#define IRQEN1 0xFE22
#define IRQSTAT1 0xFE23
#define TLPRIEN 0xFE24
@@ -653,6 +537,16 @@
#define DMATC2 0xFE2A
#define DMATC3 0xFE2B
#define DMACTL 0xFE2C
+#define DMA_RST 0x80
+#define DMA_BUSY 0x04
+#define DMA_DIR_TO_CARD 0x00
+#define DMA_DIR_FROM_CARD 0x02
+#define DMA_EN 0x01
+#define DMA_128 (0 << 4)
+#define DMA_256 (1 << 4)
+#define DMA_512 (2 << 4)
+#define DMA_1024 (3 << 4)
+#define DMA_PACK_SIZE_MASK 0x30
#define BCTL 0xFE2D
#define RBBC0 0xFE2E
#define RBBC1 0xFE2F
@@ -678,14 +572,21 @@
#define MSGTXDATA2 0xFE46
#define MSGTXDATA3 0xFE47
#define MSGTXCTL 0xFE48
-#define PETXCFG 0xFE49
#define LTR_CTL 0xFE4A
#define OBFF_CFG 0xFE4C
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
+#define PCLK_CTL 0xFE55
+#define PCLK_MODE_SEL 0x20
#define PME_FORCE_CTL 0xFE56
+
#define ASPM_FORCE_CTL 0xFE57
+#define FORCE_ASPM_CTL0 0x10
+#define FORCE_ASPM_VAL_MASK 0x03
+#define FORCE_ASPM_L1_EN 0x02
+#define FORCE_ASPM_L0_EN 0x01
+#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
#define FUNC_FORCE_CTL 0xFE59
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -693,19 +594,36 @@
#define RESET_LOAD_REG 0xFE5E
#define EFUSE_CONTENT 0xFE5F
#define HOST_SLEEP_STATE 0xFE60
-#define SDIO_CFG 0xFE70
+#define HOST_ENTER_S1 1
+#define HOST_ENTER_S3 2
+#define SDIO_CFG 0xFE70
+#define PM_EVENT_DEBUG 0xFE71
+#define PME_DEBUG_0 0x08
#define NFTS_TX_CTRL 0xFE72
#define PWR_GATE_CTRL 0xFE75
+#define PWR_GATE_EN 0x01
+#define LDO3318_PWR_MASK 0x06
+#define LDO_ON 0x00
+#define LDO_SUSPEND 0x04
+#define LDO_OFF 0x06
#define PWD_SUSPEND_EN 0xFE76
#define LDO_PWR_SEL 0xFE78
+#define L1SUB_CONFIG1 0xFE8D
+#define L1SUB_CONFIG2 0xFE8E
+#define L1SUB_AUTO_CFG 0x02
+#define L1SUB_CONFIG3 0xFE8F
+
#define DUMMY_REG_RESET_0 0xFE90
#define AUTOLOAD_CFG_BASE 0xFF00
+#define PETXCFG 0xFF03
#define PM_CTRL1 0xFF44
+#define CD_RESUME_EN_MASK 0xF0
+
#define PM_CTRL2 0xFF45
#define PM_CTRL3 0xFF46
#define SDIO_SEND_PME_EN 0x80
@@ -726,18 +644,125 @@
#define IMAGE_FLAG_ADDR0 0xCE80
#define IMAGE_FLAG_ADDR1 0xCE81
+#define RREF_CFG 0xFF6C
+#define RREF_VBGSEL_MASK 0x38
+#define RREF_VBGSEL_1V25 0x28
+
+#define OOBS_CONFIG 0xFF6E
+#define OOBS_AUTOK_DIS 0x80
+#define OOBS_VAL_MASK 0x1F
+
+#define LDO_DV18_CFG 0xFF70
+#define LDO_DV18_SR_MASK 0xC0
+#define LDO_DV18_SR_DF 0x40
+
+#define LDO_CONFIG2 0xFF71
+#define LDO_D3318_MASK 0x07
+#define LDO_D3318_33V 0x07
+#define LDO_D3318_18V 0x02
+
+#define LDO_VCC_CFG0 0xFF72
+#define LDO_VCC_LMTVTH_MASK 0x30
+#define LDO_VCC_LMTVTH_2A 0x10
+
+#define LDO_VCC_CFG1 0xFF73
+#define LDO_VCC_REF_TUNE_MASK 0x30
+#define LDO_VCC_REF_1V2 0x20
+#define LDO_VCC_TUNE_MASK 0x07
+#define LDO_VCC_1V8 0x04
+#define LDO_VCC_3V3 0x07
+#define LDO_VCC_LMT_EN 0x08
+
+#define LDO_VIO_CFG 0xFF75
+#define LDO_VIO_SR_MASK 0xC0
+#define LDO_VIO_SR_DF 0x40
+#define LDO_VIO_REF_TUNE_MASK 0x30
+#define LDO_VIO_REF_1V2 0x20
+#define LDO_VIO_TUNE_MASK 0x07
+#define LDO_VIO_1V7 0x03
+#define LDO_VIO_1V8 0x04
+#define LDO_VIO_3V3 0x07
+
+#define LDO_DV12S_CFG 0xFF76
+#define LDO_REF12_TUNE_MASK 0x18
+#define LDO_REF12_TUNE_DF 0x10
+#define LDO_D12_TUNE_MASK 0x07
+#define LDO_D12_TUNE_DF 0x04
+
+#define LDO_AV12S_CFG 0xFF77
+#define LDO_AV12S_TUNE_MASK 0x07
+#define LDO_AV12S_TUNE_DF 0x04
+
+#define SD40_LDO_CTL1 0xFE7D
+#define SD40_VIO_TUNE_MASK 0x70
+#define SD40_VIO_TUNE_1V7 0x30
+#define SD_VIO_LDO_1V8 0x40
+#define SD_VIO_LDO_3V3 0x70
+
/* Phy register */
#define PHY_PCR 0x00
+#define PHY_PCR_FORCE_CODE 0xB000
+#define PHY_PCR_OOBS_CALI_50 0x0800
+#define PHY_PCR_OOBS_VCM_08 0x0200
+#define PHY_PCR_OOBS_SEN_90 0x0040
+#define PHY_PCR_RSSI_EN 0x0002
+#define PHY_PCR_RX10K 0x0001
+
#define PHY_RCR0 0x01
#define PHY_RCR1 0x02
+#define PHY_RCR1_ADP_TIME_4 0x0400
+#define PHY_RCR1_VCO_COARSE 0x001F
+#define PHY_SSCCR2 0x02
+#define PHY_SSCCR2_PLL_NCODE 0x0A00
+#define PHY_SSCCR2_TIME0 0x001C
+#define PHY_SSCCR2_TIME2_WIDTH 0x0003
+
#define PHY_RCR2 0x03
+#define PHY_RCR2_EMPHASE_EN 0x8000
+#define PHY_RCR2_NADJR 0x4000
+#define PHY_RCR2_CDR_SR_2 0x0100
+#define PHY_RCR2_FREQSEL_12 0x0040
+#define PHY_RCR2_CDR_SC_12P 0x0010
+#define PHY_RCR2_CALIB_LATE 0x0002
+#define PHY_SSCCR3 0x03
+#define PHY_SSCCR3_STEP_IN 0x2740
+#define PHY_SSCCR3_CHECK_DELAY 0x0008
+#define _PHY_ANA03 0x03
+#define _PHY_ANA03_TIMER_MAX 0x2700
+#define _PHY_ANA03_OOBS_DEB_EN 0x0040
+#define _PHY_CMU_DEBUG_EN 0x0008
+
#define PHY_RTCR 0x04
#define PHY_RDR 0x05
+#define PHY_RDR_RXDSEL_1_9 0x4000
+#define PHY_SSC_AUTO_PWD 0x0600
#define PHY_TCR0 0x06
#define PHY_TCR1 0x07
#define PHY_TUNE 0x08
+#define PHY_TUNE_TUNEREF_1_0 0x4000
+#define PHY_TUNE_VBGSEL_1252 0x0C00
+#define PHY_TUNE_SDBUS_33 0x0200
+#define PHY_TUNE_TUNED18 0x01C0
+#define PHY_TUNE_TUNED12 0x0020
+#define PHY_TUNE_TUNEA12 0x0004
+#define PHY_TUNE_VOLTAGE_MASK 0xFC3F
+#define PHY_TUNE_VOLTAGE_3V3 0x03C0
+#define PHY_TUNE_D18_1V8 0x0100
+#define PHY_TUNE_D18_1V7 0x0080
+#define PHY_ANA08 0x08
+#define PHY_ANA08_RX_EQ_DCGAIN 0x5000
+#define PHY_ANA08_SEL_RX_EN 0x0400
+#define PHY_ANA08_RX_EQ_VAL 0x03C0
+#define PHY_ANA08_SCP 0x0020
+#define PHY_ANA08_SEL_IPI 0x0004
+
#define PHY_IMR 0x09
#define PHY_BPCR 0x0A
+#define PHY_BPCR_IBRXSEL 0x0400
+#define PHY_BPCR_IBTXSEL 0x0100
+#define PHY_BPCR_IB_FILTER 0x0080
+#define PHY_BPCR_CMIRROR_EN 0x0040
+
#define PHY_BIST 0x0B
#define PHY_RAW_L 0x0C
#define PHY_RAW_H 0x0D
@@ -745,6 +770,7 @@
#define PHY_HOST_CLK_CTRL 0x0F
#define PHY_DMR 0x10
#define PHY_BACR 0x11
+#define PHY_BACR_BASIC_MASK 0xFFF3
#define PHY_IER 0x12
#define PHY_BCSR 0x13
#define PHY_BPR 0x14
@@ -752,80 +778,70 @@
#define PHY_BPNR 0x16
#define PHY_BRNR2 0x17
#define PHY_BENR 0x18
-#define PHY_REG_REV 0x19
+#define PHY_REV 0x19
+#define PHY_REV_RESV 0xE000
+#define PHY_REV_RXIDLE_LATCHED 0x1000
+#define PHY_REV_P1_EN 0x0800
+#define PHY_REV_RXIDLE_EN 0x0400
+#define PHY_REV_CLKREQ_TX_EN 0x0200
+#define PHY_REV_CLKREQ_RX_EN 0x0100
+#define PHY_REV_CLKREQ_DT_1_0 0x0040
+#define PHY_REV_STOP_CLKRD 0x0020
+#define PHY_REV_RX_PWST 0x0008
+#define PHY_REV_STOP_CLKWR 0x0004
+#define _PHY_REV0 0x19
+#define _PHY_REV0_FILTER_OUT 0x3800
+#define _PHY_REV0_CDR_BYPASS_PFD 0x0100
+#define _PHY_REV0_CDR_RX_IDLE_BYPASS 0x0002
+
#define PHY_FLD0 0x1A
+#define PHY_ANA1A 0x1A
+#define PHY_ANA1A_TXR_LOOPBACK 0x2000
+#define PHY_ANA1A_RXT_BIST 0x0500
+#define PHY_ANA1A_TXR_BIST 0x0040
+#define PHY_ANA1A_REV 0x0006
#define PHY_FLD1 0x1B
#define PHY_FLD2 0x1C
#define PHY_FLD3 0x1D
+#define PHY_FLD3_TIMER_4 0x0800
+#define PHY_FLD3_TIMER_6 0x0020
+#define PHY_FLD3_RXDELINK 0x0004
+#define PHY_ANA1D 0x1D
+#define PHY_ANA1D_DEBUG_ADDR 0x0004
+#define _PHY_FLD0 0x1D
+#define _PHY_FLD0_CLK_REQ_20C 0x8000
+#define _PHY_FLD0_RX_IDLE_EN 0x1000
+#define _PHY_FLD0_BIT_ERR_RSTN 0x0800
+#define _PHY_FLD0_BER_COUNT 0x01E0
+#define _PHY_FLD0_BER_TIMER 0x001E
+#define _PHY_FLD0_CHECK_EN 0x0001
+
#define PHY_FLD4 0x1E
+#define PHY_FLD4_FLDEN_SEL 0x4000
+#define PHY_FLD4_REQ_REF 0x2000
+#define PHY_FLD4_RXAMP_OFF 0x1000
+#define PHY_FLD4_REQ_ADDA 0x0800
+#define PHY_FLD4_BER_COUNT 0x00E0
+#define PHY_FLD4_BER_TIMER 0x000A
+#define PHY_FLD4_BER_CHK_EN 0x0001
+#define PHY_DIG1E 0x1E
+#define PHY_DIG1E_REV 0x4000
+#define PHY_DIG1E_D0_X_D1 0x1000
+#define PHY_DIG1E_RX_ON_HOST 0x0800
+#define PHY_DIG1E_RCLK_REF_HOST 0x0400
+#define PHY_DIG1E_RCLK_TX_EN_KEEP 0x0040
+#define PHY_DIG1E_RCLK_TX_TERM_KEEP 0x0020
+#define PHY_DIG1E_RCLK_RX_EIDLE_ON 0x0010
+#define PHY_DIG1E_TX_TERM_KEEP 0x0008
+#define PHY_DIG1E_RX_TERM_KEEP 0x0004
+#define PHY_DIG1E_TX_EN_KEEP 0x0002
+#define PHY_DIG1E_RX_EN_KEEP 0x0001
#define PHY_DUM_REG 0x1F
-#define LCTLR 0x80
-#define LCTLR_EXT_SYNC 0x80
-#define LCTLR_COMMON_CLOCK_CFG 0x40
-#define LCTLR_RETRAIN_LINK 0x20
-#define LCTLR_LINK_DISABLE 0x10
-#define LCTLR_RCB 0x08
-#define LCTLR_RESERVED 0x04
-#define LCTLR_ASPM_CTL_MASK 0x03
-
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
-/* Phy bits */
-#define PHY_PCR_FORCE_CODE 0xB000
-#define PHY_PCR_OOBS_CALI_50 0x0800
-#define PHY_PCR_OOBS_VCM_08 0x0200
-#define PHY_PCR_OOBS_SEN_90 0x0040
-#define PHY_PCR_RSSI_EN 0x0002
-
-#define PHY_RCR1_ADP_TIME 0x0100
-#define PHY_RCR1_VCO_COARSE 0x001F
-
-#define PHY_RCR2_EMPHASE_EN 0x8000
-#define PHY_RCR2_NADJR 0x4000
-#define PHY_RCR2_CDR_CP_10 0x0400
-#define PHY_RCR2_CDR_SR_2 0x0100
-#define PHY_RCR2_FREQSEL_12 0x0040
-#define PHY_RCR2_CPADJEN 0x0020
-#define PHY_RCR2_CDR_SC_8 0x0008
-#define PHY_RCR2_CALIB_LATE 0x0002
-
-#define PHY_RDR_RXDSEL_1_9 0x4000
-
-#define PHY_TUNE_TUNEREF_1_0 0x4000
-#define PHY_TUNE_VBGSEL_1252 0x0C00
-#define PHY_TUNE_SDBUS_33 0x0200
-#define PHY_TUNE_TUNED18 0x01C0
-#define PHY_TUNE_TUNED12 0X0020
-
-#define PHY_BPCR_IBRXSEL 0x0400
-#define PHY_BPCR_IBTXSEL 0x0100
-#define PHY_BPCR_IB_FILTER 0x0080
-#define PHY_BPCR_CMIRROR_EN 0x0040
-
-#define PHY_REG_REV_RESV 0xE000
-#define PHY_REG_REV_RXIDLE_LATCHED 0x1000
-#define PHY_REG_REV_P1_EN 0x0800
-#define PHY_REG_REV_RXIDLE_EN 0x0400
-#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 0x0040
-#define PHY_REG_REV_STOP_CLKRD 0x0020
-#define PHY_REG_REV_RX_PWST 0x0008
-#define PHY_REG_REV_STOP_CLKWR 0x0004
-
-#define PHY_FLD3_TIMER_4 0x7800
-#define PHY_FLD3_TIMER_6 0x00E0
-#define PHY_FLD3_RXDELINK 0x0004
-
-#define PHY_FLD4_FLDEN_SEL 0x4000
-#define PHY_FLD4_REQ_REF 0x2000
-#define PHY_FLD4_RXAMP_OFF 0x1000
-#define PHY_FLD4_REQ_ADDA 0x0800
-#define PHY_FLD4_BER_COUNT 0x00E0
-#define PHY_FLD4_BER_TIMER 0x000A
-#define PHY_FLD4_BER_CHK_EN 0x0001
-
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
struct rtsx_pcr;
@@ -835,6 +851,8 @@ struct pcr_handle {
};
struct pcr_ops {
+ int (*write_phy)(struct rtsx_pcr *pcr, u8 addr, u16 val);
+ int (*read_phy)(struct rtsx_pcr *pcr, u8 addr, u16 *val);
int (*extra_init_hw)(struct rtsx_pcr *pcr);
int (*optimize_phy)(struct rtsx_pcr *pcr);
int (*turn_on_led)(struct rtsx_pcr *pcr);
@@ -856,6 +874,7 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
struct rtsx_pcr {
struct pci_dev *pci;
unsigned int id;
+ int pcie_cap;
/* pci resources */
unsigned long addr;
@@ -928,6 +947,8 @@ struct rtsx_pcr {
const struct pcr_ops *ops;
enum PDEV_STAT state;
+ u16 reg_pm_ctrl3;
+
int num_slots;
struct rtsx_slot *slots;
};
@@ -935,6 +956,10 @@ struct rtsx_pcr {
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
#define PCI_PID(pcr) ((pcr)->pci->device)
+#define is_version(pcr, pid, ver) \
+ (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
+#define pcr_dbg(pcr, fmt, arg...) \
+ dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
#define SDR104_PHASE(val) ((val) & 0xFF)
#define SDR50_PHASE(val) (((val) >> 8) & 0xFF)
@@ -1004,4 +1029,17 @@ static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
}
+static inline int rtsx_pci_update_phy(struct rtsx_pcr *pcr, u8 addr,
+ u16 mask, u16 append)
+{
+ int err;
+ u16 val;
+
+ err = rtsx_pci_read_phy_register(pcr, addr, &val);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_write_phy_register(pcr, addr, (val & mask) | append);
+}
+
#endif
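A minimal usage sketch of the new rtsx_pci_update_phy() read-modify-write helper introduced above; the wrapper function is hypothetical and not part of the patch, and it assumes rtsx_pci.h is included. The helper reads the 16-bit PHY register, keeps only the bits covered by the mask, ORs in the appended value and writes the result back.

/* Illustrative only: set the PHY TUNE voltage field to its 3.3 V code using
 * the PHY_TUNE bit definitions added by this patch.
 */
static int example_set_phy_tune_3v3(struct rtsx_pcr *pcr)
{
	/* PHY_TUNE_VOLTAGE_MASK keeps every bit except the voltage field,
	 * which is then replaced by PHY_TUNE_VOLTAGE_3V3 in a single
	 * read-modify-write of the PHY_TUNE register.
	 */
	return rtsx_pci_update_phy(pcr, PHY_TUNE,
				   PHY_TUNE_VOLTAGE_MASK, PHY_TUNE_VOLTAGE_3V3);
}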
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 3fdb7cfb..7511538 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -58,13 +58,7 @@ enum sec_device_type {
* @irq_base: Base IRQ number for device, required for IRQs
* @irq: Generic IRQ number for device
* @irq_data: Runtime data structure for IRQ controller
- * @ono: Power onoff IRQ number for s5m87xx
* @wakeup: Whether or not this is a wakeup device
- * @wtsr_smpl: Whether or not to enable in RTC driver the Watchdog
- * Timer Software Reset (registers set to default value
- * after PWRHOLD falling) and Sudden Momentary Power Loss
- * (PMIC will enter power on sequence after short drop in
- * VBATT voltage).
*/
struct sec_pmic_dev {
struct device *dev;
@@ -77,9 +71,7 @@ struct sec_pmic_dev {
int irq;
struct regmap_irq_chip_data *irq_data;
- int ono;
bool wakeup;
- bool wtsr_smpl;
};
int sec_irq_init(struct sec_pmic_dev *sec_pmic);
@@ -95,7 +87,6 @@ struct sec_platform_data {
int irq_base;
int (*cfg_pmic_irq)(void);
- int ono;
bool wakeup;
bool buck_voltage_lock;
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index f35af73..667aa40 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -74,8 +74,8 @@ enum s2mps11_irq {
S2MPS11_IRQ_MRB,
S2MPS11_IRQ_RTC60S,
- S2MPS11_IRQ_RTCA0,
S2MPS11_IRQ_RTCA1,
+ S2MPS11_IRQ_RTCA0,
S2MPS11_IRQ_SMPL,
S2MPS11_IRQ_RTC1S,
S2MPS11_IRQ_WTSR,
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index b6401e7..29c30ac 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -105,6 +105,8 @@ enum s2mps_rtc_reg {
#define S5M_RTC_UDR_MASK (1 << S5M_RTC_UDR_SHIFT)
#define S2MPS_RTC_WUDR_SHIFT 4
#define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT)
+#define S2MPS13_RTC_AUDR_SHIFT 1
+#define S2MPS13_RTC_AUDR_MASK (1 << S2MPS13_RTC_AUDR_SHIFT)
#define S2MPS_RTC_RUDR_SHIFT 0
#define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT)
#define RTC_TCON_SHIFT 1
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
new file mode 100644
index 0000000..b0925fa
--- /dev/null
+++ b/include/linux/mfd/sky81452.h
@@ -0,0 +1,31 @@
+/*
+ * sky81452.h SKY81452 MFD driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SKY81452_H
+#define _SKY81452_H
+
+#include <linux/platform_data/sky81452-backlight.h>
+#include <linux/regulator/machine.h>
+
+struct sky81452_platform_data {
+ struct sky81452_bl_platform_data *bl_pdata;
+ struct regulator_init_data *regulator_init_data;
+};
+
+#endif
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index f742b67..cb83883 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -118,61 +118,6 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
/**
- * struct stmpe_gpio_platform_data - STMPE GPIO platform data
- * @norequest_mask: bitmask specifying which GPIOs should _not_ be
- * requestable due to different usage (e.g. touch, keypad)
- * STMPE_GPIO_NOREQ_* macros can be used here.
- * @setup: board specific setup callback.
- * @remove: board specific remove callback
- */
-struct stmpe_gpio_platform_data {
- unsigned norequest_mask;
- void (*setup)(struct stmpe *stmpe, unsigned gpio_base);
- void (*remove)(struct stmpe *stmpe, unsigned gpio_base);
-};
-
-/**
- * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform
- * data
- * @sample_time: ADC converstion time in number of clock.
- * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks,
- * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks),
- * recommended is 4.
- * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC)
- * @ref_sel: ADC reference source
- * (0 -> internal reference, 1 -> external reference)
- * @adc_freq: ADC Clock speed
- * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz)
- * @ave_ctrl: Sample average control
- * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
- * @touch_det_delay: Touch detect interrupt delay
- * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us,
- * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms)
- * recommended is 3
- * @settling: Panel driver settling time
- * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms,
- * 4 -> 5 ms, 5 -> 10 ms, 6 for 50 ms, 7 -> 100 ms)
- * recommended is 2
- * @fraction_z: Length of the fractional part in z
- * (fraction_z ([0..7]) = Count of the fractional part)
- * recommended is 7
- * @i_drive: current limit value of the touchscreen drivers
- * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max)
- *
- * */
-struct stmpe_ts_platform_data {
- u8 sample_time;
- u8 mod_12b;
- u8 ref_sel;
- u8 adc_freq;
- u8 ave_ctrl;
- u8 touch_det_delay;
- u8 settling;
- u8 fraction_z;
- u8 i_drive;
-};
-
-/**
* struct stmpe_platform_data - STMPE platform data
* @id: device id to distinguish between multiple STMPEs on the same board
* @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
@@ -182,8 +127,6 @@ struct stmpe_ts_platform_data {
* @irq_over_gpio: true if gpio is used to get irq
* @irq_gpio: gpio number over which irq will be requested (significant only if
* irq_over_gpio is true)
- * @gpio: GPIO-specific platform data
- * @ts: touchscreen-specific platform data
*/
struct stmpe_platform_data {
int id;
@@ -193,9 +136,6 @@ struct stmpe_platform_data {
bool irq_over_gpio;
int irq_gpio;
int autosleep_timeout;
-
- struct stmpe_gpio_platform_data *gpio;
- struct stmpe_ts_platform_data *ts;
};
#endif
diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h
index eda1215..833074b 100644
--- a/include/linux/mfd/stw481x.h
+++ b/include/linux/mfd/stw481x.h
@@ -41,15 +41,11 @@
/**
* struct stw481x - state holder for the Stw481x drivers
- * @mutex: mutex to serialize I2C accesses
* @i2c_client: corresponding I2C client
- * @regulator: regulator device for regulator children
* @map: regmap handle to access device registers
*/
struct stw481x {
- struct mutex lock;
struct i2c_client *client;
- struct regulator_dev *vmmc_regulator;
struct regmap *map;
};
diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h
new file mode 100644
index 0000000..8293c3e
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-matrix.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2014 Atmel Corporation.
+ *
+ * Memory Controllers (MATRIX, EBI) - System peripherals registers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
+#define _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
+
+#define AT91SAM9260_MATRIX_MCFG 0x00
+#define AT91SAM9260_MATRIX_SCFG 0x40
+#define AT91SAM9260_MATRIX_PRS 0x80
+#define AT91SAM9260_MATRIX_MRCR 0x100
+#define AT91SAM9260_MATRIX_EBICSA 0x11c
+
+#define AT91SAM9261_MATRIX_MRCR 0x0
+#define AT91SAM9261_MATRIX_SCFG 0x4
+#define AT91SAM9261_MATRIX_TCR 0x24
+#define AT91SAM9261_MATRIX_EBICSA 0x30
+#define AT91SAM9261_MATRIX_USBPUCR 0x34
+
+#define AT91SAM9263_MATRIX_MCFG 0x00
+#define AT91SAM9263_MATRIX_SCFG 0x40
+#define AT91SAM9263_MATRIX_PRS 0x80
+#define AT91SAM9263_MATRIX_MRCR 0x100
+#define AT91SAM9263_MATRIX_TCR 0x114
+#define AT91SAM9263_MATRIX_EBI0CSA 0x120
+#define AT91SAM9263_MATRIX_EBI1CSA 0x124
+
+#define AT91SAM9RL_MATRIX_MCFG 0x00
+#define AT91SAM9RL_MATRIX_SCFG 0x40
+#define AT91SAM9RL_MATRIX_PRS 0x80
+#define AT91SAM9RL_MATRIX_MRCR 0x100
+#define AT91SAM9RL_MATRIX_TCR 0x114
+#define AT91SAM9RL_MATRIX_EBICSA 0x120
+
+#define AT91SAM9G45_MATRIX_MCFG 0x00
+#define AT91SAM9G45_MATRIX_SCFG 0x40
+#define AT91SAM9G45_MATRIX_PRS 0x80
+#define AT91SAM9G45_MATRIX_MRCR 0x100
+#define AT91SAM9G45_MATRIX_TCR 0x110
+#define AT91SAM9G45_MATRIX_DDRMPR 0x118
+#define AT91SAM9G45_MATRIX_EBICSA 0x128
+
+#define AT91SAM9N12_MATRIX_MCFG 0x00
+#define AT91SAM9N12_MATRIX_SCFG 0x40
+#define AT91SAM9N12_MATRIX_PRS 0x80
+#define AT91SAM9N12_MATRIX_MRCR 0x100
+#define AT91SAM9N12_MATRIX_EBICSA 0x118
+
+#define AT91SAM9X5_MATRIX_MCFG 0x00
+#define AT91SAM9X5_MATRIX_SCFG 0x40
+#define AT91SAM9X5_MATRIX_PRS 0x80
+#define AT91SAM9X5_MATRIX_MRCR 0x100
+#define AT91SAM9X5_MATRIX_EBICSA 0x120
+
+#define SAMA5D3_MATRIX_MCFG 0x00
+#define SAMA5D3_MATRIX_SCFG 0x40
+#define SAMA5D3_MATRIX_PRS 0x80
+#define SAMA5D3_MATRIX_MRCR 0x100
+
+#define AT91_MATRIX_MCFG(o, x) ((o) + ((x) * 0x4))
+#define AT91_MATRIX_ULBT GENMASK(2, 0)
+#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
+#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
+#define AT91_MATRIX_ULBT_FOUR (2 << 0)
+#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
+#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
+
+#define AT91_MATRIX_SCFG(o, x) ((o) + ((x) * 0x4))
+#define AT91_MATRIX_SLOT_CYCLE GENMASK(7, 0)
+#define AT91_MATRIX_DEFMSTR_TYPE GENMASK(17, 16)
+#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
+#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
+#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
+#define AT91_MATRIX_FIXED_DEFMSTR GENMASK(20, 18)
+#define AT91_MATRIX_ARBT GENMASK(25, 24)
+#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
+#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
+
+#define AT91_MATRIX_ITCM_SIZE GENMASK(3, 0)
+#define AT91_MATRIX_ITCM_0 (0 << 0)
+#define AT91_MATRIX_ITCM_16 (5 << 0)
+#define AT91_MATRIX_ITCM_32 (6 << 0)
+#define AT91_MATRIX_ITCM_64 (7 << 0)
+#define AT91_MATRIX_DTCM_SIZE GENMASK(7, 4)
+#define AT91_MATRIX_DTCM_0 (0 << 4)
+#define AT91_MATRIX_DTCM_16 (5 << 4)
+#define AT91_MATRIX_DTCM_32 (6 << 4)
+#define AT91_MATRIX_DTCM_64 (7 << 4)
+
+#define AT91_MATRIX_PRAS(o, x) ((o) + ((x) * 0x8))
+#define AT91_MATRIX_PRBS(o, x) ((o) + ((x) * 0x8) + 0x4)
+#define AT91_MATRIX_MPR(x) GENMASK(((x) * 0x4) + 1, ((x) * 0x4))
+
+#define AT91_MATRIX_RCB(x) BIT(x)
+
+#define AT91_MATRIX_CSA(cs, val) (val << (cs))
+#define AT91_MATRIX_DBPUC BIT(8)
+#define AT91_MATRIX_DBPDC BIT(9)
+#define AT91_MATRIX_VDDIOMSEL BIT(16)
+#define AT91_MATRIX_VDDIOMSEL_1_8V (0 << 16)
+#define AT91_MATRIX_VDDIOMSEL_3_3V (1 << 16)
+#define AT91_MATRIX_EBI_IOSR BIT(17)
+#define AT91_MATRIX_DDR_IOSR BIT(18)
+#define AT91_MATRIX_NFD0_SELECT BIT(24)
+#define AT91_MATRIX_DDR_MP_EN BIT(25)
+#define AT91_MATRIX_EBI_NUM_CS 8
+
+#define AT91_MATRIX_USBPUCR_PUON BIT(30)
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_MATRIX_H */
diff --git a/include/linux/mfd/syscon/atmel-mc.h b/include/linux/mfd/syscon/atmel-mc.h
new file mode 100644
index 0000000..afd9b8f
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-mc.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals
+ * registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_
+#define _LINUX_MFD_SYSCON_ATMEL_MC_H_
+
+/* Memory Controller */
+#define AT91_MC_RCR 0x00
+#define AT91_MC_RCB BIT(0)
+
+#define AT91_MC_ASR 0x04
+#define AT91_MC_UNADD BIT(0)
+#define AT91_MC_MISADD BIT(1)
+#define AT91_MC_ABTSZ GENMASK(9, 8)
+#define AT91_MC_ABTSZ_BYTE (0 << 8)
+#define AT91_MC_ABTSZ_HALFWORD (1 << 8)
+#define AT91_MC_ABTSZ_WORD (2 << 8)
+#define AT91_MC_ABTTYP GENMASK(11, 10)
+#define AT91_MC_ABTTYP_DATAREAD (0 << 10)
+#define AT91_MC_ABTTYP_DATAWRITE (1 << 10)
+#define AT91_MC_ABTTYP_FETCH (2 << 10)
+#define AT91_MC_MST(n) BIT(16 + (n))
+#define AT91_MC_SVMST(n) BIT(24 + (n))
+
+#define AT91_MC_AASR 0x08
+
+#define AT91_MC_MPR 0x0c
+#define AT91_MPR_MSTP(n) GENMASK(2 + ((n) * 4), ((n) * 4))
+
+/* External Bus Interface (EBI) registers */
+#define AT91_MC_EBI_CSA 0x60
+#define AT91_MC_EBI_CS(n) BIT(n)
+#define AT91_MC_EBI_NUM_CS 8
+
+#define AT91_MC_EBI_CFGR 0x64
+#define AT91_MC_EBI_DBPUC BIT(0)
+
+/* Static Memory Controller (SMC) registers */
+#define AT91_MC_SMC_CSR(n) (0x70 + ((n) * 4))
+#define AT91_MC_SMC_NWS GENMASK(6, 0)
+#define AT91_MC_SMC_NWS_(x) ((x) << 0)
+#define AT91_MC_SMC_WSEN BIT(7)
+#define AT91_MC_SMC_TDF GENMASK(11, 8)
+#define AT91_MC_SMC_TDF_(x) ((x) << 8)
+#define AT91_MC_SMC_TDF_MAX 0xf
+#define AT91_MC_SMC_BAT BIT(12)
+#define AT91_MC_SMC_DBW GENMASK(14, 13)
+#define AT91_MC_SMC_DBW_16 (1 << 13)
+#define AT91_MC_SMC_DBW_8 (2 << 13)
+#define AT91_MC_SMC_DPR BIT(15)
+#define AT91_MC_SMC_ACSS GENMASK(17, 16)
+#define AT91_MC_SMC_ACSS_(x) ((x) << 16)
+#define AT91_MC_SMC_ACSS_MAX 3
+#define AT91_MC_SMC_RWSETUP GENMASK(26, 24)
+#define AT91_MC_SMC_RWSETUP_(x) ((x) << 24)
+#define AT91_MC_SMC_RWHOLD GENMASK(30, 28)
+#define AT91_MC_SMC_RWHOLD_(x) ((x) << 28)
+#define AT91_MC_SMC_RWHOLDSETUP_MAX 7
+
+/* SDRAM Controller registers */
+#define AT91_MC_SDRAMC_MR 0x90
+#define AT91_MC_SDRAMC_MODE GENMASK(3, 0)
+#define AT91_MC_SDRAMC_MODE_NORMAL (0 << 0)
+#define AT91_MC_SDRAMC_MODE_NOP (1 << 0)
+#define AT91_MC_SDRAMC_MODE_PRECHARGE (2 << 0)
+#define AT91_MC_SDRAMC_MODE_LMR (3 << 0)
+#define AT91_MC_SDRAMC_MODE_REFRESH (4 << 0)
+#define AT91_MC_SDRAMC_DBW_16 BIT(4)
+
+#define AT91_MC_SDRAMC_TR 0x94
+#define AT91_MC_SDRAMC_COUNT GENMASK(11, 0)
+
+#define AT91_MC_SDRAMC_CR 0x98
+#define AT91_MC_SDRAMC_NC GENMASK(1, 0)
+#define AT91_MC_SDRAMC_NC_8 (0 << 0)
+#define AT91_MC_SDRAMC_NC_9 (1 << 0)
+#define AT91_MC_SDRAMC_NC_10 (2 << 0)
+#define AT91_MC_SDRAMC_NC_11 (3 << 0)
+#define AT91_MC_SDRAMC_NR GENMASK(3, 2)
+#define AT91_MC_SDRAMC_NR_11 (0 << 2)
+#define AT91_MC_SDRAMC_NR_12 (1 << 2)
+#define AT91_MC_SDRAMC_NR_13 (2 << 2)
+#define AT91_MC_SDRAMC_NB BIT(4)
+#define AT91_MC_SDRAMC_NB_2 (0 << 4)
+#define AT91_MC_SDRAMC_NB_4 (1 << 4)
+#define AT91_MC_SDRAMC_CAS GENMASK(6, 5)
+#define AT91_MC_SDRAMC_CAS_2 (2 << 5)
+#define AT91_MC_SDRAMC_TWR GENMASK(10, 7)
+#define AT91_MC_SDRAMC_TRC GENMASK(14, 11)
+#define AT91_MC_SDRAMC_TRP GENMASK(18, 15)
+#define AT91_MC_SDRAMC_TRCD GENMASK(22, 19)
+#define AT91_MC_SDRAMC_TRAS GENMASK(26, 23)
+#define AT91_MC_SDRAMC_TXSR GENMASK(30, 27)
+
+#define AT91_MC_SDRAMC_SRR 0x9c
+#define AT91_MC_SDRAMC_SRCB BIT(0)
+
+#define AT91_MC_SDRAMC_LPR 0xa0
+#define AT91_MC_SDRAMC_LPCB BIT(0)
+
+#define AT91_MC_SDRAMC_IER 0xa4
+#define AT91_MC_SDRAMC_IDR 0xa8
+#define AT91_MC_SDRAMC_IMR 0xac
+#define AT91_MC_SDRAMC_ISR 0xb0
+#define AT91_MC_SDRAMC_RES BIT(0)
+
+/* Burst Flash Controller register */
+#define AT91_MC_BFC_MR 0xc0
+#define AT91_MC_BFC_BFCOM GENMASK(1, 0)
+#define AT91_MC_BFC_BFCOM_DISABLED (0 << 0)
+#define AT91_MC_BFC_BFCOM_ASYNC (1 << 0)
+#define AT91_MC_BFC_BFCOM_BURST (2 << 0)
+#define AT91_MC_BFC_BFCC GENMASK(3, 2)
+#define AT91_MC_BFC_BFCC_MCK (1 << 2)
+#define AT91_MC_BFC_BFCC_DIV2 (2 << 2)
+#define AT91_MC_BFC_BFCC_DIV4 (3 << 2)
+#define AT91_MC_BFC_AVL GENMASK(7, 4)
+#define AT91_MC_BFC_PAGES GENMASK(10, 8)
+#define AT91_MC_BFC_PAGES_NO_PAGE (0 << 8)
+#define AT91_MC_BFC_PAGES_16 (1 << 8)
+#define AT91_MC_BFC_PAGES_32 (2 << 8)
+#define AT91_MC_BFC_PAGES_64 (3 << 8)
+#define AT91_MC_BFC_PAGES_128 (4 << 8)
+#define AT91_MC_BFC_PAGES_256 (5 << 8)
+#define AT91_MC_BFC_PAGES_512 (6 << 8)
+#define AT91_MC_BFC_PAGES_1024 (7 << 8)
+#define AT91_MC_BFC_OEL GENMASK(13, 12)
+#define AT91_MC_BFC_BAAEN BIT(16)
+#define AT91_MC_BFC_BFOEH BIT(17)
+#define AT91_MC_BFC_MUXEN BIT(18)
+#define AT91_MC_BFC_RDYEN BIT(19)
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_MC_H_ */
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
new file mode 100644
index 0000000..be6ebe6
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -0,0 +1,173 @@
+/*
+ * Atmel SMC (Static Memory Controller) register offsets and bit definitions.
+ *
+ * Copyright (C) 2014 Atmel
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
+#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+
+#define AT91SAM9_SMC_GENERIC 0x00
+#define AT91SAM9_SMC_GENERIC_BLK_SZ 0x10
+
+#define SAMA5_SMC_GENERIC 0x600
+#define SAMA5_SMC_GENERIC_BLK_SZ 0x14
+
+#define AT91SAM9_SMC_SETUP(o) ((o) + 0x00)
+#define AT91SAM9_SMC_NWESETUP(x) (x)
+#define AT91SAM9_SMC_NCS_WRSETUP(x) ((x) << 8)
+#define AT91SAM9_SMC_NRDSETUP(x) ((x) << 16)
+#define AT91SAM9_SMC_NCS_NRDSETUP(x) ((x) << 24)
+
+#define AT91SAM9_SMC_PULSE(o) ((o) + 0x04)
+#define AT91SAM9_SMC_NWEPULSE(x) (x)
+#define AT91SAM9_SMC_NCS_WRPULSE(x) ((x) << 8)
+#define AT91SAM9_SMC_NRDPULSE(x) ((x) << 16)
+#define AT91SAM9_SMC_NCS_NRDPULSE(x) ((x) << 24)
+
+#define AT91SAM9_SMC_CYCLE(o) ((o) + 0x08)
+#define AT91SAM9_SMC_NWECYCLE(x) (x)
+#define AT91SAM9_SMC_NRDCYCLE(x) ((x) << 16)
+
+#define AT91SAM9_SMC_MODE(o) ((o) + 0x0c)
+#define SAMA5_SMC_MODE(o) ((o) + 0x10)
+#define AT91_SMC_READMODE BIT(0)
+#define AT91_SMC_READMODE_NCS (0 << 0)
+#define AT91_SMC_READMODE_NRD (1 << 0)
+#define AT91_SMC_WRITEMODE BIT(1)
+#define AT91_SMC_WRITEMODE_NCS (0 << 1)
+#define AT91_SMC_WRITEMODE_NWE (1 << 1)
+#define AT91_SMC_EXNWMODE GENMASK(5, 4)
+#define AT91_SMC_EXNWMODE_DISABLE (0 << 4)
+#define AT91_SMC_EXNWMODE_FROZEN (2 << 4)
+#define AT91_SMC_EXNWMODE_READY (3 << 4)
+#define AT91_SMC_BAT BIT(8)
+#define AT91_SMC_BAT_SELECT (0 << 8)
+#define AT91_SMC_BAT_WRITE (1 << 8)
+#define AT91_SMC_DBW GENMASK(13, 12)
+#define AT91_SMC_DBW_8 (0 << 12)
+#define AT91_SMC_DBW_16 (1 << 12)
+#define AT91_SMC_DBW_32 (2 << 12)
+#define AT91_SMC_TDF GENMASK(19, 16)
+#define AT91_SMC_TDF_(x) ((((x) - 1) << 16) & AT91_SMC_TDF)
+#define AT91_SMC_TDF_MAX 16
+#define AT91_SMC_TDFMODE_OPTIMIZED BIT(20)
+#define AT91_SMC_PMEN BIT(24)
+#define AT91_SMC_PS GENMASK(29, 28)
+#define AT91_SMC_PS_4 (0 << 28)
+#define AT91_SMC_PS_8 (1 << 28)
+#define AT91_SMC_PS_16 (2 << 28)
+#define AT91_SMC_PS_32 (3 << 28)
+
+
+/*
+ * This function converts a setup timing expressed in nanoseconds into an
+ * encoded value that can be written in the SMC_SETUP register.
+ *
+ * The following formula is described in atmel datasheets (section
+ * "SMC Setup Register"):
+ *
+ * setup length = (128* SETUP[5] + SETUP[4:0])
+ *
+ * where setup length is the timing expressed in cycles.
+ */
+static inline u32 at91sam9_smc_setup_ns_to_cycles(unsigned int clk_rate,
+ u32 timing_ns)
+{
+ u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
+ u32 coded_cycles = 0;
+ u32 cycles;
+
+ cycles = DIV_ROUND_UP(timing_ns, clk_period);
+ if (cycles / 32) {
+ coded_cycles |= 1 << 5;
+ if (cycles < 128)
+ cycles = 0;
+ }
+
+ coded_cycles |= cycles % 32;
+
+ return coded_cycles;
+}
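A brief worked example of the SETUP encoding; the clock rate and timings below are illustrative assumptions, not values from the patch. The PULSE helper that follows uses the same scheme with a wider field, 256*PULSE[6] + PULSE[5:0].

/*
 * With clk_rate = 133000000, clk_period = DIV_ROUND_UP(1000000000, 133000000)
 * = 8 ns:
 *
 *   at91sam9_smc_setup_ns_to_cycles(133000000, 40)  = 0x05
 *       (5 cycles, directly representable in SETUP[4:0])
 *   at91sam9_smc_setup_ns_to_cycles(133000000, 300) = 0x20
 *       (38 cycles round up to 128 = 128*SETUP[5] + 0, the next length the
 *        encoding can represent once SETUP[4:0] alone is too small)
 */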
+
+/*
+ * This function converts a pulse timing expressed in nanoseconds into an
+ * encoded value that can be written in the SMC_PULSE register.
+ *
+ * The following formula is described in atmel datasheets (section
+ * "SMC Pulse Register"):
+ *
+ * pulse length = (256* PULSE[6] + PULSE[5:0])
+ *
+ * where pulse length is the timing expressed in cycles.
+ */
+static inline u32 at91sam9_smc_pulse_ns_to_cycles(unsigned int clk_rate,
+ u32 timing_ns)
+{
+ u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
+ u32 coded_cycles = 0;
+ u32 cycles;
+
+ cycles = DIV_ROUND_UP(timing_ns, clk_period);
+ if (cycles / 64) {
+ coded_cycles |= 1 << 6;
+ if (cycles < 256)
+ cycles = 0;
+ }
+
+ coded_cycles |= cycles % 64;
+
+ return coded_cycles;
+}
+
+/*
+ * This function converts a cycle timing expressed in nanoseconds into an
+ * encoded value that can be written in the SMC_CYCLE register.
+ *
+ * The following formula is described in atmel datasheets (section
+ * "SMC Cycle Register"):
+ *
+ * cycle length = (CYCLE[8:7]*256 + CYCLE[6:0])
+ *
+ * where cycle length is the timing expressed in cycles.
+ */
+static inline u32 at91sam9_smc_cycle_ns_to_cycles(unsigned int clk_rate,
+ u32 timing_ns)
+{
+ u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
+ u32 coded_cycles = 0;
+ u32 cycles;
+
+ cycles = DIV_ROUND_UP(timing_ns, clk_period);
+ if (cycles / 128) {
+ coded_cycles = cycles / 256;
+ cycles %= 256;
+ if (cycles >= 128) {
+ coded_cycles++;
+ cycles = 0;
+ }
+
+ if (coded_cycles > 0x3) {
+ coded_cycles = 0x3;
+ cycles = 0x7f;
+ }
+
+ coded_cycles <<= 7;
+ }
+
+ coded_cycles |= cycles % 128;
+
+ return coded_cycles;
+}
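A similar hedged example for the CYCLE encoding, including its saturation behaviour, again with an assumed 8 ns clock period:

/*
 * With clk_rate = 133000000 (8 ns period):
 *
 *   at91sam9_smc_cycle_ns_to_cycles(133000000, 1000)  = 0x07d
 *       (125 cycles fit directly in CYCLE[6:0])
 *   at91sam9_smc_cycle_ns_to_cycles(133000000, 2000)  = 0x080
 *       (250 cycles round up to 256 = 1*256 + 0)
 *   at91sam9_smc_cycle_ns_to_cycles(133000000, 40000) = 0x1ff
 *       (5000 cycles saturate at the maximum encodable length,
 *        3*256 + 127 = 895 cycles)
 */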
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */
diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h
new file mode 100644
index 0000000..8acf1ec
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-st.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * System Timer (ST) - System peripherals registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H
+#define _LINUX_MFD_SYSCON_ATMEL_ST_H
+
+#include <linux/bitops.h>
+
+#define AT91_ST_CR 0x00 /* Control Register */
+#define AT91_ST_WDRST BIT(0) /* Watchdog Timer Restart */
+
+#define AT91_ST_PIMR 0x04 /* Period Interval Mode Register */
+#define AT91_ST_PIV 0xffff /* Period Interval Value */
+
+#define AT91_ST_WDMR 0x08 /* Watchdog Mode Register */
+#define AT91_ST_WDV 0xffff /* Watchdog Counter Value */
+#define AT91_ST_RSTEN BIT(16) /* Reset Enable */
+#define AT91_ST_EXTEN BIT(17) /* External Signal Assertion Enable */
+
+#define AT91_ST_RTMR 0x0c /* Real-time Mode Register */
+#define AT91_ST_RTPRES 0xffff /* Real-time Prescaler Value */
+
+#define AT91_ST_SR 0x10 /* Status Register */
+#define AT91_ST_PITS BIT(0) /* Period Interval Timer Status */
+#define AT91_ST_WDOVF BIT(1) /* Watchdog Overflow */
+#define AT91_ST_RTTINC BIT(2) /* Real-time Timer Increment */
+#define AT91_ST_ALMS BIT(3) /* Alarm Status */
+
+#define AT91_ST_IER 0x14 /* Interrupt Enable Register */
+#define AT91_ST_IDR 0x18 /* Interrupt Disable Register */
+#define AT91_ST_IMR 0x1c /* Interrupt Mask Register */
+
+#define AT91_ST_RTAR 0x20 /* Real-time Alarm Register */
+#define AT91_ST_ALMV 0xfffff /* Alarm Value */
+
+#define AT91_ST_CRTR 0x24 /* Current Real-time Register */
+#define AT91_ST_CRTV 0xfffff /* Current Real-Time Value */
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_ST_H */
diff --git a/include/linux/mfd/syscon/exynos4-pmu.h b/include/linux/mfd/syscon/exynos4-pmu.h
new file mode 100644
index 0000000..278b1b1
--- /dev/null
+++ b/include/linux/mfd/syscon/exynos4-pmu.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
+#define _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
+
+/* Exynos4 PMU register definitions */
+
+/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
+#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x710 + (n) * 4)
+#define EXYNOS4_MIPI_PHY_ENABLE (1 << 0)
+#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
+#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
+#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
+
+#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ */
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
index 00ef24b..9352adc 100644
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ b/include/linux/mfd/syscon/exynos5-pmu.h
@@ -36,6 +36,9 @@
#define EXYNOS5420_MTCADC_PHY_CONTROL (0x724)
#define EXYNOS5420_DPTX_PHY_CONTROL (0x728)
+/* Exynos5433 specific register definitions */
+#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728)
+
#define EXYNOS5_PHY_ENABLE BIT(0)
#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index c877cad..d16f4c8 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -207,6 +207,7 @@
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI1 (0x1 << 6)
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI0 (0x2 << 6)
#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI1 (0x3 << 6)
+#define IMX6Q_GPR3_MIPI_MUX_CTL_SHIFT 4
#define IMX6Q_GPR3_MIPI_MUX_CTL_MASK (0x3 << 4)
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI0 (0x0 << 4)
#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI1 (0x1 << 4)
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index e1c12d8..468c31a 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -140,48 +140,13 @@ extern int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val);
#define TC_KPD_DEBOUNCE_PERIOD 0xA3
#define TC_KPD_SETTLE_TIME 0xA3
-/**
- * struct tc35893_platform_data - data structure for platform specific data
- * @keymap_data: matrix scan code table for keycodes
- * @krow: mask for available rows, value is 0xFF
- * @kcol: mask for available columns, value is 0xFF
- * @debounce_period: platform specific debounce time
- * @settle_time: platform specific settle down time
- * @irqtype: type of interrupt, falling or rising edge
- * @enable_wakeup: specifies if keypad event can wake up system from sleep
- * @no_autorepeat: flag for auto repetition
- */
-struct tc3589x_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
- u8 krow;
- u8 kcol;
- u8 debounce_period;
- u8 settle_time;
- unsigned long irqtype;
- bool enable_wakeup;
- bool no_autorepeat;
-};
-
-/**
- * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data
- * @setup: callback for board-specific initialization
- * @remove: callback for board-specific teardown
- */
-struct tc3589x_gpio_platform_data {
- void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base);
- void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base);
-};
/**
* struct tc3589x_platform_data - TC3589x platform data
* @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
- * @gpio: GPIO-specific platform data
- * @keypad: keypad-specific platform data
*/
struct tc3589x_platform_data {
unsigned int block;
- struct tc3589x_gpio_platform_data *gpio;
- const struct tc3589x_keypad_platform_data *keypad;
};
#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index e2e7005..1fd50dc 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -52,6 +52,7 @@
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
+#define IRQENB_EOS BIT(1)
#define IRQENB_FIFO0THRES BIT(2)
#define IRQENB_FIFO0OVRRUN BIT(3)
#define IRQENB_FIFO0UNDRFLW BIT(4)
@@ -107,7 +108,7 @@
/* Charge delay */
#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
#define CHARGEDLY_OPEN(val) ((val) << 0)
-#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(1)
+#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
/* Control register */
#define CNTRLREG_TSCSSENB BIT(0)
@@ -127,6 +128,7 @@
/* Sequencer Status */
#define SEQ_STATUS BIT(5)
+#define CHARGE_STEP 0x11
#define ADC_CLK 3000000
#define TOTAL_STEPS 16
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 5738817..24b86d5 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -96,11 +96,6 @@
#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
/*
- * Some controllers have DMA enable/disable register
- */
-#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9)
-
-/*
* Some controllers allows to set SDx actual clock
*/
#define TMIO_MMC_CLK_ACTUAL (1 << 10)
@@ -112,39 +107,22 @@ void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
struct dma_chan;
-struct tmio_mmc_dma {
- void *chan_priv_tx;
- void *chan_priv_rx;
- int slave_id_tx;
- int slave_id_rx;
- int alignment_shift;
- dma_addr_t dma_rx_offset;
- bool (*filter)(struct dma_chan *chan, void *arg);
-};
-
-struct tmio_mmc_host;
-
/*
* data for the MMC controller
*/
struct tmio_mmc_data {
+ void *chan_priv_tx;
+ void *chan_priv_rx;
unsigned int hclk;
unsigned long capabilities;
unsigned long capabilities2;
unsigned long flags;
- unsigned long bus_shift;
u32 ocr_mask; /* available voltages */
- struct tmio_mmc_dma *dma;
- struct device *dev;
unsigned int cd_gpio;
+ int alignment_shift;
+ dma_addr_t dma_rx_offset;
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
- int (*write16_hook)(struct tmio_mmc_host *host, int addr);
- /* clock management callbacks */
- int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
- void (*clk_disable)(struct platform_device *pdev);
- int (*multi_io_quirk)(struct mmc_card *card,
- unsigned int direction, int blk_size);
};
/*
diff --git a/include/linux/mfd/wm8350/supply.h b/include/linux/mfd/wm8350/supply.h
index 2b94793..8dc9367 100644
--- a/include/linux/mfd/wm8350/supply.h
+++ b/include/linux/mfd/wm8350/supply.h
@@ -123,9 +123,9 @@ struct wm8350_charger_policy {
struct wm8350_power {
struct platform_device *pdev;
- struct power_supply battery;
- struct power_supply usb;
- struct power_supply ac;
+ struct power_supply *battery;
+ struct power_supply *usb;
+ struct power_supply *ac;
struct wm8350_charger_policy *policy;
int rev_g_coeff;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index fab9b32..cac1c09 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -67,27 +67,18 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
-extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
-extern bool migrate_ratelimited(int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
return false;
}
-static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
-{
-}
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
{
return -EAGAIN; /* can't migrate now */
}
-static inline bool migrate_ratelimited(int node)
-{
- return false;
-}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index ee80dd7..819077c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -52,6 +52,7 @@
#define MISC_DYNAMIC_MINOR 255
struct device;
+struct attribute_group;
struct miscdevice {
int minor;
@@ -60,6 +61,7 @@ struct miscdevice {
struct list_head list;
struct device *parent;
struct device *this_device;
+ const struct attribute_group **groups;
const char *nodename;
umode_t mode;
};
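
Editor's note: the new `groups` member lets a misc driver hand its sysfs attribute groups to misc_register() instead of creating the files by hand afterwards. A minimal hedged sketch follows; it is not taken from this commit, the "example" driver name and its single attribute are hypothetical, and it assumes the usual DEVICE_ATTR_RO/ATTRIBUTE_GROUPS helpers.

/* Hedged sketch: a hypothetical misc driver exposing one read-only
 * sysfs attribute through the new 'groups' member. Illustrative only. */
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/fs.h>

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(version);

static struct attribute *example_attrs[] = {
	&dev_attr_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "example",
	.fops	= &example_fops,
	.groups	= example_groups,	/* created for us at registration time */
};

static int __init example_init(void)
{
	return misc_register(&example_miscdev);
}
module_init(example_init);

static void __exit example_exit(void)
{
	misc_deregister(&example_miscdev);
}
module_exit(example_exit);
MODULE_LICENSE("GPL");
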
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 64d2594..58391f2 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -35,6 +35,8 @@
#include <linux/dma-mapping.h>
#include <linux/if_link.h>
+#include <linux/mlx4/device.h>
+#include <linux/netdevice.h>
enum {
/* initialization and general commands */
@@ -68,9 +70,12 @@ enum {
MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
MLX4_CMD_SET_ICM_SIZE = 0xffd,
MLX4_CMD_ACCESS_REG = 0x3b,
+ MLX4_CMD_ALLOCATE_VPP = 0x80,
+ MLX4_CMD_SET_VPORT_QOS = 0x81,
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+ MLX4_CMD_VIRT_PORT_MAP = 0x5c,
MLX4_CMD_GET_OP_REQ = 0x59,
/* TPT commands */
@@ -162,12 +167,21 @@ enum {
MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
MLX4_QP_FLOW_STEERING_DETACH = 0x66,
MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
+
+ /* Update and read QCN parameters */
+ MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68,
+};
+
+enum {
+ MLX4_CMD_TIME_CLASS_A = 60000,
+ MLX4_CMD_TIME_CLASS_B = 60000,
+ MLX4_CMD_TIME_CLASS_C = 60000,
};
enum {
- MLX4_CMD_TIME_CLASS_A = 10000,
- MLX4_CMD_TIME_CLASS_B = 10000,
- MLX4_CMD_TIME_CLASS_C = 10000,
+ /* virtual to physical port mapping opcode modifiers */
+ MLX4_GET_PORT_VIRT2PHY = 0x0,
+ MLX4_SET_PORT_VIRT2PHY = 0x1,
};
enum {
@@ -176,7 +190,14 @@ enum {
};
enum {
- /* set port opcode modifiers */
+ /* Set port opcode modifiers */
+ MLX4_SET_PORT_IB_OPCODE = 0x0,
+ MLX4_SET_PORT_ETH_OPCODE = 0x1,
+ MLX4_SET_PORT_BEACON_OPCODE = 0x4,
+};
+
+enum {
+ /* Set port Ethernet input modifiers */
MLX4_SET_PORT_GENERAL = 0x0,
MLX4_SET_PORT_RQP_CALC = 0x1,
MLX4_SET_PORT_MAC_TABLE = 0x2,
@@ -226,6 +247,16 @@ struct mlx4_config_dev_params {
u8 rx_csum_flags_port_2;
};
+enum mlx4_en_congestion_control_algorithm {
+ MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0,
+};
+
+enum mlx4_en_congestion_control_opmod {
+ MLX4_CONGESTION_CONTROL_GET_PARAMS,
+ MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+ MLX4_CONGESTION_CONTROL_SET_PARAMS = 4,
+};
+
struct mlx4_dev;
struct mlx4_cmd_mailbox {
@@ -271,14 +302,22 @@ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_para
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
+int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
+ struct mlx4_counter *counter_stats, int reset);
+int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
+ struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+ int max_tx_rate);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
struct mlx4_config_dev_params *params);
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
+void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev);
/*
* mlx4_get_slave_default_vlan -
* return true if VST ( default vlan)
@@ -288,5 +327,6 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
u16 *vlan, u8 *qos);
#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17)
#endif /* MLX4_CMD_H */
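
Editor's note: the bottom of mlx4/cmd.h now pairs the existing MLX4_COMM_GET_IF_REV() macro with the new COMM_CHAN_EVENT_INTERNAL_ERR bit. The standalone snippet below (userspace, not driver code) only illustrates how those two definitions decode a 32-bit word; the sample register values are made up.

/* Standalone illustration of the two comm-channel helpers above. */
#include <stdint.h>
#include <stdio.h>

#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) ((uint8_t)((cmd_chan_ver) >> 8))
#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17)

int main(void)
{
	uint32_t cmd_chan_ver = 0x0304;		/* hypothetical value */
	uint32_t comm_flags   = 1u << 17;	/* only the error bit set */

	printf("interface revision: %u\n", MLX4_COMM_GET_IF_REV(cmd_chan_ver));
	if (comm_flags & COMM_CHAN_EVENT_INTERNAL_ERR)
		printf("internal error event signalled\n");
	return 0;
}
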
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 25c791e..fd13c1c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -42,15 +42,14 @@
#include <linux/atomic.h>
-#include <linux/clocksource.h>
+#include <linux/timecounter.h>
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
-#define MSIX_LEGACY_SZ 4
#define MIN_MSIX_P_PORT 5
+#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
+ (dev_cap).num_ports * MIN_MSIX_P_PORT)
-#define MLX4_NUM_UP 8
-#define MLX4_NUM_TC 8
#define MLX4_MAX_100M_UNITS_VAL 255 /*
* work around: can't set values
* greater than this value when
@@ -70,6 +69,7 @@ enum {
MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4,
MLX4_FLAG_OLD_REG_MAC = 1 << 6,
+ MLX4_FLAG_BONDED = 1 << 7
};
enum {
@@ -97,7 +97,7 @@ enum {
MLX4_MAX_NUM_PF = 16,
MLX4_MAX_NUM_VF = 126,
MLX4_MAX_NUM_VF_P_PORT = 64,
- MLX4_MFUNC_MAX = 80,
+ MLX4_MFUNC_MAX = 128,
MLX4_MAX_EQ_NUM = 1024,
MLX4_MFUNC_EQ_NUM = 4,
MLX4_MFUNC_MAX_EQES = 8,
@@ -173,6 +173,7 @@ enum {
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
+ MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52,
MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
@@ -200,7 +201,16 @@ enum {
MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
- MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19
+ MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
+ MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
+ MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21,
+ MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22,
+ MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23,
+ MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24,
+ MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25,
+ MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
+ MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
+ MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
};
enum {
@@ -208,6 +218,10 @@ enum {
MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
};
+enum {
+ MLX4_VF_CAP_FLAG_RESET = 1 << 0
+};
+
/* bit enums for an 8-bit flags field indicating special use
* QPs which require special handling in qp_reserve_range.
* Currently, this only includes QPs used by the ETH interface,
@@ -248,9 +262,14 @@ enum {
MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
+ MLX4_BMME_FLAG_PORT_REMAP = 1 << 24,
MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
};
+enum {
+ MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP
+};
+
enum mlx4_event {
MLX4_EVENT_TYPE_COMP = 0x00,
MLX4_EVENT_TYPE_PATH_MIG = 0x01,
@@ -276,6 +295,7 @@ enum mlx4_event {
MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
+ MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
MLX4_EVENT_TYPE_NONE = 0xff,
};
@@ -285,6 +305,11 @@ enum {
};
enum {
+ MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1,
+ MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
+};
+
+enum {
MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
};
@@ -411,6 +436,16 @@ enum {
MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
};
+enum {
+ MLX4_DEVICE_STATE_UP = 1 << 0,
+ MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
+};
+
+enum {
+ MLX4_INTERFACE_STATE_UP = 1 << 0,
+ MLX4_INTERFACE_STATE_DELETION = 1 << 1,
+};
+
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
@@ -421,6 +456,21 @@ enum mlx4_module_id {
MLX4_MODULE_ID_QSFP28 = 0x11,
};
+enum { /* rl */
+ MLX4_QP_RATE_LIMIT_NONE = 0,
+ MLX4_QP_RATE_LIMIT_KBS = 1,
+ MLX4_QP_RATE_LIMIT_MBS = 2,
+ MLX4_QP_RATE_LIMIT_GBS = 3
+};
+
+struct mlx4_rate_limit_caps {
+ u16 num_rates; /* Number of different rates */
+ u8 min_unit;
+ u16 min_val;
+ u8 max_unit;
+ u16 max_val;
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
@@ -479,7 +529,6 @@ struct mlx4_caps {
int num_eqs;
int reserved_eqs;
int num_comp_vectors;
- int comp_pool;
int num_mpts;
int max_fmr_maps;
int num_mtts;
@@ -535,6 +584,8 @@ struct mlx4_caps {
u8 alloc_res_qp_mask;
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
+ u32 vf_caps;
+ struct mlx4_rate_limit_caps rl_caps;
};
struct mlx4_buf_list {
@@ -660,6 +711,8 @@ struct mlx4_cq {
void (*comp)(struct mlx4_cq *);
void *priv;
} tasklet_ctx;
+ int reset_notify_added;
+ struct list_head reset_notify;
};
struct mlx4_qp {
@@ -718,6 +771,14 @@ union mlx4_ext_av {
struct mlx4_eth_av eth;
};
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do { \
+ if ((value) > U32_MAX) \
+ counter = cpu_to_be32(U32_MAX); \
+ else \
+ counter = cpu_to_be32(value); \
+} while (0)
+
struct mlx4_counter {
u8 reserved1[3];
u8 counter_mode;
@@ -744,8 +805,23 @@ struct mlx4_vf_dev {
u8 n_ports;
};
-struct mlx4_dev {
+struct mlx4_dev_persistent {
struct pci_dev *pdev;
+ struct mlx4_dev *dev;
+ int nvfs[MLX4_MAX_PORTS + 1];
+ int num_vfs;
+ enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1];
+ enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1];
+ struct work_struct catas_work;
+ struct workqueue_struct *catas_wq;
+ struct mutex device_state_mutex; /* protect HW state */
+ u8 state;
+ struct mutex interface_state_mutex; /* protect SW state */
+ u8 interface_state;
+};
+
+struct mlx4_dev {
+ struct mlx4_dev_persistent *persist;
unsigned long flags;
unsigned long num_slaves;
struct mlx4_caps caps;
@@ -754,13 +830,17 @@ struct mlx4_dev {
struct radix_tree_root qp_table_tree;
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
- int num_vfs;
int numa_node;
int oper_log_mgm_entry_size;
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
struct mlx4_vf_dev *dev_vfs;
- int nvfs[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_clock_params {
+ u64 offset;
+ u8 bar;
+ u8 size;
};
struct mlx4_eqe {
@@ -832,6 +912,11 @@ struct mlx4_eqe {
} __packed tbl_change_info;
} params;
} __packed port_mgmt_change;
+ struct {
+ u8 reserved[3];
+ u8 port;
+ u32 reserved1[5];
+ } __packed bad_cable;
} event;
u8 slave_id;
u8 reserved3[2];
@@ -886,6 +971,7 @@ struct mlx4_mad_ifc {
((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
#define MLX4_INVALID_SLAVE_ID 0xFF
+#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
void handle_port_mgmt_change_event(struct work_struct *work);
@@ -933,6 +1019,11 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev)
return dev->flags & MLX4_FLAG_SLAVE;
}
+static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+ return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf, gfp_t gfp);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -1233,14 +1324,13 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
-void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
-int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
-int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
- u8 *pg, u16 *ratelimit);
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
+ u8 ignore_fcs_value);
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
@@ -1257,10 +1347,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
int mlx4_get_phys_port_id(struct mlx4_dev *dev);
@@ -1269,7 +1362,12 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
+void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
+ int port);
+__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port);
+void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port);
int mlx4_flow_attach(struct mlx4_dev *dev,
struct mlx4_net_trans_rule *rule, u64 *reg_id);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
@@ -1338,6 +1436,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
@@ -1404,4 +1504,7 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
enum mlx4_access_reg_method method,
struct mlx4_ptys_reg *ptys_reg);
+int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
+ struct mlx4_clock_params *params);
+
#endif /* MLX4_DEVICE_H */
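
Editor's note: the new ASSIGN_32BIT_COUNTER() macro clamps a software counter to U32_MAX before converting it to big endian. The snippet below mirrors that behaviour in standalone userspace C (htobe32/UINT32_MAX stand in for cpu_to_be32/U32_MAX); it is a sketch of the arithmetic only, not the kernel build.

/* Standalone illustration of the saturation behaviour of
 * ASSIGN_32BIT_COUNTER(): values above 32 bits are clamped. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
	if ((value) > UINT32_MAX)			\
		counter = htobe32(UINT32_MAX);		\
	else						\
		counter = htobe32(value);		\
} while (0)

int main(void)
{
	uint32_t counter;
	uint64_t sw_total = 0x1ffffffffULL;	/* exceeds 32 bits */

	ASSIGN_32BIT_COUNTER(counter, sw_total);
	printf("stored (cpu order): 0x%08x\n", be32toh(counter));	/* 0xffffffff */

	ASSIGN_32BIT_COUNTER(counter, 1234ULL);
	printf("stored (cpu order): 0x%08x\n", be32toh(counter));	/* 0x000004d2 */
	return 0;
}
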
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 022055c..9553a73 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -49,6 +49,10 @@ enum mlx4_dev_event {
MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
};
+enum {
+ MLX4_INTFF_BONDING = 1 << 0
+};
+
struct mlx4_interface {
void * (*add) (struct mlx4_dev *dev);
void (*remove)(struct mlx4_dev *dev, void *context);
@@ -57,11 +61,26 @@ struct mlx4_interface {
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
struct list_head list;
enum mlx4_protocol protocol;
+ int flags;
};
int mlx4_register_interface(struct mlx4_interface *intf);
void mlx4_unregister_interface(struct mlx4_interface *intf);
+int mlx4_bond(struct mlx4_dev *dev);
+int mlx4_unbond(struct mlx4_dev *dev);
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+ return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+struct mlx4_port_map {
+ u8 port1;
+ u8 port2;
+};
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
static inline u64 mlx4_mac_to_u64(u8 *addr)
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 467ccdf..6fed539 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -96,6 +96,7 @@ enum {
MLX4_QP_BIT_RRE = 1 << 15,
MLX4_QP_BIT_RWE = 1 << 14,
MLX4_QP_BIT_RAE = 1 << 13,
+ MLX4_QP_BIT_FPP = 1 << 3,
MLX4_QP_BIT_RIC = 1 << 4,
};
@@ -206,14 +207,17 @@ struct mlx4_qp_context {
__be32 msn;
__be16 rq_wqe_counter;
__be16 sq_wqe_counter;
- u32 reserved3[2];
+ u32 reserved3;
+ __be16 rate_limit_params;
+ u8 reserved4;
+ u8 qos_vport;
__be32 param3;
__be32 nummmcpeers_basemkey;
u8 log_page_size;
- u8 reserved4[2];
+ u8 reserved5[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
- u32 reserved5[10];
+ u32 reserved6[10];
};
struct mlx4_update_qp_context {
@@ -228,6 +232,8 @@ struct mlx4_update_qp_context {
enum {
MLX4_UPD_QP_MASK_PM_STATE = 32,
MLX4_UPD_QP_MASK_VSD = 33,
+ MLX4_UPD_QP_MASK_QOS_VPP = 34,
+ MLX4_UPD_QP_MASK_RATE_LIMIT = 35,
};
enum {
@@ -426,8 +432,10 @@ struct mlx4_wqe_inline_seg {
enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_SMAC = 1 << 0,
- MLX4_UPDATE_QP_VSD = 1 << 2,
- MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
+ MLX4_UPDATE_QP_VSD = 1 << 1,
+ MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
+ MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
+ MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1
};
enum mlx4_update_qp_params_flags {
@@ -436,7 +444,10 @@ enum mlx4_update_qp_params_flags {
struct mlx4_update_qp_params {
u8 smac_index;
+ u8 qos_vport;
u32 flags;
+ u16 rate_unit;
+ u16 rate_val;
};
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
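
Editor's note: the update-QP attribute enum is renumbered here (VSD moves to bit 1) and gains rate-limit and QoS-vport attributes, with the supported-attributes mask widened to (1 << 4) - 1. The standalone check below only verifies that the widened mask covers exactly the four defined bits.

/* Standalone sanity check of the widened MLX4_UPDATE_QP_SUPPORTED_ATTRS. */
#include <assert.h>
#include <stdio.h>

enum mlx4_update_qp_attr {
	MLX4_UPDATE_QP_SMAC		= 1 << 0,
	MLX4_UPDATE_QP_VSD		= 1 << 1,
	MLX4_UPDATE_QP_RATE_LIMIT	= 1 << 2,
	MLX4_UPDATE_QP_QOS_VPORT	= 1 << 3,
	MLX4_UPDATE_QP_SUPPORTED_ATTRS	= (1 << 4) - 1
};

int main(void)
{
	int all = MLX4_UPDATE_QP_SMAC | MLX4_UPDATE_QP_VSD |
		  MLX4_UPDATE_QP_RATE_LIMIT | MLX4_UPDATE_QP_QOS_VPORT;

	assert(all == MLX4_UPDATE_QP_SUPPORTED_ATTRS);	/* 0xf */
	printf("supported attrs mask: 0x%x\n", MLX4_UPDATE_QP_SUPPORTED_ATTRS);
	return 0;
}
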
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
index 2826a4b..68cd08f 100644
--- a/include/linux/mlx5/cmd.h
+++ b/include/linux/mlx5/cmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index f6b17ac..abc4767 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -137,14 +137,15 @@ enum {
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
void __iomem *uar_page,
- spinlock_t *doorbell_lock)
+ spinlock_t *doorbell_lock,
+ u32 cons_index)
{
__be32 doorbell[2];
u32 sn;
u32 ci;
sn = cq->arm_sn & 3;
- ci = cq->cons_index & 0xffffff;
+ ci = cons_index & 0xffffff;
*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
@@ -168,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq, u16 cq_period,
+ u16 cq_max_count);
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
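
Editor's note: mlx5_cq_arm() now takes the consumer index as an explicit argument instead of reading cq->cons_index, but the doorbell word it builds is unchanged: sn << 28 | cmd | (ci & 0xffffff). The standalone snippet below only shows that composition; the sample values and the cmd constant are hypothetical.

/* Standalone illustration of the arm doorbell word composed in
 * mlx5_cq_arm() after the signature change. */
#include <stdint.h>
#include <stdio.h>

static uint32_t arm_doorbell_word(uint32_t arm_sn, uint32_t cmd,
				  uint32_t cons_index)
{
	uint32_t sn = arm_sn & 3;
	uint32_t ci = cons_index & 0xffffff;	/* low 24 bits only */

	return sn << 28 | cmd | ci;
}

int main(void)
{
	uint32_t cmd = 0;	/* placeholder for the CQ doorbell command bits */

	printf("doorbell = 0x%08x\n", arm_doorbell_word(2, cmd, 0x1234567));
	return 0;
}
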
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4e5bd81..b943cd9 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,6 +35,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -58,6 +59,8 @@
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
@@ -70,6 +73,14 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))
@@ -88,6 +99,12 @@ __mlx5_mask(typ, fld))
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
+#define MLX5_GET64_PR(typ, p, fld) ({ \
+ u64 ___t = MLX5_GET64(typ, p, fld); \
+ pr_debug(#fld " = 0x%llx\n", ___t); \
+ ___t; \
+})
+
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -115,6 +132,10 @@ enum {
};
enum {
+ MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
+enum {
MLX5_MIN_PKEY_TABLE_SIZE = 128,
MLX5_MAX_LOG_PKEY_TABLE = 5,
};
@@ -264,6 +285,7 @@ enum {
MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
MLX5_OPCODE_SEND = 0x0a,
MLX5_OPCODE_SEND_IMM = 0x0b,
+ MLX5_OPCODE_LSO = 0x0e,
MLX5_OPCODE_RDMA_READ = 0x10,
MLX5_OPCODE_ATOMIC_CS = 0x11,
MLX5_OPCODE_ATOMIC_FA = 0x12,
@@ -312,13 +334,6 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
-enum {
- HCA_CAP_OPMOD_GET_MAX = 0,
- HCA_CAP_OPMOD_GET_CUR = 1,
- HCA_CAP_OPMOD_GET_ODP_MAX = 4,
- HCA_CAP_OPMOD_GET_ODP_CUR = 5
-};
-
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -541,6 +556,10 @@ struct mlx5_cmd_prot_block {
u8 sig;
};
+enum {
+ MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
struct mlx5_err_cqe {
u8 rsvd0[32];
__be32 srqn;
@@ -554,13 +573,22 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 rsvd0[17];
+ u8 rsvd0[4];
+ u8 lro_tcppsh_abort_dupack;
+ u8 lro_min_ttl;
+ __be16 lro_tcp_win;
+ __be32 lro_ack_seq_num;
+ __be32 rss_hash_result;
+ u8 rss_hash_type;
u8 ml_path;
- u8 rsvd20[4];
+ u8 rsvd20[2];
+ __be16 check_sum;
__be16 slid;
__be32 flags_rqpn;
- u8 rsvd28[4];
- __be32 srqn;
+ u8 hds_ip_ext;
+ u8 l4_hdr_type_etc;
+ __be16 vlan_info;
+ __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
u8 rsvd40[4];
__be32 byte_cnt;
@@ -571,6 +599,40 @@ struct mlx5_cqe64 {
u8 op_own;
};
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+ return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+ CQE_L4_HDR_TYPE_NONE = 0x0,
+ CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
+ CQE_L4_HDR_TYPE_UDP = 0x2,
+ CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
+ CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
+};
+
+enum {
+ CQE_RSS_HTYPE_IP = 0x3 << 6,
+ CQE_RSS_HTYPE_L4 = 0x3 << 2,
+};
+
+enum {
+ CQE_L2_OK = 1 << 0,
+ CQE_L3_OK = 1 << 1,
+ CQE_L4_OK = 1 << 2,
+};
+
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
@@ -996,4 +1058,135 @@ struct mlx5_destroy_psv_out {
u8 rsvd[8];
};
+#define MLX5_CMD_OP_MAX 0x920
+
+enum {
+ VPORT_STATE_DOWN = 0x0,
+ VPORT_STATE_UP = 0x1,
+};
+
+enum {
+ MLX5_L3_PROT_TYPE_IPV4 = 0,
+ MLX5_L3_PROT_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_L4_PROT_TYPE_TCP = 0,
+ MLX5_L4_PROT_TYPE_UDP = 1,
+};
+
+enum {
+ MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
+ MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
+ MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
+ MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
+};
+
+enum {
+ MLX5_MATCH_OUTER_HEADERS = 1 << 0,
+ MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
+ MLX5_MATCH_INNER_HEADERS = 1 << 2,
+
+};
+
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
+ MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
+};
+
+enum {
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
+enum mlx5_cap_type {
+ MLX5_CAP_GENERAL = 0,
+ MLX5_CAP_ETHERNET_OFFLOADS,
+ MLX5_CAP_ODP,
+ MLX5_CAP_ATOMIC,
+ MLX5_CAP_ROCE,
+ MLX5_CAP_IPOIB_OFFLOADS,
+ MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_FLOW_TABLE,
+ /* NUM OF CAP Types */
+ MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
+enum {
+ MLX5_CMD_STAT_OK = 0x0,
+ MLX5_CMD_STAT_INT_ERR = 0x1,
+ MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
+ MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
+ MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
+ MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
+ MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_LIM_ERR = 0x8,
+ MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
+ MLX5_CMD_STAT_IX_ERR = 0xa,
+ MLX5_CMD_STAT_NO_RES_ERR = 0xf,
+ MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
+ MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
+ MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
+ MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
+ MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
+};
+
+static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+{
+ if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
+ return 0;
+ return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+}
+
#endif /* MLX5_DEVICE_H */
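
Editor's note: two of the helpers added above are pure arithmetic: mlx5_to_sw_pkey_sz() maps the encoded P_Key table size (0..5) to 128..4096 entries, and get_cqe_l4_hdr_type() extracts bits [6:4] of the CQE's l4_hdr_type_etc byte. The standalone mirror below reproduces only that arithmetic; the real definitions live in this header.

/* Standalone mirror of mlx5_to_sw_pkey_sz() and get_cqe_l4_hdr_type(). */
#include <stdint.h>
#include <stdio.h>

#define MLX5_MIN_PKEY_TABLE_SIZE 128
#define MLX5_MAX_LOG_PKEY_TABLE  5

static uint16_t mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;				/* out of range */
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}

static uint8_t get_cqe_l4_hdr_type(uint8_t l4_hdr_type_etc)
{
	return (l4_hdr_type_etc >> 4) & 0x7;		/* CQE_L4_HDR_TYPE_* */
}

int main(void)
{
	for (int enc = 0; enc <= 6; enc++)
		printf("pkey_sz encoding %d -> %u entries\n",
		       enc, mlx5_to_sw_pkey_sz(enc));

	/* 0x23 -> type 2, i.e. CQE_L4_HDR_TYPE_UDP */
	printf("l4 hdr type: %u\n", get_cqe_l4_hdr_type(0x23));
	return 0;
}
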
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index 163a818..afc78a3 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 166d931..5722d88 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -44,7 +44,6 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/mlx5_ifc.h>
enum {
MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@ enum {
};
enum {
- MLX5_MAX_EQ_NAME = 32
+ MLX5_MAX_IRQ_NAME = 32
};
enum {
@@ -108,6 +107,7 @@ enum {
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
+ MLX5_REG_PVLC = 0x500f,
MLX5_REG_PMLP = 0, /* TBD */
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -150,6 +150,11 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_CLIENT_REREG,
};
+enum mlx5_port_status {
+ MLX5_PORT_UP = 1 << 1,
+ MLX5_PORT_DOWN = 1 << 2,
+};
+
struct mlx5_uuar_info {
struct mlx5_uar *uars;
int num_uars;
@@ -232,6 +237,9 @@ struct mlx5_cmd_stats {
};
struct mlx5_cmd {
+ void *cmd_alloc_buf;
+ dma_addr_t alloc_dma;
+ int alloc_size;
void *cmd_buf;
dma_addr_t dma;
u16 cmdif_rev;
@@ -266,56 +274,7 @@ struct mlx5_cmd {
struct mlx5_port_caps {
int gid_table_len;
int pkey_table_len;
-};
-
-struct mlx5_general_caps {
- u8 log_max_eq;
- u8 log_max_cq;
- u8 log_max_qp;
- u8 log_max_mkey;
- u8 log_max_pd;
- u8 log_max_srq;
- u8 log_max_strq;
- u8 log_max_mrw_sz;
- u8 log_max_bsf_list_size;
- u8 log_max_klm_list_size;
- u32 max_cqes;
- int max_wqes;
- u32 max_eqes;
- u32 max_indirection;
- int max_sq_desc_sz;
- int max_rq_desc_sz;
- int max_dc_sq_desc_sz;
- u64 flags;
- u16 stat_rate_support;
- int log_max_msg;
- int num_ports;
- u8 log_max_ra_res_qp;
- u8 log_max_ra_req_qp;
- int max_srq_wqes;
- int bf_reg_size;
- int bf_regs_per_page;
- struct mlx5_port_caps port[MLX5_MAX_PORTS];
- u8 ext_port_cap[MLX5_MAX_PORTS];
- int max_vf;
- u32 reserved_lkey;
- u8 local_ca_ack_delay;
- u8 log_max_mcg;
- u32 max_qp_mcg;
- int min_page_sz;
- int pd_cap;
- u32 max_qp_counters;
- u32 pkey_table_size;
- u8 log_max_ra_req_dc;
- u8 log_max_ra_res_dc;
- u32 uar_sz;
- u8 min_log_pg_sz;
- u8 log_max_xrcd;
- u16 log_uar_page_sz;
-};
-
-struct mlx5_caps {
- struct mlx5_general_caps gen;
+ u8 ext_port_cap;
};
struct mlx5_cmd_mailbox {
@@ -331,8 +290,6 @@ struct mlx5_buf_list {
struct mlx5_buf {
struct mlx5_buf_list direct;
- struct mlx5_buf_list *page_list;
- int nbufs;
int npages;
int size;
u8 page_shift;
@@ -348,7 +305,6 @@ struct mlx5_eq {
u8 eqn;
int nent;
u64 mask;
- char name[MLX5_MAX_EQ_NAME];
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
@@ -384,6 +340,8 @@ struct mlx5_core_mr {
enum mlx5_res_type {
MLX5_RES_QP,
+ MLX5_RES_SRQ,
+ MLX5_RES_XSRQ,
};
struct mlx5_core_rsc_common {
@@ -393,6 +351,7 @@ struct mlx5_core_rsc_common {
};
struct mlx5_core_srq {
+ struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
int max_gs;
@@ -407,11 +366,10 @@ struct mlx5_core_srq {
struct mlx5_eq_table {
void __iomem *update_ci;
void __iomem *update_arm_ci;
- struct list_head *comp_eq_head;
+ struct list_head comp_eqs_list;
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
- struct msix_entry *msix_arr;
int num_comp_vectors;
/* protect EQs list
*/
@@ -464,9 +422,16 @@ struct mlx5_mr_table {
struct radix_tree_root tree;
};
+struct mlx5_irq_info {
+ cpumask_var_t mask;
+ char name[MLX5_MAX_IRQ_NAME];
+};
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
+ struct msix_entry *msix_arr;
+ struct mlx5_irq_info *irq_info;
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
@@ -517,7 +482,9 @@ struct mlx5_core_dev {
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_caps caps;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
void (*event) (struct mlx5_core_dev *dev,
@@ -526,6 +493,7 @@ struct mlx5_core_dev {
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
+ u32 issi;
};
struct mlx5_db {
@@ -546,6 +514,11 @@ enum {
MLX5_COMP_EQ_SIZE = 1024,
};
+enum {
+ MLX5_PTYS_IB = 1 << 0,
+ MLX5_PTYS_EN = 1 << 2,
+};
+
struct mlx5_db_pgdir {
struct list_head list;
DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -581,13 +554,44 @@ struct mlx5_pas {
u8 log_sz;
};
+enum port_state_policy {
+ MLX5_AAA_000
+};
+
+enum phy_port_state {
+ MLX5_AAA_111
+};
+
+struct mlx5_hca_vport_context {
+ u32 field_select;
+ bool sm_virt_aware;
+ bool has_smi;
+ bool has_raw;
+ enum port_state_policy policy;
+ enum phy_port_state phys_state;
+ enum ib_port_state vport_state;
+ u8 port_physical_state;
+ u64 sys_image_guid;
+ u64 port_guid;
+ u64 node_guid;
+ u32 cap_mask1;
+ u32 cap_mask1_perm;
+ u32 cap_mask2;
+ u32 cap_mask2_perm;
+ u16 lid;
+ u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
+ u8 lmc;
+ u8 subnet_timeout;
+ u16 sm_lid;
+ u8 sm_sl;
+ u16 qkey_violation_counter;
+ u16 pkey_violation_counter;
+ bool grh_required;
+};
+
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
- if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
return buf->direct.buf + offset;
- else
- return buf->page_list[offset >> PAGE_SHIFT].buf +
- (offset & (PAGE_SIZE - 1));
}
extern struct workqueue_struct *mlx5_core_wq;
@@ -651,8 +655,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -662,19 +666,21 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen);
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
@@ -693,7 +699,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
@@ -722,6 +728,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -730,7 +737,32 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask);
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port);
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -777,16 +809,25 @@ enum {
MAX_MR_CACHE_ENTRIES = 16,
};
+enum {
+ MLX5_INTERFACE_PROTOCOL_IB = 0,
+ MLX5_INTERFACE_PROTOCOL_ETH = 1,
+};
+
struct mlx5_interface {
void * (*add)(struct mlx5_core_dev *dev);
void (*remove)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
+ void * (*get_dev)(void *context);
+ int protocol;
struct list_head list;
};
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
struct mlx5_profile {
u64 mask;
@@ -797,4 +838,14 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
+static inline int mlx5_get_gid_table_len(u16 param)
+{
+ if (param > 4) {
+ pr_warn("gid table length is zero\n");
+ return 0;
+ }
+
+ return 8 * (1 << param);
+}
+
#endif /* MLX5_DRIVER_H */
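
Editor's note: mlx5_get_gid_table_len() decodes the reported GID table size: encodings 0..4 correspond to the MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_* values defined later in mlx5_ifc.h (8 to 128 entries), anything larger is rejected. The standalone mirror below keeps the same arithmetic but uses a clearer warning message than the kernel helper.

/* Standalone mirror of mlx5_get_gid_table_len(). */
#include <stdint.h>
#include <stdio.h>

static int mlx5_get_gid_table_len(uint16_t param)
{
	if (param > 4) {
		fprintf(stderr, "unsupported gid table length encoding %u\n",
			param);
		return 0;
	}
	return 8 * (1 << param);
}

int main(void)
{
	for (uint16_t enc = 0; enc <= 5; enc++)
		printf("encoding %u -> %d GID entries\n",
		       enc, mlx5_get_gid_table_len(enc));
	return 0;
}
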
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
new file mode 100644
index 0000000..5f922c6
--- /dev/null
+++ b/include/linux/mlx5/flow_table.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_FLOW_TABLE_H
+#define MLX5_FLOW_TABLE_H
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_flow_table_group {
+ u8 log_sz;
+ u8 match_criteria_enable;
+ u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group);
+void mlx5_destroy_flow_table(void *flow_table);
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index);
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+u32 mlx5_get_flow_table_id(void *flow_table);
+
+#endif /* MLX5_FLOW_TABLE_H */
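
Editor's note: the new header is a thin, opaque-handle API: the caller describes one or more flow groups (a log2 size plus which match fields the group keys on), creates a table, then adds entries against it. Below is a hedged kernel-context sketch of that call sequence, not code from this commit and not compilable on its own. The level, group sizing, error conventions and the on-stack scratch buffers are illustrative assumptions; MLX5_FLOW_TABLE_TYPE_NIC_RCV and MLX5_MATCH_OUTER_HEADERS come from mlx5/device.h, and the flow_context layout name is assumed to exist in mlx5_ifc.h.

/* Hedged kernel-context sketch of the flow table API call sequence. */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/flow_table.h>

static int example_setup_rx_flow_table(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_table_group group = {
		.log_sz			= 4,	/* room for 16 flows: assumption */
		.match_criteria_enable	= MLX5_MATCH_OUTER_HEADERS,
		/* .match_criteria[] would select e.g. the outer dmac here */
	};
	/* real code would allocate these buffers rather than use the stack */
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)] = {};
	u32 flow_context[MLX5_ST_SZ_DW(flow_context)] = {};
	u32 flow_index;
	void *ft;
	int err;

	ft = mlx5_create_flow_table(dev, 0 /* level */,
				    MLX5_FLOW_TABLE_TYPE_NIC_RCV,
				    1 /* num_groups */, &group);
	if (!ft)			/* error convention assumed */
		return -ENOMEM;

	err = mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
					match_criteria, flow_context,
					&flow_index);
	if (err) {
		mlx5_destroy_flow_table(ft);
		return err;
	}

	pr_debug("flow table id %u, first entry at index %u\n",
		 mlx5_get_flow_table_id(ft), flow_index);

	mlx5_del_flow_table_entry(ft, flow_index);
	mlx5_destroy_flow_table(ft);
	return 0;
}
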
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 5f48b8f..6d2f6fe 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -28,12 +28,45 @@
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
- */
-
+*/
#ifndef MLX5_IFC_H
#define MLX5_IFC_H
enum {
+ MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0,
+ MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1,
+ MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2,
+ MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3,
+ MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13,
+ MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14,
+ MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c,
+ MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d,
+ MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4,
+ MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5,
+ MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7,
+ MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc,
+ MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10,
+ MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11,
+ MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12,
+ MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8,
+ MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9,
+ MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
+ MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
+ MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b,
+ MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
+ MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
+ MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb
+};
+
+enum {
+ MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
+ MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
+ MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
+ MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
+};
+
+enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -43,6 +76,8 @@ enum {
MLX5_CMD_OP_QUERY_PAGES = 0x107,
MLX5_CMD_OP_MANAGE_PAGES = 0x108,
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
+ MLX5_CMD_OP_QUERY_ISSI = 0x10a,
+ MLX5_CMD_OP_SET_ISSI = 0x10b,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -66,6 +101,7 @@ enum {
MLX5_CMD_OP_2ERR_QP = 0x507,
MLX5_CMD_OP_2RST_QP = 0x50a,
MLX5_CMD_OP_QUERY_QP = 0x50b,
+ MLX5_CMD_OP_SQD_RTS_QP = 0x50c,
MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
MLX5_CMD_OP_CREATE_PSV = 0x600,
MLX5_CMD_OP_DESTROY_PSV = 0x601,
@@ -73,7 +109,10 @@ enum {
MLX5_CMD_OP_DESTROY_SRQ = 0x701,
MLX5_CMD_OP_QUERY_SRQ = 0x702,
MLX5_CMD_OP_ARM_RQ = 0x703,
- MLX5_CMD_OP_RESIZE_SRQ = 0x704,
+ MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706,
+ MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707,
+ MLX5_CMD_OP_ARM_XRC_SRQ = 0x708,
MLX5_CMD_OP_CREATE_DCT = 0x710,
MLX5_CMD_OP_DESTROY_DCT = 0x711,
MLX5_CMD_OP_DRAIN_DCT = 0x712,
@@ -85,8 +124,12 @@ enum {
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
- MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760,
+ MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760,
MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
+ MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -98,7 +141,7 @@ enum {
MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -106,23 +149,22 @@ enum {
MLX5_CMD_OP_NOP = 0x80d,
MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
- MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
- MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813,
- MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
- MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
- MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
- MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
- MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
- MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
+ MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822,
+ MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828,
+ MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
+ MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
MLX5_CMD_OP_QUERY_TIR = 0x903,
- MLX5_CMD_OP_CREATE_TIS = 0x912,
- MLX5_CMD_OP_MODIFY_TIS = 0x913,
- MLX5_CMD_OP_DESTROY_TIS = 0x914,
- MLX5_CMD_OP_QUERY_TIS = 0x915,
MLX5_CMD_OP_CREATE_SQ = 0x904,
MLX5_CMD_OP_MODIFY_SQ = 0x905,
MLX5_CMD_OP_DESTROY_SQ = 0x906,
@@ -135,9 +177,430 @@ enum {
MLX5_CMD_OP_MODIFY_RMP = 0x90d,
MLX5_CMD_OP_DESTROY_RMP = 0x90e,
MLX5_CMD_OP_QUERY_RMP = 0x90f,
- MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
- MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
- MLX5_CMD_OP_MAX = 0x911
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_MODIFY_TIS = 0x913,
+ MLX5_CMD_OP_DESTROY_TIS = 0x914,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_RQT = 0x916,
+ MLX5_CMD_OP_MODIFY_RQT = 0x917,
+ MLX5_CMD_OP_DESTROY_RQT = 0x918,
+ MLX5_CMD_OP_QUERY_RQT = 0x919,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
+ MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934,
+ MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+ u8 outer_dmac[0x1];
+ u8 outer_smac[0x1];
+ u8 outer_ether_type[0x1];
+ u8 reserved_0[0x1];
+ u8 outer_first_prio[0x1];
+ u8 outer_first_cfi[0x1];
+ u8 outer_first_vid[0x1];
+ u8 reserved_1[0x1];
+ u8 outer_second_prio[0x1];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0x1];
+ u8 reserved_2[0x1];
+ u8 outer_sip[0x1];
+ u8 outer_dip[0x1];
+ u8 outer_frag[0x1];
+ u8 outer_ip_protocol[0x1];
+ u8 outer_ip_ecn[0x1];
+ u8 outer_ip_dscp[0x1];
+ u8 outer_udp_sport[0x1];
+ u8 outer_udp_dport[0x1];
+ u8 outer_tcp_sport[0x1];
+ u8 outer_tcp_dport[0x1];
+ u8 outer_tcp_flags[0x1];
+ u8 outer_gre_protocol[0x1];
+ u8 outer_gre_key[0x1];
+ u8 outer_vxlan_vni[0x1];
+ u8 reserved_3[0x5];
+ u8 source_eswitch_port[0x1];
+
+ u8 inner_dmac[0x1];
+ u8 inner_smac[0x1];
+ u8 inner_ether_type[0x1];
+ u8 reserved_4[0x1];
+ u8 inner_first_prio[0x1];
+ u8 inner_first_cfi[0x1];
+ u8 inner_first_vid[0x1];
+ u8 reserved_5[0x1];
+ u8 inner_second_prio[0x1];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0x1];
+ u8 reserved_6[0x1];
+ u8 inner_sip[0x1];
+ u8 inner_dip[0x1];
+ u8 inner_frag[0x1];
+ u8 inner_ip_protocol[0x1];
+ u8 inner_ip_ecn[0x1];
+ u8 inner_ip_dscp[0x1];
+ u8 inner_udp_sport[0x1];
+ u8 inner_udp_dport[0x1];
+ u8 inner_tcp_sport[0x1];
+ u8 inner_tcp_dport[0x1];
+ u8 inner_tcp_flags[0x1];
+ u8 reserved_7[0x9];
+
+ u8 reserved_8[0x40];
+};
+
+struct mlx5_ifc_flow_table_prop_layout_bits {
+ u8 ft_support[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x2];
+ u8 log_max_ft_size[0x6];
+ u8 reserved_2[0x10];
+ u8 max_ft_level[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x18];
+ u8 log_max_ft_num[0x8];
+
+ u8 reserved_5[0x18];
+ u8 log_max_destination[0x8];
+
+ u8 reserved_6[0x18];
+ u8 log_max_flow[0x8];
+
+ u8 reserved_7[0x40];
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+ u8 send[0x1];
+ u8 receive[0x1];
+ u8 write[0x1];
+ u8 read[0x1];
+ u8 reserved_0[0x1];
+ u8 srq_receive[0x1];
+ u8 reserved_1[0x1a];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 ethertype[0x10];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 first_prio[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vid[0xc];
+
+ u8 ip_protocol[0x8];
+ u8 ip_dscp[0x6];
+ u8 ip_ecn[0x2];
+ u8 vlan_tag[0x1];
+ u8 reserved_0[0x1];
+ u8 frag[0x1];
+ u8 reserved_1[0x4];
+ u8 tcp_flags[0x9];
+
+ u8 tcp_sport[0x10];
+ u8 tcp_dport[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 udp_sport[0x10];
+ u8 udp_dport[0x10];
+
+ u8 src_ip[4][0x20];
+
+ u8 dst_ip[4][0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x10];
+ u8 source_port[0x10];
+
+ u8 outer_second_prio[0x3];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0xc];
+ u8 inner_second_prio[0x3];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0xc];
+
+ u8 outer_second_vlan_tag[0x1];
+ u8 inner_second_vlan_tag[0x1];
+ u8 reserved_2[0xe];
+ u8 gre_protocol[0x10];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 vxlan_vni[0x18];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0xc];
+ u8 outer_ipv6_flow_label[0x14];
+
+ u8 reserved_6[0xc];
+ u8 inner_ipv6_flow_label[0x14];
+
+ u8 reserved_7[0xe0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+ u8 pa_h[0x20];
+
+ u8 pa_l[0x14];
+ u8 reserved_0[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+ u8 hi[0x20];
+
+ u8 lo[0x20];
+};
+
+enum {
+ MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0,
+ MLX5_ADS_STAT_RATE_2_5GBPS = 0x7,
+ MLX5_ADS_STAT_RATE_10GBPS = 0x8,
+ MLX5_ADS_STAT_RATE_30GBPS = 0x9,
+ MLX5_ADS_STAT_RATE_5GBPS = 0xa,
+ MLX5_ADS_STAT_RATE_20GBPS = 0xb,
+ MLX5_ADS_STAT_RATE_40GBPS = 0xc,
+ MLX5_ADS_STAT_RATE_60GBPS = 0xd,
+ MLX5_ADS_STAT_RATE_80GBPS = 0xe,
+ MLX5_ADS_STAT_RATE_120GBPS = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+ u8 fl[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_0[0xe];
+ u8 pkey_index[0x10];
+
+ u8 reserved_1[0x8];
+ u8 grh[0x1];
+ u8 mlid[0x7];
+ u8 rlid[0x10];
+
+ u8 ack_timeout[0x5];
+ u8 reserved_2[0x3];
+ u8 src_addr_index[0x8];
+ u8 reserved_3[0x4];
+ u8 stat_rate[0x4];
+ u8 hop_limit[0x8];
+
+ u8 reserved_4[0x4];
+ u8 tclass[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+
+ u8 reserved_5[0x4];
+ u8 f_dscp[0x1];
+ u8 f_ecn[0x1];
+ u8 reserved_6[0x1];
+ u8 f_eth_prio[0x1];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+ u8 udp_sport[0x10];
+
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 sl[0x4];
+ u8 port[0x8];
+ u8 rmac_47_32[0x10];
+
+ u8 rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+ u8 reserved_0[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+ u8 reserved_1[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+ u8 reserved_2[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+ u8 reserved_3[0x7200];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 csum_cap[0x1];
+ u8 vlan_cap[0x1];
+ u8 lro_cap[0x1];
+ u8 lro_psh_flag[0x1];
+ u8 lro_time_stamp[0x1];
+ u8 reserved_0[0x6];
+ u8 max_lso_cap[0x5];
+ u8 reserved_1[0x4];
+ u8 rss_ind_tbl_cap[0x4];
+ u8 reserved_2[0x3];
+ u8 tunnel_lso_const_out_ip_id[0x1];
+ u8 reserved_3[0x2];
+ u8 tunnel_statless_gre[0x1];
+ u8 tunnel_stateless_vxlan[0x1];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x10];
+ u8 lro_min_mss_size[0x10];
+
+ u8 reserved_6[0x120];
+
+ u8 lro_timer_supported_periods[4][0x20];
+
+ u8 reserved_7[0x600];
+};
+
+struct mlx5_ifc_roce_cap_bits {
+ u8 roce_apm[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+
+ u8 reserved_2[0xc];
+ u8 l3_type[0x4];
+ u8 reserved_3[0x8];
+ u8 roce_version[0x8];
+
+ u8 reserved_4[0x10];
+ u8 r_roce_dest_udp_port[0x10];
+
+ u8 r_roce_max_src_udp_port[0x10];
+ u8 r_roce_min_src_udp_port[0x10];
+
+ u8 reserved_5[0x10];
+ u8 roce_address_table_size[0x10];
+
+ u8 reserved_6[0x700];
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100,
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+ u8 reserved_0[0x40];
+
+ u8 atomic_req_endianness[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 atomic_operations[0x10];
+
+ u8 reserved_4[0x10];
+ u8 atomic_size_qp[0x10];
+
+ u8 reserved_5[0x10];
+ u8 atomic_size_dc[0x10];
+
+ u8 reserved_6[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ u8 reserved_0[0x40];
+
+ u8 sig[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+ u8 reserved_3[0x720];
+};
+
+enum {
+ MLX5_WQ_TYPE_LINKED_LIST = 0x0,
+ MLX5_WQ_TYPE_CYCLIC = 0x1,
+ MLX5_WQ_TYPE_STRQ = 0x2,
+};
+
+enum {
+ MLX5_WQ_END_PAD_MODE_NONE = 0x0,
+ MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3,
+};
+
+enum {
+ MLX5_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CAP_PORT_TYPE_ETH = 0x1,
};
struct mlx5_ifc_cmd_hca_cap_bits {
@@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_1[0xb];
u8 log_max_qp[0x5];
- u8 log_max_strq_sz[0x8];
- u8 reserved_2[0x3];
- u8 log_max_srqs[0x5];
+ u8 reserved_2[0xb];
+ u8 log_max_srq[0x5];
u8 reserved_3[0x10];
u8 reserved_4[0x8];
@@ -185,123 +647,2112 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pad_cap[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
- u8 reserved_15[0x1d];
+ u8 reserved_15[0xd];
+ u8 gid_table_size[0x10];
- u8 reserved_16[0x6];
+ u8 out_of_seq_cnt[0x1];
+ u8 vport_counters[0x1];
+ u8 reserved_16[0x4];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
- u8 eswitch_owner[0x1];
- u8 reserved_17[0xa];
+ u8 vport_group_manager[0x1];
+ u8 vhca_group_manager[0x1];
+ u8 ib_virt[0x1];
+ u8 eth_virt[0x1];
+ u8 reserved_17[0x1];
+ u8 ets[0x1];
+ u8 nic_flow_table[0x1];
+ u8 reserved_18[0x4];
u8 local_ca_ack_delay[0x5];
- u8 reserved_18[0x8];
+ u8 reserved_19[0x6];
+ u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_19[0x3];
+ u8 reserved_20[0x3];
u8 log_max_msg[0x5];
- u8 reserved_20[0x18];
+ u8 reserved_21[0x18];
u8 stat_rate_support[0x10];
- u8 reserved_21[0x10];
+ u8 reserved_22[0xc];
+ u8 cqe_version[0x4];
- u8 reserved_22[0x10];
+ u8 compact_address_vector[0x1];
+ u8 reserved_23[0xe];
+ u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
- u8 reserved_23[0x1];
+ u8 reserved_24[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
- u8 reserved_24[0x1];
+ u8 reserved_25[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
- u8 dc[0x1];
- u8 reserved_25[0x2];
+ u8 dct[0x1];
+ u8 reserved_26[0x1];
+ u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
- u8 rsz_srq[0x1];
+ u8 reserved_27[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 sniffer_rule_flow[0x1];
- u8 sniffer_rule_vport[0x1];
- u8 sniffer_rule_phy[0x1];
- u8 reserved_26[0x1];
+ u8 reserved_28[0x3];
+ u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
- u8 reserved_27[0x3];
+ u8 reserved_29[0x1];
+ u8 scqe_break_moderation[0x1];
+ u8 reserved_30[0x1];
u8 cd[0x1];
- u8 reserved_28[0x1];
+ u8 reserved_31[0x1];
u8 apm[0x1];
- u8 reserved_29[0x7];
+ u8 reserved_32[0x7];
u8 qkv[0x1];
u8 pkv[0x1];
- u8 reserved_30[0x4];
+ u8 reserved_33[0x4];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_31[0xa];
+ u8 reserved_34[0xa];
u8 uar_sz[0x6];
- u8 reserved_32[0x8];
+ u8 reserved_35[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_33[0xa];
+ u8 reserved_36[0x1];
+ u8 pad_tx_eth_packet[0x1];
+ u8 reserved_37[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_34[0x10];
+ u8 reserved_38[0x10];
- u8 reserved_35[0x10];
+ u8 reserved_39[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_36[0x10];
+ u8 reserved_40[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_37[0x10];
+ u8 reserved_41[0x10];
u8 max_wqe_sz_sq_dc[0x10];
- u8 reserved_38[0x7];
+ u8 reserved_42[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_39[0x18];
+ u8 reserved_43[0x18];
u8 log_max_mcg[0x8];
- u8 reserved_40[0xb];
+ u8 reserved_44[0x3];
+ u8 log_max_transport_domain[0x5];
+ u8 reserved_45[0x3];
u8 log_max_pd[0x5];
- u8 reserved_41[0xb];
+ u8 reserved_46[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_42[0x20];
+ u8 reserved_47[0x20];
- u8 reserved_43[0x3];
+ u8 reserved_48[0x3];
u8 log_max_rq[0x5];
- u8 reserved_44[0x3];
+ u8 reserved_49[0x3];
u8 log_max_sq[0x5];
- u8 reserved_45[0x3];
+ u8 reserved_50[0x3];
u8 log_max_tir[0x5];
- u8 reserved_46[0x3];
+ u8 reserved_51[0x3];
u8 log_max_tis[0x5];
- u8 reserved_47[0x13];
- u8 log_max_rq_per_tir[0x5];
- u8 reserved_48[0x3];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_52[0x2];
+ u8 log_max_rmp[0x5];
+ u8 reserved_53[0x3];
+ u8 log_max_rqt[0x5];
+ u8 reserved_54[0x3];
+ u8 log_max_rqt_size[0x5];
+ u8 reserved_55[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_49[0xe0];
+ u8 reserved_56[0x3];
+ u8 log_max_stride_sz_rq[0x5];
+ u8 reserved_57[0x3];
+ u8 log_min_stride_sz_rq[0x5];
+ u8 reserved_58[0x3];
+ u8 log_max_stride_sz_sq[0x5];
+ u8 reserved_59[0x3];
+ u8 log_min_stride_sz_sq[0x5];
- u8 reserved_50[0x10];
+ u8 reserved_60[0x1b];
+ u8 log_max_wq_sz[0x5];
+
+ u8 reserved_61[0xa0];
+
+ u8 reserved_62[0x3];
+ u8 log_max_l2_table[0x5];
+ u8 reserved_63[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_51[0x100];
+ u8 reserved_64[0x100];
- u8 reserved_52[0x1f];
+ u8 reserved_65[0x1f];
u8 cqe_zip[0x1];
u8 cqe_zip_timeout[0x10];
u8 cqe_zip_max_num[0x10];
- u8 reserved_53[0x220];
+ u8 reserved_66[0x220];
+};
+
+enum {
+ MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1,
+ MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2,
+};
+
+struct mlx5_ifc_dest_format_struct_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 reserved_0[0x20];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+
+ u8 reserved_0[0xa00];
+};
+
+enum {
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+ u8 l3_prot_type[0x1];
+ u8 l4_prot_type[0x1];
+ u8 selected_fields[0x1e];
+};
+
+enum {
+ MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0,
+ MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1,
+};
+
+enum {
+ MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0,
+ MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+ u8 wq_type[0x4];
+ u8 wq_signature[0x1];
+ u8 end_padding_mode[0x2];
+ u8 cd_slave[0x1];
+ u8 reserved_0[0x18];
+
+ u8 hds_skip_first_sge[0x1];
+ u8 log2_hds_buf_size[0x3];
+ u8 reserved_1[0x7];
+ u8 page_offset[0x5];
+ u8 lwm[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x8];
+ u8 uar_page[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 hw_counter[0x20];
+
+ u8 sw_counter[0x20];
+
+ u8 reserved_4[0xc];
+ u8 log_wq_stride[0x4];
+ u8 reserved_5[0x3];
+ u8 log_wq_pg_sz[0x5];
+ u8 reserved_6[0x3];
+ u8 log_wq_sz[0x5];
+
+ u8 reserved_7[0x4e0];
+
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
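+
+/*
+ * The wq layout above ends in a zero-length pas[] array: a variable
+ * number of page-address (pas) entries follows the fixed part, so the
+ * contexts that embed a wq (rqc, sqc and rmpc below) are sized to include
+ * the pages backing the work queue.
+ */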
+
+struct mlx5_ifc_rq_num_bits {
+ u8 reserved_0[0x8];
+ u8 rq_num[0x18];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+ u8 reserved_0[0x10];
+ u8 mac_addr_47_32[0x10];
+
+ u8 mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+ u8 reserved_0[0xa0];
+
+ u8 min_time_between_cnps[0x20];
+
+ u8 reserved_1[0x12];
+ u8 cnp_dscp[0x6];
+ u8 reserved_2[0x5];
+ u8 cnp_802p_prio[0x3];
+
+ u8 reserved_3[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+ u8 reserved_0[0x60];
+
+ u8 reserved_1[0x4];
+ u8 clamp_tgt_rate[0x1];
+ u8 reserved_2[0x3];
+ u8 clamp_tgt_rate_after_time_inc[0x1];
+ u8 reserved_3[0x17];
+
+ u8 reserved_4[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_5[0xe0];
+
+ u8 rate_to_set_on_first_cnp[0x20];
+
+ u8 dce_tcp_g[0x20];
+
+ u8 dce_tcp_rtt[0x20];
+
+ u8 rate_reduce_monitor_period[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 initial_alpha_value[0x20];
+
+ u8 reserved_7[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+ u8 reserved_0[0x80];
+
+ u8 rppp_max_rps[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_1[0x640];
+};
+
+enum {
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+ u8 resize_field_select[0x20];
+};
+
+enum {
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+ u8 modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+ u8 field_select_r_roce_np[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+ u8 field_select_r_roce_rp[0x20];
+};
+
+enum {
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+ u8 field_select_8021qaurp[0x20];
+};
+
+struct mlx5_ifc_phys_layer_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 symbol_errors_high[0x20];
+
+ u8 symbol_errors_low[0x20];
+
+ u8 sync_headers_errors_high[0x20];
+
+ u8 sync_headers_errors_low[0x20];
+
+ u8 edpl_bip_errors_lane0_high[0x20];
+
+ u8 edpl_bip_errors_lane0_low[0x20];
+
+ u8 edpl_bip_errors_lane1_high[0x20];
+
+ u8 edpl_bip_errors_lane1_low[0x20];
+
+ u8 edpl_bip_errors_lane2_high[0x20];
+
+ u8 edpl_bip_errors_lane2_low[0x20];
+
+ u8 edpl_bip_errors_lane3_high[0x20];
+
+ u8 edpl_bip_errors_lane3_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+ u8 rs_fec_corrected_blocks_high[0x20];
+
+ u8 rs_fec_corrected_blocks_low[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_high[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_low[0x20];
+
+ u8 rs_fec_no_errors_blocks_high[0x20];
+
+ u8 rs_fec_no_errors_blocks_low[0x20];
+
+ u8 rs_fec_single_error_blocks_high[0x20];
+
+ u8 rs_fec_single_error_blocks_low[0x20];
+
+ u8 rs_fec_corrected_symbols_total_high[0x20];
+
+ u8 rs_fec_corrected_symbols_total_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_low[0x20];
+
+ u8 link_down_events[0x20];
+
+ u8 successful_recovery_events[0x20];
+
+ u8 reserved_0[0x180];
+};
+
+struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+ u8 transmit_queue_high[0x20];
+
+ u8 transmit_queue_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+ u8 rx_octets_high[0x20];
+
+ u8 rx_octets_low[0x20];
+
+ u8 reserved_0[0xc0];
+
+ u8 rx_frames_high[0x20];
+
+ u8 rx_frames_low[0x20];
+
+ u8 tx_octets_high[0x20];
+
+ u8 tx_octets_low[0x20];
+
+ u8 reserved_1[0xc0];
+
+ u8 tx_frames_high[0x20];
+
+ u8 tx_frames_low[0x20];
+
+ u8 rx_pause_high[0x20];
+
+ u8 rx_pause_low[0x20];
+
+ u8 rx_pause_duration_high[0x20];
+
+ u8 rx_pause_duration_low[0x20];
+
+ u8 tx_pause_high[0x20];
+
+ u8 tx_pause_low[0x20];
+
+ u8 tx_pause_duration_high[0x20];
+
+ u8 tx_pause_duration_low[0x20];
+
+ u8 rx_pause_transition_high[0x20];
+
+ u8 rx_pause_transition_low[0x20];
+
+ u8 reserved_2[0x400];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+ u8 port_transmit_wait_high[0x20];
+
+ u8 port_transmit_wait_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+ u8 dot3stats_alignment_errors_high[0x20];
+
+ u8 dot3stats_alignment_errors_low[0x20];
+
+ u8 dot3stats_fcs_errors_high[0x20];
+
+ u8 dot3stats_fcs_errors_low[0x20];
+
+ u8 dot3stats_single_collision_frames_high[0x20];
+
+ u8 dot3stats_single_collision_frames_low[0x20];
+
+ u8 dot3stats_multiple_collision_frames_high[0x20];
+
+ u8 dot3stats_multiple_collision_frames_low[0x20];
+
+ u8 dot3stats_sqe_test_errors_high[0x20];
+
+ u8 dot3stats_sqe_test_errors_low[0x20];
+
+ u8 dot3stats_deferred_transmissions_high[0x20];
+
+ u8 dot3stats_deferred_transmissions_low[0x20];
+
+ u8 dot3stats_late_collisions_high[0x20];
+
+ u8 dot3stats_late_collisions_low[0x20];
+
+ u8 dot3stats_excessive_collisions_high[0x20];
+
+ u8 dot3stats_excessive_collisions_low[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_low[0x20];
+
+ u8 dot3stats_carrier_sense_errors_high[0x20];
+
+ u8 dot3stats_carrier_sense_errors_low[0x20];
+
+ u8 dot3stats_frame_too_longs_high[0x20];
+
+ u8 dot3stats_frame_too_longs_low[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_low[0x20];
+
+ u8 dot3stats_symbol_errors_high[0x20];
+
+ u8 dot3stats_symbol_errors_low[0x20];
+
+ u8 dot3control_in_unknown_opcodes_high[0x20];
+
+ u8 dot3control_in_unknown_opcodes_low[0x20];
+
+ u8 dot3in_pause_frames_high[0x20];
+
+ u8 dot3in_pause_frames_low[0x20];
+
+ u8 dot3out_pause_frames_high[0x20];
+
+ u8 dot3out_pause_frames_low[0x20];
+
+ u8 reserved_0[0x3c0];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+ u8 ether_stats_drop_events_high[0x20];
+
+ u8 ether_stats_drop_events_low[0x20];
+
+ u8 ether_stats_octets_high[0x20];
+
+ u8 ether_stats_octets_low[0x20];
+
+ u8 ether_stats_pkts_high[0x20];
+
+ u8 ether_stats_pkts_low[0x20];
+
+ u8 ether_stats_broadcast_pkts_high[0x20];
+
+ u8 ether_stats_broadcast_pkts_low[0x20];
+
+ u8 ether_stats_multicast_pkts_high[0x20];
+
+ u8 ether_stats_multicast_pkts_low[0x20];
+
+ u8 ether_stats_crc_align_errors_high[0x20];
+
+ u8 ether_stats_crc_align_errors_low[0x20];
+
+ u8 ether_stats_undersize_pkts_high[0x20];
+
+ u8 ether_stats_undersize_pkts_low[0x20];
+
+ u8 ether_stats_oversize_pkts_high[0x20];
+
+ u8 ether_stats_oversize_pkts_low[0x20];
+
+ u8 ether_stats_fragments_high[0x20];
+
+ u8 ether_stats_fragments_low[0x20];
+
+ u8 ether_stats_jabbers_high[0x20];
+
+ u8 ether_stats_jabbers_low[0x20];
+
+ u8 ether_stats_collisions_high[0x20];
+
+ u8 ether_stats_collisions_low[0x20];
+
+ u8 ether_stats_pkts64octets_high[0x20];
+
+ u8 ether_stats_pkts64octets_low[0x20];
+
+ u8 ether_stats_pkts65to127octets_high[0x20];
+
+ u8 ether_stats_pkts65to127octets_low[0x20];
+
+ u8 ether_stats_pkts128to255octets_high[0x20];
+
+ u8 ether_stats_pkts128to255octets_low[0x20];
+
+ u8 ether_stats_pkts256to511octets_high[0x20];
+
+ u8 ether_stats_pkts256to511octets_low[0x20];
+
+ u8 ether_stats_pkts512to1023octets_high[0x20];
+
+ u8 ether_stats_pkts512to1023octets_low[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_high[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_low[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_high[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_low[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_high[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_low[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_high[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_low[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_high[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_low[0x20];
+
+ u8 reserved_0[0x280];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+ u8 if_in_octets_high[0x20];
+
+ u8 if_in_octets_low[0x20];
+
+ u8 if_in_ucast_pkts_high[0x20];
+
+ u8 if_in_ucast_pkts_low[0x20];
+
+ u8 if_in_discards_high[0x20];
+
+ u8 if_in_discards_low[0x20];
+
+ u8 if_in_errors_high[0x20];
+
+ u8 if_in_errors_low[0x20];
+
+ u8 if_in_unknown_protos_high[0x20];
+
+ u8 if_in_unknown_protos_low[0x20];
+
+ u8 if_out_octets_high[0x20];
+
+ u8 if_out_octets_low[0x20];
+
+ u8 if_out_ucast_pkts_high[0x20];
+
+ u8 if_out_ucast_pkts_low[0x20];
+
+ u8 if_out_discards_high[0x20];
+
+ u8 if_out_discards_low[0x20];
+
+ u8 if_out_errors_high[0x20];
+
+ u8 if_out_errors_low[0x20];
+
+ u8 if_in_multicast_pkts_high[0x20];
+
+ u8 if_in_multicast_pkts_low[0x20];
+
+ u8 if_in_broadcast_pkts_high[0x20];
+
+ u8 if_in_broadcast_pkts_low[0x20];
+
+ u8 if_out_multicast_pkts_high[0x20];
+
+ u8 if_out_multicast_pkts_low[0x20];
+
+ u8 if_out_broadcast_pkts_high[0x20];
+
+ u8 if_out_broadcast_pkts_low[0x20];
+
+ u8 reserved_0[0x480];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+ u8 a_frames_transmitted_ok_high[0x20];
+
+ u8 a_frames_transmitted_ok_low[0x20];
+
+ u8 a_frames_received_ok_high[0x20];
+
+ u8 a_frames_received_ok_low[0x20];
+
+ u8 a_frame_check_sequence_errors_high[0x20];
+
+ u8 a_frame_check_sequence_errors_low[0x20];
+
+ u8 a_alignment_errors_high[0x20];
+
+ u8 a_alignment_errors_low[0x20];
+
+ u8 a_octets_transmitted_ok_high[0x20];
+
+ u8 a_octets_transmitted_ok_low[0x20];
+
+ u8 a_octets_received_ok_high[0x20];
+
+ u8 a_octets_received_ok_low[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_high[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_low[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_high[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_low[0x20];
+
+ u8 a_multicast_frames_received_ok_high[0x20];
+
+ u8 a_multicast_frames_received_ok_low[0x20];
+
+ u8 a_broadcast_frames_received_ok_high[0x20];
+
+ u8 a_broadcast_frames_received_ok_low[0x20];
+
+ u8 a_in_range_length_errors_high[0x20];
+
+ u8 a_in_range_length_errors_low[0x20];
+
+ u8 a_out_of_range_length_field_high[0x20];
+
+ u8 a_out_of_range_length_field_low[0x20];
+
+ u8 a_frame_too_long_errors_high[0x20];
+
+ u8 a_frame_too_long_errors_low[0x20];
+
+ u8 a_symbol_error_during_carrier_high[0x20];
+
+ u8 a_symbol_error_during_carrier_low[0x20];
+
+ u8 a_mac_control_frames_transmitted_high[0x20];
+
+ u8 a_mac_control_frames_transmitted_low[0x20];
+
+ u8 a_mac_control_frames_received_high[0x20];
+
+ u8 a_mac_control_frames_received_low[0x20];
+
+ u8 a_unsupported_opcodes_received_high[0x20];
+
+ u8 a_unsupported_opcodes_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+ u8 reserved_0[0x300];
+};
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+ u8 command_completion_vector[0x20];
+
+ u8 reserved_0[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+ u8 reserved_0[0x18];
+ u8 port_num[0x1];
+ u8 reserved_1[0x3];
+ u8 vl[0x4];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+ u8 event_subtype[0x8];
+ u8 reserved_0[0x8];
+ u8 congestion_level[0x8];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+ u8 reserved_0[0x60];
+
+ u8 gpio_event_hi[0x20];
+
+ u8 gpio_event_lo[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+ u8 reserved_0[0x40];
+
+ u8 port_num[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+ u8 reserved_0[0xe0];
+};
+
+enum {
+ MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
+ MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+ u8 reserved_0[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 syndrome[0x8];
+
+ u8 reserved_3[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 r_key[0x20];
+
+ u8 reserved_0[0x10];
+ u8 packet_len[0x10];
+
+ u8 rdma_op_len[0x20];
+
+ u8 rdma_va[0x40];
+
+ u8 reserved_1[0x5];
+ u8 rdma[0x1];
+ u8 write[0x1];
+ u8 requestor[0x1];
+ u8 qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 reserved_0[0x10];
+ u8 wqe_index[0x10];
+
+ u8 reserved_1[0x10];
+ u8 len[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x5];
+ u8 rdma[0x1];
+ u8 write_read[0x1];
+ u8 requestor[0x1];
+ u8 qpn[0x18];
+};
+
+struct mlx5_ifc_qp_events_bits {
+ u8 reserved_0[0xa0];
+
+ u8 type[0x8];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 cq_number[0x18];
+};
+
+enum {
+ MLX5_QPC_STATE_RST = 0x0,
+ MLX5_QPC_STATE_INIT = 0x1,
+ MLX5_QPC_STATE_RTR = 0x2,
+ MLX5_QPC_STATE_RTS = 0x3,
+ MLX5_QPC_STATE_SQER = 0x4,
+ MLX5_QPC_STATE_ERR = 0x6,
+ MLX5_QPC_STATE_SQD = 0x7,
+ MLX5_QPC_STATE_SUSPENDED = 0x9,
+};
+
+enum {
+ MLX5_QPC_ST_RC = 0x0,
+ MLX5_QPC_ST_UC = 0x1,
+ MLX5_QPC_ST_UD = 0x2,
+ MLX5_QPC_ST_XRC = 0x3,
+ MLX5_QPC_ST_DCI = 0x5,
+ MLX5_QPC_ST_QP0 = 0x7,
+ MLX5_QPC_ST_QP1 = 0x8,
+ MLX5_QPC_ST_RAW_DATAGRAM = 0x9,
+ MLX5_QPC_ST_REG_UMR = 0xc,
+};
+
+enum {
+ MLX5_QPC_PM_STATE_ARMED = 0x0,
+ MLX5_QPC_PM_STATE_REARM = 0x1,
+ MLX5_QPC_PM_STATE_RESERVED = 0x2,
+ MLX5_QPC_PM_STATE_MIGRATED = 0x3,
+};
+
+enum {
+ MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
+ MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
+};
+
+enum {
+ MLX5_QPC_MTU_256_BYTES = 0x1,
+ MLX5_QPC_MTU_512_BYTES = 0x2,
+ MLX5_QPC_MTU_1K_BYTES = 0x3,
+ MLX5_QPC_MTU_2K_BYTES = 0x4,
+ MLX5_QPC_MTU_4K_BYTES = 0x5,
+ MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7,
+};
+
+enum {
+ MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1,
+ MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8,
+};
+
+enum {
+ MLX5_QPC_CS_REQ_DISABLE = 0x0,
+ MLX5_QPC_CS_REQ_UP_TO_32B = 0x11,
+ MLX5_QPC_CS_REQ_UP_TO_64B = 0x22,
+};
+
+enum {
+ MLX5_QPC_CS_RES_DISABLE = 0x0,
+ MLX5_QPC_CS_RES_UP_TO_32B = 0x1,
+ MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
+};
+
+struct mlx5_ifc_qpc_bits {
+ u8 state[0x4];
+ u8 reserved_0[0x4];
+ u8 st[0x8];
+ u8 reserved_1[0x3];
+ u8 pm_state[0x2];
+ u8 reserved_2[0x7];
+ u8 end_padding_mode[0x2];
+ u8 reserved_3[0x2];
+
+ u8 wq_signature[0x1];
+ u8 block_lb_mc[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 reserved_4[0x1];
+ u8 drain_sigerr[0x1];
+ u8 reserved_5[0x2];
+ u8 pd[0x18];
+
+ u8 mtu[0x3];
+ u8 log_msg_max[0x5];
+ u8 reserved_6[0x1];
+ u8 log_rq_size[0x4];
+ u8 log_rq_stride[0x3];
+ u8 no_sq[0x1];
+ u8 log_sq_size[0x4];
+ u8 reserved_7[0x6];
+ u8 rlky[0x1];
+ u8 reserved_8[0x4];
+
+ u8 counter_set_id[0x8];
+ u8 uar_page[0x18];
+
+ u8 reserved_9[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_10[0x3];
+ u8 log_page_size[0x5];
+ u8 remote_qpn[0x18];
+
+ struct mlx5_ifc_ads_bits primary_address_path;
+
+ struct mlx5_ifc_ads_bits secondary_address_path;
+
+ u8 log_ack_req_freq[0x4];
+ u8 reserved_11[0x4];
+ u8 log_sra_max[0x3];
+ u8 reserved_12[0x2];
+ u8 retry_count[0x3];
+ u8 rnr_retry[0x3];
+ u8 reserved_13[0x1];
+ u8 fre[0x1];
+ u8 cur_rnr_retry[0x3];
+ u8 cur_retry_count[0x3];
+ u8 reserved_14[0x5];
+
+ u8 reserved_15[0x20];
+
+ u8 reserved_16[0x8];
+ u8 next_send_psn[0x18];
+
+ u8 reserved_17[0x8];
+ u8 cqn_snd[0x18];
+
+ u8 reserved_18[0x40];
+
+ u8 reserved_19[0x8];
+ u8 last_acked_psn[0x18];
+
+ u8 reserved_20[0x8];
+ u8 ssn[0x18];
+
+ u8 reserved_21[0x8];
+ u8 log_rra_max[0x3];
+ u8 reserved_22[0x1];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 reserved_23[0x1];
+ u8 page_offset[0x6];
+ u8 reserved_24[0x3];
+ u8 cd_slave_receive[0x1];
+ u8 cd_slave_send[0x1];
+ u8 cd_master[0x1];
+
+ u8 reserved_25[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 next_rcv_psn[0x18];
+
+ u8 reserved_26[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_27[0x8];
+ u8 cqn_rcv[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 q_key[0x20];
+
+ u8 reserved_28[0x5];
+ u8 rq_type[0x3];
+ u8 srqn_rmpn[0x18];
+
+ u8 reserved_29[0x8];
+ u8 rmsn[0x18];
+
+ u8 hw_sq_wqebb_counter[0x10];
+ u8 sw_sq_wqebb_counter[0x10];
+
+ u8 hw_rq_counter[0x20];
+
+ u8 sw_rq_counter[0x20];
+
+ u8 reserved_30[0x20];
+
+ u8 reserved_31[0xf];
+ u8 cgs[0x1];
+ u8 cs_req[0x8];
+ u8 cs_res[0x8];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_32[0xc0];
+};
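+
+/*
+ * The qpc layout above is the QP context block reused by the QP
+ * state-transition commands further down (rst2init_qp, rtr2rts_qp,
+ * rts2rts_qp, sqd2rts_qp, sqerr2rts_qp) and returned by query_qp.
+ */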
+
+struct mlx5_ifc_roce_addr_layout_bits {
+ u8 source_l3_address[16][0x8];
+
+ u8 reserved_0[0x3];
+ u8 vlan_valid[0x1];
+ u8 vlan_id[0xc];
+ u8 source_mac_47_32[0x10];
+
+ u8 source_mac_31_0[0x20];
+
+ u8 reserved_1[0x14];
+ u8 roce_l3_type[0x4];
+ u8 roce_version[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+ struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_odp_cap_bits odp_cap;
+ struct mlx5_ifc_atomic_caps_bits atomic_caps;
+ struct mlx5_ifc_roce_cap_bits roce_cap;
+ struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+ struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+ u8 reserved_0[0x8000];
+};
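+
+/*
+ * All HCA capability layouts are folded into this one union; the
+ * set_hca_cap and query_hca_cap commands below carry the union rather
+ * than a bare cmd_hca_cap block, and which member is valid depends on
+ * the capability group selected through the command's op_mod field.
+ */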
+
+enum {
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+};
+
+struct mlx5_ifc_flow_context_bits {
+ u8 reserved_0[0x20];
+
+ u8 group_id[0x20];
+
+ u8 reserved_1[0x8];
+ u8 flow_tag[0x18];
+
+ u8 reserved_2[0x10];
+ u8 action[0x10];
+
+ u8 reserved_3[0x8];
+ u8 destination_list_size[0x18];
+
+ u8 reserved_4[0x160];
+
+ struct mlx5_ifc_fte_match_param_bits match_value;
+
+ u8 reserved_5[0x600];
+
+ struct mlx5_ifc_dest_format_struct_bits destination[0];
+};
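+
+/*
+ * flow_context describes a single flow table entry: match_value is
+ * compared against incoming packets, action is a mask of the
+ * MLX5_FLOW_CONTEXT_ACTION_* values above, and a destination_list_size-
+ * long array of dest_format_struct entries trails the context when the
+ * FWD_DEST action is set.
+ */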
+
+enum {
+ MLX5_XRC_SRQC_STATE_GOOD = 0x0,
+ MLX5_XRC_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+ u8 state[0x4];
+ u8 log_xrc_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_2[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 user_index_equal_xrc_srqn[0x1];
+ u8 reserved_4[0x1];
+ u8 log_page_size[0x6];
+ u8 user_index[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_7[0x40];
+
+ u8 db_record_addr_h[0x20];
+
+ u8 db_record_addr_l[0x1e];
+ u8 reserved_8[0x2];
+
+ u8 reserved_9[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+ u8 packets[0x40];
+
+ u8 octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+ u8 reserved_0[0xc];
+ u8 prio[0x4];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x100];
+
+ u8 reserved_3[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_4[0x3c0];
+};
+
+enum {
+ MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
+ MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
+};
+
+enum {
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+};
+
+enum {
+ MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
+ MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+};
+
+enum {
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
+};
+
+struct mlx5_ifc_tirc_bits {
+ u8 reserved_0[0x20];
+
+ u8 disp_type[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x40];
+
+ u8 reserved_3[0x4];
+ u8 lro_timeout_period_usecs[0x10];
+ u8 lro_enable_mask[0x4];
+ u8 lro_max_ip_payload_size[0x8];
+
+ u8 reserved_4[0x40];
+
+ u8 reserved_5[0x8];
+ u8 inline_rqn[0x18];
+
+ u8 rx_hash_symmetric[0x1];
+ u8 reserved_6[0x1];
+ u8 tunneled_offload_en[0x1];
+ u8 reserved_7[0x5];
+ u8 indirect_table[0x18];
+
+ u8 rx_hash_fn[0x4];
+ u8 reserved_8[0x2];
+ u8 self_lb_block[0x2];
+ u8 transport_domain[0x18];
+
+ u8 rx_hash_toeplitz_key[10][0x20];
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+ u8 reserved_9[0x4c0];
+};
+
+enum {
+ MLX5_SRQC_STATE_GOOD = 0x0,
+ MLX5_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+ u8 state[0x4];
+ u8 log_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 reserved_2[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_3[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x2];
+ u8 log_page_size[0x6];
+ u8 reserved_6[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_9[0x40];
+
+ u8 dbr_addr[0x40];
+
+ u8 reserved_10[0x80];
+};
+
+enum {
+ MLX5_SQC_STATE_RST = 0x0,
+ MLX5_SQC_STATE_RDY = 0x1,
+ MLX5_SQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+ u8 rlky[0x1];
+ u8 cd_master[0x1];
+ u8 fre[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0xa0];
+
+ u8 tis_lst_sz[0x10];
+ u8 reserved_5[0x10];
+
+ u8 reserved_6[0x40];
+
+ u8 reserved_7[0x8];
+ u8 tis_num_0[0x18];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_rqtc_bits {
+ u8 reserved_0[0xa0];
+
+ u8 reserved_1[0x10];
+ u8 rqt_max_size[0x10];
+
+ u8 reserved_2[0x10];
+ u8 rqt_actual_size[0x10];
+
+ u8 reserved_3[0x6a0];
+
+ struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
+};
+
+enum {
+ MLX5_RQC_STATE_RST = 0x0,
+ MLX5_RQC_STATE_RDY = 0x1,
+ MLX5_RQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+ u8 rlky[0x1];
+ u8 reserved_0[0x2];
+ u8 vsd[0x1];
+ u8 mem_rq_type[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_2[0x12];
+
+ u8 reserved_3[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 reserved_5[0x18];
+
+ u8 reserved_6[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_7[0xe0];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_RMPC_STATE_RDY = 0x1,
+ MLX5_RMPC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+ u8 reserved_0[0x8];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x140];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+ u8 reserved_0[0x1f];
+ u8 roce_en[0x1];
+
+ u8 reserved_1[0x760];
+
+ u8 reserved_2[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_3[0xc];
+ u8 allowed_list_size[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+ u8 reserved_4[0x20];
+
+ u8 current_uc_mac_address[0][0x40];
+};
+
+enum {
+ MLX5_MKC_ACCESS_MODE_PA = 0x0,
+ MLX5_MKC_ACCESS_MODE_MTT = 0x1,
+ MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
+};
+
+struct mlx5_ifc_mkc_bits {
+ u8 reserved_0[0x1];
+ u8 free[0x1];
+ u8 reserved_1[0xd];
+ u8 small_fence_on_rdma_read_response[0x1];
+ u8 umr_en[0x1];
+ u8 a[0x1];
+ u8 rw[0x1];
+ u8 rr[0x1];
+ u8 lw[0x1];
+ u8 lr[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_2[0x8];
+
+ u8 qpn[0x18];
+ u8 mkey_7_0[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 length64[0x1];
+ u8 bsf_en[0x1];
+ u8 sync_umr[0x1];
+ u8 reserved_4[0x2];
+ u8 expected_sigerr_count[0x1];
+ u8 reserved_5[0x1];
+ u8 en_rinval[0x1];
+ u8 pd[0x18];
+
+ u8 start_addr[0x40];
+
+ u8 len[0x40];
+
+ u8 bsf_octword_size[0x20];
+
+ u8 reserved_6[0x80];
+
+ u8 translations_octword_size[0x20];
+
+ u8 reserved_7[0x1b];
+ u8 log_page_size[0x5];
+
+ u8 reserved_8[0x20];
+};
+
+struct mlx5_ifc_pkey_bits {
+ u8 reserved_0[0x10];
+ u8 pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+ u8 array128_auto[16][0x8];
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+ u8 field_select[0x20];
+
+ u8 reserved_0[0xe0];
+
+ u8 sm_virt_aware[0x1];
+ u8 has_smi[0x1];
+ u8 has_raw[0x1];
+ u8 grh_required[0x1];
+ u8 reserved_1[0xc];
+ u8 port_physical_state[0x4];
+ u8 vport_state_policy[0x4];
+ u8 port_state[0x4];
+ u8 vport_state[0x4];
+
+ u8 reserved_2[0x20];
+
+ u8 system_image_guid[0x40];
+
+ u8 port_guid[0x40];
+
+ u8 node_guid[0x40];
+
+ u8 cap_mask1[0x20];
+
+ u8 cap_mask1_field_select[0x20];
+
+ u8 cap_mask2[0x20];
+
+ u8 cap_mask2_field_select[0x20];
+
+ u8 reserved_3[0x80];
+
+ u8 lid[0x10];
+ u8 reserved_4[0x4];
+ u8 init_type_reply[0x4];
+ u8 lmc[0x3];
+ u8 subnet_timeout[0x5];
+
+ u8 sm_lid[0x10];
+ u8 sm_sl[0x4];
+ u8 reserved_5[0xc];
+
+ u8 qkey_violation_counter[0x10];
+ u8 pkey_violation_counter[0x10];
+
+ u8 reserved_6[0xca0];
+};
+
+enum {
+ MLX5_EQC_STATUS_OK = 0x0,
+ MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
+};
+
+enum {
+ MLX5_EQC_ST_ARMED = 0x9,
+ MLX5_EQC_ST_FIRED = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x9];
+ u8 ec[0x1];
+ u8 oi[0x1];
+ u8 reserved_1[0x5];
+ u8 st[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_5[0x6];
+
+ u8 reserved_6[0x3];
+ u8 log_eq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x18];
+ u8 intr[0x8];
+
+ u8 reserved_9[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_10[0x18];
+
+ u8 reserved_11[0x60];
+
+ u8 reserved_12[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_13[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_14[0x80];
+};
+
+enum {
+ MLX5_DCTC_STATE_ACTIVE = 0x0,
+ MLX5_DCTC_STATE_DRAINING = 0x1,
+ MLX5_DCTC_STATE_DRAINED = 0x2,
+};
+
+enum {
+ MLX5_DCTC_CS_RES_DISABLE = 0x0,
+ MLX5_DCTC_CS_RES_NA = 0x1,
+ MLX5_DCTC_CS_RES_UP_TO_64B = 0x2,
+};
+
+enum {
+ MLX5_DCTC_MTU_256_BYTES = 0x1,
+ MLX5_DCTC_MTU_512_BYTES = 0x2,
+ MLX5_DCTC_MTU_1K_BYTES = 0x3,
+ MLX5_DCTC_MTU_2K_BYTES = 0x4,
+ MLX5_DCTC_MTU_4K_BYTES = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 rlky[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_4[0xd];
+
+ u8 reserved_5[0x8];
+ u8 cs_res[0x8];
+ u8 reserved_6[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 reserved_7[0x8];
+
+ u8 reserved_8[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_9[0x8];
+ u8 pd[0x18];
+
+ u8 tclass[0x8];
+ u8 reserved_10[0x4];
+ u8 flow_label[0x14];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_11[0x5];
+ u8 mtu[0x3];
+ u8 port[0x8];
+ u8 pkey_index[0x10];
+
+ u8 reserved_12[0x8];
+ u8 my_addr_index[0x8];
+ u8 reserved_13[0x8];
+ u8 hop_limit[0x8];
+
+ u8 dc_access_key_violation_count[0x20];
+
+ u8 reserved_14[0x14];
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+
+ u8 reserved_15[0x40];
+};
+
+enum {
+ MLX5_CQC_STATUS_OK = 0x0,
+ MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9,
+ MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa,
+};
+
+enum {
+ MLX5_CQC_CQE_SZ_64_BYTES = 0x0,
+ MLX5_CQC_CQE_SZ_128_BYTES = 0x1,
+};
+
+enum {
+ MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6,
+ MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9,
+ MLX5_CQC_ST_FIRED = 0xa,
+};
+
+struct mlx5_ifc_cqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x4];
+ u8 cqe_sz[0x3];
+ u8 cc[0x1];
+ u8 reserved_1[0x1];
+ u8 scqe_break_moderation_en[0x1];
+ u8 oi[0x1];
+ u8 reserved_2[0x2];
+ u8 cqe_zip_en[0x1];
+ u8 mini_cqe_res_format[0x2];
+ u8 st[0x4];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_6[0x6];
+
+ u8 reserved_7[0x3];
+ u8 log_cq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_8[0x4];
+ u8 cq_period[0xc];
+ u8 cq_max_count[0x10];
+
+ u8 reserved_9[0x18];
+ u8 c_eqn[0x8];
+
+ u8 reserved_10[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_11[0x18];
+
+ u8 reserved_12[0x20];
+
+ u8 reserved_13[0x8];
+ u8 last_notified_index[0x18];
+
+ u8 reserved_14[0x8];
+ u8 last_solicit_index[0x18];
+
+ u8 reserved_15[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_16[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_17[0x40];
+
+ u8 dbr_addr[0x40];
+};
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+ struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ u8 reserved_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 ieee_vendor_id[0x18];
+
+ u8 reserved_2[0x10];
+ u8 vsd_vendor_id[0x10];
+
+ u8 vsd[208][0x8];
+
+ u8 vsd_contd_psid[16][0x8];
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+ struct mlx5_ifc_modify_field_select_bits modify_field_select;
+ struct mlx5_ifc_resize_field_select_bits resize_field_select;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+ struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+ struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+ struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ u8 reserved_0[0x7c0];
+};
+
+union mlx5_ifc_event_auto_bits {
+ struct mlx5_ifc_comp_event_bits comp_event;
+ struct mlx5_ifc_dct_events_bits dct_events;
+ struct mlx5_ifc_qp_events_bits qp_events;
+ struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+ struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+ struct mlx5_ifc_cq_error_bits cq_error;
+ struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+ struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+ struct mlx5_ifc_gpio_event_bits gpio_event;
+ struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+ struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+ struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+ u8 reserved_0[0xe0];
+};
+
+struct mlx5_ifc_health_buffer_bits {
+ u8 reserved_0[0x100];
+
+ u8 assert_existptr[0x20];
+
+ u8 assert_callra[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 fw_version[0x20];
+
+ u8 hw_id[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 irisc_index[0x8];
+ u8 synd[0x8];
+ u8 ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+ u8 no_lb[0x1];
+ u8 reserved_0[0x7];
+ u8 port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+};
+
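+/*
+ * Command mailbox layouts: every *_in_bits struct below starts with a
+ * 16-bit opcode and a 16-bit op_mod in its first two dwords, and every
+ * *_out_bits struct starts with an 8-bit status followed by a 32-bit
+ * syndrome reported by the firmware on failure.
+ */
+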
+struct mlx5_ifc_teardown_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 profile[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0,
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x6];
+ u8 demux_mode[0x2];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_6[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
};
struct mlx5_ifc_set_hca_cap_in_bits {
@@ -313,10 +2764,653 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_2[0x40];
- struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+ union mlx5_ifc_hca_cap_union_bits capability;
};
-struct mlx5_ifc_query_hca_cap_in_bits {
+struct mlx5_ifc_set_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_rts2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_query_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0,
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 admin_state[0x4];
+ u8 state[0x4];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_vport_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_traffic_counter_bits received_errors;
+
+ struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+ u8 reserved_2[0xa00];
+};
+
+enum {
+ MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x60];
+
+ u8 clear[0x1];
+ u8 reserved_4[0x1f];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 resd_lkey[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_3[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 rx_write_requests[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 rx_read_requests[0x20];
+
+ u8 reserved_3[0x20];
+
+ u8 rx_atomic_requests[0x20];
+
+ u8 reserved_4[0x20];
+
+ u8 rx_dct_connect[0x20];
+
+ u8 reserved_5[0x20];
+
+ u8 out_of_buffer[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 out_of_sequence[0x20];
+
+ u8 reserved_7[0x620];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x80];
+
+ u8 clear[0x1];
+ u8 reserved_3[0x1f];
+
+ u8 reserved_4[0x18];
+ u8 counter_set_id[0x8];
+};
+
+struct mlx5_ifc_query_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+};
+
+enum {
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1,
+ MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2,
+ MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3,
+};
+
+struct mlx5_ifc_query_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_query_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 bsf0_klm0_pas_mtt0_1[16][0x8];
+
+ u8 bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 mad_demux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -326,6 +3420,146 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 reserved_2[0x40];
};
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 reserved_2[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_3[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
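+/*
+ * ISSI (interface step sequence ID) versions the command interface
+ * itself: query_issi reports the ISSIs the device supports along with
+ * the current one, and the set_issi command earlier in this file selects
+ * the ISSI the driver will run with.
+ */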
+struct mlx5_ifc_query_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_2[0xa0];
+
+ u8 supported_issi_reserved[76][0x8];
+ u8 supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 pkey_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 gids_num[0x10];
+ u8 reserved_2[0x10];
+
+ struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
struct mlx5_ifc_query_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -334,16 +3568,3216 @@ struct mlx5_ifc_query_hca_cap_out_bits {
u8 reserved_1[0x40];
- u8 capability_struct[256][0x8];
+ union mlx5_ifc_hca_cap_union_bits capability;
};
-struct mlx5_ifc_set_hca_cap_out_bits {
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x80];
+
+ u8 reserved_2[0x8];
+ u8 level[0x8];
+ u8 reserved_3[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_4[0x120];
+};
+
+struct mlx5_ifc_query_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x1c0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+};
+
+enum {
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_3[0xa0];
+
+ u8 reserved_4[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_5[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_5[0x120];
+};
+
+struct mlx5_ifc_query_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_2[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_3[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_2[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_2[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 cur_flows[0x20];
+
+ u8 sum_flows[0x20];
+
+ u8 cnp_ignored_high[0x20];
+
+ u8 cnp_ignored_low[0x20];
+
+ u8 cnp_handled_high[0x20];
+
+ u8 cnp_handled_low[0x20];
+
+ u8 reserved_2[0x100];
+
+ u8 time_stamp_high[0x20];
+
+ u8 time_stamp_low[0x20];
+
+ u8 accumulators_period[0x20];
+
+ u8 ecn_marked_roce_packets_high[0x20];
+
+ u8 ecn_marked_roce_packets_low[0x20];
+
+ u8 cnps_sent_high[0x20];
+
+ u8 cnps_sent_low[0x20];
+
+ u8 reserved_3[0x560];
+};
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 clear[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 error[0x1];
+ u8 reserved_2[0x4];
+ u8 rdma[0x1];
+ u8 read_write[0x1];
+ u8 req_res[0x1];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_nop_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x18];
+ u8 admin_state[0x4];
+ u8 reserved_4[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 sq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rmp_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 lwm[0x1];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rmp_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_rmp_bitmask_bits bitmask;
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+ u8 reserved_0[0x1c];
+ u8 permanent_address[0x1];
+ u8 addresses_list[0x1];
+ u8 roce_en[0x1];
+ u8 reserved_1[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+ u8 reserved_3[0x780];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0,
+ MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_3[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+ u8 reserved_3[0x80];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 output_num_entries[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 pas[0][0x40];
+};
+
+enum {
+ MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0,
+ MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1,
+ MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 input_num_entries[0x20];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_mad_ifc_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 remote_lid[0x10];
+ u8 reserved_2[0x8];
+ u8 port[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 mad[256][0x8];
+};
+
+struct mlx5_ifc_init_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 packet_headers_log[128][0x8];
+
+ u8 packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_drain_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 psvn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_5[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_4[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 reserved_2[0x8];
+ u8 psv0_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 psv1_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 psv2_index[0x18];
+
+ u8 reserved_5[0x8];
+ u8 psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 num_psv[0x4];
+ u8 reserved_2[0x4];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_4[0x80];
+
+ u8 translations_octword_actual_size[0x20];
+
+ u8 reserved_5[0x560];
+
+ u8 klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x8];
+ u8 level[0x8];
+ u8 reserved_6[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_7[0x120];
+};
+
+struct mlx5_ifc_create_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 group_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_create_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_7[0xa0];
+
+ u8 reserved_8[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_9[0xe00];
+};
+
+struct mlx5_ifc_create_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_3[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_4[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_3[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0,
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srq_number[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dct_number[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 register_data[0][0x20];
+};
+
+enum {
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0,
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 register_id[0x10];
+
+ u8 argument[0x20];
+
+ u8 register_data[0][0x20];
+};
+
+struct mlx5_ifc_sltp_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x7];
+ u8 polarity[0x1];
+ u8 ob_tap0[0x8];
+ u8 ob_tap1[0x8];
+ u8 ob_tap2[0x8];
+
+ u8 reserved_4[0xc];
+ u8 ob_preemp_mode[0x4];
+ u8 ob_reg[0x8];
+ u8 ob_bias[0x8];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 time_to_link_up[0x10];
+ u8 reserved_2[0xc];
+ u8 grade_lane_speed[0x4];
+
+ u8 grade_version[0x8];
+ u8 grade[0x18];
+
+ u8 reserved_3[0x4];
+ u8 height_grade_type[0x4];
+ u8 height_grade[0x18];
+
+ u8 height_dz[0x10];
+ u8 height_dv[0x10];
+
+ u8 reserved_4[0x10];
+ u8 height_sigma[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x4];
+ u8 phase_grade_type[0x4];
+ u8 phase_grade[0x18];
+
+ u8 reserved_7[0x8];
+ u8 phase_eo_pos[0x8];
+ u8 reserved_8[0x8];
+ u8 phase_eo_neg[0x8];
+
+ u8 ffe_set_tested[0x10];
+ u8 test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 vl_hw_cap[0x4];
+
+ u8 reserved_3[0x1c];
+ u8 vl_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 proto_mask[0x3];
+
+ u8 reserved_2[0x40];
+
+ u8 eth_proto_capability[0x20];
+
+ u8 ib_link_width_capability[0x10];
+ u8 ib_proto_capability[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 eth_proto_admin[0x20];
+
+ u8 ib_link_width_admin[0x10];
+ u8 ib_proto_admin[0x10];
+
+ u8 reserved_4[0x20];
+
+ u8 eth_proto_oper[0x20];
+
+ u8 ib_link_width_oper[0x10];
+ u8 ib_proto_oper[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 eth_proto_lp_advertise[0x20];
+
+ u8 reserved_6[0x60];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+ u8 reserved_0[0x20];
+
+ u8 algorithm_options[0x10];
+ u8 reserved_1[0x4];
+ u8 repetitions_mode[0x4];
+ u8 num_of_repetitions[0x8];
+
+ u8 grade_version[0x8];
+ u8 height_grade_type[0x4];
+ u8 phase_grade_type[0x4];
+ u8 height_grade_weight[0x8];
+ u8 phase_grade_weight[0x8];
+
+ u8 gisim_measure_bits[0x10];
+ u8 adaptive_tap_measure_bits[0x10];
+
+ u8 ber_bath_high_error_threshold[0x10];
+ u8 ber_bath_mid_error_threshold[0x10];
+
+ u8 ber_bath_low_error_threshold[0x10];
+ u8 one_ratio_high_threshold[0x10];
+
+ u8 one_ratio_high_mid_threshold[0x10];
+ u8 one_ratio_low_mid_threshold[0x10];
+
+ u8 one_ratio_low_threshold[0x10];
+ u8 ndeo_error_threshold[0x10];
+
+ u8 mixer_offset_step_size[0x10];
+ u8 reserved_2[0x8];
+ u8 mix90_phase_for_voltage_bath[0x8];
+
+ u8 mixer_offset_start[0x10];
+ u8 mixer_offset_end[0x10];
+
+ u8 reserved_3[0x15];
+ u8 ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 sub_port[0x8];
+ u8 reserved_0[0x8];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_pqdr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x5];
+ u8 prio[0x3];
+ u8 reserved_2[0x6];
+ u8 mode[0x2];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x10];
+ u8 min_threshold[0x10];
+
+ u8 reserved_5[0x10];
+ u8 max_threshold[0x10];
+
+ u8 reserved_6[0x10];
+ u8 mark_probability_denominator[0x10];
+
+ u8 reserved_7[0x60];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x1c];
+ u8 wrps_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 wrps_status[0x4];
+
+ u8 reserved_5[0x8];
+ u8 up_threshold[0x8];
+ u8 reserved_6[0x8];
+ u8 down_threshold[0x8];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x1c];
+ u8 srps_admin[0x4];
+
+ u8 reserved_9[0x1c];
+ u8 srps_status[0x4];
+
+ u8 reserved_10[0x40];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x8];
+ u8 lb_cap[0x8];
+ u8 reserved_3[0x8];
+ u8 lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_3[0x8];
+
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
+
+ u8 reserved_4[0x20];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x8];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_1[0x1c];
+ u8 prio_tc[0x3];
+
+ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+ u8 reserved_0[0x3];
+ u8 single_mac[0x1];
+ u8 reserved_1[0x4];
+ u8 local_port[0x8];
+ u8 mac_47_32[0x10];
+
+ u8 mac_31_0[0x20];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 max_mtu[0x10];
+ u8 reserved_2[0x10];
+
+ u8 admin_mtu[0x10];
+ u8 reserved_3[0x10];
+
+ u8 oper_mtu[0x10];
+ u8 reserved_4[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x18];
+ u8 attenuation_5g[0x8];
+
+ u8 reserved_3[0x18];
+ u8 attenuation_7g[0x8];
+
+ u8 reserved_4[0x18];
+ u8 attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0xc];
+ u8 module_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+ u8 module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+ u8 reserved_0[0x4];
+ u8 mlpn_status[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 e[0x1];
+ u8 reserved_2[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+ u8 rxtx[0x1];
+ u8 reserved_0[0x7];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 width[0x8];
+
+ u8 lane0_module_mapping[0x20];
+
+ u8 lane1_module_mapping[0x20];
+
+ u8 lane2_module_mapping[0x20];
+
+ u8 lane3_module_mapping[0x20];
+
+ u8 reserved_2[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_2[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_3[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_4[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+ u8 reserved_0[0x4];
+ u8 profile_id[0xc];
+ u8 reserved_1[0x4];
+ u8 proto_mask[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x10];
+ u8 lane_speed[0x10];
+
+ u8 reserved_4[0x17];
+ u8 lpbf[0x1];
+ u8 fec_mode_policy[0x8];
+
+ u8 retransmission_capability[0x8];
+ u8 fec_mode_capability[0x18];
+
+ u8 retransmission_support_admin[0x8];
+ u8 fec_mode_support_admin[0x18];
+
+ u8 retransmission_request_admin[0x8];
+ u8 fec_mode_request_admin[0x18];
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 ib_port[0x8];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 lbf_mode[0x3];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 dic[0x1];
+ u8 reserved_2[0x19];
+ u8 ipg[0x4];
+ u8 reserved_3[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xe0];
+
+ u8 port_filter[8][0x20];
+
+ u8 port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 ppan[0x4];
+ u8 reserved_2[0x4];
+ u8 prio_mask_tx[0x8];
+ u8 reserved_3[0x8];
+ u8 prio_mask_rx[0x8];
+
+ u8 pptx[0x1];
+ u8 aptx[0x1];
+ u8 reserved_4[0x6];
+ u8 pfctx[0x8];
+ u8 reserved_5[0x10];
+
+ u8 pprx[0x1];
+ u8 aprx[0x1];
+ u8 reserved_6[0x6];
+ u8 pfcrx[0x8];
+ u8 reserved_7[0x10];
+
+ u8 reserved_8[0x80];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+ u8 op[0x4];
+ u8 reserved_0[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 op_admin[0x8];
+ u8 op_capability[0x8];
+ u8 op_request[0x8];
+ u8 op_active[0x8];
+
+ u8 admin[0x40];
+
+ u8 capability[0x40];
+
+ u8 request[0x40];
+
+ u8 active[0x40];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xc];
+ u8 error_count[0x4];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0xc];
+ u8 lane[0x4];
+ u8 reserved_5[0x8];
+ u8 error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_2[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_3[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+ u8 reserved_0[0x8];
+ u8 opamp_group[0x8];
+ u8 reserved_1[0xc];
+ u8 opamp_group_type[0x4];
+
+ u8 start_index[0x10];
+ u8 reserved_2[0x4];
+ u8 num_of_indices[0xc];
+
+ u8 index_data[18][0x10];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+ u8 reserved_0[0x6];
+ u8 rx_lane[0x2];
+ u8 reserved_1[0x6];
+ u8 tx_lane[0x2];
+ u8 reserved_2[0x8];
+ u8 module[0x8];
+};
+
+struct mlx5_ifc_bufferx_reg_bits {
+ u8 reserved_0[0x6];
+ u8 lossy[0x1];
+ u8 epsb[0x1];
+ u8 reserved_1[0xc];
+ u8 size[0xc];
+
+ u8 xoff_threshold[0x10];
+ u8 xon_threshold[0x10];
+};
+
+struct mlx5_ifc_set_node_in_bits {
+ u8 node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+ u8 reserved_0[0x18];
+ u8 power_settings_level[0x8];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_host_endianness_bits {
+ u8 he[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+ u8 reserved_0[0x20];
+
+ u8 mkey[0x20];
+
+ u8 addressh_63_32[0x20];
+
+ u8 addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+ u8 dc_key[0x40];
+
+ u8 ext[0x1];
+ u8 reserved_0[0x7];
+ u8 destination_qp_dct[0x18];
+
+ u8 static_rate[0x4];
+ u8 sl_eth_prio[0x4];
+ u8 fl[0x1];
+ u8 mlid[0x7];
+ u8 rlid_udp_sport[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 rmac_47_16[0x20];
+
+ u8 rmac_15_0[0x10];
+ u8 tclass[0x8];
+ u8 hop_limit[0x8];
+
+ u8 reserved_2[0x1];
+ u8 grh[0x1];
+ u8 reserved_3[0x2];
+ u8 src_addr_index[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+ u8 reserved_0[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+
+ u8 reserved_1[0xa0];
+};
+
+struct mlx5_ifc_eqe_bits {
+ u8 reserved_0[0x8];
+ u8 event_type[0x8];
+ u8 reserved_1[0x8];
+ u8 event_sub_type[0x8];
+
+ u8 reserved_2[0xe0];
+
+ union mlx5_ifc_event_auto_bits event_data;
+
+ u8 reserved_3[0x10];
+ u8 signature[0x8];
+ u8 reserved_4[0x7];
+ u8 owner[0x1];
+};
+
+enum {
+ MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7,
+};
+
+struct mlx5_ifc_cmd_queue_entry_bits {
+ u8 type[0x8];
+ u8 reserved_0[0x18];
+
+ u8 input_length[0x20];
+
+ u8 input_mailbox_pointer_63_32[0x20];
+
+ u8 input_mailbox_pointer_31_9[0x17];
+ u8 reserved_1[0x9];
+
+ u8 command_input_inline_data[16][0x8];
+
+ u8 command_output_inline_data[16][0x8];
+
+ u8 output_mailbox_pointer_63_32[0x20];
+
+ u8 output_mailbox_pointer_31_9[0x17];
+ u8 reserved_2[0x9];
+
+ u8 output_length[0x20];
+
+ u8 token[0x8];
+ u8 signature[0x8];
+ u8 reserved_3[0x8];
+ u8 status[0x7];
+ u8 ownership[0x1];
+};
+
+struct mlx5_ifc_cmd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+ u8 mailbox_data[512][0x8];
+
+ u8 reserved_0[0x180];
+
+ u8 next_pointer_63_32[0x20];
+
+ u8 next_pointer_31_10[0x16];
+ u8 reserved_1[0xa];
+
+ u8 block_number[0x20];
+
+ u8 reserved_2[0x8];
+ u8 token[0x8];
+ u8 ctrl_signature[0x8];
+ u8 signature[0x8];
+};
+
+struct mlx5_ifc_mtt_bits {
+ u8 ptag_63_32[0x20];
+
+ u8 ptag_31_8[0x18];
+ u8 reserved_0[0x6];
+ u8 wr_en[0x1];
+ u8 rd_en[0x1];
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+};
+
+struct mlx5_ifc_initial_seg_bits {
+ u8 fw_rev_minor[0x10];
+ u8 fw_rev_major[0x10];
+
+ u8 cmd_interface_rev[0x10];
+ u8 fw_rev_subminor[0x10];
+
+ u8 reserved_0[0x40];
+
+ u8 cmdq_phy_addr_63_32[0x20];
+
+ u8 cmdq_phy_addr_31_12[0x14];
+ u8 reserved_1[0x2];
+ u8 nic_interface[0x2];
+ u8 log_cmdq_size[0x4];
+ u8 log_cmdq_stride[0x4];
+
+ u8 command_doorbell_vector[0x20];
+
+ u8 reserved_2[0xf00];
+
+ u8 initializing[0x1];
+ u8 reserved_3[0x4];
+ u8 nic_interface_supported[0x3];
+ u8 reserved_4[0x18];
+
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+
+ u8 no_dram_nic_offset[0x20];
+
+ u8 reserved_5[0x6e40];
+
+ u8 reserved_6[0x1f];
+ u8 clear_int[0x1];
+
+ u8 health_syndrome[0x8];
+ u8 health_counter[0x18];
+
+ u8 reserved_7[0x17fc0];
+};
+
+union mlx5_ifc_ports_control_registers_document_bits {
+ struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+ struct mlx5_ifc_pamp_reg_bits pamp_reg;
+ struct mlx5_ifc_paos_reg_bits paos_reg;
+ struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_peir_reg_bits peir_reg;
+ struct mlx5_ifc_pelc_reg_bits pelc_reg;
+ struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_pifr_reg_bits pifr_reg;
+ struct mlx5_ifc_pipg_reg_bits pipg_reg;
+ struct mlx5_ifc_plbf_reg_bits plbf_reg;
+ struct mlx5_ifc_plib_reg_bits plib_reg;
+ struct mlx5_ifc_plpc_reg_bits plpc_reg;
+ struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+ struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+ struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+ struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+ struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+ struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+ struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+ struct mlx5_ifc_ppad_reg_bits ppad_reg;
+ struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_pplm_reg_bits pplm_reg;
+ struct mlx5_ifc_pplr_reg_bits pplr_reg;
+ struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+ struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
+ struct mlx5_ifc_pspa_reg_bits pspa_reg;
+ struct mlx5_ifc_ptas_reg_bits ptas_reg;
+ struct mlx5_ifc_ptys_reg_bits ptys_reg;
+ struct mlx5_ifc_pude_reg_bits pude_reg;
+ struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+ struct mlx5_ifc_slrg_reg_bits slrg_reg;
+ struct mlx5_ifc_sltp_reg_bits sltp_reg;
+ u8 reserved_0[0x60e0];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+ u8 reserved_0[0x200];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+ struct mlx5_ifc_initial_seg_bits initial_seg;
+ u8 reserved_0[0x20060];
};
#endif /* MLX5_IFC_H */
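/*
 * Illustrative sketch, not part of the patch above: the u8 name[0x..]
 * members in mlx5_ifc.h are pseudo-bitfields that only describe firmware
 * command layouts in units of bits; callers are expected to build and
 * parse command mailboxes with the MLX5_ST_SZ_DW()/MLX5_SET()/MLX5_GET()
 * helpers rather than touch the structs directly. The opcode constant and
 * the mlx5_cmd_exec() call below follow existing mlx5_core conventions and
 * are assumptions made for this example only.
 */
static int example_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	int err;

	/* Field names map to struct mlx5_ifc_enable_hca_in_bits above;
	 * the macros compute the bit offset and width at compile time. */
	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* Every *_out layout starts with status/syndrome, as seen above. */
	return MLX5_GET(enable_hca_out, out, status) ? -EIO : 0;
}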
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 61f7a34..f079fb1 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -134,13 +134,21 @@ enum {
enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
+ MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
};
enum {
+ MLX5_SEND_WQE_DS = 16,
MLX5_SEND_WQE_BB = 64,
};
+#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+ MLX5_SEND_WQE_MAX_WQEBBS = 16,
+};
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+enum {
+ MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
+ MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
+ MLX5_ETH_WQE_L3_CSUM = 1 << 6,
+ MLX5_ETH_WQE_L4_CSUM = 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+ u8 rsvd0[4];
+ u8 cs_flags;
+ u8 rsvd1;
+ __be16 mss;
+ __be32 rsvd2;
+ __be16 inline_hdr_sz;
+ u8 inline_hdr_start[2];
+};
+
struct mlx5_wqe_xrc_seg {
__be32 xrc_srqn;
u8 rsvd[12];
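/*
 * Illustrative sketch, not part of the patch above: how a transmit path
 * might fill the new Ethernet send-WQE segment. The skb handling and the
 * choice to inline only the start of the headers are assumptions for the
 * example; only the struct and the MLX5_ETH_WQE_* flags come from the
 * patch itself.
 */
static void example_fill_eth_seg(struct mlx5_wqe_eth_seg *eseg,
				 struct sk_buff *skb, u16 inline_hdr_sz)
{
	/* Ask the HCA to compute L3/L4 checksums when the stack deferred them. */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;

	/* mss is only meaningful for LSO WQEs; leave it zero otherwise. */
	eseg->mss = 0;

	/* inline_hdr_sz is big endian; the inlined headers start at
	 * inline_hdr_start and continue into the rest of the WQE. */
	eseg->inline_hdr_sz = cpu_to_be16(inline_hdr_sz);
	memcpy(eseg->inline_hdr_start, skb->data,
	       min_t(u16, inline_hdr_sz, sizeof(eseg->inline_hdr_start)));
}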
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index e1a363a..f43ed05 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
new file mode 100644
index 0000000..967e0fd
--- /dev/null
+++ b/include/linux/mlx5/vport.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <linux/mlx5/driver.h>
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid);
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey);
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep);
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid);
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid);
+
+#endif /* __MLX5_VPORT_H__ */
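A typical consumer of this new API would be a netdev driver reading its permanent MAC address at probe time; a minimal sketch (illustrative only, the mdev/netdev wiring is assumed and <linux/etherdevice.h> provides the helpers):

    static void example_init_mac(struct mlx5_core_dev *mdev,
                                 struct net_device *netdev)
    {
            /* Read the MAC from the NIC vport context... */
            mlx5_query_nic_vport_mac_address(mdev, netdev->dev_addr);
            /* ...and fall back to a random address if none is programmed. */
            if (!is_valid_ether_addr(netdev->dev_addr))
                    eth_hw_addr_random(netdev);
    }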
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
new file mode 100644
index 0000000..4efc3f5
--- /dev/null
+++ b/include/linux/mm-arch-hooks.h
@@ -0,0 +1,25 @@
+/*
+ * Generic mm no-op hooks.
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_MM_ARCH_HOOKS_H
+#define _LINUX_MM_ARCH_HOOKS_H
+
+#include <asm/mm-arch-hooks.h>
+
+#ifndef arch_remap
+static inline void arch_remap(struct mm_struct *mm,
+ unsigned long old_start, unsigned long old_end,
+ unsigned long new_start, unsigned long new_end)
+{
+}
+#define arch_remap arch_remap
+#endif
+
+#endif /* _LINUX_MM_ARCH_HOOKS_H */
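The header above only supplies the no-op default; an architecture that needs to react to mremap() of a special mapping provides its own hook in <asm/mm-arch-hooks.h>. A hedged sketch of the override pattern (the vdso_base field is illustrative, not something this patch defines):

    /* In an architecture's asm/mm-arch-hooks.h: */
    static inline void arch_remap(struct mm_struct *mm,
                                  unsigned long old_start, unsigned long old_end,
                                  unsigned long new_start, unsigned long new_end)
    {
            /* Example policy: track a moved VDSO mapping. */
            if (old_start == mm->context.vdso_base)
                    mm->context.vdso_base = new_start;
    }
    #define arch_remap arch_remap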
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80fc92a..2e872f9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -27,6 +27,7 @@ struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
+struct bdi_writeback;
#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -138,7 +139,6 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
-#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
#define VM_ARCH_2 0x02000000
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
@@ -206,27 +206,26 @@ extern unsigned int kobjsize(const void *objp);
extern pgprot_t protection_map[16];
#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
-#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
-#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
-#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
-#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
-#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
-#define FAULT_FLAG_TRIED 0x40 /* second try */
-#define FAULT_FLAG_USER 0x80 /* The fault originated in userspace */
+#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
+#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
+#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
+#define FAULT_FLAG_TRIED 0x20 /* Second try */
+#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
- * pgoff should be used in favour of virtual_address, if possible. If pgoff
- * is used, one may implement ->remap_pages to get nonlinear mapping support.
+ * pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
unsigned int flags; /* FAULT_FLAG_xxx flags */
pgoff_t pgoff; /* Logical page offset based on vma */
void __user *virtual_address; /* Faulting virtual address */
+ struct page *cow_page; /* Handler may choose to COW */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
@@ -253,6 +252,9 @@ struct vm_operations_struct {
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
+ int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
*/
@@ -287,9 +289,13 @@ struct vm_operations_struct {
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
#endif
- /* called by sys_remap_file_pages() to populate non-linear mapping */
- int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
- unsigned long size, pgoff_t pgoff);
+ /*
+ * Called by vm_normal_page() for special PTEs to find the
+ * page for @addr. This is useful if the default behavior
+ * (using pte_page()) would not find the correct page.
+ */
+ struct page *(*find_special_page)(struct vm_area_struct *vma,
+ unsigned long addr);
};
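To make the replacement ->find_special_page hook concrete, here is a hypothetical implementation for a driver that keeps its mapped pages in a private table (struct example_map, the vm_private_data wiring and example_vm_ops are assumptions, not part of the patch):

    struct example_map {
            struct page **pages;    /* one entry per page of the VMA */
    };

    static struct page *example_find_special_page(struct vm_area_struct *vma,
                                                  unsigned long addr)
    {
            struct example_map *map = vma->vm_private_data;
            pgoff_t idx = (addr - vma->vm_start) >> PAGE_SHIFT;

            return map->pages[idx];
    }

    static const struct vm_operations_struct example_vm_ops = {
            .find_special_page = example_find_special_page,
    };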
struct mmu_gather;
@@ -446,6 +452,12 @@ static inline struct page *compound_head_by_tail(struct page *tail)
return tail;
}
+/*
+ * Since a compound page may be dismantled asynchronously by THP, or we
+ * may be accessing an arbitrarily positioned struct page asynchronously,
+ * there is a tail-flag race. To handle this race, smp_rmb() must be
+ * called before checking the tail flag; compound_head_by_tail() does so.
+ */
static inline struct page *compound_head(struct page *page)
{
if (unlikely(PageTail(page)))
@@ -454,6 +466,18 @@ static inline struct page *compound_head(struct page *page)
}
/*
+ * If we access a compound page synchronously, such as through a page we
+ * have just allocated, there is no need to handle the tail-flag race, so
+ * we can check the tail flag directly without any synchronization
+ * primitive.
+ */
+static inline struct page *compound_head_fast(struct page *page)
+{
+ if (unlikely(PageTail(page)))
+ return page->first_page;
+ return page;
+}
+
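The smp_rmb() the comment above refers to lives in compound_head_by_tail(), whose tail is visible in the surrounding context; roughly, the pattern is the following (paraphrased sketch, not the verbatim implementation):

    static inline struct page *sketch_compound_head_by_tail(struct page *tail)
    {
            struct page *head = tail->first_page;   /* may already be stale */

            smp_rmb();      /* order the read above against the re-check below */
            if (likely(PageTail(tail)))
                    return head;
            return tail;    /* the page was split/freed meanwhile */
    }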
+/*
* The atomic page->_mapcount, starts from -1: so that transitions
* both from it and to it can be tracked, using atomic_inc_and_test
* and atomic_add_negative(-1).
@@ -465,7 +489,8 @@ static inline void page_mapcount_reset(struct page *page)
static inline int page_mapcount(struct page *page)
{
- return atomic_read(&(page)->_mapcount) + 1;
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ return atomic_read(&page->_mapcount) + 1;
}
static inline int page_count(struct page *page)
@@ -473,18 +498,9 @@ static inline int page_count(struct page *page)
return atomic_read(&compound_head(page)->_count);
}
-#ifdef CONFIG_HUGETLB_PAGE
-extern int PageHeadHuge(struct page *page_head);
-#else /* CONFIG_HUGETLB_PAGE */
-static inline int PageHeadHuge(struct page *page_head)
-{
- return 0;
-}
-#endif /* CONFIG_HUGETLB_PAGE */
-
static inline bool __compound_tail_refcounted(struct page *page)
{
- return !PageSlab(page) && !PageHeadHuge(page);
+ return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page);
}
/*
@@ -531,7 +547,14 @@ static inline void get_page(struct page *page)
static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
- return compound_head(page);
+
+ /*
+ * We don't need to worry about synchronization of the tail flag
+ * here: virt_to_head_page() is only called for an already
+ * allocated page, and that page cannot be freed until this call
+ * returns, so the _fast variant is safe.
+ */
+ return compound_head_fast(page);
}
/*
@@ -543,53 +566,6 @@ static inline void init_page_count(struct page *page)
atomic_set(&page->_count, 1);
}
-/*
- * PageBuddy() indicate that the page is free and in the buddy system
- * (see mm/page_alloc.c).
- *
- * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
- * -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
- * efficiently by most CPU architectures.
- */
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-
-static inline int PageBuddy(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBuddy(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBuddy(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBuddy(page), page);
- atomic_set(&page->_mapcount, -1);
-}
-
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-
-static inline int PageBalloon(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBalloon(page), page);
- atomic_set(&page->_mapcount, -1);
-}
-
void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
@@ -601,29 +577,28 @@ int split_free_page(struct page *page);
* prototype for that function and accessor functions.
* These are _only_ valid on the head of a PG_compound page.
*/
-typedef void compound_page_dtor(struct page *);
static inline void set_compound_page_dtor(struct page *page,
compound_page_dtor *dtor)
{
- page[1].lru.next = (void *)dtor;
+ page[1].compound_dtor = dtor;
}
static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
- return (compound_page_dtor *)page[1].lru.next;
+ return page[1].compound_dtor;
}
static inline int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
- return (unsigned long)page[1].lru.prev;
+ return page[1].compound_order;
}
static inline void set_compound_order(struct page *page, unsigned long order)
{
- page[1].lru.prev = (void *)order;
+ page[1].compound_order = order;
}
#ifdef CONFIG_MMU
@@ -979,34 +954,10 @@ void page_address_init(void);
#define page_address_init() do { } while(0)
#endif
-/*
- * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
- *
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
- *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
- *
- * Please note that, confusingly, "page_mapping" refers to the inode
- * address_space which maps the page from disk; whereas "page_mapped"
- * refers to user virtual address space into which the page is mapped.
- */
-#define PAGE_MAPPING_ANON 1
-#define PAGE_MAPPING_KSM 2
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
-
+extern void *page_rmapping(struct page *page);
+extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);
-/* Neutral page->mapping pointer to address_space or anon_vma or other */
-static inline void *page_rmapping(struct page *page)
-{
- return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
-}
-
extern struct address_space *__page_file_mapping(struct page *);
static inline
@@ -1018,11 +969,6 @@ struct address_space *page_file_mapping(struct page *page)
return page->mapping;
}
-static inline int PageAnon(struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
-}
-
/*
* Return the pagecache index of the passed page. Regular pagecache pages
* use ->index whereas swapcache pages use ->private
@@ -1070,6 +1016,7 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040 /* Fault should result in SIGSEGV */
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
@@ -1078,8 +1025,9 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
- VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+ VM_FAULT_FALLBACK)
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@ -1119,7 +1067,6 @@ extern void user_shm_unlock(size_t, struct user_struct *);
* Parameter block passed down to zap_pte_range in exceptional cases.
*/
struct zap_details {
- struct vm_area_struct *nonlinear_vma; /* Check page->index if set */
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
@@ -1137,8 +1084,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
/**
* mm_walk - callbacks for walk_page_range
- * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
- * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
@@ -1146,16 +1091,18 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
* @hugetlb_entry: if set, called for each hugetlb entry
- * *Caution*: The caller must hold mmap_sem() if @hugetlb_entry
- * is used.
+ * @test_walk: caller-specific callback used to decide whether to walk
+ * over the current vma. A positive return value means "do the
+ * page table walk over the current vma", a negative value means
+ * "abort the current page table walk right now", and 0 means
+ * "skip the current vma".
+ * @mm: mm_struct representing the target process of page table walk
+ * @vma: vma currently walked (NULL if walking outside vmas)
+ * @private: private data for callbacks' usage
*
- * (see walk_page_range for more details)
+ * (see the comment on walk_page_range() for more details)
*/
struct mm_walk {
- int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
- int (*pud_entry)(pud_t *pud, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk);
int (*pte_entry)(pte_t *pte, unsigned long addr,
@@ -1165,12 +1112,16 @@ struct mm_walk {
int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long next,
struct mm_walk *walk);
+ int (*test_walk)(unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
struct mm_struct *mm;
+ struct vm_area_struct *vma;
void *private;
};
int walk_page_range(unsigned long addr, unsigned long end,
struct mm_walk *walk);
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
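A small usage sketch of the reworked walker interface, exercising ->pte_entry, the new ->test_walk hook and walk_page_vma() (hypothetical helper; mmap_sem must be held by the caller, error handling trimmed):

    static int example_count_pte(pte_t *pte, unsigned long addr,
                                 unsigned long next, struct mm_walk *walk)
    {
            unsigned long *count = walk->private;

            if (pte_present(*pte))
                    (*count)++;
            return 0;
    }

    static int example_test_walk(unsigned long addr, unsigned long next,
                                 struct mm_walk *walk)
    {
            /* 0 = skip this vma, positive = walk it */
            return is_vm_hugetlb_page(walk->vma) ? 0 : 1;
    }

    static unsigned long example_count_present(struct vm_area_struct *vma)
    {
            unsigned long count = 0;
            struct mm_walk walk = {
                    .pte_entry = example_count_pte,
                    .test_walk = example_test_walk,
                    .mm        = vma->vm_mm,
                    .private   = &count,
            };

            walk_page_vma(vma, &walk);
            return count;
    }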
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
@@ -1234,6 +1185,17 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
struct vm_area_struct **vmas);
+long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ int *locked);
+long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ unsigned int gup_flags);
+long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages);
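A minimal sketch of the new *_unlocked variant (hypothetical helper): the caller no longer has to take mmap_sem around the call, and must put_page() every page that was actually pinned.

    static long example_pin_user_buffer(unsigned long uaddr,
                                        unsigned long nr_pages,
                                        struct page **pages)
    {
            /* Pins up to nr_pages pages for write access; returns the number
             * actually pinned or a negative errno. */
            return get_user_pages_unlocked(current, current->mm, uaddr,
                                           nr_pages, 1 /* write */,
                                           0 /* force */, pages);
    }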
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
struct kvec;
@@ -1250,10 +1212,15 @@ int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_dirtied(struct page *page, struct address_space *mapping,
+ struct mem_cgroup *memcg);
+void account_page_cleaned(struct page *page, struct address_space *mapping,
+ struct mem_cgroup *memcg, struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
+void cancel_dirty_page(struct page *page);
int clear_page_dirty_for_io(struct page *page);
+
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
/* Is the vma a continuation of the stack vma above it? */
@@ -1366,6 +1333,11 @@ static inline void update_hiwater_vm(struct mm_struct *mm)
mm->hiwater_vm = mm->total_vm;
}
+static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
+{
+ mm->hiwater_rss = get_mm_rss(mm);
+}
+
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
struct mm_struct *mm)
{
@@ -1405,14 +1377,45 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
-#ifdef __PAGETABLE_PMD_FOLDED
+#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
unsigned long address)
{
return 0;
}
+
+static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+
+static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+
+static inline void mm_nr_pmds_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->nr_pmds, 0);
+}
+
+static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->nr_pmds);
+}
+
+static inline void mm_inc_nr_pmds(struct mm_struct *mm)
+{
+ atomic_long_inc(&mm->nr_pmds);
+}
+
+static inline void mm_dec_nr_pmds(struct mm_struct *mm)
+{
+ atomic_long_dec(&mm->nr_pmds);
+}
#endif
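The intent of the new counters: code that populates or frees a PMD page pairs that with mm_inc_nr_pmds()/mm_dec_nr_pmds(), and accounting code can then read the totals. A tiny hypothetical reader (not part of the patch):

    static unsigned long example_pgtable_pages(struct mm_struct *mm)
    {
            /* PTE pages are counted in mm->nr_ptes; PMD pages in mm->nr_pmds
             * (which reads as zero when the PMD level is folded or !CONFIG_MMU). */
            return atomic_long_read(&mm->nr_ptes) + mm_nr_pmds(mm);
    }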
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -1632,6 +1635,8 @@ extern void free_highmem_page(struct page *page);
extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);
+extern void reserve_bootmem_region(unsigned long start, unsigned long end);
+
/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
@@ -1721,7 +1726,8 @@ extern void sparse_memory_present_with_active_regions(int nid);
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
-static inline int __early_pfn_to_nid(unsigned long pfn)
+static inline int __early_pfn_to_nid(unsigned long pfn,
+ struct mminit_pfnnid_cache *state)
{
return 0;
}
@@ -1729,7 +1735,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn)
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+extern int __meminit __early_pfn_to_nid(unsigned long pfn,
+ struct mminit_pfnnid_cache *state);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
@@ -1775,12 +1782,6 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
for (vma = vma_interval_tree_iter_first(root, start, last); \
vma; vma = vma_interval_tree_iter_next(vma, start, last))
-static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
- struct list_head *list)
-{
- list_add_tail(&vma->shared.nonlinear, list);
-}
-
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
@@ -1900,10 +1901,10 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
- if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
- return unmapped_area(info);
- else
+ if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
return unmapped_area_topdown(info);
+ else
+ return unmapped_area(info);
}
/* truncate.c */
@@ -2036,7 +2037,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
* and return without waiting upon it */
-#define FOLL_MLOCK 0x40 /* mark page as mlocked */
+#define FOLL_POPULATE 0x40 /* fault in page */
#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
@@ -2108,9 +2109,8 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
#endif
-unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
- unsigned long nr_scanned,
- unsigned long nr_eligible);
+void drop_slab(void);
+void drop_slab_node(int nid);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -2154,12 +2154,47 @@ enum mf_flags {
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
+extern int get_hwpoison_page(struct page *page);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);
+
+/*
+ * Error handlers for various types of pages.
+ */
+enum mf_result {
+ MF_IGNORED, /* Error: cannot be handled */
+ MF_FAILED, /* Error: handling failed */
+ MF_DELAYED, /* Will be handled later */
+ MF_RECOVERED, /* Successfully recovered */
+};
+
+enum mf_action_page_type {
+ MF_MSG_KERNEL,
+ MF_MSG_KERNEL_HIGH_ORDER,
+ MF_MSG_SLAB,
+ MF_MSG_DIFFERENT_COMPOUND,
+ MF_MSG_POISONED_HUGE,
+ MF_MSG_HUGE,
+ MF_MSG_FREE_HUGE,
+ MF_MSG_UNMAP_FAILED,
+ MF_MSG_DIRTY_SWAPCACHE,
+ MF_MSG_CLEAN_SWAPCACHE,
+ MF_MSG_DIRTY_MLOCKED_LRU,
+ MF_MSG_CLEAN_MLOCKED_LRU,
+ MF_MSG_DIRTY_UNEVICTABLE_LRU,
+ MF_MSG_CLEAN_UNEVICTABLE_LRU,
+ MF_MSG_DIRTY_LRU,
+ MF_MSG_CLEAN_LRU,
+ MF_MSG_TRUNCATED_LRU,
+ MF_MSG_BUDDY,
+ MF_MSG_BUDDY_2ND,
+ MF_MSG_UNKNOWN,
+};
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
unsigned long addr,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6d34aa2..0038ac7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -28,6 +28,8 @@ struct mem_cgroup;
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
+typedef void compound_page_dtor(struct page *);
+
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -142,6 +144,12 @@ struct page {
struct rcu_head rcu_head; /* Used by SLAB
* when destroying via RCU
*/
+ /* First tail page of compound page */
+ struct {
+ compound_page_dtor *compound_dtor;
+ unsigned long compound_order;
+ };
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
pgtable_t pmd_huge_pte; /* protected by page->ptl */
#endif
@@ -218,6 +226,24 @@ struct page_frag {
#endif
};
+#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+ void * va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ __u16 offset;
+ __u16 size;
+#else
+ __u32 offset;
+#endif
+ /* We maintain a pagecount bias so that we don't dirty the cache line
+ * containing page->_count every time we allocate a fragment.
+ */
+ unsigned int pagecnt_bias;
+ bool pfmemalloc;
+};
+
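Editorial arithmetic for the #if above, assuming the two common page sizes (__ALIGN_MASK(x, m) rounds x up to a multiple of m + 1):

    /*
     * 4 KiB pages:  PAGE_FRAG_CACHE_MAX_SIZE = 32768 (order 3), so the cache
     *               can span several pages and a 16-bit size field tracks how
     *               much was actually allocated (high-order allocations may
     *               fall back to a single page).
     * 64 KiB pages: __ALIGN_MASK(32768, 0xffff) = 65536 = one page (order 0),
     *               so the offset alone is enough and no size field is kept.
     */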
typedef unsigned long __nocast vm_flags_t;
/*
@@ -273,15 +299,11 @@ struct vm_area_struct {
/*
* For areas with an address space and backing store,
- * linkage into the address_space->i_mmap interval tree, or
- * linkage of vma in the address_space->i_mmap_nonlinear list.
+ * linkage into the address_space->i_mmap interval tree.
*/
- union {
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } linear;
- struct list_head nonlinear;
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
} shared;
/*
@@ -359,7 +381,10 @@ struct mm_struct {
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
- atomic_long_t nr_ptes; /* Page table pages */
+ atomic_long_t nr_ptes; /* PTE page table pages */
+#if CONFIG_PGTABLE_LEVELS > 2
+ atomic_long_t nr_pmds; /* PMD page table pages */
+#endif
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some counters */
@@ -422,7 +447,7 @@ struct mm_struct {
#endif
/* store ref to file /proc/<pid>/exe symlink points to */
- struct file *exe_file;
+ struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 4d69c00..4d3776d 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -83,7 +83,7 @@ struct mmc_ext_csd {
bool hpi; /* HPI support bit */
unsigned int hpi_cmd; /* cmd used as HPI */
bool bkops; /* background support bit */
- bool bkops_en; /* background enable bit */
+ bool man_bkops_en; /* manual bkops enable bit */
unsigned int data_sector_size; /* 512 bytes or 4KB */
unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
unsigned int boot_ro_lock; /* ro lock support */
@@ -97,6 +97,7 @@ struct mmc_ext_csd {
u8 raw_erased_mem_count; /* 181 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
+ u8 raw_driver_strength; /* 197 */
u8 out_of_int_time; /* 198 */
u8 raw_pwr_cl_52_195; /* 200 */
u8 raw_pwr_cl_26_195; /* 201 */
@@ -305,6 +306,7 @@ struct mmc_card {
unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
unsigned int mmc_avail_type; /* supported device type by both host and card */
+ unsigned int drive_strength; /* for UHS-I, HS200 or HS400 */
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
@@ -512,8 +514,18 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
-extern int mmc_register_driver(struct device_driver *);
-extern void mmc_unregister_driver(struct device_driver *);
+/*
+ * MMC device driver (e.g., Flash card, I/O card...)
+ */
+struct mmc_driver {
+ struct device_driver drv;
+ int (*probe)(struct mmc_card *);
+ void (*remove)(struct mmc_card *);
+ void (*shutdown)(struct mmc_card *);
+};
+
+extern int mmc_register_driver(struct mmc_driver *);
+extern void mmc_unregister_driver(struct mmc_driver *);
extern void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table);
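A skeleton client of the re-exposed struct mmc_driver interface (hypothetical driver, module boilerplate reduced to comments):

    static int example_probe(struct mmc_card *card)
    {
            dev_info(&card->dev, "example: card bound\n");
            return 0;
    }

    static void example_remove(struct mmc_card *card)
    {
            dev_info(&card->dev, "example: card removed\n");
    }

    static struct mmc_driver example_driver = {
            .drv    = { .name = "example_mmc" },
            .probe  = example_probe,
            .remove = example_remove,
    };

    /* module_init: mmc_register_driver(&example_driver);   */
    /* module_exit: mmc_unregister_driver(&example_driver); */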
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index cb2b040..258daf9 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -79,7 +79,7 @@ struct mmc_command {
#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
unsigned int retries; /* max number of retries */
- unsigned int error; /* command error */
+ int error; /* command error */
/*
* Standard errno values are used for errors, but some have specific
@@ -108,7 +108,7 @@ struct mmc_data {
unsigned int timeout_clks; /* data timeout (in clocks) */
unsigned int blksz; /* data block size */
unsigned int blocks; /* number of blocks */
- unsigned int error; /* data error */
+ int error; /* data error */
unsigned int flags;
#define MMC_DATA_WRITE (1 << 8)
@@ -121,6 +121,7 @@ struct mmc_data {
struct mmc_request *mrq; /* associated request */
unsigned int sg_len; /* size of scatter list */
+ int sg_count; /* mapped sg entries */
struct scatterlist *sg; /* I/O scatter list */
s32 host_cookie; /* host private data */
};
@@ -182,7 +183,6 @@ extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write);
extern int mmc_hw_reset(struct mmc_host *host);
-extern int mmc_hw_reset_check(struct mmc_host *host);
extern int mmc_can_reset(struct mmc_card *card);
extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 42b724e..5be9767 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -44,6 +44,7 @@ struct mmc_data;
* struct dw_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
* @regs: Pointer to MMIO registers.
+ * @fifo_reg: Pointer to MMIO registers for data FIFO
* @sg: Scatterlist entry currently being processed by PIO code, if any.
* @sg_miter: PIO mapping scatterlist iterator.
* @cur_slot: The slot which is currently using the controller.
@@ -79,7 +80,6 @@ struct mmc_data;
* @current_speed: Configured rate of the controller.
* @num_slots: Number of slots available.
* @verid: Denote Version ID.
- * @data_offset: Set the offset of DATA register according to VERID.
* @dev: Device associated with the MMC controller.
* @pdata: Platform data associated with the MMC controller.
* @drv_data: Driver specific data for identified variant of the controller
@@ -106,6 +106,11 @@ struct mmc_data;
* @cur_slot, @mrq and @state. These must always be updated
* at the same time while holding @lock.
*
+ * @irq_lock is an irq-safe spinlock protecting the INTMASK register
+ * to allow the interrupt handler to modify it directly. It is held only
+ * long enough to read-modify-write INTMASK, and no other locks are taken
+ * while holding it.
+ *
* The @mrq field of struct dw_mci_slot is also protected by @lock,
* and must always be written at the same time as the slot is added to
* @queue.
@@ -125,7 +130,9 @@ struct mmc_data;
*/
struct dw_mci {
spinlock_t lock;
+ spinlock_t irq_lock;
void __iomem *regs;
+ void __iomem *fifo_reg;
struct scatterlist *sg;
struct sg_mapping_iter sg_miter;
@@ -166,7 +173,6 @@ struct dw_mci {
u32 num_slots;
u32 fifoth_val;
u16 verid;
- u16 data_offset;
struct device *dev;
struct dw_mci_board *pdata;
const struct dw_mci_drv_data *drv_data;
@@ -196,6 +202,8 @@ struct dw_mci {
int irq;
int sdio_id0;
+
+ struct timer_list cmd11_timer;
};
/* DMA ops for Internal/External DMAC interface */
@@ -218,12 +226,6 @@ struct dw_mci_dma_ops {
#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
/* Unreliable card detection */
#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
-/* No write protect */
-#define DW_MCI_QUIRK_NO_WRITE_PROTECT BIT(4)
-
-/* Slot level quirks */
-/* This slot has no write protect */
-#define DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT BIT(0)
struct dma_pdata;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9f32270..1369e54 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -12,6 +12,7 @@
#include <linux/leds.h>
#include <linux/mutex.h>
+#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
@@ -80,12 +81,6 @@ struct mmc_ios {
struct mmc_host_ops {
/*
- * 'enable' is called when the host is claimed and 'disable' is called
- * when the host is released. 'enable' and 'disable' are deprecated.
- */
- int (*enable)(struct mmc_host *host);
- int (*disable)(struct mmc_host *host);
- /*
* It is optional for the host to implement pre_req and post_req in
* order to support double buffering of requests (prepare one
* request while another request is active).
@@ -137,7 +132,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
- int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
+ int (*select_drive_strength)(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
@@ -166,7 +163,6 @@ struct mmc_async_req {
* struct mmc_slot - MMC slot functions
*
* @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL
- * @lock: protect the @handler_priv pointer
* @handler_priv: MMC/SD-card slot context
*
* Some MMC/SD host controllers implement slot-functions like card and
@@ -176,7 +172,6 @@ struct mmc_async_req {
*/
struct mmc_slot {
int cd_irq;
- struct mutex lock;
void *handler_priv;
};
@@ -197,6 +192,7 @@ struct mmc_context_info {
};
struct regulator;
+struct mmc_pwrseq;
struct mmc_supply {
struct regulator *vmmc; /* Card power supply */
@@ -208,6 +204,7 @@ struct mmc_host {
struct device class_dev;
int index;
const struct mmc_host_ops *ops;
+ struct mmc_pwrseq *pwrseq;
unsigned int f_min;
unsigned int f_max;
unsigned int f_init;
@@ -291,6 +288,7 @@ struct mmc_host {
MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
+#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -327,10 +325,18 @@ struct mmc_host {
#ifdef CONFIG_MMC_DEBUG
unsigned int removed:1; /* host is being removed */
#endif
+ unsigned int can_retune:1; /* re-tuning can be used */
+ unsigned int doing_retune:1; /* re-tuning in progress */
+ unsigned int retune_now:1; /* do re-tuning at next req */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
+ int need_retune; /* re-tuning is needed */
+ int hold_retune; /* hold off re-tuning */
+ unsigned int retune_period; /* re-tuning period in secs */
+ struct timer_list retune_timer; /* for periodic re-tuning */
+
bool trigger_card_event; /* card_event necessary */
struct mmc_card *card; /* device attached to this host */
@@ -519,4 +525,18 @@ static inline bool mmc_card_hs400(struct mmc_card *card)
return card->host->ios.timing == MMC_TIMING_MMC_HS400;
}
+void mmc_retune_timer_stop(struct mmc_host *host);
+
+static inline void mmc_retune_needed(struct mmc_host *host)
+{
+ if (host->can_retune)
+ host->need_retune = 1;
+}
+
+static inline void mmc_retune_recheck(struct mmc_host *host)
+{
+ if (host->hold_retune <= 1)
+ host->retune_now = 1;
+}
+
#endif /* LINUX_MMC_HOST_H */
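How a host driver is expected to feed the new re-tuning state (hedged sketch; the error-classification policy shown is illustrative, not mandated by the patch):

    static void example_request_done(struct mmc_host *host,
                                     struct mmc_request *mrq)
    {
            /* A CRC-type error on data suggests the sampling point drifted:
             * ask the core to re-tune before the next request. */
            if (mrq->data && mrq->data->error == -EILSEQ)
                    mmc_retune_needed(host);
    }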
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 49ad7a9..15f2c4a 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -53,11 +53,6 @@
#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
-#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
-#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
-extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
-extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE];
-
/* class 3 */
#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
@@ -307,6 +302,7 @@ struct _mmc_csd {
#define EXT_CSD_REV 192 /* RO */
#define EXT_CSD_STRUCTURE 194 /* RO */
#define EXT_CSD_CARD_TYPE 196 /* RO */
+#define EXT_CSD_DRIVER_STRENGTH 197 /* RO */
#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */
#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
#define EXT_CSD_PWR_CL_52_195 200 /* RO */
@@ -395,6 +391,7 @@ struct _mmc_csd {
#define EXT_CSD_TIMING_HS 1 /* High speed */
#define EXT_CSD_TIMING_HS200 2 /* HS200 */
#define EXT_CSD_TIMING_HS400 3 /* HS400 */
+#define EXT_CSD_DRV_STR_SHIFT 4 /* Driver Strength shift */
#define EXT_CSD_SEC_ER_EN BIT(0)
#define EXT_CSD_SEC_BD_BLK_EN BIT(2)
@@ -433,6 +430,11 @@ struct _mmc_csd {
#define EXT_CSD_BKOPS_LEVEL_2 0x2
/*
+ * BKOPS modes
+ */
+#define EXT_CSD_MANUAL_BKOPS_MASK 0x01
+
+/*
* MMC_SWITCH access modes
*/
@@ -441,4 +443,6 @@ struct _mmc_csd {
#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
+#define mmc_driver_type_mask(n) (1 << (n))
+
#endif /* LINUX_MMC_MMC_H */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
index 8959604..fda15b6 100644
--- a/include/linux/mmc/sdhci-pci-data.h
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -15,4 +15,6 @@ struct sdhci_pci_data {
extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
int slotno);
+extern int sdhci_pci_spt_drive_strength;
+
#endif
diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h
deleted file mode 100644
index 8cc095a..0000000
--- a/include/linux/mmc/sdhci-spear.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * include/linux/mmc/sdhci-spear.h
- *
- * SDHCI declarations specific to ST SPEAr platform
- *
- * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef LINUX_MMC_SDHCI_SPEAR_H
-#define LINUX_MMC_SDHCI_SPEAR_H
-
-#include <linux/platform_device.h>
-/*
- * struct sdhci_plat_data: spear sdhci platform data structure
- *
- * card_int_gpio: gpio pin used for card detection
- */
-struct sdhci_plat_data {
- int card_int_gpio;
-};
-
-/* This function is used to set platform_data field of pdev->dev */
-static inline void
-sdhci_set_plat_data(struct platform_device *pdev, struct sdhci_plat_data *data)
-{
- pdev->dev.platform_data = data;
-}
-
-#endif /* LINUX_MMC_SDHCI_SPEAR_H */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
deleted file mode 100644
index f767a0d..0000000
--- a/include/linux/mmc/sdhci.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * linux/include/linux/mmc/sdhci.h - Secure Digital Host Controller Interface
- *
- * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-#ifndef LINUX_MMC_SDHCI_H
-#define LINUX_MMC_SDHCI_H
-
-#include <linux/scatterlist.h>
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/mmc/host.h>
-
-struct sdhci_host {
- /* Data set by hardware interface driver */
- const char *hw_name; /* Hardware bus name */
-
- unsigned int quirks; /* Deviations from spec. */
-
-/* Controller doesn't honor resets unless we touch the clock register */
-#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
-/* Controller has bad caps bits, but really supports DMA */
-#define SDHCI_QUIRK_FORCE_DMA (1<<1)
-/* Controller doesn't like to be reset when there is no card inserted. */
-#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
-/* Controller doesn't like clearing the power reg before a change */
-#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
-/* Controller has flaky internal state so reset it on each ios change */
-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
-/* Controller has an unusable DMA engine */
-#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
-/* Controller has an unusable ADMA engine */
-#define SDHCI_QUIRK_BROKEN_ADMA (1<<6)
-/* Controller can only DMA from 32-bit aligned addresses */
-#define SDHCI_QUIRK_32BIT_DMA_ADDR (1<<7)
-/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_DMA_SIZE (1<<8)
-/* Controller can only ADMA chunks that are a multiple of 32 bits */
-#define SDHCI_QUIRK_32BIT_ADMA_SIZE (1<<9)
-/* Controller needs to be reset after each request to stay stable */
-#define SDHCI_QUIRK_RESET_AFTER_REQUEST (1<<10)
-/* Controller needs voltage and power writes to happen separately */
-#define SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER (1<<11)
-/* Controller provides an incorrect timeout value for transfers */
-#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
-/* Controller has an issue with buffer bits for small transfers */
-#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
-/* Controller does not provide transfer-complete interrupt when not busy */
-#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
-/* Controller has unreliable card detection */
-#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
-/* Controller reports inverted write-protect state */
-#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
-/* Controller does not like fast PIO transfers */
-#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
-/* Controller has to be forced to use block size of 2048 bytes */
-#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
-/* Controller cannot do multi-block transfers */
-#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
-/* Controller can only handle 1-bit data transfers */
-#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
-/* Controller needs 10ms delay between applying power and clock */
-#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
-/* Controller uses SDCLK instead of TMCLK for data timeouts */
-#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
-/* Controller reports wrong base clock capability */
-#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
-/* Controller cannot support End Attribute in NOP ADMA descriptor */
-#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
-/* Controller is missing device caps. Use caps provided by host */
-#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
-/* Controller uses Auto CMD12 command to stop the transfer */
-#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
-/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
-#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
-/* Controller treats ADMA descriptors with length 0000h incorrectly */
-#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
-/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */
-#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31)
-
- unsigned int quirks2; /* More deviations from spec. */
-
-#define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0)
-#define SDHCI_QUIRK2_HOST_NO_CMD23 (1<<1)
-/* The system physically doesn't support 1.8v, even if the host does */
-#define SDHCI_QUIRK2_NO_1_8_V (1<<2)
-#define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3)
-#define SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON (1<<4)
-/* Controller has a non-standard host control register */
-#define SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1<<5)
-/* Controller does not support HS200 */
-#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
-/* Controller does not support DDR50 */
-#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7)
-/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
-#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8)
-/* Controller does not support 64-bit DMA */
-#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9)
-/* need clear transfer mode register before send cmd */
-#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10)
-/* Capability register bit-63 indicates HS400 support */
-#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11)
-
- int irq; /* Device IRQ */
- void __iomem *ioaddr; /* Mapped address */
-
- const struct sdhci_ops *ops; /* Low level hw interface */
-
- /* Internal data */
- struct mmc_host *mmc; /* MMC structure */
- u64 dma_mask; /* custom DMA mask */
-
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
- struct led_classdev led; /* LED control */
- char led_name[32];
-#endif
-
- spinlock_t lock; /* Mutex */
-
- int flags; /* Host attributes */
-#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
-#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
-#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
-#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
-#define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */
-#define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */
-#define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */
-#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */
-#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */
-#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
-#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
-#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
-#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
-#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
-
- unsigned int version; /* SDHCI spec. version */
-
- unsigned int max_clk; /* Max possible freq (MHz) */
- unsigned int timeout_clk; /* Timeout freq (KHz) */
- unsigned int clk_mul; /* Clock Muliplier value */
-
- unsigned int clock; /* Current clock (MHz) */
- u8 pwr; /* Current voltage */
-
- bool runtime_suspended; /* Host is runtime suspended */
- bool bus_on; /* Bus power prevents runtime suspend */
- bool preset_enabled; /* Preset is enabled */
-
- struct mmc_request *mrq; /* Current request */
- struct mmc_command *cmd; /* Current command */
- struct mmc_data *data; /* Current data request */
- unsigned int data_early:1; /* Data finished before cmd */
- unsigned int busy_handle:1; /* Handling the order of Busy-end */
-
- struct sg_mapping_iter sg_miter; /* SG state for PIO */
- unsigned int blocks; /* remaining PIO blocks */
-
- int sg_count; /* Mapped sg entries */
-
- void *adma_table; /* ADMA descriptor table */
- void *align_buffer; /* Bounce buffer */
-
- size_t adma_table_sz; /* ADMA descriptor table size */
- size_t align_buffer_sz; /* Bounce buffer size */
-
- dma_addr_t adma_addr; /* Mapped ADMA descr. table */
- dma_addr_t align_addr; /* Mapped bounce buffer */
-
- unsigned int desc_sz; /* ADMA descriptor size */
- unsigned int align_sz; /* ADMA alignment */
- unsigned int align_mask; /* ADMA alignment mask */
-
- struct tasklet_struct finish_tasklet; /* Tasklet structures */
-
- struct timer_list timer; /* Timer for timeouts */
-
- u32 caps; /* Alternative CAPABILITY_0 */
- u32 caps1; /* Alternative CAPABILITY_1 */
-
- unsigned int ocr_avail_sdio; /* OCR bit masks */
- unsigned int ocr_avail_sd;
- unsigned int ocr_avail_mmc;
- u32 ocr_mask; /* available voltages */
-
- unsigned timing; /* Current timing */
-
- u32 thread_isr;
-
- /* cached registers */
- u32 ier;
-
- wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
- unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
-
- unsigned int tuning_count; /* Timer count for re-tuning */
- unsigned int tuning_mode; /* Re-tuning mode supported by host */
-#define SDHCI_TUNING_MODE_1 0
- struct timer_list tuning_timer; /* Timer for tuning */
-
- unsigned long private[0] ____cacheline_aligned;
-};
-#endif /* LINUX_MMC_SDHCI_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0f01fe0..83430f2 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -24,13 +24,17 @@
* Vendors and devices. Sort key: vendor first, device next.
*/
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-#define SDIO_DEVICE_ID_BROADCOM_43143 43143
+#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
+#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
-#define SDIO_DEVICE_ID_BROADCOM_43362 43362
+#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_VENDOR_ID_INTEL 0x0089
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index 68927ae..95d6f03 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -3,33 +3,8 @@
#include <linux/types.h>
-struct platform_device;
-
#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect"
#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
#define SH_MOBILE_SDHI_IRQ_SDIO "sdio"
-/**
- * struct sh_mobile_sdhi_ops - SDHI driver callbacks
- * @cd_wakeup: trigger a card-detection run
- */
-struct sh_mobile_sdhi_ops {
- void (*cd_wakeup)(const struct platform_device *pdev);
-};
-
-struct sh_mobile_sdhi_info {
- int dma_slave_tx;
- int dma_slave_rx;
- unsigned long tmio_flags;
- unsigned long tmio_caps;
- unsigned long tmio_caps2;
- u32 tmio_ocr_mask; /* available MMC voltages */
- unsigned int cd_gpio;
-
- /* callbacks for board specific setup code */
- int (*init)(struct platform_device *pdev,
- const struct sh_mobile_sdhi_ops *ops);
- void (*cleanup)(struct platform_device *pdev);
-};
-
#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index e56fa24..3945a8c 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -15,12 +15,10 @@ struct mmc_host;
int mmc_gpio_get_ro(struct mmc_host *host);
int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
-void mmc_gpio_free_ro(struct mmc_host *host);
int mmc_gpio_get_cd(struct mmc_host *host);
int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
unsigned int debounce);
-void mmc_gpio_free_cd(struct mmc_host *host);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
@@ -28,7 +26,8 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce, bool *gpio_invert);
-void mmc_gpiod_free_cd(struct mmc_host *host);
+void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ irqreturn_t (*isr)(int irq, void *dev_id));
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
#endif
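Sketch of the new mmc_gpio_set_cd_isr() hook: a host driver can substitute its own card-detect handler before the CD GPIO IRQ is requested (hypothetical probe fragment; the "cd" con_id and the debounce/delay values are illustrative):

    static irqreturn_t example_cd_isr(int irq, void *dev_id)
    {
            struct mmc_host *mmc = dev_id;  /* the core passes the host here */

            mmc_detect_change(mmc, msecs_to_jiffies(200));
            return IRQ_HANDLED;
    }

    /* In probe, before mmc_add_host():                        */
    /*      mmc_gpio_set_cd_isr(mmc, example_cd_isr);          */
    /*      mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); */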
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index c5d5278..3ba327a 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
extern void disable_mmiotrace(void);
extern void mmio_trace_rw(struct mmiotrace_rw *rw);
extern void mmio_trace_mapping(struct mmiotrace_map *map);
-extern int mmio_trace_printk(const char *fmt, va_list args);
+extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
#endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 95243d2..61cd67f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -324,25 +324,25 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
___pte; \
})
-#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
+#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
struct mm_struct *___mm = (__vma)->vm_mm; \
pmd_t ___pmd; \
\
- ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
+ ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
mmu_notifier_invalidate_range(___mm, ___haddr, \
___haddr + HPAGE_PMD_SIZE); \
\
___pmd; \
})
-#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
+#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
pmd_t ___pmd; \
\
- ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
+ ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
mmu_notifier_invalidate_range(__mm, ___haddr, \
___haddr + HPAGE_PMD_SIZE); \
\
@@ -428,8 +428,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_flush_notify ptep_clear_flush
-#define pmdp_clear_flush_notify pmdp_clear_flush
-#define pmdp_get_and_clear_notify pmdp_get_and_clear
+#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
+#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
#define set_pte_at_notify set_pte_at
#endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2f0856d..754c259 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -426,7 +426,7 @@ struct zone {
const char *name;
/*
- * Number of MIGRATE_RESEVE page block. To maintain for just
+ * Number of MIGRATE_RESERVE page block. To maintain for just
* optimization. Protected by zone->lock.
*/
int nr_migrate_reserve_block;
@@ -474,16 +474,15 @@ struct zone {
unsigned long wait_table_bits;
ZONE_PADDING(_pad1_)
-
- /* Write-intensive fields used from the page allocator */
- spinlock_t lock;
-
/* free areas of different sizes */
struct free_area free_area[MAX_ORDER];
/* zone flags, see below */
unsigned long flags;
+ /* Write-intensive fields used from the page allocator */
+ spinlock_t lock;
+
ZONE_PADDING(_pad2_)
/* Write-intensive fields used by page reclaim */
@@ -763,6 +762,14 @@ typedef struct pglist_data {
/* Number of pages migrated during the rate limiting time interval */
unsigned long numabalancing_migrate_nr_pages;
#endif
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+ /*
+ * If memory initialisation on large machines is deferred then this
+ * is the first PFN that needs to be initialised.
+ */
+ unsigned long first_deferred_pfn;
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -843,16 +850,16 @@ static inline int populated_zone(struct zone *zone)
extern int movable_zone;
+#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
return movable_zone == ZONE_HIGHMEM;
-#elif defined(CONFIG_HIGHMEM)
- return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#else
- return 0;
+ return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
+#endif
static inline int is_highmem_idx(enum zone_type idx)
{
@@ -970,7 +977,6 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
* @z - The cursor used as a starting point for the search
* @highest_zoneidx - The zone index of the highest zone to return
* @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
*
* This function returns the next zone at or below a given zone index that is
* within the allowed nodemask using a cursor as the starting point for the
@@ -980,8 +986,7 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
*/
struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
- nodemask_t *nodes,
- struct zone **zone);
+ nodemask_t *nodes);
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
@@ -1000,8 +1005,10 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
nodemask_t *nodes,
struct zone **zone)
{
- return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
- zone);
+ struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
+ highest_zoneidx, nodes);
+ *zone = zonelist_zone(z);
+ return z;
}
/**
@@ -1018,7 +1025,8 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
zone; \
- z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \
+ z = next_zones_zonelist(++z, highidx, nodemask), \
+ zone = zonelist_zone(z)) \
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
@@ -1216,11 +1224,16 @@ void sparse_init(void);
#define sparse_index_init(_sec, _nid) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-bool early_pfn_in_nid(unsigned long pfn, int nid);
-#else
-#define early_pfn_in_nid(pfn, nid) (1)
-#endif
+/*
+ * During memory init, memblocks map pfns to nids. The search is expensive and
+ * this caches recent lookups. The implementation of __early_pfn_to_nid
+ * may treat start/end as pfns or sections.
+ */
+struct mminit_pfnnid_cache {
+ unsigned long last_start;
+ unsigned long last_end;
+ int last_nid;
+};
#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
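With next_zones_zonelist() no longer taking a zone output parameter, callers obtain the zone from the returned zoneref via zonelist_zone(), which is exactly what the reworked for_each_zone_zonelist_nodemask() above does. A minimal caller sketch, not part of this patch; the function name and the use of zone_page_state() are illustrative only:

#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>

/* Illustrative only: sum free pages over the allowed zones. */
static unsigned long example_count_free(struct zonelist *zonelist,
                                        enum zone_type highidx,
                                        nodemask_t *nodes)
{
        struct zoneref *z;
        struct zone *zone;
        unsigned long free = 0;

        /* zone is now derived by the macro via zonelist_zone(z) */
        for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodes)
                free += zone_page_state(zone, NR_FREE_PAGES);

        return free;
}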
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 745def8..34f25b7 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -53,9 +53,9 @@ struct ieee1394_device_id {
/**
* struct usb_device_id - identifies USB devices for probing and hotplugging
- * @match_flags: Bit mask controlling of the other fields are used to match
- * against new devices. Any field except for driver_info may be used,
- * although some only make sense in conjunction with other fields.
+ * @match_flags: Bit mask controlling which of the other fields are used to
+ * match against new devices. Any field except for driver_info may be
+ * used, although some only make sense in conjunction with other fields.
* This is usually set by a USB_DEVICE_*() macro, which sets all
* other fields in this structure except for driver_info.
* @idVendor: USB vendor ID for a device; numbers are assigned
@@ -189,6 +189,8 @@ struct css_device_id {
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
kernel_ulong_t driver_data;
+ __u32 cls;
+ __u32 cls_msk;
};
#define PNP_ID_LEN 8
@@ -220,8 +222,7 @@ struct serio_device_id {
/*
* Struct used for matching a device
*/
-struct of_device_id
-{
+struct of_device_id {
char name[32];
char type[32];
char compatible[128];
@@ -365,8 +366,6 @@ struct ssb_device_id {
} __attribute__((packed, aligned(2)));
#define SSB_DEVICE(_vendor, _coreid, _revision) \
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
-#define SSB_DEVTABLE_END \
- { 0, },
#define SSB_ANY_VENDOR 0xFFFF
#define SSB_ANY_ID 0xFFFF
@@ -381,8 +380,6 @@ struct bcma_device_id {
} __attribute__((packed,aligned(2)));
#define BCMA_CORE(_manuf, _id, _rev, _class) \
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
-#define BCMA_CORETABLE_END \
- { 0, },
#define BCMA_ANY_MANUF 0xFFFF
#define BCMA_ANY_ID 0xFFFF
@@ -551,6 +548,14 @@ struct amba_id {
void *data;
};
+/**
+ * struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus
+ * @type: Device type identifier.
+ */
+struct mips_cdmm_device_id {
+ __u8 type;
+};
+
/*
* Match x86 CPUs for CPU specific drivers.
* See documentation of "x86_match_cpu" for details.
@@ -596,9 +601,22 @@ struct ipack_device_id {
#define MEI_CL_MODULE_PREFIX "mei:"
#define MEI_CL_NAME_SIZE 32
+#define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+#define MEI_CL_UUID_ARGS(_u) \
+ _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \
+ _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15]
+/**
+ * struct mei_cl_device_id - MEI client device identifier
+ * @name: helper name
+ * @uuid: client uuid
+ * @driver_info: information used by the driver.
+ *
+ * Identifies an MEI client device by UUID and name.
+ */
struct mei_cl_device_id {
char name[MEI_CL_NAME_SIZE];
+ uuid_le uuid;
kernel_ulong_t driver_info;
};
@@ -626,4 +644,10 @@ struct mcb_device_id {
kernel_ulong_t driver_data;
};
+struct ulpi_device_id {
+ __u16 vendor;
+ __u16 product;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
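The new ulpi_device_id joins the other ID types in this header. A hedged sketch of how a driver might fill in such a table, assuming the ULPI bus and its file2alias support from the same release cycle; the table name and vendor/product values are made up for illustration:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Hypothetical vendor/product values, for illustration only. */
static const struct ulpi_device_id example_ulpi_ids[] = {
        { .vendor = 0x04cc, .product = 0x1a0b },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(ulpi, example_ulpi_ids);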
diff --git a/include/linux/module.h b/include/linux/module.h
index b653d7c..3a19c79 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,12 +11,14 @@
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/kmod.h>
+#include <linux/init.h>
#include <linux/elf.h>
#include <linux/stringify.h>
#include <linux/kobject.h>
#include <linux/moduleparam.h>
#include <linux/jump_label.h>
#include <linux/export.h>
+#include <linux/rbtree_latch.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -70,6 +72,89 @@ extern struct module_attribute module_uevent;
extern int init_module(void);
extern void cleanup_module(void);
+#ifndef MODULE
+/**
+ * module_init() - driver initialization entry point
+ * @x: function to be run at kernel boot time or module insertion
+ *
+ * module_init() will either be called during do_initcalls() (if
+ * builtin) or at module insertion time (if a module). There can only
+ * be one per module.
+ */
+#define module_init(x) __initcall(x);
+
+/**
+ * module_exit() - driver exit entry point
+ * @x: function to be run when driver is removed
+ *
+ * module_exit() will wrap the driver clean-up code
+ * with cleanup_module() when used with rmmod when
+ * the driver is a module. If the driver is statically
+ * compiled into the kernel, module_exit() has no effect.
+ * There can only be one per module.
+ */
+#define module_exit(x) __exitcall(x);
+
+#else /* MODULE */
+
+/*
+ * In most cases loadable modules do not need custom
+ * initcall levels. There are still some valid cases where
+ * a driver may be needed early if built in, and does not
+ * matter when built as a loadable module, such as bus
+ * snooping debug drivers.
+ */
+#define early_initcall(fn) module_init(fn)
+#define core_initcall(fn) module_init(fn)
+#define core_initcall_sync(fn) module_init(fn)
+#define postcore_initcall(fn) module_init(fn)
+#define postcore_initcall_sync(fn) module_init(fn)
+#define arch_initcall(fn) module_init(fn)
+#define subsys_initcall(fn) module_init(fn)
+#define subsys_initcall_sync(fn) module_init(fn)
+#define fs_initcall(fn) module_init(fn)
+#define fs_initcall_sync(fn) module_init(fn)
+#define rootfs_initcall(fn) module_init(fn)
+#define device_initcall(fn) module_init(fn)
+#define device_initcall_sync(fn) module_init(fn)
+#define late_initcall(fn) module_init(fn)
+#define late_initcall_sync(fn) module_init(fn)
+
+#define console_initcall(fn) module_init(fn)
+#define security_initcall(fn) module_init(fn)
+
+/* Each module must use one module_init(). */
+#define module_init(initfn) \
+ static inline initcall_t __inittest(void) \
+ { return initfn; } \
+ int init_module(void) __attribute__((alias(#initfn)));
+
+/* This is only required if you want to be unloadable. */
+#define module_exit(exitfn) \
+ static inline exitcall_t __exittest(void) \
+ { return exitfn; } \
+ void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+#endif
+
+/* This means "can be init if no module support, otherwise module load
+ may call it." */
+#ifdef CONFIG_MODULES
+#define __init_or_module
+#define __initdata_or_module
+#define __initconst_or_module
+#define __INIT_OR_MODULE .text
+#define __INITDATA_OR_MODULE .data
+#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
+#else
+#define __init_or_module __init
+#define __initdata_or_module __initdata
+#define __initconst_or_module __initconst
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#define __INITRODATA_OR_MODULE __INITRODATA
+#endif /*CONFIG_MODULES*/
+
/* Archs provide a method of finding the correct exception table. */
struct exception_table_entry;
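The module_init()/module_exit() documentation moved into this header describes the usual pairing of an init and an exit hook. A minimal sketch of a module using them; the function names and messages are illustrative, not part of the patch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
        pr_info("example: loaded\n");
        return 0;        /* a non-zero return would abort the load */
}

static void __exit example_exit(void)
{
        pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");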
@@ -135,7 +220,7 @@ void trim_init_extable(struct module *m);
#ifdef MODULE
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
- extern const struct type##_device_id __mod_##type##__##name##_device_table \
+extern const typeof(name) __mod_##type##__##name##_device_table \
__attribute__ ((unused, alias(__stringify(name))))
#else /* !MODULE */
#define MODULE_DEVICE_TABLE(type, name)
@@ -210,6 +295,13 @@ enum module_state {
MODULE_STATE_UNFORMED, /* Still setting it up. */
};
+struct module;
+
+struct mod_tree_node {
+ struct module *mod;
+ struct latch_tree_node node;
+};
+
struct module {
enum module_state state;
@@ -232,6 +324,9 @@ struct module {
unsigned int num_syms;
/* Kernel parameters. */
+#ifdef CONFIG_SYSFS
+ struct mutex param_lock;
+#endif
struct kernel_param *kp;
unsigned int num_kp;
@@ -257,6 +352,8 @@ struct module {
bool sig_ok;
#endif
+ bool async_probe_requested;
+
/* symbols that will be GPL-only in the near future. */
const struct kernel_symbol *gpl_future_syms;
const unsigned long *gpl_future_crcs;
@@ -269,8 +366,15 @@ struct module {
/* Startup function. */
int (*init)(void);
- /* If this is non-NULL, vfree after init() returns */
- void *module_init;
+ /*
+ * If this is non-NULL, vfree() after init() returns.
+ *
+ * Cacheline align here, such that:
+ * module_init, module_core, init_size, core_size,
+ * init_text_size, core_text_size and mtn_core::{mod,node[0]}
+ * are on the same cacheline.
+ */
+ void *module_init ____cacheline_aligned;
/* Here is the actual code + data, vfree'd on unload. */
void *module_core;
@@ -281,6 +385,16 @@ struct module {
/* The size of the executable code in each section. */
unsigned int init_text_size, core_text_size;
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+ /*
+ * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
+ * above entries such that a regular lookup will only touch one
+ * cacheline.
+ */
+ struct mod_tree_node mtn_core;
+ struct mod_tree_node mtn_init;
+#endif
+
/* Size of RO sections of the module (text+rodata) */
unsigned int init_ro_size, core_ro_size;
@@ -336,14 +450,20 @@ struct module {
const char **trace_bprintk_fmt_start;
#endif
#ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call **trace_events;
+ struct trace_event_call **trace_events;
unsigned int num_trace_events;
+ struct trace_enum_map **trace_enums;
+ unsigned int num_trace_enums;
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
unsigned int num_ftrace_callsites;
unsigned long *ftrace_callsites;
#endif
+#ifdef CONFIG_LIVEPATCH
+ bool klp_alive;
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head source_list;
@@ -361,7 +481,7 @@ struct module {
ctor_fn_t *ctors;
unsigned int num_ctors;
#endif
-};
+} ____cacheline_aligned;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
@@ -415,14 +535,22 @@ struct symsearch {
bool unused;
};
-/* Search for an exported symbol by name. */
+/*
+ * Search for an exported symbol by name.
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
const unsigned long **crc,
bool gplok,
bool warn);
-/* Walk the exported symbol table */
+/*
+ * Walk the exported symbol table
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
struct module *owner,
void *data), void *data);
@@ -502,6 +630,11 @@ int unregister_module_notifier(struct notifier_block *nb);
extern void print_modules(void);
+static inline bool module_requested_async_probing(struct module *module)
+{
+ return module && module->async_probe_requested;
+}
+
#else /* !CONFIG_MODULES... */
/* Given an address, look for it in the exception tables. */
@@ -612,6 +745,12 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
static inline void print_modules(void)
{
}
+
+static inline bool module_requested_async_probing(struct module *module)
+{
+ return false;
+}
+
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
@@ -649,4 +788,16 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_MODULE_SIG
+static inline bool module_sig_ok(struct module *module)
+{
+ return module->sig_ok;
+}
+#else /* !CONFIG_MODULE_SIG */
+static inline bool module_sig_ok(struct module *module)
+{
+ return true;
+}
+#endif /* CONFIG_MODULE_SIG */
+
#endif /* _LINUX_MODULE_H */
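The module_sig_ok() helper added above gives callers a single check that degrades to true when CONFIG_MODULE_SIG is off. A small hedged sketch of a caller; the wrapper name is hypothetical:

#include <linux/module.h>

/* Illustrative wrapper: accept built-in code (mod == NULL) or any module
 * whose signature state is acceptable. */
static bool example_module_trusted(struct module *mod)
{
        return !mod || module_sig_ok(mod);
}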
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index f755626..4d0cb9b 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
+
+#ifdef CONFIG_KASAN
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
#endif
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 1c9effa..c12f214 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -67,8 +67,9 @@ enum {
struct kernel_param {
const char *name;
+ struct module *mod;
const struct kernel_param_ops *ops;
- u16 perm;
+ const u16 perm;
s8 level;
u8 flags;
union {
@@ -108,7 +109,7 @@ struct kparam_array
*
* @perm is 0 if the variable is not to appear in sysfs, or 0444
* for world-readable, 0644 for root-writable, etc. Note that if it
- * is writable, you may need to use kparam_block_sysfs_write() around
+ * is writable, you may need to use kernel_param_lock() around
* accesses (esp. charp, which can be kfreed when it changes).
*
* The @type is simply pasted to refer to a param_ops_##type and a
@@ -216,16 +217,16 @@ struct kparam_array
parameters. */
#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
/* Default value instead of permissions? */ \
- static const char __param_str_##name[] = prefix #name; \
+ static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
__used \
__attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
- = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \
- level, flags, { arg } }
+ = { __param_str_##name, THIS_MODULE, ops, \
+ VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
/* Obsolete - use module_param_cb() */
#define module_param_call(name, set, get, arg, perm) \
- static struct kernel_param_ops __param_ops_##name = \
+ static const struct kernel_param_ops __param_ops_##name = \
{ .flags = 0, (void *)set, (void *)get }; \
__module_param_call(MODULE_PARAM_PREFIX, \
name, &__param_ops_##name, arg, \
@@ -238,58 +239,14 @@ __check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
return 0;
}
-/**
- * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs.
- * @name: the name of the parameter
- *
- * There's no point blocking write on a paramter that isn't writable via sysfs!
- */
-#define kparam_block_sysfs_write(name) \
- do { \
- BUG_ON(!(__param_##name.perm & 0222)); \
- __kernel_param_lock(); \
- } while (0)
-
-/**
- * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again.
- * @name: the name of the parameter
- */
-#define kparam_unblock_sysfs_write(name) \
- do { \
- BUG_ON(!(__param_##name.perm & 0222)); \
- __kernel_param_unlock(); \
- } while (0)
-
-/**
- * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs.
- * @name: the name of the parameter
- *
- * This also blocks sysfs writes.
- */
-#define kparam_block_sysfs_read(name) \
- do { \
- BUG_ON(!(__param_##name.perm & 0444)); \
- __kernel_param_lock(); \
- } while (0)
-
-/**
- * kparam_unblock_sysfs_read - allows sysfs to read a parameter again.
- * @name: the name of the parameter
- */
-#define kparam_unblock_sysfs_read(name) \
- do { \
- BUG_ON(!(__param_##name.perm & 0444)); \
- __kernel_param_unlock(); \
- } while (0)
-
#ifdef CONFIG_SYSFS
-extern void __kernel_param_lock(void);
-extern void __kernel_param_unlock(void);
+extern void kernel_param_lock(struct module *mod);
+extern void kernel_param_unlock(struct module *mod);
#else
-static inline void __kernel_param_lock(void)
+static inline void kernel_param_lock(struct module *mod)
{
}
-static inline void __kernel_param_unlock(void)
+static inline void kernel_param_unlock(struct module *mod)
{
}
#endif
@@ -310,6 +267,15 @@ static inline void __kernel_param_unlock(void)
#define core_param(name, var, type, perm) \
param_check_##type(name, &(var)); \
__module_param_call("", name, &param_ops_##type, &var, perm, -1, 0)
+
+/**
+ * core_param_unsafe - same as core_param but taints the kernel
+ */
+#define core_param_unsafe(name, var, type, perm) \
+ param_check_##type(name, &(var)); \
+ __module_param_call("", name, &param_ops_##type, &var, perm, \
+ -1, KERNEL_PARAM_FL_UNSAFE)
+
#endif /* !MODULE */
/**
@@ -357,8 +323,9 @@ extern char *parse_args(const char *name,
unsigned num,
s16 level_min,
s16 level_max,
+ void *arg,
int (*unknown)(char *param, char *val,
- const char *doing));
+ const char *doing, void *arg));
/* Called by module remove. */
#ifdef CONFIG_SYSFS
@@ -376,64 +343,70 @@ static inline void destroy_params(const struct kernel_param *params,
#define __param_check(name, p, type) \
static inline type __always_unused *__check_##name(void) { return(p); }
-extern struct kernel_param_ops param_ops_byte;
+extern const struct kernel_param_ops param_ops_byte;
extern int param_set_byte(const char *val, const struct kernel_param *kp);
extern int param_get_byte(char *buffer, const struct kernel_param *kp);
#define param_check_byte(name, p) __param_check(name, p, unsigned char)
-extern struct kernel_param_ops param_ops_short;
+extern const struct kernel_param_ops param_ops_short;
extern int param_set_short(const char *val, const struct kernel_param *kp);
extern int param_get_short(char *buffer, const struct kernel_param *kp);
#define param_check_short(name, p) __param_check(name, p, short)
-extern struct kernel_param_ops param_ops_ushort;
+extern const struct kernel_param_ops param_ops_ushort;
extern int param_set_ushort(const char *val, const struct kernel_param *kp);
extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
#define param_check_ushort(name, p) __param_check(name, p, unsigned short)
-extern struct kernel_param_ops param_ops_int;
+extern const struct kernel_param_ops param_ops_int;
extern int param_set_int(const char *val, const struct kernel_param *kp);
extern int param_get_int(char *buffer, const struct kernel_param *kp);
#define param_check_int(name, p) __param_check(name, p, int)
-extern struct kernel_param_ops param_ops_uint;
+extern const struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)
-extern struct kernel_param_ops param_ops_long;
+extern const struct kernel_param_ops param_ops_long;
extern int param_set_long(const char *val, const struct kernel_param *kp);
extern int param_get_long(char *buffer, const struct kernel_param *kp);
#define param_check_long(name, p) __param_check(name, p, long)
-extern struct kernel_param_ops param_ops_ulong;
+extern const struct kernel_param_ops param_ops_ulong;
extern int param_set_ulong(const char *val, const struct kernel_param *kp);
extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
#define param_check_ulong(name, p) __param_check(name, p, unsigned long)
-extern struct kernel_param_ops param_ops_ullong;
+extern const struct kernel_param_ops param_ops_ullong;
extern int param_set_ullong(const char *val, const struct kernel_param *kp);
extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
-extern struct kernel_param_ops param_ops_charp;
+extern const struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
#define param_check_charp(name, p) __param_check(name, p, char *)
/* We used to allow int as well as bool. We're taking that away! */
-extern struct kernel_param_ops param_ops_bool;
+extern const struct kernel_param_ops param_ops_bool;
extern int param_set_bool(const char *val, const struct kernel_param *kp);
extern int param_get_bool(char *buffer, const struct kernel_param *kp);
#define param_check_bool(name, p) __param_check(name, p, bool)
-extern struct kernel_param_ops param_ops_invbool;
+extern const struct kernel_param_ops param_ops_bool_enable_only;
+extern int param_set_bool_enable_only(const char *val,
+ const struct kernel_param *kp);
+/* getter is the same as for the regular bool */
+#define param_check_bool_enable_only param_check_bool
+
+extern const struct kernel_param_ops param_ops_invbool;
extern int param_set_invbool(const char *val, const struct kernel_param *kp);
extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
#define param_check_invbool(name, p) __param_check(name, p, bool)
/* An int, which can only be set like a bool (though it shows as an int). */
-extern struct kernel_param_ops param_ops_bint;
+extern const struct kernel_param_ops param_ops_bint;
extern int param_set_bint(const char *val, const struct kernel_param *kp);
#define param_get_bint param_get_int
#define param_check_bint param_check_int
@@ -477,9 +450,9 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
perm, -1, 0); \
__MODULE_PARM_TYPE(name, "array of " #type)
-extern struct kernel_param_ops param_array_ops;
+extern const struct kernel_param_ops param_array_ops;
-extern struct kernel_param_ops param_ops_string;
+extern const struct kernel_param_ops param_ops_string;
extern int param_set_copystring(const char *val, const struct kernel_param *);
extern int param_get_string(char *buffer, const struct kernel_param *kp);
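With the kparam_block_sysfs_* macros removed, code that reads writable charp parameters takes the per-module lock via kernel_param_lock()/kernel_param_unlock(), as the updated @perm note above says. A hedged sketch; the parameter and function names are illustrative:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

static char *example_name = "default";
module_param(example_name, charp, 0644);
MODULE_PARM_DESC(example_name, "illustrative string parameter");

static void example_show_name(void)
{
        /* charp values can be kfree()d on a sysfs write, so hold the
         * per-module lock (this replaces kparam_block_sysfs_write()). */
        kernel_param_lock(THIS_MODULE);
        pr_info("example_name=%s\n", example_name);
        kernel_param_unlock(THIS_MODULE);
}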
diff --git a/include/linux/mount.h b/include/linux/mount.h
index c2c561d..f822c3c 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -61,6 +61,7 @@ struct mnt_namespace;
#define MNT_DOOMED 0x1000000
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
+#define MNT_UMOUNT 0x8000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
@@ -92,6 +93,6 @@ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
-extern dev_t name_to_dev_t(char *name);
+extern dev_t name_to_dev_t(const char *name);
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 5af1b81..641b7d6 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -81,6 +81,8 @@ MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
int mpi_fromstr(MPI val, const char *str);
u32 mpi_get_keyid(MPI a, u32 *keyid);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
+int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ int *sign);
void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign);
@@ -142,4 +144,17 @@ int mpi_rshift(MPI x, MPI a, unsigned n);
/*-- mpi-inv.c --*/
int mpi_invm(MPI x, MPI u, MPI v);
+/* inline functions */
+
+/**
+ * mpi_get_size() - returns max size required to store the number
+ *
+ * @a: A multi-precision integer for which we want to allocate a buffer
+ *
+ * Return: size required to store the number
+ */
+static inline unsigned int mpi_get_size(MPI a)
+{
+ return a->nlimbs * BYTES_PER_MPI_LIMB;
+}
#endif /*G10_MPI_H */
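mpi_get_size() pairs naturally with the new mpi_read_buffer() when serializing an MPI into a caller-allocated buffer. A hedged sketch, assuming mpi_read_buffer() returns 0 on success and fills *nbytes; the function name is illustrative:

#include <linux/mpi.h>
#include <linux/slab.h>

static int example_mpi_to_buf(MPI a, u8 **out, unsigned int *out_len)
{
        unsigned int nbytes = mpi_get_size(a);
        u8 *buf;
        int sign, ret;

        buf = kmalloc(nbytes, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* assumed to return 0 on success and set *out_len */
        ret = mpi_read_buffer(a, buf, nbytes, out_len, &sign);
        if (ret) {
                kfree(buf);
                return ret;
        }

        *out = buf;
        return 0;
}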
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 299d7d3..9b57a9b 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -296,183 +296,19 @@ struct cfi_private {
struct flchip chips[0]; /* per-chip data structure for each chip */
};
-/*
- * Returns the command address according to the given geometry.
- */
-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
- struct map_info *map, struct cfi_private *cfi)
-{
- unsigned bankwidth = map_bankwidth(map);
- unsigned interleave = cfi_interleave(cfi);
- unsigned type = cfi->device_type;
- uint32_t addr;
-
- addr = (cmd_ofs * type) * interleave;
-
- /* Modify the unlock address if we are in compatibility mode.
- * For 16bit devices on 8 bit busses
- * and 32bit devices on 16 bit busses
- * set the low bit of the alternating bit sequence of the address.
- */
- if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
- addr |= (type >> 1)*interleave;
-
- return addr;
-}
-
-/*
- * Transforms the CFI command for the given geometry (bus width & interleave).
- * It looks too long to be inline, but in the common case it should almost all
- * get optimised away.
- */
-static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
-{
- map_word val = { {0} };
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onecmd;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- /* First, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- default: BUG();
- case 1:
- onecmd = cmd;
- break;
- case 2:
- onecmd = cpu_to_cfi16(map, cmd);
- break;
- case 4:
- onecmd = cpu_to_cfi32(map, cmd);
- break;
- }
-
- /* Now replicate it across the size of an unsigned long, or
- just to the bus width as appropriate */
- switch (chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- onecmd |= (onecmd << (chip_mode * 32));
-#endif
- case 4:
- onecmd |= (onecmd << (chip_mode * 16));
- case 2:
- onecmd |= (onecmd << (chip_mode * 8));
- case 1:
- ;
- }
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi);
- /* And finally, for the multi-word case, replicate it
- in all words in the structure */
- for (i=0; i < words_per_bus; i++) {
- val.x[i] = onecmd;
- }
-
- return val;
-}
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
#define CMD(x) cfi_build_cmd((x), map, cfi)
-
-static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
- struct cfi_private *cfi)
-{
- int wordwidth, words_per_bus, chip_mode, chips_per_word;
- unsigned long onestat, res = 0;
- int i;
-
- /* We do it this way to give the compiler a fighting chance
- of optimising away all the crap for 'bankwidth' larger than
- an unsigned long, in the common case where that support is
- disabled */
- if (map_bankwidth_is_large(map)) {
- wordwidth = sizeof(unsigned long);
- words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
- } else {
- wordwidth = map_bankwidth(map);
- words_per_bus = 1;
- }
-
- chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
- chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-
- onestat = val.x[0];
- /* Or all status words together */
- for (i=1; i < words_per_bus; i++) {
- onestat |= val.x[i];
- }
-
- res = onestat;
- switch(chips_per_word) {
- default: BUG();
-#if BITS_PER_LONG >= 64
- case 8:
- res |= (onestat >> (chip_mode * 32));
-#endif
- case 4:
- res |= (onestat >> (chip_mode * 16));
- case 2:
- res |= (onestat >> (chip_mode * 8));
- case 1:
- ;
- }
-
- /* Last, determine what the bit-pattern should be for a single
- device, according to chip mode and endianness... */
- switch (chip_mode) {
- case 1:
- break;
- case 2:
- res = cfi16_to_cpu(map, res);
- break;
- case 4:
- res = cfi32_to_cpu(map, res);
- break;
- default: BUG();
- }
- return res;
-}
-
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi);
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
-
-/*
- * Sends a CFI command to a bank of flash for the given geometry.
- *
- * Returns the offset in flash where the command was written.
- * If prev_val is non-null, it will be set to the value at the command address,
- * before the command was written.
- */
-static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
struct map_info *map, struct cfi_private *cfi,
- int type, map_word *prev_val)
-{
- map_word val;
- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
- val = cfi_build_cmd(cmd, map, cfi);
-
- if (prev_val)
- *prev_val = map_read(map, addr);
-
- map_write(map, val, addr);
-
- return addr - base;
-}
+ int type, map_word *prev_val);
static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
@@ -506,15 +342,7 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
}
}
-static inline void cfi_udelay(int us)
-{
- if (us >= 1000) {
- msleep((us+999)/1000);
- } else {
- udelay(us);
- cond_resched();
- }
-}
+void cfi_udelay(int us);
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi);
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 5f487d7..29975c7 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -77,7 +77,7 @@
/* ensure we never evaluate anything shorter than an unsigned long
* to zero, and ensure we'll never miss the end of a comparison (bjd) */
-#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1))/ sizeof(unsigned long))
+#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
# ifdef map_bankwidth
@@ -181,7 +181,7 @@ static inline int map_bankwidth_supported(int w)
}
}
-#define MAX_MAP_LONGS ( ((MAX_MAP_BANKWIDTH*8) + BITS_PER_LONG - 1) / BITS_PER_LONG )
+#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)
typedef union {
unsigned long x[MAX_MAP_LONGS];
@@ -264,20 +264,22 @@ void unregister_mtd_chip_driver(struct mtd_chip_driver *);
struct mtd_info *do_map_probe(const char *name, struct map_info *map);
void map_destroy(struct mtd_info *mtd);
-#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0)
-#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0)
+#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
+#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)
#define INVALIDATE_CACHED_RANGE(map, from, size) \
- do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
+ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
{
int i;
- for (i=0; i<map_words(map); i++) {
+
+ for (i = 0; i < map_words(map); i++) {
if (val1.x[i] != val2.x[i])
return 0;
}
+
return 1;
}
@@ -286,9 +288,9 @@ static inline map_word map_word_and(struct map_info *map, map_word val1, map_wor
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] & val2.x[i];
- }
+
return r;
}
@@ -297,9 +299,9 @@ static inline map_word map_word_clr(struct map_info *map, map_word val1, map_wor
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] & ~val2.x[i];
- }
+
return r;
}
@@ -308,22 +310,33 @@ static inline map_word map_word_or(struct map_info *map, map_word val1, map_word
map_word r;
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++)
r.x[i] = val1.x[i] | val2.x[i];
- }
+
return r;
}
-#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b))
+static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
+{
+ int i;
+
+ for (i = 0; i < map_words(map); i++) {
+ if ((val1.x[i] & val2.x[i]) != val3.x[i])
+ return 0;
+ }
+
+ return 1;
+}
static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
{
int i;
- for (i=0; i<map_words(map); i++) {
+ for (i = 0; i < map_words(map); i++) {
if (val1.x[i] & val2.x[i])
return 1;
}
+
return 0;
}
@@ -355,14 +368,16 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
if (map_bankwidth_is_large(map)) {
char *dest = (char *)&orig;
+
memcpy(dest+start, buf, len);
} else {
- for (i=start; i < start+len; i++) {
+ for (i = start; i < start+len; i++) {
int bitpos;
+
#ifdef __LITTLE_ENDIAN
- bitpos = i*8;
+ bitpos = i * 8;
#else /* __BIG_ENDIAN */
- bitpos = (map_bankwidth(map)-1-i)*8;
+ bitpos = (map_bankwidth(map) - 1 - i) * 8;
#endif
orig.x[0] &= ~(0xff << bitpos);
orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
@@ -384,9 +399,10 @@ static inline map_word map_word_ff(struct map_info *map)
if (map_bankwidth(map) < MAP_FF_LIMIT) {
int bw = 8 * map_bankwidth(map);
+
r.x[0] = (1UL << bw) - 1;
} else {
- for (i=0; i<map_words(map); i++)
+ for (i = 0; i < map_words(map); i++)
r.x[i] = ~0UL;
}
return r;
@@ -407,7 +423,7 @@ static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
r.x[0] = __raw_readq(map->virt + ofs);
#endif
else if (map_bankwidth_is_large(map))
- memcpy_fromio(r.x, map->virt+ofs, map->bankwidth);
+ memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
else
BUG();
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 031ff3a..f17fa75 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -227,6 +227,7 @@ struct mtd_info {
int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
int (*_suspend) (struct mtd_info *mtd);
void (*_resume) (struct mtd_info *mtd);
+ void (*_reboot) (struct mtd_info *mtd);
/*
* If the driver is something smart, like UBI, it may need to maintain
* its own reference counting. The below functions are only for driver.
@@ -408,4 +409,6 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+
#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 3d4ea7e..f25e2bd 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -26,6 +26,8 @@
struct mtd_info;
struct nand_flash_dev;
+struct device_node;
+
/* Scan and identify a NAND device */
extern int nand_scan(struct mtd_info *mtd, int max_chips);
/*
@@ -542,6 +544,7 @@ struct nand_buffers {
* flash device
* @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
* flash device.
+ * @dn: [BOARDSPECIFIC] device node describing this instance
* @read_byte: [REPLACEABLE] read one byte from the chip
* @read_word: [REPLACEABLE] read one word from the chip
* @write_byte: [REPLACEABLE] write a single byte to the chip on the
@@ -644,6 +647,8 @@ struct nand_chip {
void __iomem *IO_ADDR_R;
void __iomem *IO_ADDR_W;
+ struct device_node *dn;
+
uint8_t (*read_byte)(struct mtd_info *mtd);
u16 (*read_word)(struct mtd_info *mtd);
void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
@@ -833,7 +838,6 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_default_bbt(struct mtd_info *mtd);
extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 63aeccf..e540952 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -56,6 +56,10 @@
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17 /* Bank register write */
+/* Used for Micron flashes only. */
+#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
+#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+
/* Status Register bits. */
#define SR_WIP 1 /* Write in progress */
#define SR_WEL 2 /* Write enable latch */
@@ -67,6 +71,9 @@
#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
+/* Enhanced Volatile Configuration Register bits */
+#define EVCR_QUAD_EN_MICRON 0x80 /* Micron Quad I/O */
+
/* Flag Status Register bits */
#define FSR_READY 0x80
@@ -148,6 +155,8 @@ enum spi_nor_option_flags {
* @write: [DRIVER-SPECIFIC] write data to the SPI NOR
* @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
* at the offset @offs
+ * @lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
* @priv: the private data
*/
struct spi_nor {
@@ -182,6 +191,9 @@ struct spi_nor {
size_t len, size_t *retlen, const u_char *write_buf);
int (*erase)(struct spi_nor *nor, loff_t offs);
+ int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+
void *priv;
};
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index c3918a0..1e271cb 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -23,22 +23,32 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+#include <linux/scatterlist.h>
#include <mtd/ubi-user.h>
/* All volumes/LEBs */
#define UBI_ALL -1
/*
+ * Maximum number of scatter-gather list entries;
+ * we use only 64 to keep the memory footprint low.
+ */
+#define UBI_MAX_SG_COUNT 64
+
+/*
* enum ubi_open_mode - UBI volume open mode constants.
*
* UBI_READONLY: read-only mode
* UBI_READWRITE: read-write mode
* UBI_EXCLUSIVE: exclusive mode
+ * UBI_METAONLY: modify only the volume meta-data,
* i.e. the data stored in the volume table, but not in any of the volume's LEBs.
*/
enum {
UBI_READONLY = 1,
UBI_READWRITE,
- UBI_EXCLUSIVE
+ UBI_EXCLUSIVE,
+ UBI_METAONLY
};
/**
@@ -116,6 +126,35 @@ struct ubi_volume_info {
};
/**
+ * struct ubi_sgl - UBI scatter gather list data structure.
+ * @list_pos: current position in @sg[]
+ * @page_pos: current position in @sg[@list_pos]
+ * @sg: the scatter gather list itself
+ *
+ * ubi_sgl is a wrapper around a scatter list which keeps track of the
+ * current position in the list and the current list item such that
+ * it can be used across multiple ubi_leb_read_sg() calls.
+ */
+struct ubi_sgl {
+ int list_pos;
+ int page_pos;
+ struct scatterlist sg[UBI_MAX_SG_COUNT];
+};
+
+/**
+ * ubi_sgl_init - initialize an UBI scatter gather list data structure.
+ * @usgl: the UBI scatter gather struct itself
+ *
+ * Please note that you still have to use sg_init_table() or an equivalent
+ * function to initialize the underlying struct scatterlist.
+ */
+static inline void ubi_sgl_init(struct ubi_sgl *usgl)
+{
+ usgl->list_pos = 0;
+ usgl->page_pos = 0;
+}
+
+/**
* struct ubi_device_info - UBI device description data structure.
* @ubi_num: ubi device number
* @leb_size: logical eraseblock size on this UBI device
@@ -210,6 +249,8 @@ int ubi_unregister_volume_notifier(struct notifier_block *nb);
void ubi_close_volume(struct ubi_volume_desc *desc);
int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
int len, int check);
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check);
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
int offset, int len);
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
@@ -230,4 +271,14 @@ static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
{
return ubi_leb_read(desc, lnum, buf, offset, len, 0);
}
+
+/*
+ * This function is the same as the 'ubi_leb_read_sg()' function, but it does
+ * not provide the checking capability.
+ */
+static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
+ struct ubi_sgl *sgl, int offset, int len)
+{
+ return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0);
+}
#endif /* !__LINUX_UBI_H__ */
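ubi_sgl_init() only resets UBI's cursor; the scatterlist itself still has to be set up by the caller, as the comment above notes. A hedged sketch of an unchecked read through the new ubi_read_sg() wrapper; the function name is illustrative:

#include <linux/mtd/ubi.h>
#include <linux/scatterlist.h>

static int example_read_leb_sg(struct ubi_volume_desc *desc, int lnum,
                               void *buf, int len)
{
        struct ubi_sgl sgl;

        ubi_sgl_init(&sgl);              /* reset UBI's cursor */
        sg_init_table(sgl.sg, 1);        /* caller owns the scatterlist */
        sg_set_buf(&sgl.sg[0], buf, len);

        /* unchecked read, like ubi_leb_read() but into the sg list */
        return ubi_read_sg(desc, lnum, &sgl, 0, len);
}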
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index cc31498..2cb7531 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -59,7 +59,6 @@ struct mutex {
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
- const char *name;
void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/namei.h b/include/linux/namei.h
index c899077..d8c6334 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -1,16 +1,15 @@
#ifndef _LINUX_NAMEI_H
#define _LINUX_NAMEI_H
-#include <linux/dcache.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
#include <linux/path.h>
-
-struct vfsmount;
-struct nameidata;
+#include <linux/fcntl.h>
+#include <linux/errno.h>
enum { MAX_NESTED_LINKS = 8 };
+#define MAXSYMLINKS 40
+
/*
* Type of the last component on LOOKUP_PARENT
*/
@@ -45,13 +44,29 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_ROOT 0x2000
#define LOOKUP_EMPTY 0x4000
-extern int user_path_at(int, const char __user *, unsigned, struct path *);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
-#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
-#define user_path_dir(name, path) \
- user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
+static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
+ struct path *path)
+{
+ return user_path_at_empty(dfd, name, flags, path, NULL);
+}
+
+static inline int user_path(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL);
+}
+
+static inline int user_lpath(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name, 0, path, NULL);
+}
+
+static inline int user_path_dir(const char __user *name, struct path *path)
+{
+ return user_path_at_empty(AT_FDCWD, name,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL);
+}
extern int kern_path(const char *, unsigned, struct path *);
@@ -70,9 +85,7 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern void nd_jump_link(struct nameidata *nd, struct path *path);
-extern void nd_set_link(struct nameidata *nd, char *path);
-extern char *nd_get_link(struct nameidata *nd);
+extern void nd_jump_link(struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
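user_path_at() and its user_path*() siblings are now thin inlines over user_path_at_empty(), so callers keep the same behaviour. A hedged caller sketch; the function name is illustrative:

#include <linux/namei.h>
#include <linux/path.h>

static int example_resolve(int dfd, const char __user *name)
{
        struct path path;
        int err = user_path_at(dfd, name, LOOKUP_FOLLOW, &path);

        if (err)
                return err;
        /* ... inspect path.mnt / path.dentry ... */
        path_put(&path);
        return 0;
}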
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
deleted file mode 100644
index f62f78a..0000000
--- a/include/linux/nbd.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * 1999 Copyright (C) Pavel Machek, pavel@ucw.cz. This code is GPL.
- * 1999/11/04 Copyright (C) 1999 VMware, Inc. (Regis "HPReg" Duchesne)
- * Made nbd_end_request() use the io_request_lock
- * 2001 Copyright (C) Steven Whitehouse
- * New nbd_end_request() for compatibility with new linux block
- * layer code.
- * 2003/06/24 Louis D. Langholtz <ldl@aros.net>
- * Removed unneeded blksize_bits field from nbd_device struct.
- * Cleanup PARANOIA usage & code.
- * 2004/02/19 Paul Clements
- * Removed PARANOIA, plus various cleanup and comments
- */
-#ifndef LINUX_NBD_H
-#define LINUX_NBD_H
-
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <uapi/linux/nbd.h>
-
-struct request;
-
-struct nbd_device {
- int flags;
- int harderror; /* Code of hard error */
- struct socket * sock; /* If == NULL, device is not ready, yet */
- int magic;
-
- spinlock_t queue_lock;
- struct list_head queue_head; /* Requests waiting result */
- struct request *active_req;
- wait_queue_head_t active_wq;
- struct list_head waiting_queue; /* Requests to be sent */
- wait_queue_head_t waiting_wq;
-
- struct mutex tx_lock;
- struct gendisk *disk;
- int blksize;
- u64 bytesize;
- pid_t pid; /* pid of nbd-client, if attached */
- int xmit_timeout;
- int disconnect; /* a disconnect has been requested by user */
-};
-
-#endif
diff --git a/include/linux/nd.h b/include/linux/nd.h
new file mode 100644
index 0000000..507e47c
--- /dev/null
+++ b/include/linux/nd.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __LINUX_ND_H__
+#define __LINUX_ND_H__
+#include <linux/fs.h>
+#include <linux/ndctl.h>
+#include <linux/device.h>
+
+struct nd_device_driver {
+ struct device_driver drv;
+ unsigned long type;
+ int (*probe)(struct device *dev);
+ int (*remove)(struct device *dev);
+};
+
+static inline struct nd_device_driver *to_nd_device_driver(
+ struct device_driver *drv)
+{
+ return container_of(drv, struct nd_device_driver, drv);
+};
+
+/**
+ * struct nd_namespace_common - core infrastructure of a namespace
+ * @force_raw: ignore other personalities for the namespace (e.g. btt)
+ * @dev: device model node
+ * @claim: when set, another personality has taken ownership of the namespace
+ * @rw_bytes: access the raw namespace capacity with byte-aligned transfers
+ */
+struct nd_namespace_common {
+ int force_raw;
+ struct device dev;
+ struct device *claim;
+ int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
+ void *buf, size_t size, int rw);
+};
+
+static inline struct nd_namespace_common *to_ndns(struct device *dev)
+{
+ return container_of(dev, struct nd_namespace_common, dev);
+}
+
+/**
+ * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
+ * @dev: namespace device created by the nd region driver
+ * @res: struct resource conversion of a NFIT SPA table
+ */
+struct nd_namespace_io {
+ struct nd_namespace_common common;
+ struct resource res;
+};
+
+/**
+ * struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
+ * @nsio: device and system physical address range to drive
+ * @alt_name: namespace name supplied in the dimm label
+ * @uuid: namespace name supplied in the dimm label
+ */
+struct nd_namespace_pmem {
+ struct nd_namespace_io nsio;
+ char *alt_name;
+ u8 *uuid;
+};
+
+/**
+ * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
+ * @alt_name: namespace name supplied in the dimm label
+ * @uuid: namespace name supplied in the dimm label
+ * @id: ida allocated id
+ * @lbasize: blk namespaces have a native sector size when btt not present
+ * @num_resources: number of dpa extents to claim
+ * @res: discontiguous dpa extents for given dimm
+ */
+struct nd_namespace_blk {
+ struct nd_namespace_common common;
+ char *alt_name;
+ u8 *uuid;
+ int id;
+ unsigned long lbasize;
+ int num_resources;
+ struct resource **res;
+};
+
+static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev)
+{
+ return container_of(dev, struct nd_namespace_io, common.dev);
+}
+
+static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev)
+{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
+
+ return container_of(nsio, struct nd_namespace_pmem, nsio);
+}
+
+static inline struct nd_namespace_blk *to_nd_namespace_blk(struct device *dev)
+{
+ return container_of(dev, struct nd_namespace_blk, common.dev);
+}
+
+/**
+ * nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
+ * @ndns: device to read
+ * @offset: namespace-relative starting offset
+ * @buf: buffer to fill
+ * @size: transfer length
+ *
+ * @buf is up-to-date upon return from this routine.
+ */
+static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
+ resource_size_t offset, void *buf, size_t size)
+{
+ return ndns->rw_bytes(ndns, offset, buf, size, READ);
+}
+
+/**
+ * nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
+ * @ndns: device to write to
+ * @offset: namespace-relative starting offset
+ * @buf: buffer to drain
+ * @size: transfer length
+ *
+ * NVDIMM namespaces do not implement sectors internally. Depending on
+ * the @ndns, the contents of @buf may be in cpu cache, platform buffers,
+ * or on backing memory media upon return from this routine. Flushing
+ * to media is handled internally by the @ndns driver, if at all.
+ */
+static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
+ resource_size_t offset, void *buf, size_t size)
+{
+ return ndns->rw_bytes(ndns, offset, buf, size, WRITE);
+}
+
+#define MODULE_ALIAS_ND_DEVICE(type) \
+ MODULE_ALIAS("nd:t" __stringify(type) "*")
+#define ND_DEVICE_MODALIAS_FMT "nd:t%d"
+
+int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
+ struct module *module, const char *mod_name);
+#define nd_driver_register(driver) \
+ __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#endif /* __LINUX_ND_H__ */
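The rw_bytes() hook behind nvdimm_read_bytes()/nvdimm_write_bytes() is synchronous, so a read-modify-write of raw namespace bytes is just two calls. A hedged sketch; the function name is illustrative:

#include <linux/nd.h>

static int example_update_header(struct nd_namespace_common *ndns,
                                 void *buf, size_t len)
{
        int rc;

        rc = nvdimm_read_bytes(ndns, 0, buf, len);   /* buf valid on return */
        if (rc)
                return rc;
        /* ... modify buf ... */
        return nvdimm_write_bytes(ndns, 0, buf, len);
}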
diff --git a/include/linux/net.h b/include/linux/net.h
index 17d8339..04aa068 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -38,7 +38,6 @@ struct net;
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
-#define SOCK_EXTERNALLY_ALLOCATED 5
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -120,7 +119,6 @@ struct socket {
struct vm_area_struct;
struct page;
-struct kiocb;
struct sockaddr;
struct msghdr;
struct module;
@@ -162,8 +160,8 @@ struct proto_ops {
int (*compat_getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
#endif
- int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len);
+ int (*sendmsg) (struct socket *sock, struct msghdr *m,
+ size_t total_len);
/* Notes for implementing recvmsg:
* ===============================
* msg->msg_namelen should get updated by the recvmsg handlers
@@ -172,9 +170,8 @@ struct proto_ops {
* handlers can assume that msg.msg_name is either NULL or has
* a minimum size of sizeof(struct sockaddr_storage).
*/
- int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len,
- int flags);
+ int (*recvmsg) (struct socket *sock, struct msghdr *m,
+ size_t total_len, int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
ssize_t (*sendpage) (struct socket *sock, struct page *page,
@@ -210,10 +207,10 @@ void sock_unregister(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
-int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
int sock_create_lite(int family, int type, int proto, struct socket **res);
void sock_release(struct socket *sock);
-int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
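sock_create_kern() now takes the target struct net, and sock_sendmsg() drops its separate length argument because the length lives in msg->msg_iter. A hedged caller sketch; the function name is illustrative:

#include <linux/in.h>
#include <linux/net.h>
#include <linux/socket.h>

static int example_send(struct net *net, struct msghdr *msg)
{
        struct socket *sock;
        int err;

        err = sock_create_kern(net, AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
        if (err)
                return err;

        err = sock_sendmsg(sock, msg);   /* length comes from msg->msg_iter */
        sock_release(sock);
        return err;
}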
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 8e30685..9672781 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -160,6 +160,7 @@ enum {
#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
NETIF_F_SG | NETIF_F_HIGHDMA | \
NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
+
/*
* If one device doesn't support one of these features, then disable it
* for all in netdev_increment_features.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 52fd8e8..e20979d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -25,7 +25,6 @@
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
-#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
@@ -51,6 +50,7 @@
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
+#include <uapi/linux/if_bonding.h>
struct netpoll_info;
struct device;
@@ -59,6 +59,7 @@ struct phy_device;
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
+struct mpls_dev;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -260,7 +261,6 @@ struct header_ops {
unsigned short type, const void *daddr,
const void *saddr, unsigned int len);
int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
- int (*rebuild)(struct sk_buff *skb);
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
@@ -587,6 +587,7 @@ struct netdev_queue {
#ifdef CONFIG_BQL
struct dql dql;
#endif
+ unsigned long tx_maxrate;
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -643,39 +644,40 @@ struct rps_dev_flow_table {
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32-bit value. The upper part is the high-order bits
+ * of the flow hash, the lower part is the CPU number.
+ * rps_cpu_mask is used to partition the space, depending on the number of
+ * possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1.
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32 - 6 = 26 bits for the hash.
*/
struct rps_sock_flow_table {
- unsigned int mask;
- u16 ents[0];
+ u32 mask;
+
+ u32 ents[0] ____cacheline_aligned_in_smp;
};
-#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
- ((_num) * sizeof(u16)))
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
#define RPS_NO_CPU 0xffff
+extern u32 rps_cpu_mask;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
u32 hash)
{
if (table && hash) {
- unsigned int cpu, index = hash & table->mask;
+ unsigned int index = hash & table->mask;
+ u32 val = hash & ~rps_cpu_mask;
/* We only give a hint, preemption can change cpu under us */
- cpu = raw_smp_processor_id();
+ val |= raw_smp_processor_id();
- if (table->ents[index] != cpu)
- table->ents[index] = cpu;
+ if (table->ents[index] != val)
+ table->ents[index] = val;
}
}
-static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
- u32 hash)
-{
- if (table && hash)
- table->ents[hash & table->mask] = RPS_NO_CPU;
-}
-
-extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
-
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
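A hedged worked example of the entry encoding described in the comment above, assuming 64 possible CPUs so rps_cpu_mask is 0x3f and the upper 26 bits carry the flow hash; the helper name is illustrative:

#include <linux/types.h>

static u32 example_encode_flow(u32 hash, unsigned int cpu)
{
        const u32 cpu_mask = 0x3f;       /* roundup_pow_of_two(64) - 1 */

        return (hash & ~cpu_mask) | (cpu & cpu_mask);
}
/* example_encode_flow(0xabcd1234, 5) == 0xabcd1205: the upper 26 bits
 * identify the flow, the low 6 bits record the last CPU used. */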
@@ -793,7 +795,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
* struct net_device *dev);
* Called when a packet needs to be transmitted.
- * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
+ * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
+ * the queue before that can happen; it's for obsolete devices and weird
+ * corner cases, but the stack really does a non-trivial amount
+ * of useless work if you return NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required; cannot be NULL.
*
@@ -873,6 +878,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
+ *
+ * Enable or disable the VF's ability to query its RSS Redirection Table and
+ * Hash Key. This is needed since on some devices VFs share this information
+ * with the PF and querying it may introduce a theoretical security risk.
+ * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
* Called to setup 'tc' number of traffic classes in the net device. This
@@ -963,9 +973,13 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* Used to add FDB entries to dump requests. Implementers should add
* entries to skb and update idx with the number of entries.
*
- * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
+ * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
+ * u16 flags)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- * struct net_device *dev, u32 filter_mask)
+ * struct net_device *dev, u32 filter_mask,
+ * int nlflags)
+ * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
+ * u16 flags);
*
* int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
* Called to change device carrier. Soft-devices (like dummy, team, etc)
@@ -1021,15 +1035,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* be otherwise expressed by feature flags. The check is called with
* the set of features that the stack has calculated and it returns
* those the driver believes to be appropriate.
- *
- * int (*ndo_switch_parent_id_get)(struct net_device *dev,
- * struct netdev_phys_item_id *psid);
- * Called to get an ID of the switch chip this port is part of.
- * If driver implements this, it indicates that it represents a port
- * of a switch chip.
- * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
- * Called to notify switch device port of bridge port STP
- * state change.
+ * int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ * int queue_index, u32 maxrate);
+ * Called when a user wants to set a max-rate limit on a specific
+ * TX queue.
+ * int (*ndo_get_iflink)(const struct net_device *dev);
+ * Called to get the iflink value of this device.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1089,11 +1100,18 @@ struct net_device_ops {
struct ifla_vf_info *ivf);
int (*ndo_set_vf_link_state)(struct net_device *dev,
int vf, int link_state);
+ int (*ndo_get_vf_stats)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_stats
+ *vf_stats);
int (*ndo_set_vf_port)(struct net_device *dev,
int vf,
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_set_vf_rss_query_en)(
+ struct net_device *dev,
+ int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
@@ -1154,17 +1172,22 @@ struct net_device_ops {
int idx);
int (*ndo_bridge_setlink)(struct net_device *dev,
- struct nlmsghdr *nlh);
+ struct nlmsghdr *nlh,
+ u16 flags);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask);
+ u32 filter_mask,
+ int nlflags);
int (*ndo_bridge_dellink)(struct net_device *dev,
- struct nlmsghdr *nlh);
+ struct nlmsghdr *nlh,
+ u16 flags);
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+ int (*ndo_get_phys_port_name)(struct net_device *dev,
+ char *name, size_t len);
void (*ndo_add_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
__be16 port);
@@ -1184,12 +1207,10 @@ struct net_device_ops {
netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
-#ifdef CONFIG_NET_SWITCHDEV
- int (*ndo_switch_parent_id_get)(struct net_device *dev,
- struct netdev_phys_item_id *psid);
- int (*ndo_switch_port_stp_update)(struct net_device *dev,
- u8 state);
-#endif
+ int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ int queue_index,
+ u32 maxrate);
+ int (*ndo_get_iflink)(const struct net_device *dev);
};
/**
@@ -1298,6 +1319,8 @@ enum netdev_priv_flags {
* @base_addr: Device I/O address
* @irq: Device IRQ number
*
+ * @carrier_changes: Stats to monitor carrier on<->off transitions
+ *
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
* @napi_list: List entry, that is used for polling napi devices
@@ -1321,7 +1344,7 @@ enum netdev_priv_flags {
* @mpls_features: Mask of features inheritable by MPLS
*
* @ifindex: interface index
- * @iflink: unique device identifier
+ * @group: The group, that the device belongs to
*
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
@@ -1331,8 +1354,6 @@ enum netdev_priv_flags {
* @tx_dropped: Dropped packets by core network,
* do not use this in drivers
*
- * @carrier_changes: Stats to monitor carrier on<->off transitions
- *
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
* see <net/iw_handler.h> for details.
@@ -1341,8 +1362,7 @@ enum netdev_priv_flags {
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @ethtool_ops: Management operations
- * @fwd_ops: Management operations
- * @header_ops: Includes callbacks for creating,parsing,rebuilding,etc
+ * @header_ops: Includes callbacks for creating,parsing,caching,etc
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
@@ -1376,14 +1396,14 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
- * @uc: unicast mac addresses
- * @mc: multicast mac addresses
- * @dev_addrs: list of device hw addresses
- * @queues_kset: Group of all Kobjects in the Tx and RX queues
* @uc_promisc: Counter, that indicates, that promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
* does not implement ndo_set_rx_mode()
+ * @uc: unicast mac addresses
+ * @mc: multicast mac addresses
+ * @dev_addrs: list of device hw addresses
+ * @queues_kset: Group of all Kobjects in the Tx and RX queues
* @promiscuity: Number of times, the NIC is told to work in
* Promiscuous mode, if it becomes 0 the NIC will
* exit from working in Promiscuous mode
@@ -1413,6 +1433,12 @@ enum netdev_priv_flags {
* @ingress_queue: XXX: need comments on this one
* @broadcast: hw bcast address
*
+ * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
+ * indexed by RX queue number. Assigned by driver.
+ * This must only be set if the ndo_rx_flow_steer
+ * operation is defined
+ * @index_hlist: Device index hash chain
+ *
* @_tx: Array of TX queues
* @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
* @real_num_tx_queues: Number of TX queues currently active in device
@@ -1422,11 +1448,6 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
- * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
- * indexed by RX queue number. Assigned by driver.
- * This must only be set if the ndo_rx_flow_steer
- * operation is defined
- *
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog ( see dev_watchdog() )
@@ -1434,7 +1455,6 @@ enum netdev_priv_flags {
*
* @pcpu_refcnt: Number of references to this device
* @todo_list: Delayed register/unregister
- * @index_hlist: Device index hash chain
* @link_watch_list: XXX: need comments on this one
*
* @reg_state: Register/unregister state machine
@@ -1482,9 +1502,6 @@ enum netdev_priv_flags {
*
* @qdisc_tx_busylock: XXX: need comments on this one
*
- * @group: The group, that the device belongs to
- * @pm_qos_req: Power Management QoS object
- *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1502,6 +1519,8 @@ struct net_device {
unsigned long base_addr;
int irq;
+ atomic_t carrier_changes;
+
/*
* Some hardware also needs these fields (state,dev_list,
* napi_list,unreg_list,close_list) but they are not
@@ -1514,6 +1533,8 @@ struct net_device {
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;
+ struct list_head ptype_all;
+ struct list_head ptype_specific;
struct {
struct list_head upper;
@@ -1533,22 +1554,22 @@ struct net_device {
netdev_features_t mpls_features;
int ifindex;
- int iflink;
+ int group;
struct net_device_stats stats;
atomic_long_t rx_dropped;
atomic_long_t tx_dropped;
- atomic_t carrier_changes;
-
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def * wireless_handlers;
struct iw_public_data * wireless_data;
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
- const struct forwarding_accel_ops *fwd_ops;
+#ifdef CONFIG_NET_SWITCHDEV
+ const struct switchdev_ops *switchdev_ops;
+#endif
const struct header_ops *header_ops;
@@ -1579,6 +1600,8 @@ struct net_device {
unsigned short dev_id;
unsigned short dev_port;
spinlock_t addr_list_lock;
+ unsigned char name_assign_type;
+ bool uc_promisc;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
@@ -1586,10 +1609,6 @@ struct net_device {
#ifdef CONFIG_SYSFS
struct kset *queues_kset;
#endif
-
- unsigned char name_assign_type;
-
- bool uc_promisc;
unsigned int promiscuity;
unsigned int allmulti;
@@ -1612,6 +1631,9 @@ struct net_device {
void *ax25_ptr;
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
+#if IS_ENABLED(CONFIG_MPLS_ROUTING)
+ struct mpls_dev __rcu *mpls_ptr;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -1634,9 +1656,19 @@ struct net_device {
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_proto __rcu *ingress_cl_list;
+#endif
struct netdev_queue __rcu *ingress_queue;
- unsigned char broadcast[MAX_ADDR_LEN];
+#ifdef CONFIG_NETFILTER_INGRESS
+ struct list_head nf_hooks_ingress;
+#endif
+ unsigned char broadcast[MAX_ADDR_LEN];
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rx_cpu_rmap;
+#endif
+ struct hlist_node index_hlist;
/*
* Cache lines mostly used on transmit path
@@ -1647,13 +1679,11 @@ struct net_device {
struct Qdisc *qdisc;
unsigned long tx_queue_len;
spinlock_t tx_global_lock;
+ int watchdog_timeo;
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_maps;
#endif
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rx_cpu_rmap;
-#endif
/* These may be needed for future network-power-down code. */
@@ -1663,13 +1693,11 @@ struct net_device {
*/
unsigned long trans_start;
- int watchdog_timeo;
struct timer_list watchdog_timer;
int __percpu *pcpu_refcnt;
struct list_head todo_list;
- struct hlist_node index_hlist;
struct list_head link_watch_list;
enum { NETREG_UNINITIALIZED=0,
@@ -1693,9 +1721,7 @@ struct net_device {
struct netpoll_info __rcu *npinfo;
#endif
-#ifdef CONFIG_NET_NS
- struct net *nd_net;
-#endif
+ possible_net_t nd_net;
/* mid-layer private */
union {
@@ -1736,8 +1762,6 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
- int group;
- struct pm_qos_request pm_qos_req;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1835,10 +1859,7 @@ struct net *dev_net(const struct net_device *dev)
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
-#ifdef CONFIG_NET_NS
- release_net(dev->nd_net);
- dev->nd_net = hold_net(net);
-#endif
+ write_pnet(&dev->nd_net, net);
}
static inline bool netdev_uses_dsa(struct net_device *dev)
@@ -1917,13 +1938,8 @@ struct napi_gro_cb {
/* Number of segments aggregated. */
u16 count;
- /* This is non-zero if the packet may be of the same flow. */
- u8 same_flow;
-
- /* Free the skb? */
- u8 free;
-#define NAPI_GRO_FREE 1
-#define NAPI_GRO_FREE_STOLEN_HEAD 2
+ /* Start offset for remote checksum offload */
+ u16 gro_remcsum_start;
/* jiffies when first packet was created/queued */
unsigned long age;
@@ -1931,6 +1947,9 @@ struct napi_gro_cb {
/* Used in ipv6_gro_receive() and foo-over-udp */
u16 proto;
+ /* This is non-zero if the packet may be of the same flow. */
+ u8 same_flow:1;
+
/* Used in udp_gro_receive */
u8 udp_mark:1;
@@ -1940,9 +1959,16 @@ struct napi_gro_cb {
/* Number of checksums via CHECKSUM_UNNECESSARY */
u8 csum_cnt:3;
+ /* Free the skb? */
+ u8 free:2;
+#define NAPI_GRO_FREE 1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
+
/* Used in foo-over-udp, set in udp[46]_gro_receive */
u8 is_ipv6:1;
+ /* 7 bit hole */
+
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -1969,20 +1995,32 @@ struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
+ struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
struct packet_offload {
__be16 type; /* This is really htons(ether_type). */
+ u16 priority;
struct offload_callbacks callbacks;
struct list_head list;
};
+struct udp_offload;
+
+struct udp_offload_callbacks {
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb,
+ struct udp_offload *uoff);
+ int (*gro_complete)(struct sk_buff *skb,
+ int nhoff,
+ struct udp_offload *uoff);
+};
+
struct udp_offload {
__be16 port;
u8 ipproto;
- struct offload_callbacks callbacks;
+ struct udp_offload_callbacks callbacks;
};
/* often modified stats are per cpu, other are shared (netdev->stats) */
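
Since struct udp_offload now carries its own udp_offload_callbacks, whose handlers take the struct udp_offload pointer as an extra argument, a UDP-encapsulation driver would be expected to define something along these lines. This is only a hedged sketch; every name and value below is hypothetical and not taken from the patch.

static struct sk_buff **example_udp_gro_receive(struct sk_buff **head,
						struct sk_buff *skb,
						struct udp_offload *uoff)
{
	/* no aggregation in this sketch: hand the packet back to GRO */
	return NULL;
}

static int example_udp_gro_complete(struct sk_buff *skb, int nhoff,
				    struct udp_offload *uoff)
{
	return 0;
}

static struct udp_offload example_udp_offload = {
	.port		= 0,			/* filled in at setup time */
	.ipproto	= IPPROTO_UDP,		/* hypothetical */
	.callbacks	= {
		.gro_receive	= example_udp_gro_receive,
		.gro_complete	= example_udp_gro_complete,
	},
};
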
@@ -1998,10 +2036,10 @@ struct pcpu_sw_netstats {
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
if (pcpu_stats) { \
- int i; \
- for_each_possible_cpu(i) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
- stat = per_cpu_ptr(pcpu_stats, i); \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
@@ -2041,6 +2079,7 @@ struct pcpu_sw_netstats {
#define NETDEV_RESEND_IGMP 0x0016
#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
#define NETDEV_CHANGEINFODATA 0x0018
+#define NETDEV_BONDING_INFO 0x0019
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2133,6 +2172,7 @@ void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
+int dev_get_iflink(const struct net_device *dev);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -2141,9 +2181,14 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
int dev_close(struct net_device *dev);
+int dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
-int dev_loopback_xmit(struct sk_buff *newskb);
-int dev_queue_xmit(struct sk_buff *skb);
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb);
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb);
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return dev_queue_xmit_sk(skb->sk, skb);
+}
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2159,6 +2204,12 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
+DECLARE_PER_CPU(int, xmit_recursion);
+static inline int dev_recursion_level(void)
+{
+ return this_cpu_read(xmit_recursion);
+}
+
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -2224,11 +2275,20 @@ static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
+static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
+ skb_gro_offset(skb));
+}
+
static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
bool zero_okay,
__sum16 check)
{
- return (skb->ip_summed != CHECKSUM_PARTIAL &&
+ return ((skb->ip_summed != CHECKSUM_PARTIAL ||
+ skb_checksum_start_offset(skb) <
+ skb_gro_offset(skb)) &&
+ !skb_at_gro_remcsum_start(skb) &&
NAPI_GRO_CB(skb)->csum_cnt == 0 &&
(!zero_okay || check));
}
@@ -2303,6 +2363,50 @@ do { \
compute_pseudo(skb, proto)); \
} while (0)
+struct gro_remcsum {
+ int offset;
+ __wsum delta;
+};
+
+static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
+{
+ grc->offset = 0;
+ grc->delta = 0;
+}
+
+static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
+{
+ __wsum delta;
+
+ BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+ if (!nopartial) {
+ NAPI_GRO_CB(skb)->gro_remcsum_start =
+ ((unsigned char *)ptr + start) - skb->head;
+ return;
+ }
+
+ delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+ grc->offset = (ptr + offset) - (void *)skb->head;
+ grc->delta = delta;
+}
+
+static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
+ struct gro_remcsum *grc)
+{
+ if (!grc->delta)
+ return;
+
+ remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+}
+
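
The gro_remcsum helpers above are meant to bracket a tunnel driver's gro_receive processing: initialise the tracker, apply the remote-checksum adjustment for the inner header, and undo it again if the packet ends up not being merged. The sketch below shows that calling pattern only; the function name, header offsets and nopartial choice are all hypothetical, and it assumes a kernel context where NAPI_GRO_CB(skb)->csum is already valid.

static void example_remcsum_step(struct sk_buff *skb, void *hdr)
{
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	/* start/offset would come from the tunnel header; 4 and 6 are made up */
	skb_gro_remcsum_process(skb, hdr, 4, 6, &grc, true);

	/* ... GRO matching against the packet list would happen here ... */

	/* revert the adjustment when the segment is not merged after all */
	skb_gro_remcsum_cleanup(skb, &grc);
}
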
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -2324,15 +2428,6 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
-static inline int dev_rebuild_header(struct sk_buff *skb)
-{
- const struct net_device *dev = skb->dev;
-
- if (!dev->header_ops || !dev->header_ops->rebuild)
- return 0;
- return dev->header_ops->rebuild(skb);
-}
-
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -2469,10 +2564,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
- if (WARN_ON(!dev_queue)) {
- pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
- return;
- }
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -2488,15 +2579,7 @@ static inline void netif_stop_queue(struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
-static inline void netif_tx_stop_all_queues(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- netif_tx_stop_queue(txq);
- }
-}
+void netif_tx_stop_all_queues(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
@@ -2757,6 +2840,9 @@ static inline int netif_set_xps_queue(struct net_device *dev,
}
#endif
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+ unsigned int num_tx_queues);
+
/*
* Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
* as a distribution range limit for the returned value.
@@ -2854,7 +2940,11 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
-int netif_receive_skb(struct sk_buff *skb);
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
+static inline int netif_receive_skb(struct sk_buff *skb)
+{
+ return netif_receive_skb_sk(skb->sk, skb);
+}
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -2890,6 +2980,8 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+int dev_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3464,6 +3556,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
netdev_features_t features);
+struct netdev_bonding_info {
+ ifslave slave;
+ ifbond master;
+};
+
+struct netdev_notifier_bonding_info {
+ struct netdev_notifier_info info; /* must be first */
+ struct netdev_bonding_info bonding_info;
+};
+
+void netdev_bonding_info_change(struct net_device *dev,
+ struct netdev_bonding_info *bonding_info);
+
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
@@ -3581,6 +3686,9 @@ void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
+netdev_features_t passthru_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
@@ -3611,7 +3719,7 @@ static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
+static inline bool netif_needs_gso(struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2517ece..00050df 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -10,7 +10,8 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
-#include <uapi/linux/netfilter.h>
+#include <linux/netfilter_defs.h>
+
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
@@ -38,29 +39,58 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
int netfilter_init(void);
-/* Largest hook number + 1 */
-#define NF_MAX_HOOKS 8
-
struct sk_buff;
struct nf_hook_ops;
+
+struct sock;
+
+struct nf_hook_state {
+ unsigned int hook;
+ int thresh;
+ u_int8_t pf;
+ struct net_device *in;
+ struct net_device *out;
+ struct sock *sk;
+ struct list_head *hook_list;
+ int (*okfn)(struct sock *, struct sk_buff *);
+};
+
+static inline void nf_hook_state_init(struct nf_hook_state *p,
+ struct list_head *hook_list,
+ unsigned int hook,
+ int thresh, u_int8_t pf,
+ struct net_device *indev,
+ struct net_device *outdev,
+ struct sock *sk,
+ int (*okfn)(struct sock *, struct sk_buff *))
+{
+ p->hook = hook;
+ p->thresh = thresh;
+ p->pf = pf;
+ p->in = indev;
+ p->out = outdev;
+ p->sk = sk;
+ p->hook_list = hook_list;
+ p->okfn = okfn;
+}
+
typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *));
+ const struct nf_hook_state *state);
struct nf_hook_ops {
- struct list_head list;
+ struct list_head list;
/* User fills in from here down. */
- nf_hookfn *hook;
- struct module *owner;
- void *priv;
- u_int8_t pf;
- unsigned int hooknum;
+ nf_hookfn *hook;
+ struct net_device *dev;
+ struct module *owner;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
- int priority;
+ int priority;
};
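
With nf_hookfn reduced to (ops, skb, state), the input/output devices and the okfn continuation now live in the nf_hook_state. A hook ported to the new prototype would look roughly like the sketch below; this is not part of the patch, and the function name and message are hypothetical.

static unsigned int example_hookfn(const struct nf_hook_ops *ops,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	pr_debug("hook %u: in=%s out=%s\n", state->hook,
		 state->in ? state->in->name : "<none>",
		 state->out ? state->out->name : "<none>");
	return NF_ACCEPT;
}
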
struct nf_sockopt_ops {
@@ -103,48 +133,61 @@ extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+ u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
return static_key_false(&nf_hooks_needed[pf][hook]);
- return !list_empty(&nf_hooks[pf][hook]);
+ return !list_empty(nf_hook_list);
}
#else
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+ u_int8_t pf, unsigned int hook)
{
- return !list_empty(&nf_hooks[pf][hook]);
+ return !list_empty(nf_hook_list);
}
#endif
-int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh);
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+ return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
+}
+
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
* nf_hook_thresh - call a netfilter hook
- *
+ *
* Returns 1 if the hook has allowed the packet to pass. The function
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
*/
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct sock *sk,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh)
+ int (*okfn)(struct sock *, struct sk_buff *),
+ int thresh)
{
- if (nf_hooks_active(pf, hook))
- return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+ if (nf_hooks_active(pf, hook)) {
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+ pf, indev, outdev, sk, okfn);
+ return nf_hook_slow(skb, &state);
+ }
return 1;
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *))
{
- return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN);
+ return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
}
/* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -165,35 +208,36 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
*/
static inline int
-NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *), int thresh)
+NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in,
+ struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *), int thresh)
{
- int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh);
+ int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
if (ret == 1)
- ret = okfn(skb);
+ ret = okfn(sk, skb);
return ret;
}
static inline int
-NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *), bool cond)
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *), bool cond)
{
int ret;
if (!cond ||
- ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
- ret = okfn(skb);
+ ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
+ ret = okfn(sk, skb);
return ret;
}
static inline int
-NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb,
+NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ int (*okfn)(struct sock *, struct sk_buff *))
{
- return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN);
+ return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
}
/* Call setsockopt() */
@@ -293,19 +337,21 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
}
#else /* !CONFIG_NETFILTER */
-#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
-#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb)
+#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
+#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct sock *sk,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh)
+ int (*okfn)(struct sock *sk, struct sk_buff *), int thresh)
{
- return okfn(skb);
+ return okfn(sk, skb);
}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *))
{
return 1;
}
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index f1606fa..48bb01e 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -108,8 +108,13 @@ struct ip_set_counter {
atomic64_t packets;
};
+struct ip_set_comment_rcu {
+ struct rcu_head rcu;
+ char str[0];
+};
+
struct ip_set_comment {
- char *str;
+ struct ip_set_comment_rcu __rcu *c;
};
struct ip_set_skbinfo {
@@ -122,13 +127,13 @@ struct ip_set_skbinfo {
struct ip_set;
#define ext_timeout(e, s) \
-(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s) \
-(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
#define ext_comment(e, s) \
-(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
#define ext_skbinfo(e, s) \
-(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
+((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
const struct ip_set_ext *ext,
@@ -176,6 +181,9 @@ struct ip_set_type_variant {
/* List elements */
int (*list)(const struct ip_set *set, struct sk_buff *skb,
struct netlink_callback *cb);
+ /* Keep the listing private while a resize runs in parallel */
+ void (*uref)(struct ip_set *set, struct netlink_callback *cb,
+ bool start);
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
@@ -223,7 +231,7 @@ struct ip_set {
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
- rwlock_t lock;
+ spinlock_t lock;
/* References to the set */
u32 ref;
/* The core set type */
@@ -341,12 +349,11 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
cpu_to_be64((u64)skbinfo->skbmark << 32 |
skbinfo->skbmarkmask))) ||
(skbinfo->skbprio &&
- nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
cpu_to_be32(skbinfo->skbprio))) ||
(skbinfo->skbqueue &&
- nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
cpu_to_be16(skbinfo->skbqueue)));
-
}
static inline void
@@ -380,12 +387,12 @@ ip_set_init_counter(struct ip_set_counter *counter,
/* Netlink CB args */
enum {
- IPSET_CB_NET = 0,
- IPSET_CB_DUMP,
- IPSET_CB_INDEX,
- IPSET_CB_ARG0,
+ IPSET_CB_NET = 0, /* net namespace */
+ IPSET_CB_DUMP, /* dump single set/all sets */
+ IPSET_CB_INDEX, /* set index */
+ IPSET_CB_PRIVATE, /* set private data */
+ IPSET_CB_ARG0, /* type specific */
IPSET_CB_ARG1,
- IPSET_CB_ARG2,
};
/* register and unregister set references */
@@ -483,7 +490,7 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
if (!__nested)
return -EMSGSIZE;
- ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
+ ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
if (!ret)
ipset_nest_end(skb, __nested);
return ret;
@@ -497,8 +504,7 @@ static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
if (!__nested)
return -EMSGSIZE;
- ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6,
- sizeof(struct in6_addr), ipaddrptr);
+ ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
if (!ret)
ipset_nest_end(skb, __nested);
return ret;
@@ -534,29 +540,9 @@ bitmap_bytes(u32 a, u32 b)
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>
-static inline int
+int
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
- const void *e, bool active)
-{
- if (SET_WITH_TIMEOUT(set)) {
- unsigned long *timeout = ext_timeout(e, set);
-
- if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(active ? ip_set_timeout_get(timeout)
- : *timeout)))
- return -EMSGSIZE;
- }
- if (SET_WITH_COUNTER(set) &&
- ip_set_put_counter(skb, ext_counter(e, set)))
- return -EMSGSIZE;
- if (SET_WITH_COMMENT(set) &&
- ip_set_put_comment(skb, ext_comment(e, set)))
- return -EMSGSIZE;
- if (SET_WITH_SKBINFO(set) &&
- ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
- return -EMSGSIZE;
- return 0;
-}
+ const void *e, bool active);
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
@@ -566,8 +552,6 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
.timeout = (set)->timeout }
-#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
-
#define IPSET_CONCAT(a, b) a##b
#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 21217ea..8d02485 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -16,41 +16,57 @@ ip_set_comment_uget(struct nlattr *tb)
return nla_data(tb);
}
+/* Called from uadd only, protected by the set spinlock.
+ * The kadt functions don't use the comment extensions in any way.
+ */
static inline void
ip_set_init_comment(struct ip_set_comment *comment,
const struct ip_set_ext *ext)
{
+ struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
size_t len = ext->comment ? strlen(ext->comment) : 0;
- if (unlikely(comment->str)) {
- kfree(comment->str);
- comment->str = NULL;
+ if (unlikely(c)) {
+ kfree_rcu(c, rcu);
+ rcu_assign_pointer(comment->c, NULL);
}
if (!len)
return;
if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
len = IPSET_MAX_COMMENT_SIZE;
- comment->str = kzalloc(len + 1, GFP_ATOMIC);
- if (unlikely(!comment->str))
+ c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
+ if (unlikely(!c))
return;
- strlcpy(comment->str, ext->comment, len + 1);
+ strlcpy(c->str, ext->comment, len + 1);
+ rcu_assign_pointer(comment->c, c);
}
+/* Used only when dumping a set, protected by rcu_read_lock_bh() */
static inline int
ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
{
- if (!comment->str)
+ struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+
+ if (!c)
return 0;
- return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+ return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
}
+/* Called from uadd/udel, flush or the garbage collectors, protected
+ * by the set spinlock.
+ * Also called when the set is destroyed and there can't be any user
+ * of the set data anymore.
+ */
static inline void
ip_set_comment_free(struct ip_set_comment *comment)
{
- if (unlikely(!comment->str))
+ struct ip_set_comment_rcu *c;
+
+ c = rcu_dereference_protected(comment->c, 1);
+ if (unlikely(!c))
return;
- kfree(comment->str);
- comment->str = NULL;
+ kfree_rcu(c, rcu);
+ rcu_assign_pointer(comment->c, NULL);
}
#endif
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 83c2f9e..1d6a935 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,38 +40,33 @@ ip_set_timeout_uget(struct nlattr *tb)
}
static inline bool
-ip_set_timeout_test(unsigned long timeout)
+ip_set_timeout_expired(unsigned long *t)
{
- return timeout == IPSET_ELEM_PERMANENT ||
- time_is_after_jiffies(timeout);
-}
-
-static inline bool
-ip_set_timeout_expired(unsigned long *timeout)
-{
- return *timeout != IPSET_ELEM_PERMANENT &&
- time_is_before_jiffies(*timeout);
+ return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
}
static inline void
-ip_set_timeout_set(unsigned long *timeout, u32 t)
+ip_set_timeout_set(unsigned long *timeout, u32 value)
{
- if (!t) {
+ unsigned long t;
+
+ if (!value) {
*timeout = IPSET_ELEM_PERMANENT;
return;
}
- *timeout = msecs_to_jiffies(t * 1000) + jiffies;
- if (*timeout == IPSET_ELEM_PERMANENT)
+ t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
+ if (t == IPSET_ELEM_PERMANENT)
/* Bingo! :-) */
- (*timeout)--;
+ t--;
+ *timeout = t;
}
static inline u32
ip_set_timeout_get(unsigned long *timeout)
{
return *timeout == IPSET_ELEM_PERMANENT ? 0 :
- jiffies_to_msecs(*timeout - jiffies)/1000;
+ jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
}
#endif /* __KERNEL__ */
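
Taken together, the helpers above store a user-supplied timeout in seconds as an absolute jiffies deadline (nudged by one jiffy if it would collide with IPSET_ELEM_PERMANENT) and convert it back to remaining seconds when read. A rough usage sketch, with a hypothetical 10 second timeout:

static void example_timeout_roundtrip(unsigned long *stored)
{
	u32 secs;

	ip_set_timeout_set(stored, 10);		/* deadline ~10s in the future */
	secs = ip_set_timeout_get(stored);	/* remaining seconds, counts down */

	if (ip_set_timeout_expired(stored))
		pr_debug("element expired\n");
	else
		pr_debug("element valid for another %u seconds\n", secs);
}
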
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index a3e215b..286098a 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -62,6 +62,7 @@ struct xt_mtchk_param {
void *matchinfo;
unsigned int hook_mask;
u_int8_t family;
+ bool nft_compat;
};
/**
@@ -92,6 +93,7 @@ struct xt_tgchk_param {
void *targinfo;
unsigned int hook_mask;
u_int8_t family;
+ bool nft_compat;
};
/* Target destructor parameters */
@@ -222,13 +224,10 @@ struct xt_table_info {
unsigned int stacksize;
unsigned int __percpu *stackptr;
void ***jumpstack;
- /* ipt_entry tables: one per CPU */
- /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
- void *entries[1];
+
+ unsigned char entries[0] __aligned(8);
};
-#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
- + nr_cpu_ids * sizeof(char *))
int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
@@ -351,6 +350,57 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}
+
+/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
+ * real (percpu) counter. On !SMP, it's just the packet count,
+ * so nothing needs to be done there.
+ *
+ * xt_percpu_counter_alloc returns the address of the percpu
+ * counter, or 0 on !SMP. We force an alignment of 16 bytes
+ * so that bytes/packets share a common cache line.
+ *
+ * Hence the caller must use IS_ERR_VALUE to check for an error; this
+ * allows us to return 0 for single-core systems without forcing
+ * callers to deal with SMP vs. non-SMP issues.
+ */
+static inline u64 xt_percpu_counter_alloc(void)
+{
+ if (nr_cpu_ids > 1) {
+ void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
+ sizeof(struct xt_counters));
+
+ if (res == NULL)
+ return (u64) -ENOMEM;
+
+ return (u64) (__force unsigned long) res;
+ }
+
+ return 0;
+}
+static inline void xt_percpu_counter_free(u64 pcnt)
+{
+ if (nr_cpu_ids > 1)
+ free_percpu((void __percpu *) (unsigned long) pcnt);
+}
+
+static inline struct xt_counters *
+xt_get_this_cpu_counter(struct xt_counters *cnt)
+{
+ if (nr_cpu_ids > 1)
+ return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);
+
+ return cnt;
+}
+
+static inline struct xt_counters *
+xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
+{
+ if (nr_cpu_ids > 1)
+ return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
+
+ return cnt;
+}
+
struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
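
The comment block above implies a specific call pattern for the new per-cpu counter helpers: allocate, check the result with IS_ERR_VALUE, dereference through xt_get_this_cpu_counter() on the packet path, and free the same value later. The sketch below only illustrates that pattern; the function and the entry layout are hypothetical and not taken from the iptables code.

static int example_entry_counters(struct xt_counters *counters,
				  unsigned int pkt_len)
{
	struct xt_counters *cnt;
	u64 pcnt;

	pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(pcnt))
		return -ENOMEM;
	counters->pcnt = pcnt;		/* percpu address on SMP, else unused */

	/* packet-path update on the current CPU */
	cnt = xt_get_this_cpu_counter(counters);
	cnt->pcnt++;			/* packets */
	cnt->bcnt += pkt_len;		/* bytes */

	xt_percpu_counter_free(counters->pcnt);
	return 0;
}
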
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index cfb7191..c22a7fb 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -54,8 +54,7 @@ extern struct xt_table *arpt_register_table(struct net *net,
extern void arpt_unregister_table(struct xt_table *table);
extern unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
#ifdef CONFIG_COMPAT
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index c755e49..6d80fc6 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -2,7 +2,7 @@
#define __LINUX_BRIDGE_NETFILTER_H
#include <uapi/linux/netfilter_bridge.h>
-
+#include <linux/skbuff.h>
enum nf_br_hook_priorities {
NF_BR_PRI_FIRST = INT_MIN,
@@ -17,110 +17,53 @@ enum nf_br_hook_priorities {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-#define BRNF_PKT_TYPE 0x01
#define BRNF_BRIDGED_DNAT 0x02
-#define BRNF_BRIDGED 0x04
#define BRNF_NF_BRIDGE_PREROUTING 0x08
-#define BRNF_8021Q 0x10
-#define BRNF_PPPoE 0x20
-static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
-{
- switch (skb->protocol) {
- case __cpu_to_be16(ETH_P_8021Q):
- return VLAN_HLEN;
- case __cpu_to_be16(ETH_P_PPP_SES):
- return PPPOE_SES_HLEN;
- default:
- return 0;
- }
-}
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
-static inline void nf_bridge_update_protocol(struct sk_buff *skb)
+static inline void br_drop_fake_rtable(struct sk_buff *skb)
{
- if (skb->nf_bridge->mask & BRNF_8021Q)
- skb->protocol = htons(ETH_P_8021Q);
- else if (skb->nf_bridge->mask & BRNF_PPPoE)
- skb->protocol = htons(ETH_P_PPP_SES);
-}
+ struct dst_entry *dst = skb_dst(skb);
-/* Fill in the header for fragmented IP packets handled by
- * the IPv4 connection tracking code.
- *
- * Only used in br_forward.c
- */
-static inline int nf_bridge_copy_header(struct sk_buff *skb)
-{
- int err;
- unsigned int header_size;
-
- nf_bridge_update_protocol(skb);
- header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
- err = skb_cow_head(skb, header_size);
- if (err)
- return err;
-
- skb_copy_to_linear_data_offset(skb, -header_size,
- skb->nf_bridge->data, header_size);
- __skb_push(skb, nf_bridge_encap_header_len(skb));
- return 0;
+ if (dst && (dst->flags & DST_FAKE_RTABLE))
+ skb_dst_drop(skb);
}
-static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
+static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
{
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
- return nf_bridge_copy_header(skb);
- return 0;
-}
+ struct nf_bridge_info *nf_bridge;
-static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
-{
- if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
- return PPPOE_SES_HLEN;
- return 0;
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
}
-int br_handle_frame_finish(struct sk_buff *skb);
-/* Only used in br_device.c */
-static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
-
- skb_pull(skb, ETH_HLEN);
- nf_bridge->mask ^= BRNF_BRIDGED_DNAT;
- skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
- skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
- skb->dev = nf_bridge->physindev;
- return br_handle_frame_finish(skb);
+ struct nf_bridge_info *nf_bridge;
+
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
}
-/* This is called by the IP fragmenting code and it ensures there is
- * enough room for the encapsulating header (if there is one). */
-static inline unsigned int nf_bridge_pad(const struct sk_buff *skb)
+static inline struct net_device *
+nf_bridge_get_physindev(const struct sk_buff *skb)
{
- if (skb->nf_bridge)
- return nf_bridge_encap_header_len(skb);
- return 0;
+ return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
}
-struct bridge_skb_cb {
- union {
- __be32 ipv4;
- } daddr;
-};
-
-static inline void br_drop_fake_rtable(struct sk_buff *skb)
+static inline struct net_device *
+nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
-
- if (dst && (dst->flags & DST_FAKE_RTABLE))
- skb_dst_drop(skb);
+ return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
}
-
#else
-#define nf_bridge_maybe_copy_header(skb) (0)
-#define nf_bridge_pad(skb) (0)
#define br_drop_fake_rtable(skb) do { } while (0)
#endif /* CONFIG_BRIDGE_NETFILTER */
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 34e7a2b..8ca6d64 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -6,15 +6,16 @@
*
* ebtables.c,v 2.0, April, 2002
*
- * This code is stongly inspired on the iptables code which is
+ * This code is strongly inspired by the iptables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
*/
#ifndef __LINUX_BRIDGE_EFF_H
#define __LINUX_BRIDGE_EFF_H
+#include <linux/if.h>
+#include <linux/if_ether.h>
#include <uapi/linux/netfilter_bridge/ebtables.h>
-
/* return values for match() functions */
#define EBT_MATCH 0
#define EBT_NOMATCH 1
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
new file mode 100644
index 0000000..d3a7f85
--- /dev/null
+++ b/include/linux/netfilter_defs.h
@@ -0,0 +1,9 @@
+#ifndef __LINUX_NETFILTER_CORE_H_
+#define __LINUX_NETFILTER_CORE_H_
+
+#include <uapi/linux/netfilter.h>
+
+/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
+#define NF_MAX_HOOKS 8
+
+#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
new file mode 100644
index 0000000..cb0727f
--- /dev/null
+++ b/include/linux/netfilter_ingress.h
@@ -0,0 +1,41 @@
+#ifndef _NETFILTER_INGRESS_H_
+#define _NETFILTER_INGRESS_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
+ NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
+ NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
+ skb->dev, NULL, NULL);
+ return nf_hook_slow(skb, &state);
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev)
+{
+ INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+}
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev) {}
+#endif /* CONFIG_NETFILTER_INGRESS */
+#endif /* _NETFILTER_INGRESS_H_ */
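
For completeness, a hook intended for this new per-device ingress point would be described by an nf_hook_ops with pf set to NFPROTO_NETDEV, hooknum set to NF_NETDEV_INGRESS and the new dev member pointing at the target device. The sketch below is hypothetical and does not show the registration call itself.

static unsigned int example_ingress_hook(const struct nf_hook_ops *ops,
					 struct sk_buff *skb,
					 const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static struct nf_hook_ops example_ingress_ops = {
	.hook		= example_ingress_hook,
	.pf		= NFPROTO_NETDEV,
	.hooknum	= NF_NETDEV_INGRESS,
	.priority	= 0,
	/* .dev is assigned to the target net_device before registration */
};
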
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 901e84d..4073510 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -65,8 +65,7 @@ struct ipt_error {
extern void *ipt_alloc_initial_table(const struct xt_table *);
extern unsigned int ipt_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
#ifdef CONFIG_COMPAT
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 64dad1cc..8b7d28f 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -25,6 +25,9 @@ void ipv6_netfilter_fini(void);
struct nf_ipv6_ops {
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
+ void (*route_input)(struct sk_buff *skb);
+ int (*fragment)(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
};
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 610208b1..b40d2b6 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -31,8 +31,7 @@ extern struct xt_table *ip6t_register_table(struct net *net,
extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
unsigned int hook,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct xt_table *table);
/* Check for an extension */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 02fc86d..9120edb 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -28,6 +28,8 @@ struct netlink_skb_parms {
__u32 dst_group;
__u32 flags;
struct sock *sk;
+ bool nsid_is_set;
+ int nsid;
};
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
@@ -134,7 +136,7 @@ struct netlink_callback {
struct netlink_notify {
struct net *net;
- int portid;
+ u32 portid;
int protocol;
};
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 022b761..b8e72aa 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -16,6 +16,13 @@
#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
+enum nfs4_acl_whotype {
+ NFS4_ACL_WHO_NAMED = 0,
+ NFS4_ACL_WHO_OWNER,
+ NFS4_ACL_WHO_GROUP,
+ NFS4_ACL_WHO_EVERYONE,
+};
+
struct nfs4_ace {
uint32_t type;
uint32_t flag;
@@ -411,6 +418,7 @@ enum lock_type4 {
#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
+#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
@@ -492,6 +500,7 @@ enum {
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
+ NFSPROC4_CLNT_LAYOUTSTATS,
};
/* nfs41 types */
@@ -516,6 +525,8 @@ enum pnfs_layouttype {
LAYOUT_NFSV4_1_FILES = 1,
LAYOUT_OSD2_OBJECTS = 2,
LAYOUT_BLOCK_VOLUME = 3,
+ LAYOUT_FLEX_FILES = 4,
+ LAYOUT_TYPE_MAX
};
/* used for both layout return and recall */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 6d627b9..f91b5ad 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -180,7 +180,6 @@ struct nfs_inode {
/* NFSv4 state */
struct list_head open_states;
struct nfs_delegation __rcu *delegation;
- fmode_t delegation_state;
struct rw_semaphore rwsem;
/* pNFS layout information */
@@ -220,6 +219,7 @@ struct nfs_inode {
#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
+#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
static inline struct nfs_inode *NFS_I(const struct inode *inode)
{
@@ -344,6 +344,7 @@ extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
@@ -356,8 +357,9 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
+extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping);
extern int nfs_setattr(struct dentry *, struct iattr *);
-extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
+extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
@@ -370,6 +372,7 @@ extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ct
extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
extern u64 nfs_compat_user_ino64(u64 fileid);
extern void nfs_fattr_init(struct nfs_fattr *fattr);
+extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
extern unsigned long nfs_inc_attr_generation_counter(void);
extern struct nfs_fattr *nfs_alloc_fattr(void);
@@ -445,13 +448,12 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
struct iov_iter *iter,
loff_t pos);
extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
- struct iov_iter *iter,
- loff_t pos);
+ struct iov_iter *iter);
/*
* linux/fs/nfs/dir.c
@@ -510,6 +512,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
* Try to write back everything synchronously (but check the
* return value!)
*/
+extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index ddea982..a2ea149 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -77,10 +77,6 @@ struct nfs_client {
/* Client owner identifier */
const char * cl_owner_id;
- /* Our own IP address, as a null-terminated string.
- * This is used to generate the mv0 callback address.
- */
- char cl_ipaddr[48];
u32 cl_cb_ident; /* v4.0 callback identifier */
const struct nfs4_minor_version_ops *cl_mvops;
unsigned long cl_mig_gen;
@@ -108,6 +104,11 @@ struct nfs_client {
#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
#endif /* CONFIG_NFS_V4 */
+ /* Our own IP address, as a null-terminated string.
+ * This is used to generate the mv0 callback address.
+ */
+ char cl_ipaddr[48];
+
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
@@ -236,5 +237,6 @@ struct nfs_server {
#define NFS_CAP_SEEK (1U << 19)
#define NFS_CAP_ALLOCATE (1U << 20)
#define NFS_CAP_DEALLOCATE (1U << 21)
+#define NFS_CAP_LAYOUTSTATS (1U << 22)
#endif
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
deleted file mode 100644
index 0f4b79d..0000000
--- a/include/linux/nfs_idmap.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * include/linux/nfs_idmap.h
- *
- * UID and GID to name mapping for clients.
- *
- * Copyright (c) 2002 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Marius Aamodt Eriksen <marius@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef NFS_IDMAP_H
-#define NFS_IDMAP_H
-
-#include <linux/uidgid.h>
-#include <uapi/linux/nfs_idmap.h>
-
-
-/* Forward declaration to make this header independent of others */
-struct nfs_client;
-struct nfs_server;
-struct nfs_fattr;
-struct nfs4_string;
-
-#if IS_ENABLED(CONFIG_NFS_V4)
-int nfs_idmap_init(void);
-void nfs_idmap_quit(void);
-#else
-static inline int nfs_idmap_init(void)
-{
- return 0;
-}
-
-static inline void nfs_idmap_quit(void)
-{}
-#endif
-
-int nfs_idmap_new(struct nfs_client *);
-void nfs_idmap_delete(struct nfs_client *);
-
-void nfs_fattr_init_names(struct nfs_fattr *fattr,
- struct nfs4_string *owner_name,
- struct nfs4_string *group_name);
-void nfs_fattr_free_names(struct nfs_fattr *);
-void nfs_fattr_map_and_free_names(struct nfs_server *, struct nfs_fattr *);
-
-int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, kuid_t *);
-int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, kgid_t *);
-int nfs_map_uid_to_name(const struct nfs_server *, kuid_t, char *, size_t);
-int nfs_map_gid_to_group(const struct nfs_server *, kgid_t, char *, size_t);
-
-extern unsigned int nfs_idmap_cache_timeout;
-#endif /* NFS_IDMAP_H */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 6c3e06e..f2f650f 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -58,29 +58,34 @@ struct nfs_pageio_ops {
size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *,
struct nfs_page *);
int (*pg_doio)(struct nfs_pageio_descriptor *);
+ unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
+ struct nfs_page *);
+ void (*pg_cleanup)(struct nfs_pageio_descriptor *);
};
struct nfs_rw_ops {
const fmode_t rw_mode;
struct nfs_pgio_header *(*rw_alloc_header)(void);
void (*rw_free_header)(struct nfs_pgio_header *);
- void (*rw_release)(struct nfs_pgio_header *);
int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *,
struct inode *);
void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *);
void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *,
+ const struct nfs_rpc_ops *,
struct rpc_task_setup *, int);
};
-struct nfs_pageio_descriptor {
+struct nfs_pgio_mirror {
struct list_head pg_list;
unsigned long pg_bytes_written;
size_t pg_count;
size_t pg_bsize;
unsigned int pg_base;
- unsigned char pg_moreio : 1,
- pg_recoalesce : 1;
+ unsigned char pg_recoalesce : 1;
+};
+struct nfs_pageio_descriptor {
+ unsigned char pg_moreio : 1;
struct inode *pg_inode;
const struct nfs_pageio_ops *pg_ops;
const struct nfs_rw_ops *pg_rw_ops;
@@ -91,8 +96,18 @@ struct nfs_pageio_descriptor {
struct pnfs_layout_segment *pg_lseg;
struct nfs_direct_req *pg_dreq;
void *pg_layout_private;
+ unsigned int pg_bsize; /* default bsize for mirrors */
+
+ u32 pg_mirror_count;
+ struct nfs_pgio_mirror *pg_mirrors;
+ struct nfs_pgio_mirror pg_mirrors_static[1];
+ struct nfs_pgio_mirror *pg_mirrors_dynamic;
+ u32 pg_mirror_idx; /* current mirror */
};
+/* arbitrarily selected limit to number of mirrors */
+#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16
+
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 467c84e..7bbe505 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -255,11 +255,13 @@ struct nfs4_layoutget {
struct nfs4_getdeviceinfo_args {
struct nfs4_sequence_args seq_args;
struct pnfs_device *pdev;
+ __u32 notify_types;
};
struct nfs4_getdeviceinfo_res {
struct nfs4_sequence_res seq_res;
struct pnfs_device *pdev;
+ __u32 notification;
};
struct nfs4_layoutcommit_args {
@@ -285,6 +287,7 @@ struct nfs4_layoutcommit_data {
struct nfs_fattr fattr;
struct list_head lseg_list;
struct rpc_cred *cred;
+ struct inode *inode;
struct nfs4_layoutcommit_args args;
struct nfs4_layoutcommit_res res;
};
@@ -293,6 +296,7 @@ struct nfs4_layoutreturn_args {
struct nfs4_sequence_args seq_args;
struct pnfs_layout_hdr *layout;
struct inode *inode;
+ struct pnfs_layout_range range;
nfs4_stateid stateid;
__u32 layout_type;
};
@@ -308,9 +312,53 @@ struct nfs4_layoutreturn {
struct nfs4_layoutreturn_res res;
struct rpc_cred *cred;
struct nfs_client *clp;
+ struct inode *inode;
+ int rpc_status;
+};
+
+#define PNFS_LAYOUTSTATS_MAXSIZE 256
+
+struct nfs42_layoutstat_args;
+struct nfs42_layoutstat_devinfo;
+typedef void (*layoutstats_encode_t)(struct xdr_stream *,
+ struct nfs42_layoutstat_args *,
+ struct nfs42_layoutstat_devinfo *);
+
+/* Per file per deviceid layoutstats */
+struct nfs42_layoutstat_devinfo {
+ struct nfs4_deviceid dev_id;
+ __u64 offset;
+ __u64 length;
+ __u64 read_count;
+ __u64 read_bytes;
+ __u64 write_count;
+ __u64 write_bytes;
+ __u32 layout_type;
+ layoutstats_encode_t layoutstats_encode;
+ void *layout_private;
+};
+
+struct nfs42_layoutstat_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ struct inode *inode;
+ nfs4_stateid stateid;
+ int num_dev;
+ struct nfs42_layoutstat_devinfo *devinfo;
+};
+
+struct nfs42_layoutstat_res {
+ struct nfs4_sequence_res seq_res;
+ int num_dev;
int rpc_status;
};
+struct nfs42_layoutstat_data {
+ struct inode *inode;
+ struct nfs42_layoutstat_args args;
+ struct nfs42_layoutstat_res res;
+};
+
struct stateowner_id {
__u64 create_time;
__u32 uniquifier;
@@ -325,6 +373,7 @@ struct nfs_openargs {
struct nfs_seqid * seqid;
int open_flags;
fmode_t fmode;
+ u32 share_access;
u32 access;
__u64 clientid;
struct stateowner_id id;
@@ -389,9 +438,10 @@ struct nfs_open_confirmres {
struct nfs_closeargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
- nfs4_stateid * stateid;
+ nfs4_stateid stateid;
struct nfs_seqid * seqid;
fmode_t fmode;
+ u32 share_access;
const u32 * bitmask;
};
@@ -416,12 +466,13 @@ struct nfs_lock_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_seqid * lock_seqid;
- nfs4_stateid * lock_stateid;
+ nfs4_stateid lock_stateid;
struct nfs_seqid * open_seqid;
- nfs4_stateid * open_stateid;
+ nfs4_stateid open_stateid;
struct nfs_lowner lock_owner;
unsigned char block : 1;
unsigned char reclaim : 1;
+ unsigned char new_lock : 1;
unsigned char new_lock_owner : 1;
};
@@ -437,7 +488,7 @@ struct nfs_locku_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_seqid * seqid;
- nfs4_stateid * stateid;
+ nfs4_stateid stateid;
};
struct nfs_locku_res {
@@ -513,6 +564,7 @@ struct nfs_pgio_res {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
__u32 count;
+ __u32 op_status;
int eof; /* used by read */
struct nfs_writeverf * verf; /* used by write */
const struct nfs_server *server; /* used by write */
@@ -532,6 +584,7 @@ struct nfs_commitargs {
struct nfs_commitres {
struct nfs4_sequence_res seq_res;
+ __u32 op_status;
struct nfs_fattr *fattr;
struct nfs_writeverf *verf;
const struct nfs_server *server;
@@ -974,17 +1027,14 @@ struct nfs4_readlink_res {
struct nfs4_sequence_res seq_res;
};
-#define NFS4_SETCLIENTID_NAMELEN (127)
struct nfs4_setclientid {
const nfs4_verifier * sc_verifier;
- unsigned int sc_name_len;
- char sc_name[NFS4_SETCLIENTID_NAMELEN + 1];
u32 sc_prog;
unsigned int sc_netid_len;
char sc_netid[RPCBIND_MAXNETIDLEN + 1];
unsigned int sc_uaddr_len;
char sc_uaddr[RPCBIND_MAXUADDRLEN + 1];
- u32 sc_cb_ident;
+ struct nfs_client *sc_clnt;
struct rpc_cred *sc_cred;
};
@@ -1132,12 +1182,9 @@ struct nfs41_state_protection {
struct nfs4_op_map allow;
};
-#define NFS4_EXCHANGE_ID_LEN (48)
struct nfs41_exchange_id_args {
struct nfs_client *client;
nfs4_verifier *verifier;
- unsigned int id_len;
- char id[NFS4_EXCHANGE_ID_LEN];
u32 flags;
struct nfs41_state_protection state_protect;
};
@@ -1159,8 +1206,15 @@ struct nfs41_impl_id {
struct nfstime4 date;
};
+struct nfs41_bind_conn_to_session_args {
+ struct nfs_client *client;
+ struct nfs4_sessionid sessionid;
+ u32 dir;
+ bool use_conn_in_rdma_mode;
+};
+
struct nfs41_bind_conn_to_session_res {
- struct nfs4_session *session;
+ struct nfs4_sessionid sessionid;
u32 dir;
bool use_conn_in_rdma_mode;
};
@@ -1177,6 +1231,8 @@ struct nfs41_exchange_id_res {
struct nfs41_create_session_args {
struct nfs_client *client;
+ u64 clientid;
+ uint32_t seqid;
uint32_t flags;
uint32_t cb_program;
struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
@@ -1184,7 +1240,11 @@ struct nfs41_create_session_args {
};
struct nfs41_create_session_res {
- struct nfs_client *client;
+ struct nfs4_sessionid sessionid;
+ uint32_t seqid;
+ uint32_t flags;
+ struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
+ struct nfs4_channel_attrs bc_attrs; /* Back Channel */
};
struct nfs41_reclaim_complete_args {
@@ -1250,11 +1310,15 @@ struct nfs42_falloc_args {
nfs4_stateid falloc_stateid;
u64 falloc_offset;
u64 falloc_length;
+ const u32 *falloc_bitmask;
};
struct nfs42_falloc_res {
struct nfs4_sequence_res seq_res;
unsigned int status;
+
+ struct nfs_fattr *falloc_fattr;
+ const struct nfs_server *falloc_server;
};
struct nfs42_seek_args {
@@ -1325,7 +1389,8 @@ struct nfs_pgio_header {
__u64 mds_offset; /* Filelayout dense stripe */
struct nfs_page_array page_array;
struct nfs_client *ds_clp; /* pNFS data server */
- int ds_idx; /* ds index if ds_clp is set */
+ int ds_commit_idx; /* ds index if ds_clp is set */
+ int pgio_mirror_idx;/* mirror index in pgio layer */
};
struct nfs_mds_commit_info {
@@ -1342,7 +1407,7 @@ struct nfs_commit_completion_ops {
};
struct nfs_commit_info {
- spinlock_t *lock;
+ spinlock_t *lock; /* inode->i_lock */
struct nfs_mds_commit_info *mds;
struct pnfs_ds_commit_info *ds;
struct nfs_direct_req *dreq; /* O_DIRECT request */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index ff3fea3..9abb763 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -460,7 +460,7 @@ struct nilfs_btree_node {
/* level */
#define NILFS_BTREE_LEVEL_DATA 0
#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
-#define NILFS_BTREE_LEVEL_MAX 14
+#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
/**
* struct nilfs_palloc_group_desc - block group descriptor
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 9b2022a..f94da0e 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -25,16 +25,11 @@ static inline void touch_nmi_watchdog(void)
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
-extern void watchdog_enable_hardlockup_detector(bool val);
-extern bool watchdog_hardlockup_detector_is_enabled(void);
+extern void hardlockup_detector_disable(void);
#else
-static inline void watchdog_enable_hardlockup_detector(bool val)
+static inline void hardlockup_detector_disable(void)
{
}
-static inline bool watchdog_hardlockup_detector_is_enabled(void)
-{
- return true;
-}
#endif
/*
@@ -68,12 +63,23 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
+extern int nmi_watchdog_enabled;
+extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
+extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
struct ctl_table;
-extern int proc_dowatchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
+extern int proc_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_nmi_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_soft_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_watchdog_thresh(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_watchdog_cpumask(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 83a6aed..6e85889 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -8,14 +8,13 @@
* See detailed comments in the file linux/bitmap.h describing the
* data type on which these nodemasks are based.
*
- * For details of nodemask_scnprintf() and nodemask_parse_user(),
- * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
- * For details of nodelist_scnprintf() and nodelist_parse(), see
- * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
- * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c.
- * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c.
- * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c.
- * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c.
+ * For details of nodemask_parse_user(), see bitmap_parse_user() in
+ * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(),
+ * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in
+ * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in
+ * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in
+ * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in
+ * lib/bitmap.c.
*
* The available nodemask operations are:
*
@@ -52,9 +51,7 @@
* NODE_MASK_NONE Initializer - no bits set
* unsigned long *nodes_addr(mask) Array of unsigned long's in mask
*
- * int nodemask_scnprintf(buf, len, mask) Format nodemask for printing
* int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask
- * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
* int nodelist_parse(buf, map) Parse ascii string as nodelist
* int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
* void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
@@ -98,6 +95,14 @@
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
+/**
+ * nodemask_pr_args - printf args to output a nodemask
+ * @maskp: nodemask to be printed
+ *
+ * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
+ */
+#define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits
+
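/*
 * A minimal usage sketch, not part of the patch above: printing a nodemask
 * through the '%*pb[l]' bitmap format specifiers with nodemask_pr_args().
 * The function name is hypothetical; assumes <linux/printk.h> and
 * <linux/nodemask.h> are available.
 */
static void example_show_nodes(const nodemask_t *mask)
{
	pr_info("allowed nodes: %*pbl\n", nodemask_pr_args(mask));
}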
/*
* The inline keyword gives the compiler room to decide to inline, or
* not inline a function as it sees best. However, as these functions
@@ -120,13 +125,13 @@ static inline void __node_clear(int node, volatile nodemask_t *dstp)
}
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
-static inline void __nodes_setall(nodemask_t *dstp, int nbits)
+static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
-static inline void __nodes_clear(nodemask_t *dstp, int nbits)
+static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
@@ -144,7 +149,7 @@ static inline int __node_test_and_set(int node, nodemask_t *addr)
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -152,7 +157,7 @@ static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_or(dst, src1, src2) \
__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -160,7 +165,7 @@ static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_xor(dst, src1, src2) \
__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -168,7 +173,7 @@ static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
@@ -176,7 +181,7 @@ static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static inline void __nodes_complement(nodemask_t *dstp,
- const nodemask_t *srcp, int nbits)
+ const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
}
@@ -184,7 +189,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_equal(const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
@@ -192,7 +197,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_intersects(const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
@@ -200,25 +205,25 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_subset(const nodemask_t *src1p,
- const nodemask_t *src2p, int nbits)
+ const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, int nbits)
+static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, int nbits)
+static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, int nbits)
+static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
@@ -304,14 +309,6 @@ static inline int __first_unset_node(const nodemask_t *maskp)
#define nodes_addr(src) ((src).bits)
-#define nodemask_scnprintf(buf, len, src) \
- __nodemask_scnprintf((buf), (len), &(src), MAX_NUMNODES)
-static inline int __nodemask_scnprintf(char *buf, int len,
- const nodemask_t *srcp, int nbits)
-{
- return bitmap_scnprintf(buf, len, srcp->bits, nbits);
-}
-
#define nodemask_parse_user(ubuf, ulen, dst) \
__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
static inline int __nodemask_parse_user(const char __user *buf, int len,
@@ -320,14 +317,6 @@ static inline int __nodemask_parse_user(const char __user *buf, int len,
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
-#define nodelist_scnprintf(buf, len, src) \
- __nodelist_scnprintf((buf), (len), &(src), MAX_NUMNODES)
-static inline int __nodelist_scnprintf(char *buf, int len,
- const nodemask_t *srcp, int nbits)
-{
- return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
-}
-
#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 9ac1a62..b02f72b 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -4,15 +4,20 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
* BSD LICENSE
*
- * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,49 +45,940 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Intel PCIe NTB Linux driver
+ * PCIe NTB Linux driver
*
* Contact Information:
- * Jon Mason <jon.mason@intel.com>
+ * Allen Hubbe <Allen.Hubbe@emc.com>
*/
-struct ntb_transport_qp;
+#ifndef _NTB_H_
+#define _NTB_H_
-struct ntb_client {
- struct device_driver driver;
- int (*probe)(struct pci_dev *pdev);
- void (*remove)(struct pci_dev *pdev);
+#include <linux/completion.h>
+#include <linux/device.h>
+
+struct ntb_client;
+struct ntb_dev;
+struct pci_dev;
+
+/**
+ * enum ntb_topo - NTB connection topology
+ * @NTB_TOPO_NONE: Topology is unknown or invalid.
+ * @NTB_TOPO_PRI: On primary side of local ntb.
+ * @NTB_TOPO_SEC: On secondary side of remote ntb.
+ * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
+ * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
+ */
+enum ntb_topo {
+ NTB_TOPO_NONE = -1,
+ NTB_TOPO_PRI,
+ NTB_TOPO_SEC,
+ NTB_TOPO_B2B_USD,
+ NTB_TOPO_B2B_DSD,
+};
+
+static inline int ntb_topo_is_b2b(enum ntb_topo topo)
+{
+ switch ((int)topo) {
+ case NTB_TOPO_B2B_USD:
+ case NTB_TOPO_B2B_DSD:
+ return 1;
+ }
+ return 0;
+}
+
+static inline char *ntb_topo_string(enum ntb_topo topo)
+{
+ switch (topo) {
+ case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
+ case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
+ case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
+ case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
+ case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
+ }
+ return "NTB_TOPO_INVALID";
+}
+
+/**
+ * enum ntb_speed - NTB link training speed
+ * @NTB_SPEED_AUTO: Request the max supported speed.
+ * @NTB_SPEED_NONE: Link is not trained to any speed.
+ * @NTB_SPEED_GEN1: Link is trained to gen1 speed.
+ * @NTB_SPEED_GEN2: Link is trained to gen2 speed.
+ * @NTB_SPEED_GEN3: Link is trained to gen3 speed.
+ */
+enum ntb_speed {
+ NTB_SPEED_AUTO = -1,
+ NTB_SPEED_NONE = 0,
+ NTB_SPEED_GEN1 = 1,
+ NTB_SPEED_GEN2 = 2,
+ NTB_SPEED_GEN3 = 3,
+};
+
+/**
+ * enum ntb_width - NTB link training width
+ * @NTB_WIDTH_AUTO: Request the max supported width.
+ * @NTB_WIDTH_NONE: Link is not trained to any width.
+ * @NTB_WIDTH_1: Link is trained to 1 lane width.
+ * @NTB_WIDTH_2: Link is trained to 2 lane width.
+ * @NTB_WIDTH_4: Link is trained to 4 lane width.
+ * @NTB_WIDTH_8: Link is trained to 8 lane width.
+ * @NTB_WIDTH_12: Link is trained to 12 lane width.
+ * @NTB_WIDTH_16: Link is trained to 16 lane width.
+ * @NTB_WIDTH_32: Link is trained to 32 lane width.
+ */
+enum ntb_width {
+ NTB_WIDTH_AUTO = -1,
+ NTB_WIDTH_NONE = 0,
+ NTB_WIDTH_1 = 1,
+ NTB_WIDTH_2 = 2,
+ NTB_WIDTH_4 = 4,
+ NTB_WIDTH_8 = 8,
+ NTB_WIDTH_12 = 12,
+ NTB_WIDTH_16 = 16,
+ NTB_WIDTH_32 = 32,
+};
+
+/**
+ * struct ntb_client_ops - ntb client operations
+ * @probe: Notify client of a new device.
+ * @remove: Notify client to remove a device.
+ */
+struct ntb_client_ops {
+ int (*probe)(struct ntb_client *client, struct ntb_dev *ntb);
+ void (*remove)(struct ntb_client *client, struct ntb_dev *ntb);
+};
+
+static inline int ntb_client_ops_is_valid(const struct ntb_client_ops *ops)
+{
+ /* commented callbacks are not required: */
+ return
+ ops->probe &&
+ ops->remove &&
+ 1;
+}
+
+/**
+ * struct ntb_ctx_ops - ntb driver context operations
+ * @link_event: See ntb_link_event().
+ * @db_event: See ntb_db_event().
+ */
+struct ntb_ctx_ops {
+ void (*link_event)(void *ctx);
+ void (*db_event)(void *ctx, int db_vector);
+};
+
+static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
+{
+ /* commented callbacks are not required: */
+ return
+ /* ops->link_event && */
+ /* ops->db_event && */
+ 1;
+}
+
+/**
+ * struct ntb_dev_ops - ntb device operations
+ * @mw_count: See ntb_mw_count().
+ * @mw_get_range: See ntb_mw_get_range().
+ * @mw_set_trans: See ntb_mw_set_trans().
+ * @mw_clear_trans: See ntb_mw_clear_trans().
+ * @link_is_up: See ntb_link_is_up().
+ * @link_enable: See ntb_link_enable().
+ * @link_disable: See ntb_link_disable().
+ * @db_is_unsafe: See ntb_db_is_unsafe().
+ * @db_valid_mask: See ntb_db_valid_mask().
+ * @db_vector_count: See ntb_db_vector_count().
+ * @db_vector_mask: See ntb_db_vector_mask().
+ * @db_read: See ntb_db_read().
+ * @db_set: See ntb_db_set().
+ * @db_clear: See ntb_db_clear().
+ * @db_read_mask: See ntb_db_read_mask().
+ * @db_set_mask: See ntb_db_set_mask().
+ * @db_clear_mask: See ntb_db_clear_mask().
+ * @peer_db_addr: See ntb_peer_db_addr().
+ * @peer_db_read: See ntb_peer_db_read().
+ * @peer_db_set: See ntb_peer_db_set().
+ * @peer_db_clear: See ntb_peer_db_clear().
+ * @peer_db_read_mask: See ntb_peer_db_read_mask().
+ * @peer_db_set_mask: See ntb_peer_db_set_mask().
+ * @peer_db_clear_mask: See ntb_peer_db_clear_mask().
+ * @spad_is_unsafe: See ntb_spad_is_unsafe().
+ * @spad_count: See ntb_spad_count().
+ * @spad_read: See ntb_spad_read().
+ * @spad_write: See ntb_spad_write().
+ * @peer_spad_addr: See ntb_peer_spad_addr().
+ * @peer_spad_read: See ntb_peer_spad_read().
+ * @peer_spad_write: See ntb_peer_spad_write().
+ */
+struct ntb_dev_ops {
+ int (*mw_count)(struct ntb_dev *ntb);
+ int (*mw_get_range)(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base, resource_size_t *size,
+ resource_size_t *align, resource_size_t *align_size);
+ int (*mw_set_trans)(struct ntb_dev *ntb, int idx,
+ dma_addr_t addr, resource_size_t size);
+ int (*mw_clear_trans)(struct ntb_dev *ntb, int idx);
+
+ int (*link_is_up)(struct ntb_dev *ntb,
+ enum ntb_speed *speed, enum ntb_width *width);
+ int (*link_enable)(struct ntb_dev *ntb,
+ enum ntb_speed max_speed, enum ntb_width max_width);
+ int (*link_disable)(struct ntb_dev *ntb);
+
+ int (*db_is_unsafe)(struct ntb_dev *ntb);
+ u64 (*db_valid_mask)(struct ntb_dev *ntb);
+ int (*db_vector_count)(struct ntb_dev *ntb);
+ u64 (*db_vector_mask)(struct ntb_dev *ntb, int db_vector);
+
+ u64 (*db_read)(struct ntb_dev *ntb);
+ int (*db_set)(struct ntb_dev *ntb, u64 db_bits);
+ int (*db_clear)(struct ntb_dev *ntb, u64 db_bits);
+
+ u64 (*db_read_mask)(struct ntb_dev *ntb);
+ int (*db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
+ int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
+
+ int (*peer_db_addr)(struct ntb_dev *ntb,
+ phys_addr_t *db_addr, resource_size_t *db_size);
+ u64 (*peer_db_read)(struct ntb_dev *ntb);
+ int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
+ int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
+
+ u64 (*peer_db_read_mask)(struct ntb_dev *ntb);
+ int (*peer_db_set_mask)(struct ntb_dev *ntb, u64 db_bits);
+ int (*peer_db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
+
+ int (*spad_is_unsafe)(struct ntb_dev *ntb);
+ int (*spad_count)(struct ntb_dev *ntb);
+
+ u32 (*spad_read)(struct ntb_dev *ntb, int idx);
+ int (*spad_write)(struct ntb_dev *ntb, int idx, u32 val);
+
+ int (*peer_spad_addr)(struct ntb_dev *ntb, int idx,
+ phys_addr_t *spad_addr);
+ u32 (*peer_spad_read)(struct ntb_dev *ntb, int idx);
+ int (*peer_spad_write)(struct ntb_dev *ntb, int idx, u32 val);
};
-enum {
- NTB_LINK_DOWN = 0,
- NTB_LINK_UP,
+static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
+{
+ /* commented callbacks are not required: */
+ return
+ ops->mw_count &&
+ ops->mw_get_range &&
+ ops->mw_set_trans &&
+ /* ops->mw_clear_trans && */
+ ops->link_is_up &&
+ ops->link_enable &&
+ ops->link_disable &&
+ /* ops->db_is_unsafe && */
+ ops->db_valid_mask &&
+
+ /* both set, or both unset */
+ (!ops->db_vector_count == !ops->db_vector_mask) &&
+
+ ops->db_read &&
+ /* ops->db_set && */
+ ops->db_clear &&
+ /* ops->db_read_mask && */
+ ops->db_set_mask &&
+ ops->db_clear_mask &&
+ ops->peer_db_addr &&
+ /* ops->peer_db_read && */
+ ops->peer_db_set &&
+ /* ops->peer_db_clear && */
+ /* ops->peer_db_read_mask && */
+ /* ops->peer_db_set_mask && */
+ /* ops->peer_db_clear_mask && */
+ /* ops->spad_is_unsafe && */
+ ops->spad_count &&
+ ops->spad_read &&
+ ops->spad_write &&
+ ops->peer_spad_addr &&
+ /* ops->peer_spad_read && */
+ ops->peer_spad_write &&
+ 1;
+}
+
+/**
+ * struct ntb_client - client interested in ntb devices
+ * @drv: Linux driver object.
+ * @ops: See &ntb_client_ops.
+ */
+struct ntb_client {
+ struct device_driver drv;
+ const struct ntb_client_ops ops;
};
-int ntb_register_client(struct ntb_client *drvr);
-void ntb_unregister_client(struct ntb_client *drvr);
-int ntb_register_client_dev(char *device_name);
-void ntb_unregister_client_dev(char *device_name);
-
-struct ntb_queue_handlers {
- void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
- void *data, int len);
- void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
- void *data, int len);
- void (*event_handler)(void *data, int status);
+#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
+
+/**
+ * struct ntb_dev - ntb device
+ * @dev: Linux device object.
+ * @pdev: Pci device entry of the ntb.
+ * @topo: Detected topology of the ntb.
+ * @ops: See &ntb_dev_ops.
+ * @ctx: See &ntb_ctx_ops.
+ * @ctx_ops: See &ntb_ctx_ops.
+ */
+struct ntb_dev {
+ struct device dev;
+ struct pci_dev *pdev;
+ enum ntb_topo topo;
+ const struct ntb_dev_ops *ops;
+ void *ctx;
+ const struct ntb_ctx_ops *ctx_ops;
+
+ /* private: */
+
+ /* synchronize setting, clearing, and calling ctx_ops */
+ spinlock_t ctx_lock;
+ /* block unregister until device is fully released */
+ struct completion released;
};
-unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
-unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
-struct ntb_transport_qp *
-ntb_transport_create_queue(void *data, struct pci_dev *pdev,
- const struct ntb_queue_handlers *handlers);
-void ntb_transport_free_queue(struct ntb_transport_qp *qp);
-int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
- unsigned int len);
-int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
- unsigned int len);
-void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
-void ntb_transport_link_up(struct ntb_transport_qp *qp);
-void ntb_transport_link_down(struct ntb_transport_qp *qp);
-bool ntb_transport_link_query(struct ntb_transport_qp *qp);
+#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
+
+/**
+ * ntb_register_client() - register a client for interest in ntb devices
+ * @client: Client context.
+ *
+ * The client will be added to the list of clients interested in ntb devices.
+ * The client will be notified of any ntb devices that are not already
+ * associated with a client, or if ntb devices are registered later.
+ *
+ * Return: Zero if the client is registered, otherwise an error number.
+ */
+#define ntb_register_client(client) \
+ __ntb_register_client((client), THIS_MODULE, KBUILD_MODNAME)
+
+int __ntb_register_client(struct ntb_client *client, struct module *mod,
+ const char *mod_name);
+
+/**
+ * ntb_unregister_client() - unregister a client for interest in ntb devices
+ * @client: Client context.
+ *
+ * The client will be removed from the list of clients interested in ntb
+ * devices. If any ntb devices are associated with the client, the client will
+ * be notified to remove those devices.
+ */
+void ntb_unregister_client(struct ntb_client *client);
+
+#define module_ntb_client(__ntb_client) \
+ module_driver(__ntb_client, ntb_register_client, \
+ ntb_unregister_client)
+
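/*
 * A minimal client sketch, not part of the patch above: the smallest client
 * the new registration API accepts - both required ops are provided and the
 * registration boilerplate is generated by module_ntb_client(). All
 * example_* names are hypothetical.
 */
static int example_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	dev_dbg(&ntb->dev, "example ntb client bound\n");
	return 0;
}

static void example_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	dev_dbg(&ntb->dev, "example ntb client unbound\n");
}

static struct ntb_client example_client = {
	.ops = {
		.probe = example_probe,
		.remove = example_remove,
	},
};
module_ntb_client(example_client);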
+/**
+ * ntb_register_device() - register a ntb device
+ * @ntb: NTB device context.
+ *
+ * The device will be added to the list of ntb devices. If any clients are
+ * interested in ntb devices, each client will be notified of the ntb device,
+ * until at most one client accepts the device.
+ *
+ * Return: Zero if the device is registered, otherwise an error number.
+ */
+int ntb_register_device(struct ntb_dev *ntb);
+
+/**
+ * ntb_unregister_device() - unregister a ntb device
+ * @ntb: NTB device context.
+ *
+ * The device will be removed from the list of ntb devices. If the ntb device
+ * is associated with a client, the client will be notified to remove the
+ * device.
+ */
+void ntb_unregister_device(struct ntb_dev *ntb);
+
+/**
+ * ntb_set_ctx() - associate a driver context with an ntb device
+ * @ntb: NTB device context.
+ * @ctx: Driver context.
+ * @ctx_ops: Driver context operations.
+ *
+ * Associate a driver context and operations with a ntb device. The context is
+ * provided by the client driver, and the driver may associate a different
+ * context with each ntb device.
+ *
+ * Return: Zero if the context is associated, otherwise an error number.
+ */
+int ntb_set_ctx(struct ntb_dev *ntb, void *ctx,
+ const struct ntb_ctx_ops *ctx_ops);
+
+/**
+ * ntb_clear_ctx() - disassociate any driver context from an ntb device
+ * @ntb: NTB device context.
+ *
+ * Clear any association that may exist between a driver context and the ntb
+ * device.
+ */
+void ntb_clear_ctx(struct ntb_dev *ntb);
+
+/**
+ * ntb_link_event() - notify driver context of a change in link status
+ * @ntb: NTB device context.
+ *
+ * Notify the driver context that the link status may have changed. The driver
+ * should call ntb_link_is_up() to get the current status.
+ */
+void ntb_link_event(struct ntb_dev *ntb);
+
+/**
+ * ntb_db_event() - notify driver context of a doorbell event
+ * @ntb: NTB device context.
+ * @vector: Interrupt vector number.
+ *
+ * Notify the driver context of a doorbell event. If hardware supports
+ * multiple interrupt vectors for doorbells, the vector number indicates which
+ * vector received the interrupt. The vector number is relative to the first
+ * vector used for doorbells, starting at zero, and must be less than
+ * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
+ * doorbell bits need service, and ntb_db_vector_mask() to determine which of
+ * those bits are associated with the vector number.
+ */
+void ntb_db_event(struct ntb_dev *ntb, int vector);
+
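/*
 * A handler sketch, not part of the patch above: one way a client's db_event
 * callback might follow the flow described here - read the doorbell, keep
 * only the bits owned by this vector, service them, then clear (re-arm)
 * those bits. Assumes the client passed its struct ntb_dev as ctx.
 */
static void example_db_event(void *ctx, int vector)
{
	struct ntb_dev *ntb = ctx;
	u64 db_bits;

	db_bits = ntb_db_read(ntb) & ntb_db_vector_mask(ntb, vector);
	if (!db_bits)
		return;

	/* ... service whatever work db_bits signals ... */

	ntb_db_clear(ntb, db_bits);
}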
+/**
+ * ntb_mw_count() - get the number of memory windows
+ * @ntb: NTB device context.
+ *
+ * Hardware and topology may support a different number of memory windows.
+ *
+ * Return: the number of memory windows.
+ */
+static inline int ntb_mw_count(struct ntb_dev *ntb)
+{
+ return ntb->ops->mw_count(ntb);
+}
+
+/**
+ * ntb_mw_get_range() - get the range of a memory window
+ * @ntb: NTB device context.
+ * @idx: Memory window number.
+ * @base: OUT - the base address for mapping the memory window
+ * @size: OUT - the size for mapping the memory window
+ * @align: OUT - the base alignment for translating the memory window
+ * @align_size: OUT - the size alignment for translating the memory window
+ *
+ * Get the range of a memory window. NULL may be given for any output
+ * parameter if the value is not needed. The base and size may be used for
+ * mapping the memory window, to access the peer memory. The alignment and
+ * size may be used for translating the memory window, for the peer to access
+ * memory on the local system.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_get_range(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base, resource_size_t *size,
+ resource_size_t *align, resource_size_t *align_size)
+{
+ return ntb->ops->mw_get_range(ntb, idx, base, size,
+ align, align_size);
+}
+
+/**
+ * ntb_mw_set_trans() - set the translation of a memory window
+ * @ntb: NTB device context.
+ * @idx: Memory window number.
+ * @addr: The dma address local memory to expose to the peer.
+ * @size: The size of the local memory to expose to the peer.
+ *
+ * Set the translation of a memory window. The peer may access local memory
+ * through the window starting at the address, up to the size. The address
+ * must be aligned to the alignment specified by ntb_mw_get_range(). The size
+ * must be aligned to the size alignment specified by ntb_mw_get_range().
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ return ntb->ops->mw_set_trans(ntb, idx, addr, size);
+}
+
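/*
 * A setup sketch, not part of the patch above: one hypothetical way to expose
 * a local buffer through memory window 0 using the two calls above. Error
 * handling is minimal; round_down() assumes a power-of-two size alignment,
 * and the buffer returned by dma_alloc_coherent() is assumed to satisfy the
 * base alignment reported by ntb_mw_get_range().
 */
static int example_setup_mw(struct ntb_dev *ntb, void **buf, dma_addr_t *dma)
{
	phys_addr_t base;
	resource_size_t size, align, align_size;
	int rc;

	rc = ntb_mw_get_range(ntb, 0, &base, &size, &align, &align_size);
	if (rc)
		return rc;

	size = round_down(size, align_size);	/* honour the size alignment */
	*buf = dma_alloc_coherent(&ntb->pdev->dev, size, dma, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	return ntb_mw_set_trans(ntb, 0, *dma, size);
}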
+/**
+ * ntb_mw_clear_trans() - clear the translation of a memory window
+ * @ntb: NTB device context.
+ * @idx: Memory window number.
+ *
+ * Clear the translation of a memory window. The peer may no longer access
+ * local memory through the window.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
+{
+ if (!ntb->ops->mw_clear_trans)
+ return ntb->ops->mw_set_trans(ntb, idx, 0, 0);
+
+ return ntb->ops->mw_clear_trans(ntb, idx);
+}
+
+/**
+ * ntb_link_is_up() - get the current ntb link state
+ * @ntb: NTB device context.
+ * @speed: OUT - The link speed expressed as PCIe generation number.
+ * @width: OUT - The link width expressed as the number of PCIe lanes.
+ *
+ * Get the current state of the ntb link.
+ * It is recommended to query the link state once after every link event.
+ * It is safe to query the link state in the context of the link event
+ * callback.
+ *
+ * Return: One if the link is up, zero if the link is down, otherwise a
+ * negative value indicating the error number.
+ */
+static inline int ntb_link_is_up(struct ntb_dev *ntb,
+ enum ntb_speed *speed, enum ntb_width *width)
+{
+ return ntb->ops->link_is_up(ntb, speed, width);
+}
+
+/**
+ * ntb_link_enable() - enable the link on the secondary side of the ntb
+ * @ntb: NTB device context.
+ * @max_speed: The maximum link speed expressed as PCIe generation number.
+ * @max_width: The maximum link width expressed as the number of PCIe lanes.
+ *
+ * Enable the link on the secondary side of the ntb. This can only be done
+ * from the primary side of the ntb in primary or b2b topology. The ntb device
+ * should train the link to its maximum speed and width, or the requested speed
+ * and width, whichever is smaller, if supported.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ return ntb->ops->link_enable(ntb, max_speed, max_width);
+}
+
+/**
+ * ntb_link_disable() - disable the link on the secondary side of the ntb
+ * @ntb: NTB device context.
+ *
+ * Disable the link on the secondary side of the ntb. This can only be
+ * done from the primary side of the ntb in primary or b2b topology. The ntb
+ * device should disable the link. Returning from this call must indicate that
+ * a barrier has passed: no more writes may pass in either direction across
+ * the link, except if this call returns an error number.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_link_disable(struct ntb_dev *ntb)
+{
+ return ntb->ops->link_disable(ntb);
+}
+
+/**
+ * ntb_db_is_unsafe() - check if it is safe to use hardware doorbell
+ * @ntb: NTB device context.
+ *
+ * It is possible for some ntb hardware to be affected by errata. Hardware
+ * drivers can advise clients to avoid using doorbells. Clients may ignore
+ * this advice, though caution is recommended.
+ *
+ * Return: Zero if it is safe to use doorbells, or One if it is not safe.
+ */
+static inline int ntb_db_is_unsafe(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->db_is_unsafe)
+ return 0;
+
+ return ntb->ops->db_is_unsafe(ntb);
+}
+
+/**
+ * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
+ * @ntb: NTB device context.
+ *
+ * Hardware may support a different number or arrangement of doorbell bits.
+ *
+ * Return: A mask of doorbell bits supported by the ntb.
+ */
+static inline u64 ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+ return ntb->ops->db_valid_mask(ntb);
+}
+
+/**
+ * ntb_db_vector_count() - get the number of doorbell interrupt vectors
+ * @ntb: NTB device context.
+ *
+ * Hardware may support a different number of interrupt vectors.
+ *
+ * Return: The number of doorbell interrupt vectors.
+ */
+static inline int ntb_db_vector_count(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->db_vector_count)
+ return 1;
+
+ return ntb->ops->db_vector_count(ntb);
+}
+
+/**
+ * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
+ * @ntb: NTB device context.
+ * @vector: Doorbell vector number.
+ *
+ * Each interrupt vector may have a different number or arrangement of bits.
+ *
+ * Return: A mask of doorbell bits serviced by a vector.
+ */
+static inline u64 ntb_db_vector_mask(struct ntb_dev *ntb, int vector)
+{
+ if (!ntb->ops->db_vector_mask)
+ return ntb_db_valid_mask(ntb);
+
+ return ntb->ops->db_vector_mask(ntb, vector);
+}
+
+/**
+ * ntb_db_read() - read the local doorbell register
+ * @ntb: NTB device context.
+ *
+ * Read the local doorbell register, and return the bits that are set.
+ *
+ * Return: The bits currently set in the local doorbell register.
+ */
+static inline u64 ntb_db_read(struct ntb_dev *ntb)
+{
+ return ntb->ops->db_read(ntb);
+}
+
+/**
+ * ntb_db_set() - set bits in the local doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to set.
+ *
+ * Set bits in the local doorbell register, which may generate a local doorbell
+ * interrupt. Bits that were already set must remain set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ if (!ntb->ops->db_set)
+ return -EINVAL;
+
+ return ntb->ops->db_set(ntb, db_bits);
+}
+
+/**
+ * ntb_db_clear() - clear bits in the local doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the local doorbell register, arming the bits for the next
+ * doorbell.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ return ntb->ops->db_clear(ntb, db_bits);
+}
+
+/**
+ * ntb_db_read_mask() - read the local doorbell mask
+ * @ntb: NTB device context.
+ *
+ * Read the local doorbell mask register, and return the bits that are set.
+ *
+ * This is unusual, though hardware is likely to support it.
+ *
+ * Return: The bits currently set in the local doorbell mask register.
+ */
+static inline u64 ntb_db_read_mask(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->db_read_mask)
+ return 0;
+
+ return ntb->ops->db_read_mask(ntb);
+}
+
+/**
+ * ntb_db_set_mask() - set bits in the local doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell mask bits to set.
+ *
+ * Set bits in the local doorbell mask register, preventing doorbell interrupts
+ * from being generated for those doorbell bits. Bits that were already set
+ * must remain set.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return ntb->ops->db_set_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_db_clear_mask() - clear bits in the local doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the local doorbell mask register, allowing doorbell interrupts
+ * to be generated for those doorbell bits. If a doorbell bit is already
+ * set at the time the mask is cleared, and the corresponding mask bit is
+ * changed from set to clear, then the ntb driver must ensure that
+ * ntb_db_event() is called. If the hardware does not generate the interrupt
+ * on clearing the mask bit, then the driver must call ntb_db_event() anyway.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ return ntb->ops->db_clear_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_addr() - address and size of the peer doorbell register
+ * @ntb: NTB device context.
+ * @db_addr: OUT - The address of the peer doorbell register.
+ * @db_size: OUT - The number of bytes to write the peer doorbell register.
+ *
+ * Return the address of the peer doorbell register. This may be used, for
+ * example, by drivers that offload memory copy operations to a dma engine.
+ * The drivers may wish to ring the peer doorbell at the completion of memory
+ * copy operations. For efficiency, and to simplify ordering of operations
+ * between the dma memory copies and the ringing doorbell, the driver may
+ * append one additional dma memory copy with the doorbell register as the
+ * destination, after the memory copy operations.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
+ phys_addr_t *db_addr,
+ resource_size_t *db_size)
+{
+ return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
+}
+
+/**
+ * ntb_peer_db_read() - read the peer doorbell register
+ * @ntb: NTB device context.
+ *
+ * Read the peer doorbell register, and return the bits that are set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: The bits currently set in the peer doorbell register.
+ */
+static inline u64 ntb_peer_db_read(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->peer_db_read)
+ return 0;
+
+ return ntb->ops->peer_db_read(ntb);
+}
+
+/**
+ * ntb_peer_db_set() - set bits in the peer doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to set.
+ *
+ * Set bits in the peer doorbell register, which may generate a peer doorbell
+ * interrupt. Bits that were already set must remain set.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ return ntb->ops->peer_db_set(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_clear() - clear bits in the peer doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the peer doorbell register, arming the bits for the next
+ * doorbell.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ if (!ntb->ops->peer_db_clear)
+ return -EINVAL;
+
+ return ntb->ops->peer_db_clear(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_read_mask() - read the peer doorbell mask
+ * @ntb: NTB device context.
+ *
+ * Read the peer doorbell mask register, and return the bits that are set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: The bits currently set in the peer doorbell mask register.
+ */
+static inline u64 ntb_peer_db_read_mask(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->peer_db_read_mask)
+ return 0;
+
+ return ntb->ops->peer_db_read_mask(ntb);
+}
+
+/**
+ * ntb_peer_db_set_mask() - set bits in the peer doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell mask bits to set.
+ *
+ * Set bits in the peer doorbell mask register, preventing doorbell interrupts
+ * from being generated for those doorbell bits. Bits that were already set
+ * must remain set.
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ if (!ntb->ops->peer_db_set_mask)
+ return -EINVAL;
+
+ return ntb->ops->peer_db_set_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_peer_db_clear_mask() - clear bits in the peer doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the peer doorbell mask register, allowing doorbell interrupts
+ * to be generated for those doorbell bits. If the hardware does not
+ * generate the interrupt on clearing the mask bit, then the driver should not
+ * implement this function!
+ *
+ * This is unusual, and hardware may not support it.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ if (!ntb->ops->peer_db_clear_mask)
+ return -EINVAL;
+
+ return ntb->ops->peer_db_clear_mask(ntb, db_bits);
+}
+
+/**
+ * ntb_spad_is_unsafe() - check if it is safe to use the hardware scratchpads
+ * @ntb: NTB device context.
+ *
+ * It is possible for some ntb hardware to be affected by errata. Hardware
+ * drivers can advise clients to avoid using scratchpads. Clients may ignore
+ * this advice, though caution is recommended.
+ *
+ * Return: Zero if it is safe to use scratchpads, or One if it is not safe.
+ */
+static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
+{
+ if (!ntb->ops->spad_is_unsafe)
+ return 0;
+
+ return ntb->ops->spad_is_unsafe(ntb);
+}
+
+/**
+ * ntb_spad_count() - get the number of scratchpads
+ * @ntb: NTB device context.
+ *
+ * Hardware and topology may support a different number of scratchpads.
+ *
+ * Return: the number of scratchpads.
+ */
+static inline int ntb_spad_count(struct ntb_dev *ntb)
+{
+ return ntb->ops->spad_count(ntb);
+}
+
+/**
+ * ntb_spad_read() - read the local scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ *
+ * Read the local scratchpad register, and return the value.
+ *
+ * Return: The value of the local scratchpad register.
+ */
+static inline u32 ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+ return ntb->ops->spad_read(ntb, idx);
+}
+
+/**
+ * ntb_spad_write() - write the local scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ * @val: Scratchpad value.
+ *
+ * Write the value to the local scratchpad register.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+ return ntb->ops->spad_write(ntb, idx, val);
+}
+
+/**
+ * ntb_peer_spad_addr() - address of the peer scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ * @spad_addr: OUT - The address of the peer scratchpad register.
+ *
+ * Return the address of the peer scratchpad register. This may be used, for
+ * example, by drivers that offload memory copy operations to a dma engine.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *spad_addr)
+{
+ return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
+}
+
+/**
+ * ntb_peer_spad_read() - read the peer scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ *
+ * Read the peer scratchpad register, and return the value.
+ *
+ * Return: The value of the peer scratchpad register.
+ */
+static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+{
+ return ntb->ops->peer_spad_read(ntb, idx);
+}
+
+/**
+ * ntb_peer_spad_write() - write the peer scratchpad register
+ * @ntb: NTB device context.
+ * @idx: Scratchpad index.
+ * @val: Scratchpad value.
+ *
+ * Write the value to the peer scratchpad register.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static inline int ntb_peer_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+ return ntb->ops->peer_spad_write(ntb, idx, val);
+}
+
+#endif
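/*
 * A probe-path sketch, not part of the patch above: how a client might tie
 * the context, link, and scratchpad calls together - install ctx ops, bring
 * the link up, and publish a "ready" value to the peer through scratchpad 0
 * once the link event reports the link as up. All example_* names and the
 * choice of scratchpad index 0 are hypothetical.
 */
struct example_ctx {
	struct ntb_dev *ntb;
};

static void example_link_event(void *c)
{
	struct example_ctx *ctx = c;
	enum ntb_speed speed;
	enum ntb_width width;

	if (ntb_link_is_up(ctx->ntb, &speed, &width) > 0)
		ntb_peer_spad_write(ctx->ntb, 0, 1);
}

static const struct ntb_ctx_ops example_ctx_ops = {
	.link_event = example_link_event,
};

static int example_client_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct example_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->ntb = ntb;

	rc = ntb_set_ctx(ntb, ctx, &example_ctx_ops);
	if (rc)
		goto err;

	rc = ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	if (rc) {
		ntb_clear_ctx(ntb);
		goto err;
	}
	return 0;
err:
	kfree(ctx);
	return rc;
}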
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
new file mode 100644
index 0000000..2862861
--- /dev/null
+++ b/include/linux/ntb_transport.h
@@ -0,0 +1,85 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copy
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * PCIe NTB Transport Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+struct ntb_transport_qp;
+
+struct ntb_transport_client {
+ struct device_driver driver;
+ int (*probe)(struct device *client_dev);
+ void (*remove)(struct device *client_dev);
+};
+
+int ntb_transport_register_client(struct ntb_transport_client *drvr);
+void ntb_transport_unregister_client(struct ntb_transport_client *drvr);
+int ntb_transport_register_client_dev(char *device_name);
+void ntb_transport_unregister_client_dev(char *device_name);
+
+struct ntb_queue_handlers {
+ void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ void (*event_handler)(void *data, int status);
+};
+
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct device *client_dev,
+ const struct ntb_queue_handlers *handlers);
+void ntb_transport_free_queue(struct ntb_transport_qp *qp);
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len);
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len);
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
+void ntb_transport_link_up(struct ntb_transport_qp *qp);
+void ntb_transport_link_down(struct ntb_transport_qp *qp);
+bool ntb_transport_link_query(struct ntb_transport_qp *qp);
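/*
 * A consumer sketch, not part of the patch above: a hypothetical user of the
 * transport queue API. Handler bodies are stubs, every example_* name is
 * invented, and the probe would be wired up through a struct
 * ntb_transport_client registered with ntb_transport_register_client().
 */
static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* consume the received payload, then repost a receive buffer
	 * with ntb_transport_rx_enqueue() */
}

static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* the enqueued buffer has completed; free or recycle it */
}

static void example_event(void *data, int status)
{
	/* queue link status changed; existing clients treat non-zero as up */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler = example_rx,
	.tx_handler = example_tx,
	.event_handler = example_event,
};

static int example_transport_probe(struct device *client_dev)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(NULL, client_dev, &example_handlers);
	if (!qp)
		return -EIO;

	ntb_transport_link_up(qp);
	return 0;
}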
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 258945f..c0d94ed 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -17,7 +17,6 @@
#include <uapi/linux/nvme.h>
#include <linux/pci.h>
-#include <linux/miscdevice.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
@@ -62,8 +61,6 @@ enum {
NVME_CSTS_SHST_MASK = 3 << 2,
};
-#define NVME_VS(major, minor) (major << 16 | minor)
-
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
@@ -77,7 +74,7 @@ struct nvme_dev {
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
- struct pci_dev *pci_dev;
+ struct device *dev;
struct dma_pool *prp_page_pool;
struct dma_pool *prp_small_pool;
int instance;
@@ -91,9 +88,11 @@ struct nvme_dev {
struct nvme_bar __iomem *bar;
struct list_head namespaces;
struct kref kref;
- struct miscdevice miscdev;
+ struct device *device;
work_func_t reset_workfn;
struct work_struct reset_work;
+ struct work_struct probe_work;
+ struct work_struct scan_work;
char name[12];
char serial[20];
char model[40];
@@ -105,7 +104,6 @@ struct nvme_dev {
u16 abort_limit;
u8 event_limit;
u8 vwc;
- u8 initialized;
};
/*
@@ -120,7 +118,9 @@ struct nvme_ns {
unsigned ns_id;
int lba_shift;
- int ms;
+ u16 ms;
+ bool ext;
+ u8 pi_type;
u64 mode_select_num_blocks;
u32 mode_select_block_len;
};
@@ -132,13 +132,13 @@ struct nvme_ns {
* allocated to store the PRP list.
*/
struct nvme_iod {
- void *private; /* For the use of the submitter of the I/O */
+ unsigned long private; /* For the use of the submitter of the I/O */
int npages; /* In the PRP list. 0 means small pool in use */
int offset; /* Of PRP list */
int nents; /* Used in scatterlist */
int length; /* Of data, in bytes */
dma_addr_t first_dma;
- struct list_head node;
+ struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
struct scatterlist sg[0];
};
@@ -147,25 +147,15 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-/**
- * nvme_free_iod - frees an nvme_iod
- * @dev: The device that the I/O was submitted to
- * @iod: The memory to free
- */
-void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
-
-int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
-struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
- unsigned long addr, unsigned length);
-void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
- struct nvme_iod *iod);
-int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
- struct nvme_command *, u32 *);
-int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
-int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
- u32 *result);
-int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
- dma_addr_t dma_addr);
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ void *buf, unsigned bufflen);
+int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
+ void *buffer, void __user *ubuffer, unsigned bufflen,
+ u32 *result, unsigned timeout);
+int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
+int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
+ struct nvme_id_ns **id);
+int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
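
The replacement identify helpers allocate the ID page on behalf of the caller. A minimal, hedged sketch of how the driver side might use nvme_identify_ctrl(); the demo function is hypothetical, and the sn/mn field names are taken from the UAPI struct nvme_id_ctrl:

static int demo_read_ctrl_info(struct nvme_dev *dev)
{
        struct nvme_id_ctrl *id;
        int ret;

        ret = nvme_identify_ctrl(dev, &id);     /* allocates *id on success */
        if (ret)
                return ret;

        memcpy(dev->serial, id->sn, sizeof(dev->serial));
        memcpy(dev->model, id->mn, sizeof(dev->model));

        kfree(id);
        return 0;
}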
diff --git a/include/linux/nx842.h b/include/linux/nx842.h
deleted file mode 100644
index a4d324c..0000000
--- a/include/linux/nx842.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __NX842_H__
-#define __NX842_H__
-
-int nx842_get_workmem_size(void);
-int nx842_get_workmem_size_aligned(void);
-int nx842_compress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
- unsigned char *out, unsigned int *out_len, void *wrkmem);
-
-#endif
diff --git a/include/linux/of.h b/include/linux/of.h
index dfde07e..edc068d 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -120,13 +120,21 @@ extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
extern raw_spinlock_t devtree_lock;
+/* flag descriptions (need to be visible even when !CONFIG_OF) */
+#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
+#define OF_DETACHED 2 /* node has been detached from the device tree */
+#define OF_POPULATED 3 /* device already created for the node */
+#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
+
#ifdef CONFIG_OF
+void of_core_init(void);
+
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_OF;
}
-static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
{
return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
}
@@ -217,12 +225,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
#endif
-/* flag descriptions */
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
-#define OF_DETACHED 2 /* node has been detached from the device tree */
-#define OF_POPULATED 3 /* device already created for the node */
-#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
-
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
@@ -305,6 +307,7 @@ extern int of_property_read_string_helper(struct device_node *np,
extern int of_device_is_compatible(const struct device_node *device,
const char *);
extern bool of_device_is_available(const struct device_node *device);
+extern bool of_device_is_big_endian(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
@@ -332,6 +335,7 @@ extern int of_count_phandle_with_args(const struct device_node *np,
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
+extern int of_alias_get_highest_id(const char *stem);
extern int of_machine_is_compatible(const char *compat);
@@ -374,12 +378,16 @@ bool of_console_check(struct device_node *dn, char *name, int index);
#else /* CONFIG_OF */
+static inline void of_core_init(void)
+{
+}
+
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -420,6 +428,11 @@ static inline struct device_node *of_find_node_opts_by_path(const char *path,
return NULL;
}
+static inline struct device_node *of_find_node_by_phandle(phandle handle)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_parent(const struct device_node *node)
{
return NULL;
@@ -466,6 +479,11 @@ static inline bool of_device_is_available(const struct device_node *device)
return false;
}
+static inline bool of_device_is_big_endian(const struct device_node *device)
+{
+ return false;
+}
+
static inline struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp)
@@ -594,6 +612,11 @@ static inline int of_alias_get_id(struct device_node *np, const char *stem)
return -ENOSYS;
}
+static inline int of_alias_get_highest_id(const char *stem)
+{
+ return -ENOSYS;
+}
+
static inline int of_machine_is_compatible(const char *compat)
{
return 0;
@@ -616,6 +639,38 @@ static inline const char *of_prop_next_string(struct property *prop,
return NULL;
}
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+ return 0;
+}
+
+static inline int of_node_test_and_set_flag(struct device_node *n,
+ unsigned long flag)
+{
+ return 0;
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+}
+
+static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
+{
+}
+
+static inline int of_property_check_flag(struct property *p, unsigned long flag)
+{
+ return 0;
+}
+
+static inline void of_property_set_flag(struct property *p, unsigned long flag)
+{
+}
+
+static inline void of_property_clear_flag(struct property *p, unsigned long flag)
+{
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -623,7 +678,10 @@ static inline const char *of_prop_next_string(struct property *prop,
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
#else
-static inline int of_node_to_nid(struct device_node *device) { return 0; }
+static inline int of_node_to_nid(struct device_node *device)
+{
+ return NUMA_NO_NODE;
+}
#endif
static inline struct device_node *of_find_matching_node(
@@ -771,7 +829,7 @@ static inline int of_property_read_string_index(struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node.
- * Returns true if the property exist false otherwise.
+ * Returns true if the property exists, false otherwise.
*/
static inline bool of_property_read_bool(const struct device_node *np,
const char *propname)
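
For reference, the boolean accessor documented above only tests for the presence of the (possibly empty) property. A tiny, hypothetical usage sketch:

static bool demo_wants_wakeup(struct device_node *np)
{
        /* true if the property exists, false otherwise */
        return of_property_read_bool(np, "wakeup-source");
}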
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index ef37021..4c50854 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -33,6 +33,8 @@ extern int of_device_add(struct platform_device *pdev);
extern int of_device_register(struct platform_device *ofdev);
extern void of_device_unregister(struct platform_device *ofdev);
+extern const void *of_device_get_match_data(const struct device *dev);
+
extern ssize_t of_device_get_modalias(struct device *dev,
char *str, ssize_t len);
@@ -53,6 +55,7 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
+void of_dma_configure(struct device *dev, struct device_node *np);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -64,6 +67,11 @@ static inline int of_driver_match_device(struct device *dev,
static inline void of_device_uevent(struct device *dev,
struct kobj_uevent_env *env) { }
+static inline const void *of_device_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
static inline int of_device_get_modalias(struct device *dev,
char *str, ssize_t len)
{
@@ -90,6 +98,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
+static inline void of_dma_configure(struct device *dev, struct device_node *np)
+{}
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
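
The new of_device_get_match_data() returns the .data pointer of the matched of_device_id, removing the usual of_match_device() boilerplate from probe paths. A hedged sketch; the demo types and compatible string are invented for illustration:

struct demo_config {
        unsigned int fifo_depth;
};

static const struct demo_config demo_v1_cfg = { .fifo_depth = 16 };

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-v1", .data = &demo_v1_cfg },
        { /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
        const struct demo_config *cfg;

        cfg = of_device_get_match_data(&pdev->dev);
        if (!cfg)
                return -ENODEV;

        dev_info(&pdev->dev, "fifo depth %u\n", cfg->fifo_depth);
        return 0;
}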
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 56bc026..98ba752 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -23,6 +23,9 @@ struct of_dma {
struct device_node *of_node;
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *);
+ void *(*of_dma_route_allocate)
+ (struct of_phandle_args *, struct of_dma *);
+ struct dma_router *dma_router;
void *of_dma_data;
};
@@ -37,12 +40,20 @@ extern int of_dma_controller_register(struct device_node *np,
(struct of_phandle_args *, struct of_dma *),
void *data);
extern void of_dma_controller_free(struct device_node *np);
+
+extern int of_dma_router_register(struct device_node *np,
+ void *(*of_dma_route_allocate)
+ (struct of_phandle_args *, struct of_dma *),
+ struct dma_router *dma_router);
+#define of_dma_router_free of_dma_controller_free
+
extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
const char *name);
extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma);
extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
struct of_dma *ofdma);
+
#else
static inline int of_dma_controller_register(struct device_node *np,
struct dma_chan *(*of_dma_xlate)
@@ -56,6 +67,16 @@ static inline void of_dma_controller_free(struct device_node *np)
{
}
+static inline int of_dma_router_register(struct device_node *np,
+ void *(*of_dma_route_allocate)
+ (struct of_phandle_args *, struct of_dma *),
+ struct dma_router *dma_router)
+{
+ return -ENODEV;
+}
+
+#define of_dma_router_free of_dma_controller_free
+
static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
const char *name)
{
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0ff360d..df9ef38 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -33,9 +33,11 @@ extern void *of_fdt_get_property(const void *blob,
extern int of_fdt_is_compatible(const void *blob,
unsigned long node,
const char *compat);
+extern bool of_fdt_is_big_endian(const void *blob,
+ unsigned long node);
extern int of_fdt_match(const void *blob, unsigned long node,
const char *const *compat);
-extern void of_fdt_unflatten_tree(unsigned long *blob,
+extern void of_fdt_unflatten_tree(const unsigned long *blob,
struct device_node **mynodes);
/* TBD: Temporary export of fdt globals - remove when code fully merged */
@@ -62,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_fdt_scan_reserved_mem(void);
+extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
@@ -89,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {}
+static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 38fc050..69dbe31 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -52,6 +52,7 @@ extern int of_get_named_gpio_flags(struct device_node *np,
extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
+extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
extern void of_gpiochip_add(struct gpio_chip *gc);
extern void of_gpiochip_remove(struct gpio_chip *gc);
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index befef42..f8bcd0e 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -14,6 +14,8 @@
#ifndef __LINUX_OF_GRAPH_H
#define __LINUX_OF_GRAPH_H
+#include <linux/types.h>
+
/**
* struct of_endpoint - the OF graph endpoint data structure
* @port: identifier (value of reg property) of a port this endpoint belongs to
@@ -26,11 +28,25 @@ struct of_endpoint {
const struct device_node *local_node;
};
+/**
+ * for_each_endpoint_of_node - iterate over every endpoint in a device node
+ * @parent: parent device node containing ports and endpoints
+ * @child: loop variable pointing to the current endpoint node
+ *
+ * When breaking out of the loop, of_node_put(child) has to be called manually.
+ */
+#define for_each_endpoint_of_node(parent, child) \
+ for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \
+ child = of_graph_get_next_endpoint(parent, child))
+
#ifdef CONFIG_OF
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
+struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
+struct device_node *of_graph_get_endpoint_by_regs(
+ const struct device_node *parent, int port_reg, int reg);
struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node);
struct device_node *of_graph_get_remote_port(const struct device_node *node);
@@ -42,6 +58,12 @@ static inline int of_graph_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
+static inline struct device_node *of_graph_get_port_by_id(
+ struct device_node *node, u32 id)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_next_endpoint(
const struct device_node *parent,
struct device_node *previous)
@@ -49,6 +71,12 @@ static inline struct device_node *of_graph_get_next_endpoint(
return NULL;
}
+static inline struct device_node *of_graph_get_endpoint_by_regs(
+ const struct device_node *parent, int port_reg, int reg)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node)
{
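
The for_each_endpoint_of_node() helper above manages the of_node references across iterations, but not across an early exit. A short sketch of the intended pattern (the demo function is hypothetical; the new of_graph_get_endpoint_by_regs() performs a similar lookup by port/reg):

static struct device_node *demo_find_endpoint(struct device_node *parent,
                                              u32 wanted_port)
{
        struct device_node *ep;
        struct of_endpoint endpoint;

        for_each_endpoint_of_node(parent, ep) {
                if (of_graph_parse_endpoint(ep, &endpoint))
                        continue;
                if (endpoint.port == wanted_port)
                        return ep;      /* keeps the reference; caller puts it */
        }
        return NULL;
}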
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 16c7554..ffbe470 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -12,7 +12,8 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
size_t *size);
extern void of_iommu_init(void);
-extern struct iommu_ops *of_iommu_configure(struct device *dev);
+extern struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np);
#else
@@ -24,7 +25,8 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
}
static inline void of_iommu_init(void) { }
-static inline struct iommu_ops *of_iommu_configure(struct device *dev)
+static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np)
{
return NULL;
}
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index bfec136..d884929 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -37,8 +37,6 @@ extern int of_irq_parse_one(struct device_node *device, int index,
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern int of_irq_to_resource_table(struct device_node *dev,
- struct resource *res, int nr_irqs);
extern void of_irq_init(const struct of_device_id *matches);
@@ -46,6 +44,8 @@ extern void of_irq_init(const struct of_device_id *matches);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
+extern int of_irq_to_resource_table(struct device_node *dev,
+ struct resource *res, int nr_irqs);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -59,6 +59,11 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name)
{
return 0;
}
+static inline int of_irq_to_resource_table(struct device_node *dev,
+ struct resource *res, int nr_irqs)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_OF)
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index d449018..8f2237e 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -24,6 +24,7 @@ struct phy_device *of_phy_attach(struct net_device *dev,
phy_interface_t iface);
extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
#else /* CONFIG_OF */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
@@ -60,6 +61,12 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
}
+
+static inline int of_mdio_parse_addr(struct device *dev,
+ const struct device_node *np)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_OF */
#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 34597c8..9cd72aa 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -9,8 +9,11 @@
#ifdef CONFIG_OF_NET
#include <linux/of.h>
+
+struct net_device;
extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
+extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np)
{
@@ -21,6 +24,11 @@ static inline const void *of_get_mac_address(struct device_node *np)
{
return NULL;
}
+
+static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
+{
+ return NULL;
+}
#endif
#endif /* __LINUX_OF_NET_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index ce0e5ab..29fd3fe 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np);
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
+void of_pci_dma_configure(struct pci_dev *pci_dev);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -50,6 +51,8 @@ of_get_pci_domain_nr(struct device_node *node)
{
return -1;
}
+
+static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { }
#endif
#if defined(CONFIG_OF_ADDRESS)
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 8a860f0..611a691 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root,
static inline void of_platform_depopulate(struct device *parent) { }
#endif
-#ifdef CONFIG_OF_DYNAMIC
+#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
extern void of_platform_register_reconfig_notifier(void);
#else
static inline void of_platform_register_reconfig_notifier(void) { }
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index c2080ee..7dee0014 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -163,7 +163,8 @@ extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
extern int gpmc_calc_divider(unsigned int sync_clk);
-extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t);
+extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
+ const struct gpmc_settings *s);
extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p);
extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
extern void gpmc_cs_free(int cs);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7620098..7deecb7 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -32,6 +32,8 @@ enum oom_scan_t {
/* Thread is the potential origin of an oom condition; kill first on oom */
#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
+extern struct mutex oom_lock;
+
static inline void set_current_oom_origin(void)
{
current->signal->oom_flags |= OOM_FLAG_ORIGIN;
@@ -47,6 +49,8 @@ static inline bool oom_task_origin(const struct task_struct *p)
return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}
+extern void mark_oom_victim(struct task_struct *tsk);
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
@@ -58,32 +62,25 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
struct mem_cgroup *memcg, nodemask_t *nodemask,
const char *message);
-extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
-
extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order, const nodemask_t *nodemask);
+ int order, const nodemask_t *nodemask,
+ struct mem_cgroup *memcg);
extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
unsigned long totalpages, const nodemask_t *nodemask,
bool force_kill);
-extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *mask, bool force_kill);
+
+extern void exit_oom_victim(void);
+
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
extern bool oom_killer_disabled;
-
-static inline void oom_killer_disable(void)
-{
- oom_killer_disabled = true;
-}
-
-static inline void oom_killer_enable(void)
-{
- oom_killer_disabled = false;
-}
+extern bool oom_killer_disable(void);
+extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 90230d5..703ea5c 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -5,8 +5,11 @@
* An MCS like lock especially tailored for optimistic spinning for sleeping
* lock implementations (mutex, rwsem, etc).
*/
-
-#define OSQ_UNLOCKED_VAL (0)
+struct optimistic_spin_node {
+ struct optimistic_spin_node *next, *prev;
+ int locked; /* 1 if lock acquired */
+ int cpu; /* encoded CPU # + 1 value */
+};
struct optimistic_spin_queue {
/*
@@ -16,6 +19,8 @@ struct optimistic_spin_queue {
atomic_t tail;
};
+#define OSQ_UNLOCKED_VAL (0)
+
/* Init macro and function. */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
@@ -24,4 +29,12 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
+
+static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
+{
+ return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
+}
+
#endif
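
The queue structure is now paired with the exported osq_lock()/osq_unlock() entry points. A rough sketch of the usage pattern, modeled loosely on the mutex/rwsem optimistic-spin paths; the demo lock type is hypothetical:

struct demo_sleeping_lock {
        atomic_t                        owner;  /* 0 == unlocked */
        struct optimistic_spin_queue    osq;    /* init with osq_lock_init() */
};

static bool demo_try_spin_acquire(struct demo_sleeping_lock *l)
{
        bool taken = false;

        /* only one task spins on the lock word; the rest queue MCS-style */
        if (!osq_lock(&l->osq))
                return false;

        if (atomic_cmpxchg(&l->owner, 0, 1) == 0)
                taken = true;

        osq_unlock(&l->osq);
        return taken;           /* caller falls back to sleeping if false */
}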
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e1f5fcd..f34e040 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -121,8 +121,12 @@ enum pageflags {
PG_fscache = PG_private_2, /* page backed by cache */
/* XEN */
+ /* Pinned in Xen as a read-only pagetable page. */
PG_pinned = PG_owner_priv_1,
+ /* Pinned as part of domain save (see xen_mm_pin_all()). */
PG_savepinned = PG_dirty,
+ /* Has a grant mapping of another (foreign) domain's page. */
+ PG_foreign = PG_owner_priv_1,
/* SLOB */
PG_slob_free = PG_private,
@@ -215,6 +219,7 @@ __PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, checked) /* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
PAGEFLAG(SavePinned, savepinned); /* Xen */
+PAGEFLAG(Foreign, foreign); /* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
__SETPAGEFLAG(SwapBacked, swapbacked)
@@ -284,6 +289,47 @@ PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
+/*
+ * On an anonymous page mapped into a user virtual memory area,
+ * page->mapping points to its anon_vma, not to a struct address_space;
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ *
+ * Please note that, confusingly, "page_mapping" refers to the inode
+ * address_space which maps the page from disk; whereas "page_mapped"
+ * refers to user virtual address space into which the page is mapped.
+ */
+#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+
+static inline int PageAnon(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+#ifdef CONFIG_KSM
+/*
+ * A KSM page is one of those write-protected "shared pages" or "merged pages"
+ * which KSM maps into multiple mms, wherever identical anonymous page content
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
+ */
+static inline int PageKsm(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+#else
+TESTPAGEFLAG_FALSE(Ksm)
+#endif
+
u64 stable_page_flags(struct page *page);
static inline int PageUptodate(struct page *page)
@@ -323,8 +369,6 @@ static inline void SetPageUptodate(struct page *page)
CLEARPAGEFLAG(Uptodate, uptodate)
-extern void cancel_dirty_page(struct page *page, unsigned int account_size);
-
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);
@@ -423,6 +467,21 @@ static inline void ClearPageCompound(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
+#ifdef CONFIG_HUGETLB_PAGE
+int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page);
+bool page_huge_active(struct page *page);
+#else
+TESTPAGEFLAG_FALSE(Huge)
+TESTPAGEFLAG_FALSE(HeadHuge)
+
+static inline bool page_huge_active(struct page *page)
+{
+ return 0;
+}
+#endif
+
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -477,6 +536,53 @@ static inline int PageTransTail(struct page *page)
#endif
/*
+ * PageBuddy() indicates that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
+ * -2 so that an underflow of the page_mapcount() won't be mistaken
+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
+ */
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
+static inline int PageBuddy(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBuddy(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBuddy(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBuddy(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
+#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
+
+static inline int PageBalloon(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBalloon(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
+/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
*/
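
The PAGE_MAPPING_* bits documented above encode what page->mapping actually points at. As a hedged illustration of that encoding only (this mirrors the logic of existing mm helpers such as page_anon_vma(), but the demo function itself is not part of this patch):

static struct anon_vma *demo_page_anon_vma(struct page *page)
{
        unsigned long mapping = (unsigned long)page->mapping;

        /* must be anonymous and not a KSM stable-tree node */
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;

        return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}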
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 9554215..17fa4f8 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -41,7 +41,8 @@ int page_counter_try_charge(struct page_counter *counter,
struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
int page_counter_limit(struct page_counter *counter, unsigned long limit);
-int page_counter_memparse(const char *buf, unsigned long *nr_pages);
+int page_counter_memparse(const char *buf, const char *max,
+ unsigned long *nr_pages);
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index d2a2c84..c42981c 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -40,7 +40,7 @@ struct page_ext {
#ifdef CONFIG_PAGE_OWNER
unsigned int order;
gfp_t gfp_mask;
- struct stack_trace trace;
+ unsigned int nr_entries;
unsigned long trace_entries[8];
#endif
};
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index b48c347..cacaabe 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,6 +8,7 @@ extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned int order);
extern void __set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask);
+extern gfp_t __get_page_owner_gfp(struct page *page);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
@@ -25,6 +26,14 @@ static inline void set_page_owner(struct page *page,
__set_page_owner(page, order, gfp_mask);
}
+
+static inline gfp_t get_page_owner_gfp(struct page *page)
+{
+ if (likely(!page_owner_inited))
+ return 0;
+
+ return __get_page_owner_gfp(page);
+}
#else
static inline void reset_page_owner(struct page *page, unsigned int order)
{
@@ -33,6 +42,10 @@ static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
}
+static inline gfp_t get_page_owner_gfp(struct page *page)
+{
+ return 0;
+}
#endif /* CONFIG_PAGE_OWNER */
#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b3736f..a6c78e0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -651,7 +651,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
+extern void __delete_from_page_cache(struct page *page, void *shadow,
+ struct mem_cgroup *memcg);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
/*
@@ -670,4 +671,10 @@ static inline int add_to_page_cache(struct page *page,
return error;
}
+static inline unsigned long dir_pages(struct inode *inode)
+{
+ return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+}
+
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/parport.h b/include/linux/parport.h
index c22f125..58e3c64 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/irqreturn.h>
#include <linux/semaphore.h>
+#include <linux/device.h>
#include <asm/ptrace.h>
#include <uapi/linux/parport.h>
@@ -145,6 +146,8 @@ struct pardevice {
unsigned int flags;
struct pardevice *next;
struct pardevice *prev;
+ struct device dev;
+ bool devmodel;
struct parport_state *state; /* saved status over preemption */
wait_queue_head_t wait_q;
unsigned long int time;
@@ -156,6 +159,8 @@ struct pardevice {
void * sysctl_table;
};
+#define to_pardevice(n) container_of(n, struct pardevice, dev)
+
/* IEEE1284 information */
/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
@@ -195,7 +200,7 @@ struct parport {
 * This may unfortunately be null if the
* port has a legacy driver.
*/
-
+ struct device bus_dev; /* to link with the bus */
struct parport *physport;
/* If this is a non-default mux
parport, i.e. we're a clone of a real
@@ -245,15 +250,26 @@ struct parport {
struct parport *slaves[3];
};
+#define to_parport_dev(n) container_of(n, struct parport, bus_dev)
+
#define DEFAULT_SPIN_TIME 500 /* us */
struct parport_driver {
const char *name;
void (*attach) (struct parport *);
void (*detach) (struct parport *);
+ void (*match_port)(struct parport *);
+ int (*probe)(struct pardevice *);
+ struct device_driver driver;
+ bool devmodel;
struct list_head list;
};
+#define to_parport_driver(n) container_of(n, struct parport_driver, driver)
+
+int parport_bus_init(void);
+void parport_bus_exit(void);
+
/* parport_register_port registers a new parallel port at the given
address (if one does not already exist) and returns a pointer to it.
This entails claiming the I/O region, IRQ and DMA. NULL is returned
@@ -272,10 +288,20 @@ void parport_announce_port (struct parport *port);
extern void parport_remove_port(struct parport *port);
/* Register a new high-level driver. */
-extern int parport_register_driver (struct parport_driver *);
+
+int __must_check __parport_register_driver(struct parport_driver *,
+ struct module *,
+ const char *mod_name);
+/*
+ * parport_register_driver must be a macro so that KBUILD_MODNAME can
+ * be expanded
+ */
+#define parport_register_driver(driver) \
+ __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
/* Unregister a high-level driver. */
extern void parport_unregister_driver (struct parport_driver *);
+void parport_unregister_driver(struct parport_driver *);
/* If parport_register_driver doesn't fit your needs, perhaps
* parport_find_xxx does. */
@@ -288,6 +314,15 @@ extern irqreturn_t parport_irq_handler(int irq, void *dev_id);
/* Reference counting for ports. */
extern struct parport *parport_get_port (struct parport *);
extern void parport_put_port (struct parport *);
+void parport_del_port(struct parport *);
+
+struct pardev_cb {
+ int (*preempt)(void *);
+ void (*wakeup)(void *);
+ void *private;
+ void (*irq_func)(void *);
+ unsigned int flags;
+};
/* parport_register_device declares that a device is connected to a
port, and tells the kernel all it needs to know.
@@ -301,6 +336,10 @@ struct pardevice *parport_register_device(struct parport *port,
void (*irq_func)(void *),
int flags, void *handle);
+struct pardevice *
+parport_register_dev_model(struct parport *port, const char *name,
+ const struct pardev_cb *par_dev_cb, int cnt);
+
/* parport_unregister unlinks a device from the chain. */
extern void parport_unregister_device(struct pardevice *dev);
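
Under the new device-model registration path, a driver fills in match_port()/probe(), sets .devmodel, and registers through the macro so THIS_MODULE and KBUILD_MODNAME are passed along. A hedged sketch with invented "demo" names:

static int demo_probe(struct pardevice *par_dev)
{
        return 0;       /* accept the device */
}

static void demo_match_port(struct parport *port)
{
        /* called for each known port; bind via parport_register_dev_model() */
}

static struct parport_driver demo_parport_driver = {
        .name           = "demo",
        .match_port     = demo_match_port,
        .probe          = demo_probe,
        .devmodel       = true,
};

static int __init demo_init(void)
{
        /* expands to __parport_register_driver(..., THIS_MODULE, KBUILD_MODNAME) */
        return parport_register_driver(&demo_parport_driver);
}
module_init(demo_init);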
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h
index 3cc21c9..9fade5d 100644
--- a/include/linux/pata_arasan_cf_data.h
+++ b/include/linux/pata_arasan_cf_data.h
@@ -4,7 +4,7 @@
* Arasan Compact Flash host controller platform data header file
*
* Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 24c7728..a965efa 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -77,6 +77,11 @@ static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
#endif
+extern const u8 pci_acpi_dsm_uuid[];
+#define DEVICE_LABEL_DSM 0x07
+#define RESET_DELAY_DSM 0x08
+#define FUNCTION_DELAY_DSM 0x09
+
#else /* CONFIG_ACPI */
static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
index 8af4610..207c561 100644
--- a/include/linux/pci-aspm.h
+++ b/include/linux/pci-aspm.h
@@ -29,7 +29,6 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
void pci_disable_link_state(struct pci_dev *pdev, int state);
void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
-void pcie_clear_aspm(struct pci_bus *bus);
void pcie_no_aspm(void);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
@@ -47,9 +46,6 @@ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
{
}
-static inline void pcie_clear_aspm(struct pci_bus *bus)
-{
-}
static inline void pcie_no_aspm(void)
{
}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9603094..8a0321a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -29,6 +29,7 @@
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/resource_ext.h>
#include <uapi/linux/pci.h>
#include <linux/pci_ids.h>
@@ -177,6 +178,8 @@ enum pci_dev_flags {
PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
/* Do not use bus resets for device */
PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
+ /* Do not use PM reset even if device advertises NoSoftRst- */
+ PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
};
enum pci_irq_reroute_variant {
@@ -352,6 +355,7 @@ struct pci_dev {
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
+ unsigned int has_secondary_link:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -397,18 +401,13 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
return (pdev->error_state != pci_channel_io_normal);
}
-struct pci_host_bridge_window {
- struct list_head list;
- struct resource *res; /* host bridge aperture (CPU address) */
- resource_size_t offset; /* bus address + offset = CPU address */
-};
-
struct pci_host_bridge {
struct device dev;
struct pci_bus *bus; /* root bus */
- struct list_head windows; /* pci_host_bridge_windows */
+ struct list_head windows; /* resource_entry */
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
+ unsigned int ignore_reset_delay:1; /* for entire hierarchy */
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@ -513,6 +512,9 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
return dev->bus->self;
}
+struct device *pci_get_host_bridge_device(struct pci_dev *dev);
+void pci_put_host_bridge_device(struct device *dev);
+
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
@@ -562,6 +564,7 @@ static inline int pcibios_err_to_errno(int err)
/* Low-level architecture-dependent routines */
struct pci_ops {
+ void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};
@@ -575,9 +578,15 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 val);
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+typedef u64 pci_bus_addr_t;
+#else
+typedef u32 pci_bus_addr_t;
+#endif
+
struct pci_bus_region {
- dma_addr_t start;
- dma_addr_t end;
+ pci_bus_addr_t start;
+ pci_bus_addr_t end;
};
struct pci_dynids {
@@ -771,8 +780,6 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
-struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
@@ -859,6 +866,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
int where, u32 val);
+
+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val);
+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val);
+
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
@@ -962,7 +979,6 @@ void pci_intx(struct pci_dev *dev, int enable);
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
-void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
@@ -994,6 +1010,7 @@ int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
+void pci_ignore_hotplug(struct pci_dev *dev);
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
@@ -1031,11 +1048,6 @@ bool pci_dev_run_wake(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
-static inline void pci_ignore_hotplug(struct pci_dev *dev)
-{
- dev->ignore_hotplug = 1;
-}
-
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool enable)
{
@@ -1116,7 +1128,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
-static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
+static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
struct pci_bus_region region;
@@ -1166,6 +1178,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type);
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
@@ -1184,15 +1197,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-enum pci_dma_burst_strategy {
- PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
- strategy_parameter is N/A */
- PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
- byte boundaries */
- PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
- strategy_parameter byte boundaries */
-};
-
struct msix_entry {
u32 vector; /* kernel uses to write allocated vector */
u16 entry; /* driver uses to specify entry, OS writes */
@@ -1417,8 +1421,6 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
-#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
-
static inline void pci_block_cfg_access(struct pci_dev *dev) { }
static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
{ return 0; }
@@ -1661,13 +1663,25 @@ int pci_ext_cfg_avail(void);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
+int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
+int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
+
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
+resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
#else
+static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
+{
+ return -ENOSYS;
+}
+static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
+{
+ return -ENOSYS;
+}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
@@ -1678,6 +1692,8 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
+static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
+{ return 0; }
#endif
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
@@ -1850,6 +1866,8 @@ static inline void pci_set_of_node(struct pci_dev *dev) { }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
#endif /* CONFIG_OF */
#ifdef CONFIG_EEH
@@ -1876,4 +1894,15 @@ static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
+
+/**
+ * pci_ari_enabled - query ARI forwarding status
+ * @bus: the PCI bus
+ *
+ * Returns true if ARI forwarding is enabled.
+ */
+static inline bool pci_ari_enabled(struct pci_bus *bus)
+{
+ return bus->self && bus->self->ari_enabled;
+}
#endif /* LINUX_PCI_H */
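
The new ops->map_bus hook exists so host controller drivers with memory-mapped config space can reuse the generic accessors added above instead of open-coding read/write. A hedged sketch; demo_cfg_base and the ECAM-style address encoding are assumptions for illustration:

static void __iomem *demo_cfg_base;     /* ioremapped config window (assumed) */

static void __iomem *demo_map_bus(struct pci_bus *bus, unsigned int devfn,
                                  int where)
{
        return demo_cfg_base + (bus->number << 20) + (devfn << 12) + where;
}

static struct pci_ops demo_pci_ops = {
        .map_bus        = demo_map_bus,
        .read           = pci_generic_config_read,
        .write          = pci_generic_config_write,
};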
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index e63c02a..fcff8f8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -579,6 +579,7 @@
#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800
#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b
#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c
+#define PCI_DEVICE_ID_AMD_KERNCZ_SMBUS 0x790b
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
@@ -2315,6 +2316,8 @@
#define PCI_VENDOR_ID_CENATEK 0x16CA
#define PCI_DEVICE_ID_CENATEK_IDE 0x0001
+#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
@@ -2327,6 +2330,8 @@
#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+
#define PCI_VENDOR_ID_BELKIN 0x1799
#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
@@ -2539,10 +2544,6 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
-#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
-#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
-#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
-#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index b433764..12c9b48 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,8 +128,22 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
static inline bool __ref_is_percpu(struct percpu_ref *ref,
unsigned long __percpu **percpu_countp)
{
- /* paired with smp_store_release() in percpu_ref_reinit() */
- unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+ unsigned long percpu_ptr;
+
+ /*
+ * The value of @ref->percpu_count_ptr is tested for
+ * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
+ * used as a pointer. If the compiler generates a separate fetch
+ * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
+ * between contaminating the pointer value, meaning that
+ * ACCESS_ONCE() is required when fetching it.
+ *
+ * Also, we need a data dependency barrier to be paired with
+ * smp_store_release() in __percpu_ref_switch_to_percpu().
+ *
+ * Use lockless deref which contains both.
+ */
+ percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
/*
* Theoretically, the following could test just ATOMIC; however,
@@ -233,7 +247,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_inc(*percpu_count);
ret = true;
- } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+ } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
ret = atomic_long_inc_not_zero(&ref->count);
}
@@ -281,6 +295,20 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
}
/**
+ * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref is dying or dead.
+ *
+ * This function is safe to call as long as @ref is between init and exit
+ * and the caller is responsible for synchronizing against state changes.
+ */
+static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
+{
+ return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
+}
+
+/**
* percpu_ref_is_zero - test whether a percpu refcount reached zero
* @ref: percpu_ref to test
*
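
percpu_ref_is_dying() is only a hint unless the caller synchronizes against kill/reinit, so it works best as a cheap early-out before the real tryget. A hedged sketch with an invented context type:

struct demo_ctx {
        struct percpu_ref       ref;
};

static int demo_submit(struct demo_ctx *ctx)
{
        /* cheap early-out; only authoritative under the caller's own locking */
        if (percpu_ref_is_dying(&ctx->ref))
                return -ENODEV;

        if (!percpu_ref_tryget_live(&ctx->ref))
                return -ENODEV;

        /* ... issue the work ... */

        percpu_ref_put(&ctx->ref);
        return 0;
}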
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 50e5009..84a1094 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+ return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
return 0;
}
+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+ return percpu_counter_compare(fbc, rhs);
+}
+
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
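
The batch-aware compare lets a caller widen the imprecision window before the comparison falls back to an exact per-cpu sum. A hedged sketch; the limit check and the 4 * percpu_counter_batch value are illustrative only:

static bool demo_over_limit(struct percpu_counter *used, s64 limit)
{
        /*
         * Tolerate roughly batch * num_online_cpus() of per-cpu drift
         * before an exact __percpu_counter_sum() is forced.
         */
        return __percpu_counter_compare(used, limit,
                                        4 * percpu_counter_batch) >= 0;
}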
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4f7a61c..2027809 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -53,6 +53,7 @@ struct perf_guest_info_callbacks {
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/workqueue.h>
+#include <linux/cgroup.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -91,8 +92,6 @@ struct hw_perf_event_extra {
int idx; /* index in shared_regs->regs[] */
};
-struct event_constraint;
-
/**
* struct hw_perf_event - performance event hardware details:
*/
@@ -111,17 +110,24 @@ struct hw_perf_event {
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
-
- struct event_constraint *constraint;
};
struct { /* software */
struct hrtimer hrtimer;
};
struct { /* tracepoint */
- struct task_struct *tp_target;
/* for tp_event->class */
struct list_head tp_list;
};
+ struct { /* intel_cqm */
+ int cqm_state;
+ u32 cqm_rmid;
+ struct list_head cqm_events_entry;
+ struct list_head cqm_groups_entry;
+ struct list_head cqm_group_entry;
+ };
+ struct { /* itrace */
+ int itrace_started;
+ };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
/*
@@ -129,12 +135,12 @@ struct hw_perf_event {
* problem hw_breakpoint has with context
 * creation and event initialization.
*/
- struct task_struct *bp_target;
struct arch_hw_breakpoint info;
struct list_head bp_list;
};
#endif
};
+ struct task_struct *target;
int state;
local64_t prev_count;
u64 sample_period;
@@ -166,6 +172,11 @@ struct perf_event;
* pmu::capabilities flags
*/
#define PERF_PMU_CAP_NO_INTERRUPT 0x01
+#define PERF_PMU_CAP_NO_NMI 0x02
+#define PERF_PMU_CAP_AUX_NO_SG 0x04
+#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
+#define PERF_PMU_CAP_EXCLUSIVE 0x10
+#define PERF_PMU_CAP_ITRACE 0x20
/**
* struct pmu - generic performance monitoring unit
@@ -186,6 +197,7 @@ struct pmu {
int * __percpu pmu_disable_count;
struct perf_cpu_context * __percpu pmu_cpu_context;
+ atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -202,6 +214,13 @@ struct pmu {
*/
int (*event_init) (struct perf_event *event);
+ /*
+ * Notification that the event was mapped or unmapped. Called
+ * in the context of the mapping task.
+ */
+ void (*event_mapped) (struct perf_event *event); /*optional*/
+ void (*event_unmapped) (struct perf_event *event); /*optional*/
+
#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
@@ -255,9 +274,37 @@ struct pmu {
int (*event_idx) (struct perf_event *event); /*optional */
/*
- * flush branch stack on context-switches (needed in cpu-wide mode)
+ * context-switches callback
+ */
+ void (*sched_task) (struct perf_event_context *ctx,
+ bool sched_in);
+ /*
+ * PMU specific data size
*/
- void (*flush_branch_stack) (void);
+ size_t task_ctx_size;
+
+
+ /*
+ * Return the count value for a counter.
+ */
+ u64 (*count) (struct perf_event *event); /*optional*/
+
+ /*
+ * Set up pmu-private data structures for an AUX area
+ */
+ void *(*setup_aux) (int cpu, void **pages,
+ int nr_pages, bool overwrite);
+ /* optional */
+
+ /*
+ * Free pmu-private AUX data structures
+ */
+ void (*free_aux) (void *aux); /* optional */
+
+ /*
+ * Filter events for PMU-specific reasons.
+ */
+ int (*filter_match) (struct perf_event *event); /* optional */
};
/**
@@ -293,6 +340,7 @@ struct swevent_hlist {
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
+#define PERF_ATTACH_TASK_DATA 0x08
struct perf_cgroup;
struct ring_buffer;
@@ -431,11 +479,12 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
+ u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
#ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call *tp_event;
+ struct trace_event_call *tp_event;
struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
@@ -450,11 +499,6 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */
};
-enum perf_event_context_type {
- task_context,
- cpu_context,
-};
-
/**
* struct perf_event_context - event context structure
*
@@ -462,7 +506,6 @@ enum perf_event_context_type {
*/
struct perf_event_context {
struct pmu *pmu;
- enum perf_event_context_type type;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -475,6 +518,7 @@ struct perf_event_context {
*/
struct mutex mutex;
+ struct list_head active_ctx_list;
struct list_head pinned_groups;
struct list_head flexible_groups;
struct list_head event_list;
@@ -502,7 +546,7 @@ struct perf_event_context {
u64 generation;
int pin_count;
int nr_cgroups; /* cgroup evts */
- int nr_branch_stack; /* branch_stack evt */
+ void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
struct delayed_work orphans_remove;
@@ -523,9 +567,12 @@ struct perf_cpu_context {
struct perf_event_context *task_ctx;
int active_oncpu;
int exclusive;
+
+ raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
- struct list_head rotation_list;
+ unsigned int hrtimer_active;
+
struct pmu *unique_pmu;
struct perf_cgroup *cgrp;
};
@@ -535,12 +582,52 @@ struct perf_output_handle {
struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
- void *addr;
+ union {
+ void *addr;
+ unsigned long head;
+ };
int page;
};
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
+};
+
+struct perf_cgroup {
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info __percpu *info;
+};
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+ return container_of(task_css(task, perf_event_cgrp_id),
+ struct perf_cgroup, css);
+}
+#endif /* CONFIG_CGROUP_PERF */
+
#ifdef CONFIG_PERF_EVENTS
+extern void *perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event);
+extern void perf_aux_output_end(struct perf_output_handle *handle,
+ unsigned long size, bool truncated);
+extern int perf_aux_output_skip(struct perf_output_handle *handle,
+ unsigned long size);
+extern void *perf_get_aux(struct perf_output_handle *handle);
+
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -557,6 +644,8 @@ extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
+extern void perf_sched_cb_dec(struct pmu *pmu);
+extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
@@ -650,6 +739,22 @@ extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
+extern void perf_event_output(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
+extern void
+perf_event_header__init_id(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event);
+extern void
+perf_event__output_id_sample(struct perf_event *event,
+ struct perf_output_handle *handle,
+ struct perf_sample_data *sample);
+
+extern void
+perf_log_lost_samples(struct perf_event *event, u64 lost);
+
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
@@ -665,6 +770,7 @@ static inline int is_software_event(struct perf_event *event)
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
@@ -689,35 +795,73 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
- struct pt_regs hot_regs;
+ if (static_key_false(&perf_swevent_enabled[event_id]))
+ __perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
+/*
+ * 'Special' version for the scheduler; it hard-assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
if (static_key_false(&perf_swevent_enabled[event_id])) {
- if (!regs) {
- perf_fetch_caller_regs(&hot_regs);
- regs = &hot_regs;
- }
- __perf_sw_event(event_id, nr, regs, addr);
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(event_id, nr, regs, addr);
}
}
extern struct static_key_deferred perf_sched_events;
+static __always_inline bool
+perf_sw_migrate_enabled(void)
+{
+ if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
+ return true;
+ return false;
+}
+
+static inline void perf_event_task_migrate(struct task_struct *task)
+{
+ if (perf_sw_migrate_enabled())
+ task->sched_migrated = 1;
+}
+
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
+
+ if (perf_sw_migrate_enabled() && task->sched_migrated) {
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+ task->sched_migrated = 0;
+ }
}
static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
- perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+ perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_out(prev, next);
}
+static inline u64 __perf_event_count(struct perf_event *event)
+{
+ return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
+
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -787,6 +931,16 @@ static inline bool has_branch_stack(struct perf_event *event)
return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}
+static inline bool needs_branch_stack(struct perf_event *event)
+{
+ return event->attr.branch_sample_type != 0;
+}
+
+static inline bool has_aux(struct perf_event *event)
+{
+ return event->pmu->setup_aux;
+}
+
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
@@ -802,6 +956,19 @@ extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
+static inline void *
+perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event) { return NULL; }
+static inline void
+perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ bool truncated) { }
+static inline int
+perf_aux_output_skip(struct perf_output_handle *handle,
+ unsigned long size) { return -EINVAL; }
+static inline void *
+perf_get_aux(struct perf_output_handle *handle) { return NULL; }
+static inline void
+perf_event_task_migrate(struct task_struct *task) { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task) { }
@@ -823,6 +990,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
+static inline void
perf_bp_event(struct perf_event *event, void *data) { }
static inline int perf_register_guest_info_callbacks
@@ -899,12 +1068,22 @@ struct perf_pmu_events_attr {
const char *event_str;
};
+ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *page);
+
#define PMU_EVENT_ATTR(_name, _var, _id, _show) \
static struct perf_pmu_events_attr _var = { \
.attr = __ATTR(_name, 0444, _show, NULL), \
.id = _id, \
};
+#define PMU_EVENT_ATTR_STRING(_name, _var, _str) \
+static struct perf_pmu_events_attr _var = { \
+ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = _str, \
+};
+
#define PMU_FORMAT_ATTR(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
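
For illustration only (not part of this patch): with the new PMU_EVENT_ATTR_STRING() helper and the perf_event_sysfs_show() declaration added above, a PMU driver can expose a string-valued event alias in sysfs roughly as sketched below. The event name and encoding are invented.

#include <linux/perf_event.h>
#include <linux/sysfs.h>

/* Sketch: defines an "events/example_cycles" attribute printing "event=0x3c". */
PMU_EVENT_ATTR_STRING(example_cycles, evattr_example_cycles, "event=0x3c");

static struct attribute *example_pmu_events_attrs[] = {
	&evattr_example_cycles.attr.attr,
	NULL,
};

static struct attribute_group example_pmu_events_group = {
	.name  = "events",
	.attrs = example_pmu_events_attrs,
};
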
diff --git a/include/linux/personality.h b/include/linux/personality.h
index 646c0a7..aeb7892 100644
--- a/include/linux/personality.h
+++ b/include/linux/personality.h
@@ -3,52 +3,14 @@
#include <uapi/linux/personality.h>
-
-/*
- * Handling of different ABIs (personalities).
- */
-
-struct exec_domain;
-struct pt_regs;
-
-extern int register_exec_domain(struct exec_domain *);
-extern int unregister_exec_domain(struct exec_domain *);
-extern int __set_personality(unsigned int);
-
-
-/*
- * Description of an execution domain.
- *
- * The first two members are refernced from assembly source
- * and should stay where they are unless explicitly needed.
- */
-typedef void (*handler_t)(int, struct pt_regs *);
-
-struct exec_domain {
- const char *name; /* name of the execdomain */
- handler_t handler; /* handler for syscalls */
- unsigned char pers_low; /* lowest personality */
- unsigned char pers_high; /* highest personality */
- unsigned long *signal_map; /* signal mapping */
- unsigned long *signal_invmap; /* reverse signal mapping */
- struct map_segment *err_map; /* error mapping */
- struct map_segment *socktype_map; /* socket type mapping */
- struct map_segment *sockopt_map; /* socket option mapping */
- struct map_segment *af_map; /* address family mapping */
- struct module *module; /* module context of the ed. */
- struct exec_domain *next; /* linked list (internal) */
-};
-
/*
* Return the base personality without flags.
*/
#define personality(pers) (pers & PER_MASK)
-
/*
* Change personality of the currently running process.
*/
-#define set_personality(pers) \
- ((current->personality == (pers)) ? 0 : __set_personality(pers))
+#define set_personality(pers) (current->personality = (pers))
#endif /* _LINUX_PERSONALITY_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 22af8f8..a26c3f8 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -181,6 +181,9 @@ struct mii_bus {
/* PHY addresses to be ignored when probing */
u32 phy_mask;
+ /* PHY addresses to ignore the TA/read failure */
+ u32 phy_ignore_ta_mask;
+
/*
* Pointer to an array of interrupts, each PHY's
* interrupt at the index matching its address
@@ -327,6 +330,8 @@ struct phy_c45_device_ids {
 * c45_ids: 802.3-c45 Device Identifiers if is_c45.
* is_c45: Set to true if this phy uses clause 45 addressing.
* is_internal: Set to true if this phy is internal to a MAC.
+ * has_fixups: Set to true if this phy has fixups/quirks.
+ * suspended: Set to true if this phy has been suspended successfully.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
* addr: Bus address of PHY
@@ -364,6 +369,7 @@ struct phy_device {
bool is_c45;
bool is_internal;
bool has_fixups;
+ bool suspended;
enum phy_state state;
@@ -565,6 +571,15 @@ struct phy_driver {
void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
int devnum, int regnum, u32 val);
+ /* Get the size and type of the eeprom contained within a plug-in
+ * module */
+ int (*module_info)(struct phy_device *dev,
+ struct ethtool_modinfo *modinfo);
+
+ /* Get the eeprom information from the plug-in module */
+ int (*module_eeprom)(struct phy_device *dev,
+ struct ethtool_eeprom *ee, u8 *data);
+
struct device_driver driver;
};
#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
@@ -663,6 +678,17 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
+ * is RGMII (all variants)
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
+{
+ return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+ phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
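
For illustration only (not part of this patch): a MAC driver could use the new phy_interface_is_rgmii() helper to cover all RGMII variants in one check; the function name below is invented.

#include <linux/phy.h>

/* Sketch: decide whether MAC-side RGMII delay programming is needed. */
static bool example_mac_needs_rgmii_setup(struct phy_device *phydev)
{
	/*
	 * Covers PHY_INTERFACE_MODE_RGMII, _RGMII_ID, _RGMII_RXID and
	 * _RGMII_TXID without open-coding a check for each variant.
	 */
	return phy_interface_is_rgmii(phydev);
}
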
+/**
* phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
* @phydev: The phy_device struct
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
new file mode 100644
index 0000000..9d18e9f
--- /dev/null
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef PHY_QCOM_UFS_H_
+#define PHY_QCOM_UFS_H_
+
+#include "phy.h"
+
+/**
+ * ufs_qcom_phy_enable_ref_clk() - Enable the phy
+ * ref clock.
+ * @phy: reference to a generic phy
+ *
+ * returns 0 for success, and non-zero for error.
+ */
+int ufs_qcom_phy_enable_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_disable_ref_clk() - Disable the phy
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_disable_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
+
+int ufs_qcom_phy_enable_iface_clk(struct phy *phy);
+void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
+int ufs_qcom_phy_start_serdes(struct phy *phy);
+int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
+int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
+int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
+void ufs_qcom_phy_save_controller_version(struct phy *phy,
+ u8 major, u16 minor, u16 step);
+
+#endif /* PHY_QCOM_UFS_H_ */
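
A hedged, hypothetical sketch (not taken from this patch) of one plausible order in which a UFS host controller driver might call the new helpers; the real ufs-qcom driver decides the exact sequencing and error handling.

#include <linux/phy/phy-qcom-ufs.h>

/* Sketch only: calibrate, start the SerDes, enable TX lane 0, then poll PCS. */
static int example_ufs_phy_bringup(struct phy *phy, bool is_rate_B)
{
	int err;

	err = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
	if (err)
		return err;

	err = ufs_qcom_phy_start_serdes(phy);
	if (err)
		return err;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, 0x1);	/* lane 0 only */
	if (err)
		return err;

	return ufs_qcom_phy_is_pcs_ready(phy);
}
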
diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h
new file mode 100644
index 0000000..50aed92
--- /dev/null
+++ b/include/linux/phy/phy-sun4i-usb.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PHY_SUN4I_USB_H_
+#define PHY_SUN4I_USB_H_
+
+#include "phy.h"
+
+/**
+ * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect
+ * @phy: reference to a sun4i usb phy
+ * @enabled: whether to enable or disable squelch detect
+ */
+void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled);
+
+#endif
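
For illustration only (not part of this patch): a USB controller glue driver could drop squelch detection around a reset window and then restore it; the function name below is invented.

#include <linux/phy/phy-sun4i-usb.h>

/* Sketch: suppress squelch detect while the controller resets, then re-enable. */
static void example_usb_handle_reset(struct phy *usb_phy)
{
	sun4i_usb_phy_set_squelch_detect(usb_phy, false);
	/* ... controller-side reset happens here ... */
	sun4i_usb_phy_set_squelch_detect(usb_phy, true);
}
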
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index a0197fa..8cf05e3 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -133,6 +133,8 @@ struct phy *devm_phy_get(struct device *dev, const char *string);
struct phy *devm_phy_optional_get(struct device *dev, const char *string);
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id);
+struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
+ int index);
void phy_put(struct phy *phy);
void devm_phy_put(struct device *dev, struct phy *phy);
struct phy *of_phy_get(struct device_node *np, const char *con_id);
@@ -261,6 +263,13 @@ static inline struct phy *devm_of_phy_get(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
+ struct device_node *np,
+ int index)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void phy_put(struct phy *phy)
{
}
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 7e75bfe..fe5732d 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
+extern int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status)
@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
+static inline int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
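
For illustration only (not part of this patch): a switch or MAC driver that tracks link state in hardware registers could push that state into a fixed PHY with the new fixed_phy_update_state(); the values below are invented.

#include <linux/phy.h>
#include <linux/phy_fixed.h>

/* Sketch: report a 1 Gbit/s full-duplex link (or link down) to the fixed PHY. */
static void example_push_link_state(struct phy_device *phydev, bool link_up)
{
	struct fixed_phy_status status = {
		.link   = link_up,
		.speed  = 1000,
		.duplex = DUPLEX_FULL,
	};
	struct fixed_phy_status changed = {
		.link   = 1,	/* fields flagged here are taken from @status */
		.speed  = 1,
		.duplex = 1,
	};

	fixed_phy_update_state(phydev, &status, &changed);
}
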
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index b9cf6c5..918b117 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -19,7 +19,7 @@ struct pidmap {
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
#define PIDMAP_ENTRIES ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE)
-struct bsd_acct_struct;
+struct fs_pin;
struct pid_namespace {
struct kref kref;
@@ -37,7 +37,7 @@ struct pid_namespace {
struct dentry *proc_thread_self;
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
- struct bsd_acct_struct *bacct;
+ struct fs_pin *bacct;
#endif
struct user_namespace *user_ns;
struct work_struct proc_work;
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 18eccef..d7e5d60 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -142,7 +142,7 @@ static inline struct pinctrl * __must_check pinctrl_get_select(
s = pinctrl_lookup_state(p, name);
if (IS_ERR(s)) {
pinctrl_put(p);
- return ERR_PTR(PTR_ERR(s));
+ return ERR_CAST(s);
}
ret = pinctrl_select_state(p, s);
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index d578a60..fe65962 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -115,6 +115,18 @@ enum pin_config_param {
PIN_CONFIG_END = 0x7FFF,
};
+#ifdef CONFIG_DEBUG_FS
+#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
+ .has_arg = d }
+
+struct pin_config_item {
+ const enum pin_config_param param;
+ const char * const display;
+ const char * const format;
+ bool has_arg;
+};
+#endif /* CONFIG_DEBUG_FS */
+
/*
* Helpful configuration macro to be used in tables etc.
*/
@@ -150,6 +162,12 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param
struct pinctrl_dev;
struct pinctrl_map;
+struct pinconf_generic_params {
+ const char * const property;
+ enum pin_config_param param;
+ u32 default_value;
+};
+
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
unsigned *reserved_maps, unsigned *num_maps,
@@ -174,6 +192,17 @@ static inline int pinconf_generic_dt_node_to_map_pin(
PIN_MAP_TYPE_CONFIGS_PIN);
}
+static inline int pinconf_generic_dt_node_to_map_all(
+ struct pinctrl_dev *pctldev, struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ /*
+ * passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser
+ * to infer the map type from the DT properties used.
+ */
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_INVALID);
+}
#endif
#endif /* CONFIG_GENERIC_PINCONF */
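
For illustration only (not part of this patch, and assuming CONFIG_OF plus CONFIG_GENERIC_PINCONF): a pin controller driver can let the generic parser infer the map type per DT node by wiring the new helper into its pinctrl_ops.

#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>

/* Sketch: group callbacks omitted; only the DT parsing hooks are shown. */
static const struct pinctrl_ops example_pctl_ops = {
	/* .get_groups_count / .get_group_name / .get_group_pins go here */
	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
	.dt_free_map    = pinconf_generic_dt_free_map,
};
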
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index cc8e1af..9ba59fc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -24,6 +24,7 @@ struct pinctrl_dev;
struct pinctrl_map;
struct pinmux_ops;
struct pinconf_ops;
+struct pin_config_item;
struct gpio_chip;
struct device_node;
@@ -117,15 +118,26 @@ struct pinctrl_ops {
* @confops: pin config operations vtable, if you support pin configuration in
* your driver
* @owner: module providing the pin controller, used for refcounting
+ * @num_custom_params: Number of driver-specific custom parameters to be parsed
+ * from the hardware description
+ * @custom_params: List of driver-specific custom parameters to be parsed from
+ * the hardware description
+ * @custom_conf_items: Information on how to print @custom_params in debugfs;
+ * must be the same size as @custom_params, i.e. @num_custom_params entries
*/
struct pinctrl_desc {
const char *name;
- struct pinctrl_pin_desc const *pins;
+ const struct pinctrl_pin_desc *pins;
unsigned int npins;
const struct pinctrl_ops *pctlops;
const struct pinmux_ops *pmxops;
const struct pinconf_ops *confops;
struct module *owner;
+#ifdef CONFIG_GENERIC_PINCONF
+ unsigned int num_custom_params;
+ const struct pinconf_generic_params *custom_params;
+ const struct pin_config_item *custom_conf_items;
+#endif
};
/* External interface to pin controller */
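
For illustration only (not part of this patch; the property, parameter number and values are invented): a driver could describe one vendor-specific pin configuration parameter through the new custom_params/custom_conf_items hooks like this.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>

/* Hypothetical vendor parameter, numbered above the generic PIN_CONFIG_END. */
#define EXAMPLE_PIN_CONFIG_GLITCH_FILTER	(PIN_CONFIG_END + 1)

static const struct pinconf_generic_params example_custom_params[] = {
	{ "vendor,glitch-filter-ns", EXAMPLE_PIN_CONFIG_GLITCH_FILTER, 0 },
};

#ifdef CONFIG_DEBUG_FS
static const struct pin_config_item example_custom_conf_items[] = {
	PCONFDUMP(EXAMPLE_PIN_CONFIG_GLITCH_FILTER, "glitch filter", "ns", true),
};
#endif

static struct pinctrl_desc example_desc = {
	.name  = "example-pinctrl",
	.owner = THIS_MODULE,
	/* .pins, .npins and the ops vtables are omitted in this sketch */
#ifdef CONFIG_GENERIC_PINCONF
	.num_custom_params = ARRAY_SIZE(example_custom_params),
	.custom_params     = example_custom_params,
#ifdef CONFIG_DEBUG_FS
	.custom_conf_items = example_custom_conf_items,
#endif
#endif
};
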
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 511bda9..ace60d7 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -56,6 +56,9 @@ struct pinctrl_dev;
* depending on whether the GPIO is configured as input or output,
* a direction selector function may be implemented as a backing
* to the GPIO controllers that need pin muxing.
+ * @strict: do not allow simultaneous use of the same pin for GPIO and another
+ * function. Check both gpio_owner and mux_owner strictly before approving
+ * the pin request.
*/
struct pinmux_ops {
int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
@@ -66,7 +69,7 @@ struct pinmux_ops {
int (*get_function_groups) (struct pinctrl_dev *pctldev,
unsigned selector,
const char * const **groups,
- unsigned * const num_groups);
+ unsigned *num_groups);
int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
unsigned group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
@@ -79,6 +82,7 @@ struct pinmux_ops {
struct pinctrl_gpio_range *range,
unsigned offset,
bool input);
+ bool strict;
};
#endif /* CONFIG_PINMUX */
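
For illustration only (not part of this patch): a controller that must never hand the same pin to both GPIO and a mux function just sets the new flag in its pinmux_ops.

#include <linux/pinctrl/pinmux.h>

/* Sketch: mandatory callbacks omitted; only the new strict flag is shown. */
static const struct pinmux_ops example_pmx_ops = {
	/* .get_functions_count / .get_function_name / .set_mux etc. go here */
	.strict = true,
};
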
diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
new file mode 100644
index 0000000..9882937
--- /dev/null
+++ b/include/linux/platform_data/bfin_rotary.h
@@ -0,0 +1,117 @@
+/*
+ * Board initialization should put one of these structures into platform_data
+ * and register the bfin-rotary device on the platform bus under the name "bfin-rotary".
+ *
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _BFIN_ROTARY_H
+#define _BFIN_ROTARY_H
+
+/* mode bitmasks */
+#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */
+#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */
+#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */
+#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */
+
+#define ROT_DEBE DEBE /* Debounce Enable */
+
+#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */
+#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */
+#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */
+
+struct bfin_rotary_platform_data {
+ /* set rotary UP to KEY_### or BTN_### if you prefer
+ * bfin-rotary to send EV_KEY events, otherwise set 0
+ */
+ unsigned int rotary_up_key;
+ /* set rotary DOWN to KEY_### or BTN_### if you prefer
+ * bfin-rotary to send EV_KEY events, otherwise set 0
+ */
+ unsigned int rotary_down_key;
+ /* set rotary BUTTON to KEY_### or BTN_### */
+ unsigned int rotary_button_key;
+ /* set the rotary relative axis to REL_### if you prefer
+ * bfin-rotary to send EV_REL events, otherwise set 0
+ */
+ unsigned int rotary_rel_code;
+ unsigned short debounce; /* 0..17 */
+ unsigned short mode;
+ unsigned short pm_wakeup;
+ unsigned short *pin_list;
+};
+
+/* CNT_CONFIG bitmasks */
+#define CNTE (1 << 0) /* Counter Enable */
+#define DEBE (1 << 1) /* Debounce Enable */
+#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */
+#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */
+#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */
+#define CNTMODE_SHIFT 8
+#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */
+#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */
+#define BNDMODE_SHIFT 12
+#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */
+#define INPDIS (1 << 15) /* CUG and CDG Input Disable */
+
+#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */
+#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */
+#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */
+#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */
+#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */
+
+#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */
+#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */
+#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */
+#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */
+
+/* CNT_IMASK bitmasks */
+#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */
+#define UCIE (1 << 1) /* Up count Interrupt Enable */
+#define DCIE (1 << 2) /* Down count Interrupt Enable */
+#define MINCIE (1 << 3) /* Min Count Interrupt Enable */
+#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */
+#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */
+#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */
+#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */
+#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */
+#define CZMEIE (1 << 9) /* CZM Error Interrupt Enable */
+#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */
+
+/* CNT_STATUS bitmasks */
+#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */
+#define UCII (1 << 1) /* Up count Interrupt Identifier */
+#define DCII (1 << 2) /* Down count Interrupt Identifier */
+#define MINCII (1 << 3) /* Min Count Interrupt Identifier */
+#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */
+#define COV31II (1 << 5) /* Bit 31 Overflow Interrupt Identifier */
+#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */
+#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */
+#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */
+#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */
+#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */
+
+/* CNT_COMMAND bitmasks */
+#define W1LCNT 0xf /* Load Counter Register */
+#define W1LMIN 0xf0 /* Load Min Register */
+#define W1LMAX 0xf00 /* Load Max Register */
+#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */
+
+#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */
+#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */
+#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */
+
+#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */
+#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */
+#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */
+
+#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */
+#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */
+#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */
+
+/* CNT_DEBOUNCE bitmasks */
+#define DPRESCALE 0xf /* Debounce prescale */
+
+#endif
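
For illustration only (not part of this patch; the key codes and debounce value are invented): a board file could feed bfin-rotary a quadrature-encoder configuration like this.

#include <linux/input.h>
#include <linux/platform_data/bfin_rotary.h>

/* Sketch: send relative wheel events, debounce enabled, button on BTN_0. */
static struct bfin_rotary_platform_data example_rotary_pdata = {
	.rotary_rel_code   = REL_WHEEL,
	.rotary_button_key = BTN_0,
	.debounce          = 10,	/* 0..17 */
	.mode              = ROT_QUAD_ENC | ROT_DEBE,
};
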
diff --git a/include/linux/platform_data/cpuidle-exynos.h b/include/linux/platform_data/cpuidle-exynos.h
new file mode 100644
index 0000000..bfa40e4
--- /dev/null
+++ b/include/linux/platform_data/cpuidle-exynos.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __CPUIDLE_EXYNOS_H
+#define __CPUIDLE_EXYNOS_H
+
+struct cpuidle_exynos_data {
+ int (*cpu0_enter_aftr)(void);
+ int (*cpu1_powerdown)(void);
+ void (*pre_enter_aftr)(void);
+ void (*post_enter_aftr)(void);
+};
+
+#endif
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index d8155c0..87ac14c 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -13,10 +13,12 @@
#include <linux/device.h>
+#define DW_DMA_MAX_NR_MASTERS 4
+
/**
* struct dw_dma_slave - Controller-specific information about a slave
*
- * @dma_dev: required DMA master device. Depricated.
+ * @dma_dev: required DMA master device
* @src_id: src request line
* @dst_id: dst request line
* @src_master: src master for transfers on allocated channel.
@@ -53,7 +55,7 @@ struct dw_dma_platform_data {
unsigned char chan_priority;
unsigned short block_size;
unsigned char nr_masters;
- unsigned char data_width[4];
+ unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
new file mode 100644
index 0000000..8a1f6a4
--- /dev/null
+++ b/include/linux/platform_data/dma-hsu.h
@@ -0,0 +1,25 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PLATFORM_DATA_DMA_HSU_H
+#define _PLATFORM_DATA_DMA_HSU_H
+
+#include <linux/device.h>
+
+struct hsu_dma_slave {
+ struct device *dma_dev;
+ int chan_id;
+};
+
+struct hsu_dma_platform_data {
+ unsigned short nr_channels;
+};
+
+#endif /* _PLATFORM_DATA_DMA_HSU_H */
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index eabac4e..2d08816 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -48,6 +48,9 @@ struct sdma_script_start_addrs {
s32 ssish_2_mcu_addr;
s32 hdmi_dma_addr;
/* End of v2 array */
+ s32 zcanfd_2_mcu_addr;
+ s32 zqspi_2_mcu_addr;
+ /* End of v3 array */
};
/**
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
index 66574ea..0c72886 100644
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ b/include/linux/platform_data/dma-mmp_tdma.h
@@ -28,6 +28,13 @@ struct sram_platdata {
int granularity;
};
+#ifdef CONFIG_ARM
extern struct gen_pool *sram_get_gpool(char *pool_name);
+#else
+static inline struct gen_pool *sram_get_gpool(char *pool_name)
+{
+ return NULL;
+}
+#endif
#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/platform_data/dma-rcar-audmapp.h b/include/linux/platform_data/dma-rcar-audmapp.h
deleted file mode 100644
index 471fffe..0000000
--- a/include/linux/platform_data/dma-rcar-audmapp.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * This is for Renesas R-Car Audio-DMAC-peri-peri.
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This file is based on the include/linux/sh_dma.h
- *
- * Header for the new SH dmaengine driver
- *
- * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef SH_AUDMAPP_H
-#define SH_AUDMAPP_H
-
-#include <linux/dmaengine.h>
-
-struct audmapp_slave_config {
- int slave_id;
- dma_addr_t src;
- dma_addr_t dst;
- u32 chcr;
-};
-
-struct audmapp_pdata {
- struct audmapp_slave_config *slave;
- int slave_num;
-};
-
-#endif /* SH_AUDMAPP_H */
diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h
new file mode 100644
index 0000000..88b0db7
--- /dev/null
+++ b/include/linux/platform_data/gpio-ath79.h
@@ -0,0 +1,19 @@
+/*
+ * Atheros AR7XXX/AR9XXX GPIO controller platform data
+ *
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H
+#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H
+
+struct ath79_gpio_platform_data {
+ unsigned ngpios;
+ bool oe_inverted;
+};
+
+#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 5d50b25..cb26181 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -208,9 +208,17 @@ struct omap_gpio_platform_data {
int (*get_context_loss_count)(struct device *dev);
};
+#if IS_BUILTIN(CONFIG_GPIO_OMAP)
extern void omap2_gpio_prepare_for_idle(int off_mode);
extern void omap2_gpio_resume_after_idle(void);
-extern void omap_set_gpio_debounce(int gpio, int enable);
-extern void omap_set_gpio_debounce_time(int gpio, int enable);
+#else
+static inline void omap2_gpio_prepare_for_idle(int off_mode)
+{
+}
+
+static inline void omap2_gpio_resume_after_idle(void)
+{
+}
+#endif
#endif
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 67bbcf0..8e981be 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -55,9 +55,6 @@ struct omap_hsmmc_platform_data {
u32 caps; /* Used for the MMC driver on 2430 and later */
u32 pm_caps; /* PM capabilities of the mmc */
- /* switch pin can be for card detect (default) or card cover */
- unsigned cover:1;
-
/* use the internal clock */
unsigned internal_clock:1;
@@ -73,7 +70,8 @@ struct omap_hsmmc_platform_data {
#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
unsigned features;
- int switch_pin; /* gpio (card detect) */
+ int gpio_cd; /* gpio (card detect) */
+ int gpio_cod; /* gpio (cover detect) */
int gpio_wp; /* gpio (write protect) */
int (*set_power)(struct device *dev, int power_on, int vdd);
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
index 2312d19..89fd347 100644
--- a/include/linux/platform_data/i2c-davinci.h
+++ b/include/linux/platform_data/i2c-davinci.h
@@ -18,6 +18,7 @@ struct davinci_i2c_platform_data {
unsigned int bus_delay; /* post-transaction delay (usec) */
unsigned int sda_pin; /* GPIO pin ID to use for SDA */
unsigned int scl_pin; /* GPIO pin ID to use for SCL */
+ bool has_pfunc; /* chip has an ICPFUNC register */
};
/* for board setup code */
diff --git a/include/linux/platform_data/ipmmu-vmsa.h b/include/linux/platform_data/ipmmu-vmsa.h
deleted file mode 100644
index 5275b3a..0000000
--- a/include/linux/platform_data/ipmmu-vmsa.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * IPMMU VMSA Platform Data
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- */
-
-#ifndef __IPMMU_VMSA_H__
-#define __IPMMU_VMSA_H__
-
-struct ipmmu_vmsa_master {
- const char *name;
- unsigned int utlb;
-};
-
-struct ipmmu_vmsa_platform_data {
- const struct ipmmu_vmsa_master *masters;
- unsigned int num_masters;
-};
-
-#endif /* __IPMMU_VMSA_H__ */
diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h
new file mode 100644
index 0000000..38f77b5
--- /dev/null
+++ b/include/linux/platform_data/irda-sa11x0.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/include/asm/mach/irda.h
+ *
+ * Copyright (C) 2004 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_MACH_IRDA_H
+#define __ASM_ARM_MACH_IRDA_H
+
+struct irda_platform_data {
+ int (*startup)(struct device *);
+ void (*shutdown)(struct device *);
+ int (*set_power)(struct device *, unsigned int state);
+ void (*set_speed)(struct device *, unsigned int speed);
+};
+
+#endif
diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h
index 9248e3a..5e3ff65 100644
--- a/include/linux/platform_data/keyboard-spear.h
+++ b/include/linux/platform_data/keyboard-spear.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 ST Microelectronics
- * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ * Rajeev Kumar <rajeevkumar.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/include/linux/platform_data/mmc-msm_sdcc.h b/include/linux/platform_data/mmc-msm_sdcc.h
deleted file mode 100644
index 55aa873..0000000
--- a/include/linux/platform_data/mmc-msm_sdcc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __MMC_MSM_SDCC_H
-#define __MMC_MSM_SDCC_H
-
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-
-struct msm_mmc_gpio {
- unsigned no;
- const char *name;
-};
-
-struct msm_mmc_gpio_data {
- struct msm_mmc_gpio *gpio;
- u8 size;
-};
-
-struct msm_mmc_platform_data {
- unsigned int ocr_mask; /* available voltages */
- u32 (*translate_vdd)(struct device *, unsigned int);
- unsigned int (*status)(struct device *);
- int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
- struct msm_mmc_gpio_data *gpio_data;
- void (*init_card)(struct mmc_card *card);
-};
-
-#endif
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 5c188f4..9294692 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -31,10 +31,6 @@ struct omap_mmc_platform_data {
void (*cleanup)(struct device *dev);
void (*shutdown)(struct device *dev);
- /* To handle board related suspend/resume functionality for MMC */
- int (*suspend)(struct device *dev, int slot);
- int (*resume)(struct device *dev, int slot);
-
/* Return context loss count due to PM states changing */
int (*get_context_loss_count)(struct device *dev);
diff --git a/include/linux/platform_data/msm_serial_hs.h b/include/linux/platform_data/msm_serial_hs.h
deleted file mode 100644
index 98a2046..0000000
--- a/include/linux/platform_data/msm_serial_hs.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- * Author: Nick Pelly <npelly@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MSM_SERIAL_HS_H
-#define __ASM_ARCH_MSM_SERIAL_HS_H
-
-#include <linux/serial_core.h>
-
-/* API to request the uart clock off or on for low power management
- * Clients should call request_clock_off() when no uart data is expected,
- * and must call request_clock_on() before any further uart data can be
- * received. */
-extern void msm_hs_request_clock_off(struct uart_port *uport);
-extern void msm_hs_request_clock_on(struct uart_port *uport);
-
-/**
- * struct msm_serial_hs_platform_data
- * @rx_wakeup_irq: Rx activity irq
- * @rx_to_inject: extra character to be inserted to Rx tty on wakeup
- * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character
- * @exit_lpm_cb: function called before every Tx transaction
- *
- * This is an optional structure required for UART Rx GPIO IRQ based
- * wakeup from low power state. UART wakeup can be triggered by RX activity
- * (using a wakeup GPIO on the UART RX pin). This should only be used if
- * there is not a wakeup GPIO on the UART CTS, and the first RX byte is
- * known (eg., with the Bluetooth Texas Instruments HCILL protocol),
- * since the first RX byte will always be lost. RTS will be asserted even
- * while the UART is clocked off in this mode of operation.
- */
-struct msm_serial_hs_platform_data {
- int rx_wakeup_irq;
- unsigned char inject_rx_on_wakeup;
- char rx_to_inject;
- void (*exit_lpm_cb)(struct uart_port *);
-};
-
-#endif
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
new file mode 100644
index 0000000..ac91707
--- /dev/null
+++ b/include/linux/platform_data/nfcmrvl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#ifndef _NFCMRVL_PTF_H_
+#define _NFCMRVL_PTF_H_
+
+struct nfcmrvl_platform_data {
+ /*
+ * Generic
+ */
+
+ /* GPIO that is wired to RESET_N signal */
+ unsigned int reset_n_io;
+ /* Tell if transport is muxed in HCI one */
+ unsigned int hci_muxed;
+
+ /*
+ * UART specific
+ */
+
+ /* Tell if UART needs flow control at init */
+ unsigned int flow_control;
+ /* Tell if firmware supports break control for power management */
+ unsigned int break_control;
+};
+
+#endif /* _NFCMRVL_PTF_H_ */
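
For illustration only (not part of this patch; the GPIO number is invented): a board could describe a UART-attached Marvell NFC controller like this.

#include <linux/platform_data/nfcmrvl.h>

/* Sketch: HCI-muxed transport, flow control needed at init, no break control. */
static struct nfcmrvl_platform_data example_nfcmrvl_pdata = {
	.reset_n_io    = 16,	/* hypothetical GPIO wired to RESET_N */
	.hci_muxed     = 1,
	.flow_control  = 1,
	.break_control = 0,
};
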
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index 0a6de4c..aed1705 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -27,6 +27,7 @@ enum ntc_thermistor_type {
TYPE_NCPXXWB473,
TYPE_NCPXXWL333,
TYPE_B57330V2103,
+ TYPE_NCPXXWF104,
};
struct ntc_thermistor_platform_data {
diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h
new file mode 100644
index 0000000..d6ed286
--- /dev/null
+++ b/include/linux/platform_data/nxp-nci.h
@@ -0,0 +1,27 @@
+/*
+ * Generic platform data for the NXP NCI NFC chips.
+ *
+ * Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NXP_NCI_H_
+#define _NXP_NCI_H_
+
+struct nxp_nci_nfc_platform_data {
+ unsigned int gpio_en;
+ unsigned int gpio_fw;
+ unsigned int irq;
+};
+
+#endif /* _NXP_NCI_H_ */
diff --git a/include/linux/platform_data/regulator-haptic.h b/include/linux/platform_data/regulator-haptic.h
new file mode 100644
index 0000000..5658e58
--- /dev/null
+++ b/include/linux/platform_data/regulator-haptic.h
@@ -0,0 +1,29 @@
+/*
+ * Regulator Haptic Platform Data
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Hyunhee Kim <hyunhee.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGULATOR_HAPTIC_H
+#define _REGULATOR_HAPTIC_H
+
+/*
+ * struct regulator_haptic_data - Platform device data
+ *
+ * @max_volt: maximum voltage supplied to the haptic motor, in microvolts.
+ * @min_volt: minimum voltage supplied to the haptic motor, in microvolts.
+ */
+struct regulator_haptic_data {
+ unsigned int max_volt;
+ unsigned int min_volt;
+};
+
+#endif /* _REGULATOR_HAPTIC_H */
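
For illustration only (not part of this patch; the voltages are invented): platform data for the regulator-haptic driver, with both limits in microvolts as documented above.

#include <linux/platform_data/regulator-haptic.h>

/* Sketch: drive the haptic motor between 1.1 V and 2.7 V. */
static struct regulator_haptic_data example_haptic_pdata = {
	.min_volt = 1100000,	/* uV */
	.max_volt = 2700000,	/* uV */
};
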
diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h
index 3cc2e3c..a938eba 100644
--- a/include/linux/platform_data/serial-imx.h
+++ b/include/linux/platform_data/serial-imx.h
@@ -20,14 +20,9 @@
#define ASMARM_ARCH_UART_H
#define IMXUART_HAVE_RTSCTS (1<<0)
-#define IMXUART_IRDA (1<<1)
struct imxuart_platform_data {
unsigned int flags;
- void (*irda_enable)(int enable);
- unsigned int irda_inv_rx:1;
- unsigned int irda_inv_tx:1;
- unsigned short transceiver_delay;
};
#endif
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index a947ab8..533d980 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -5,8 +5,6 @@
#ifndef __LINUX_PLATFORM_DATA_SI5351_H__
#define __LINUX_PLATFORM_DATA_SI5351_H__
-struct clk;
-
/**
* enum si5351_pll_src - Si5351 pll clock source
* @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
@@ -107,8 +105,6 @@ struct si5351_clkout_config {
* @clkout: array of clkout configuration
*/
struct si5351_platform_data {
- struct clk *clk_xtal;
- struct clk *clk_clkin;
enum si5351_pll_src pll_src[2];
struct si5351_clkout_config clkout[8];
};
diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h
new file mode 100644
index 0000000..1231e9b
--- /dev/null
+++ b/include/linux/platform_data/sky81452-backlight.h
@@ -0,0 +1,46 @@
+/*
+ * sky81452.h SKY81452 backlight driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SKY81452_BACKLIGHT_H
+#define _SKY81452_BACKLIGHT_H
+
+/**
+ * struct sky81452_platform_data
+ * @name: backlight driver name.
+ * If it is not defined, the default name is lcd-backlight.
+ * @gpio_enable: GPIO number which controls the EN pin
+ * @enable: Enable mask for current sink channels 1, 2, 3, 4, 5 and 6.
+ * @ignore_pwm: true if DPWMI should be ignored.
+ * @dpwm_mode: true if DPWM dimming mode is used, otherwise analog dimming mode.
+ * @phase_shift: true if phase shift mode is used.
+ * @short_detection_threshold: It should be one of 4, 5, 6 and 7 V.
+ * @boost_current_limit: It should be one of 2300 and 2750 mA.
+ */
+struct sky81452_bl_platform_data {
+ const char *name;
+ int gpio_enable;
+ unsigned int enable;
+ bool ignore_pwm;
+ bool dpwm_mode;
+ bool phase_shift;
+ unsigned int short_detection_threshold;
+ unsigned int boost_current_limit;
+};
+
+#endif
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st-nci.h
index c3b432f..d9d400a 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st-nci.h
@@ -1,7 +1,7 @@
/*
- * Driver include for the ST21NFCB NFC chip.
+ * Driver include for ST NCI NFC chip family.
*
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -16,16 +16,14 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _ST21NFCB_NCI_H_
-#define _ST21NFCB_NCI_H_
+#ifndef _ST_NCI_H_
+#define _ST_NCI_H_
-#include <linux/i2c.h>
+#define ST_NCI_DRIVER_NAME "st_nci"
-#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
-
-struct st21nfcb_nfc_platform_data {
+struct st_nci_nfc_platform_data {
unsigned int gpio_reset;
unsigned int irq_polarity;
};
-#endif /* _ST21NFCA_HCI_H_ */
+#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
index 5087fff..cc2bdaf 100644
--- a/include/linux/platform_data/st21nfca.h
+++ b/include/linux/platform_data/st21nfca.h
@@ -26,6 +26,8 @@
struct st21nfca_nfc_platform_data {
unsigned int gpio_ena;
unsigned int irq_polarity;
+ bool is_ese_present;
+ bool is_uicc_present;
};
#endif /* _ST21NFCA_HCI_H_ */
diff --git a/include/linux/platform_data/irq-renesas-irqc.h b/include/linux/platform_data/st33zp24.h
index 3ae17b3..817dfdb 100644
--- a/include/linux/platform_data/irq-renesas-irqc.h
+++ b/include/linux/platform_data/st33zp24.h
@@ -1,11 +1,11 @@
/*
- * Renesas IRQC Driver
- *
- * Copyright (C) 2013 Magnus Damm
+ * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
+ * Copyright (C) 2009 - 2015 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -13,15 +13,16 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#ifndef __ST33ZP24_H__
+#define __ST33ZP24_H__
-#ifndef __IRQ_RENESAS_IRQC_H__
-#define __IRQ_RENESAS_IRQC_H__
+#define TPM_ST33_I2C "st33zp24-i2c"
+#define TPM_ST33_SPI "st33zp24-spi"
-struct renesas_irqc_config {
- unsigned int irq_base;
+struct st33zp24_platform_data {
+ int io_lpcpd;
};
-#endif /* __IRQ_RENESAS_IRQC_H__ */
+#endif /* __ST33ZP24_H__ */
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
new file mode 100644
index 0000000..d9d400a
--- /dev/null
+++ b/include/linux/platform_data/st_nci.h
@@ -0,0 +1,29 @@
+/*
+ * Driver include for ST NCI NFC chip family.
+ *
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ST_NCI_H_
+#define _ST_NCI_H_
+
+#define ST_NCI_DRIVER_NAME "st_nci"
+
+struct st_nci_nfc_platform_data {
+ unsigned int gpio_reset;
+ unsigned int irq_polarity;
+};
+
+#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/usb-rcar-gen2-phy.h b/include/linux/platform_data/usb-rcar-gen2-phy.h
deleted file mode 100644
index dd3ba46..0000000
--- a/include/linux/platform_data/usb-rcar-gen2-phy.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __USB_RCAR_GEN2_PHY_H
-#define __USB_RCAR_GEN2_PHY_H
-
-#include <linux/types.h>
-
-struct rcar_gen2_phy_platform_data {
- /* USB channel 0 configuration */
- bool chan0_pci:1; /* true: PCI USB host 0, false: USBHS */
- /* USB channel 2 configuration */
- bool chan2_pci:1; /* true: PCI USB host 2, false: USBSS */
-};
-
-#endif
diff --git a/include/linux/platform_data/video-msm_fb.h b/include/linux/platform_data/video-msm_fb.h
deleted file mode 100644
index 31449be..0000000
--- a/include/linux/platform_data/video-msm_fb.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Internal shared definitions for various MSM framebuffer parts.
- *
- * Copyright (C) 2007 Google Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MSM_FB_H_
-#define _MSM_FB_H_
-
-#include <linux/device.h>
-
-struct mddi_info;
-
-struct msm_fb_data {
- int xres; /* x resolution in pixels */
- int yres; /* y resolution in pixels */
- int width; /* disply width in mm */
- int height; /* display height in mm */
- unsigned output_format;
-};
-
-struct msmfb_callback {
- void (*func)(struct msmfb_callback *);
-};
-
-enum {
- MSM_MDDI_PMDH_INTERFACE,
- MSM_MDDI_EMDH_INTERFACE,
- MSM_EBI2_INTERFACE,
-};
-
-#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
-
-struct msm_panel_data {
- /* turns off the fb memory */
- int (*suspend)(struct msm_panel_data *);
- /* turns on the fb memory */
- int (*resume)(struct msm_panel_data *);
- /* turns off the panel */
- int (*blank)(struct msm_panel_data *);
- /* turns on the panel */
- int (*unblank)(struct msm_panel_data *);
- void (*wait_vsync)(struct msm_panel_data *);
- void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
- void (*clear_vsync)(struct msm_panel_data *);
- /* from the enum above */
- unsigned interface_type;
- /* data to be passed to the fb driver */
- struct msm_fb_data *fb_data;
-
- /* capabilities supported by the panel */
- uint32_t caps;
-};
-
-struct msm_mddi_client_data {
- void (*suspend)(struct msm_mddi_client_data *);
- void (*resume)(struct msm_mddi_client_data *);
- void (*activate_link)(struct msm_mddi_client_data *);
- void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
- uint32_t reg);
- uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
- void (*auto_hibernate)(struct msm_mddi_client_data *, int);
- /* custom data that needs to be passed from the board file to a
- * particular client */
- void *private_client_data;
- struct resource *fb_resource;
- /* from the list above */
- unsigned interface_type;
-};
-
-struct msm_mddi_platform_data {
- unsigned int clk_rate;
- void (*power_client)(struct msm_mddi_client_data *, int on);
-
- /* fixup the mfr name, product id */
- void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
-
- struct resource *fb_resource; /*optional*/
- /* number of clients in the list that follows */
- int num_clients;
- /* array of client information of clients */
- struct {
- unsigned product_id; /* mfr id in top 16 bits, product id
- * in lower 16 bits
- */
- char *name; /* the device name will be the platform
- * device name registered for the client,
- * it should match the name of the associated
- * driver
- */
- unsigned id; /* id for mddi client device node, will also
- * be used as device id of panel devices, if
- * the client device will have multiple panels
- * space must be left here for them
- */
- void *client_data; /* required private client data */
- unsigned int clk_rate; /* optional: if the client requires a
- * different mddi clk rate
- */
- } client_platform_data[];
-};
-
-struct mdp_blit_req;
-struct fb_info;
-struct mdp_device {
- struct device dev;
- void (*dma)(struct mdp_device *mpd, uint32_t addr,
- uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
- uint32_t y, struct msmfb_callback *callback, int interface);
- void (*dma_wait)(struct mdp_device *mdp);
- int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
- struct mdp_blit_req *req);
- void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
-};
-
-struct class_interface;
-int register_mdp_client(struct class_interface *class_intf);
-
-/**** private client data structs go below this line ***/
-
-struct msm_mddi_bridge_platform_data {
- /* from board file */
- int (*init)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- int (*uninit)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- /* passed to panel for use by the fb driver */
- int (*blank)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- int (*unblank)(struct msm_mddi_bridge_platform_data *,
- struct msm_mddi_client_data *);
- struct msm_fb_data fb_data;
-};
-
-
-
-#endif
diff --git a/include/linux/platform_data/vsp1.h b/include/linux/platform_data/vsp1.h
deleted file mode 100644
index 63170e26..0000000
--- a/include/linux/platform_data/vsp1.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * vsp1.h -- R-Car VSP1 Platform Data
- *
- * Copyright (C) 2013 Renesas Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __PLATFORM_VSP1_H__
-#define __PLATFORM_VSP1_H__
-
-#define VSP1_HAS_LIF (1 << 0)
-#define VSP1_HAS_LUT (1 << 1)
-#define VSP1_HAS_SRU (1 << 2)
-
-struct vsp1_platform_data {
- unsigned int features;
- unsigned int rpf_count;
- unsigned int uds_count;
- unsigned int wpf_count;
-};
-
-#endif /* __PLATFORM_VSP1_H__ */
diff --git a/include/linux/platform_data/wkup_m3.h b/include/linux/platform_data/wkup_m3.h
new file mode 100644
index 0000000..3f1d77e
--- /dev/null
+++ b/include/linux/platform_data/wkup_m3.h
@@ -0,0 +1,30 @@
+/*
+ * TI Wakeup M3 remote processor platform data
+ *
+ * Copyright (C) 2014-2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H
+#define _LINUX_PLATFORM_DATA_WKUP_M3_H
+
+struct platform_device;
+
+struct wkup_m3_platform_data {
+ const char *reset_name;
+
+ int (*assert_reset)(struct platform_device *pdev, const char *name);
+ int (*deassert_reset)(struct platform_device *pdev, const char *name);
+};
+
+#endif /* _LINUX_PLATFORM_DATA_WKUP_M3_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index ae4882c..bba08f4 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -59,7 +59,7 @@ extern int platform_add_devices(struct platform_device **, int);
struct platform_device_info {
struct device *parent;
- struct acpi_dev_node acpi_node;
+ struct fwnode_handle *fwnode;
const char *name;
int id;
@@ -222,6 +222,15 @@ static inline void platform_set_drvdata(struct platform_device *pdev,
module_driver(__platform_driver, platform_driver_register, \
platform_driver_unregister)
+/* builtin_platform_driver() - Helper macro for builtin drivers that
+ * don't do anything special in driver init. This eliminates some
+ * boilerplate. Each driver may only use this macro once, and
+ * calling it replaces device_initcall(). Note this is meant to be
+ * a parallel of module_platform_driver() above, but without the __exit parts.
+ */
+#define builtin_platform_driver(__platform_driver) \
+ builtin_driver(__platform_driver, platform_driver_register)
+
/* module_platform_driver_probe() - Helper macro for drivers that don't do
* anything special in module init/exit. This eliminates a lot of
* boilerplate. Each module may only use this macro once, and
@@ -240,6 +249,20 @@ static void __exit __platform_driver##_exit(void) \
} \
module_exit(__platform_driver##_exit);
+/* builtin_platform_driver_probe() - Helper macro for drivers that don't do
+ * anything special in device init. This eliminates some boilerplate. Each
+ * driver may only use this macro once, and using it replaces device_initcall.
+ * This is meant to be a parallel of module_platform_driver_probe above, but
+ * without the __exit parts.
+ */
+#define builtin_platform_driver_probe(__platform_driver, __platform_probe) \
+static int __init __platform_driver##_init(void) \
+{ \
+ return platform_driver_probe(&(__platform_driver), \
+ __platform_probe); \
+} \
+device_initcall(__platform_driver##_init);
+
#define platform_create_bundle(driver, probe, res, n_res, data, size) \
__platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
extern struct platform_device *__platform_create_bundle(
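
A minimal usage sketch for the builtin_platform_driver() helper added above; the foo_* names and the "acme-foo" driver name are hypothetical and only illustrate the pattern:

#include <linux/init.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* claim resources, map registers, register with subsystems, ... */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name = "acme-foo",
	},
};

/* Expands to a device_initcall(); there is no module_exit() counterpart. */
builtin_platform_driver(foo_driver);
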
diff --git a/include/linux/resume-trace.h b/include/linux/pm-trace.h
index f31db23..ecbde7a 100644
--- a/include/linux/resume-trace.h
+++ b/include/linux/pm-trace.h
@@ -1,8 +1,8 @@
-#ifndef RESUME_TRACE_H
-#define RESUME_TRACE_H
+#ifndef PM_TRACE_H
+#define PM_TRACE_H
#ifdef CONFIG_PM_TRACE
-#include <asm/resume-trace.h>
+#include <asm/pm-trace.h>
#include <linux/types.h>
extern int pm_trace_enabled;
@@ -14,7 +14,7 @@ static inline int pm_trace_is_enabled(void)
struct device;
extern void set_trace_device(struct device *);
-extern void generate_resume_trace(const void *tracedata, unsigned int user);
+extern void generate_pm_trace(const void *tracedata, unsigned int user);
extern int show_trace_dev_match(char *buf, size_t size);
#define TRACE_DEVICE(dev) do { \
@@ -28,6 +28,7 @@ static inline int pm_trace_is_enabled(void) { return 0; }
#define TRACE_DEVICE(dev) do { } while (0)
#define TRACE_RESUME(dev) do { } while (0)
+#define TRACE_SUSPEND(dev) do { } while (0)
#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 8b59763..35d599e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -342,6 +342,18 @@ struct dev_pm_ops {
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
+#ifdef CONFIG_PM_SLEEP
+#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_noirq = suspend_fn, \
+ .resume_noirq = resume_fn, \
+ .freeze_noirq = suspend_fn, \
+ .thaw_noirq = resume_fn, \
+ .poweroff_noirq = suspend_fn, \
+ .restore_noirq = resume_fn,
+#else
+#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
+#endif
+
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
.runtime_suspend = suspend_fn, \
@@ -529,6 +541,7 @@ enum rpm_request {
};
struct wakeup_source;
+struct wake_irq;
struct pm_domain_data;
struct pm_subsys_data {
@@ -568,6 +581,7 @@ struct dev_pm_info {
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
+ struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
@@ -597,16 +611,24 @@ struct dev_pm_info {
extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
-extern int dev_pm_put_subsys_data(struct device *dev);
+extern void dev_pm_put_subsys_data(struct device *dev);
/*
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions along with
* subsystem-level and driver-level callbacks.
+ *
+ * @detach: Called when removing a device from the domain.
+ * @activate: Called before executing probe routines for bus types and drivers.
+ * @sync: Called after successful driver probe.
+ * @dismiss: Called after unsuccessful driver probe and after driver removal.
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
void (*detach)(struct device *dev, bool power_off);
+ int (*activate)(struct device *dev);
+ void (*sync)(struct device *dev);
+ void (*dismiss)(struct device *dev);
};
/*
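
A hedged sketch of a driver consuming the new SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() helper from the hunk above; bar_suspend_noirq() and bar_resume_noirq() are assumed callbacks, not part of the patch:

#include <linux/device.h>
#include <linux/pm.h>

static int bar_suspend_noirq(struct device *dev)
{
	/* quiesce the device; runs after device interrupts are disabled */
	return 0;
}

static int bar_resume_noirq(struct device *dev)
{
	/* undo whatever bar_suspend_noirq() did */
	return 0;
}

static const struct dev_pm_ops bar_pm_ops = {
	/* fills in the suspend/resume, freeze/thaw and poweroff/restore
	 * _noirq callbacks in one go when CONFIG_PM_SLEEP is set */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(bar_suspend_noirq, bar_resume_noirq)
};
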
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 0b00396..25266c6 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -20,6 +20,16 @@ struct pm_clk_notifier_block {
struct clk;
+#ifdef CONFIG_PM
+extern int pm_clk_runtime_suspend(struct device *dev);
+extern int pm_clk_runtime_resume(struct device *dev);
+#define USE_PM_CLK_RUNTIME_OPS \
+ .runtime_suspend = pm_clk_runtime_suspend, \
+ .runtime_resume = pm_clk_runtime_resume,
+#else
+#define USE_PM_CLK_RUNTIME_OPS
+#endif
+
#ifdef CONFIG_PM_CLK
static inline bool pm_clk_no_clocks(struct device *dev)
{
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a9edab2..681ccb0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -113,8 +113,6 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
- struct mutex lock;
- unsigned int refcount;
int need_restore;
};
@@ -129,7 +127,7 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev);
extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td);
@@ -140,7 +138,6 @@ extern int __pm_genpd_name_add_device(const char *domain_name,
extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev);
-extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_subdomain);
extern int pm_genpd_add_subdomain_names(const char *master_name,
@@ -166,9 +163,9 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
@@ -187,7 +184,6 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_sd)
{
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
new file mode 100644
index 0000000..cd5b62d
--- /dev/null
+++ b/include/linux/pm_wakeirq.h
@@ -0,0 +1,51 @@
+/*
+ * pm_wakeirq.h - Device wakeirq helper functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_PM_WAKEIRQ_H
+#define _LINUX_PM_WAKEIRQ_H
+
+#ifdef CONFIG_PM
+
+extern int dev_pm_set_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
+ int irq);
+extern void dev_pm_clear_wake_irq(struct device *dev);
+extern void dev_pm_enable_wake_irq(struct device *dev);
+extern void dev_pm_disable_wake_irq(struct device *dev);
+
+#else /* !CONFIG_PM */
+
+static inline int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
+static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
+static inline void dev_pm_clear_wake_irq(struct device *dev)
+{
+}
+
+static inline void dev_pm_enable_wake_irq(struct device *dev)
+{
+}
+
+static inline void dev_pm_disable_wake_irq(struct device *dev)
+{
+}
+
+#endif /* CONFIG_PM */
+#endif /* _LINUX_PM_WAKEIRQ_H */
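
A small sketch of the intended call pattern for the new wakeirq helpers, assuming a hypothetical driver that already looked up a dedicated wakeup interrupt number from its platform resources:

#include <linux/device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int baz_setup_wakeirq(struct device *dev, int wakeirq)
{
	int err;

	/* the device must be wakeup capable before attaching the IRQ */
	err = device_init_wakeup(dev, true);
	if (err)
		return err;

	/* hand the dedicated wakeup IRQ over to the PM core */
	return dev_pm_set_dedicated_wake_irq(dev, wakeirq);
}

static void baz_teardown_wakeirq(struct device *dev)
{
	dev_pm_clear_wake_irq(dev);
	device_init_wakeup(dev, false);
}
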
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index a0f7080..a344793 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -28,9 +28,17 @@
#include <linux/types.h>
+struct wake_irq;
+
/**
* struct wakeup_source - Representation of wakeup sources
*
+ * @name: Name of the wakeup source
+ * @entry: Wakeup source list entry
+ * @lock: Wakeup source lock
+ * @wakeirq: Optional device specific wakeirq
+ * @timer: Wakeup timer list
+ * @timer_expires: Wakeup timer expiration
* @total_time: Total time this wakeup source has been active.
* @max_time: Maximum time this wakeup source has been continuously active.
 * @last_time: Monotonic clock when the wakeup source was last touched.
@@ -47,6 +55,7 @@ struct wakeup_source {
const char *name;
struct list_head entry;
spinlock_t lock;
+ struct wake_irq *wakeirq;
struct timer_list timer;
unsigned long timer_expires;
ktime_t total_time;
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
new file mode 100644
index 0000000..d211404
--- /dev/null
+++ b/include/linux/pmem.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __PMEM_H__
+#define __PMEM_H__
+
+#include <linux/io.h>
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+#include <asm/cacheflush.h>
+#else
+static inline void arch_wmb_pmem(void)
+{
+ BUG();
+}
+
+static inline bool __arch_has_wmb_pmem(void)
+{
+ return false;
+}
+
+static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
+ unsigned long size)
+{
+ return NULL;
+}
+
+static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+ size_t n)
+{
+ BUG();
+}
+#endif
+
+/*
+ * Architectures that define ARCH_HAS_PMEM_API must provide
+ * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(),
+ * arch_wmb_pmem(), and __arch_has_wmb_pmem().
+ */
+
+static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
+{
+ memcpy(dst, (void __force const *) src, size);
+}
+
+static inline void memunmap_pmem(void __pmem *addr)
+{
+ iounmap((void __force __iomem *) addr);
+}
+
+/**
+ * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
+ *
+ * For a given cpu implementation within an architecture it is possible
+ * that wmb_pmem() resolves to a nop. If this returns
+ * false, pmem api users are unable to ensure durability and may want to
+ * fall back to a different data consistency model, or otherwise notify
+ * the user.
+ */
+static inline bool arch_has_wmb_pmem(void)
+{
+ if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
+ return __arch_has_wmb_pmem();
+ return false;
+}
+
+static inline bool arch_has_pmem_api(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
+}
+
+/*
+ * These defaults seek to offer decent performance and minimize the
+ * window between i/o completion and writes being durable on media.
+ * However, it is undefined / architecture specific whether
+ * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
+ * making data durable relative to i/o completion.
+ */
+static void default_memcpy_to_pmem(void __pmem *dst, const void *src,
+ size_t size)
+{
+ memcpy((void __force *) dst, src, size);
+}
+
+static void __pmem *default_memremap_pmem(resource_size_t offset,
+ unsigned long size)
+{
+ return (void __pmem __force *)ioremap_wt(offset, size);
+}
+
+/**
+ * memremap_pmem - map physical persistent memory for pmem api
+ * @offset: physical address of persistent memory
+ * @size: size of the mapping
+ *
+ * Establish a mapping of the architecture specific memory type expected
+ * by memcpy_to_pmem() and wmb_pmem(). For example, it may be
+ * the case that an uncacheable or writethrough mapping is sufficient,
+ * or a writeback mapping provided memcpy_to_pmem() and
+ * wmb_pmem() arrange for the data to be written through the
+ * cache to persistent media.
+ */
+static inline void __pmem *memremap_pmem(resource_size_t offset,
+ unsigned long size)
+{
+ if (arch_has_pmem_api())
+ return arch_memremap_pmem(offset, size);
+ return default_memremap_pmem(offset, size);
+}
+
+/**
+ * memcpy_to_pmem - copy data to persistent memory
+ * @dst: destination buffer for the copy
+ * @src: source buffer for the copy
+ * @n: length of the copy in bytes
+ *
+ * Perform a memory copy that results in the destination of the copy
+ * being effectively evicted from, or never written to, the processor
+ * cache hierarchy after the copy completes. After memcpy_to_pmem()
+ * data may still reside in cpu or platform buffers, so this operation
+ * must be followed by a wmb_pmem().
+ */
+static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
+{
+ if (arch_has_pmem_api())
+ arch_memcpy_to_pmem(dst, src, n);
+ else
+ default_memcpy_to_pmem(dst, src, n);
+}
+
+/**
+ * wmb_pmem - synchronize writes to persistent memory
+ *
+ * After a series of memcpy_to_pmem() operations this drains data from
+ * cpu write buffers and any platform (memory controller) buffers to
+ * ensure that written data is durable on persistent memory media.
+ */
+static inline void wmb_pmem(void)
+{
+ if (arch_has_pmem_api())
+ arch_wmb_pmem();
+}
+#endif /* __PMEM_H__ */
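
An illustrative, non-authoritative consumer of the pmem API above: map a persistent-memory range, copy a buffer into it and drain write buffers; the physical range and the error handling are placeholders:

#include <linux/errno.h>
#include <linux/pmem.h>
#include <linux/string.h>

static int pmem_write_example(resource_size_t phys, unsigned long size,
			      const void *buf, size_t len)
{
	void __pmem *virt;

	virt = memremap_pmem(phys, size);
	if (!virt)
		return -ENOMEM;

	/* copy with whatever cache semantics the architecture requires */
	memcpy_to_pmem(virt, buf, len);
	/* only durable on media after the barrier (if arch_has_wmb_pmem()) */
	wmb_pmem();

	memunmap_pmem(virt);
	return 0;
}
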
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 195aafc..5df733b 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>
+#include <linux/console.h>
#define PNP_NAME_LEN 50
@@ -309,15 +310,22 @@ struct pnp_fixup {
#define PNP_DISABLE 0x0004
#define PNP_CONFIGURABLE 0x0008
#define PNP_REMOVABLE 0x0010
+#define PNP_CONSOLE 0x0020
#define pnp_can_read(dev) (((dev)->protocol->get) && \
((dev)->capabilities & PNP_READ))
#define pnp_can_write(dev) (((dev)->protocol->set) && \
((dev)->capabilities & PNP_WRITE))
-#define pnp_can_disable(dev) (((dev)->protocol->disable) && \
- ((dev)->capabilities & PNP_DISABLE))
+#define pnp_can_disable(dev) (((dev)->protocol->disable) && \
+ ((dev)->capabilities & PNP_DISABLE) && \
+ (!((dev)->capabilities & PNP_CONSOLE) || \
+ console_suspend_enabled))
#define pnp_can_configure(dev) ((!(dev)->active) && \
((dev)->capabilities & PNP_CONFIGURABLE))
+#define pnp_can_suspend(dev) (((dev)->protocol->suspend) && \
+ (!((dev)->capabilities & PNP_CONSOLE) || \
+ console_suspend_enabled))
+
#ifdef CONFIG_ISAPNP
extern struct pnp_protocol isapnp_protocol;
@@ -502,4 +510,16 @@ static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
#endif /* CONFIG_PNP */
+/**
+ * module_pnp_driver() - Helper macro for registering a PnP driver
+ * @__pnp_driver: pnp_driver struct
+ *
+ * Helper macro for PnP drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_pnp_driver(__pnp_driver) \
+ module_driver(__pnp_driver, pnp_register_driver, \
+ pnp_unregister_driver)
+
#endif /* _LINUX_PNP_H */
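
Usage of the new module_pnp_driver() helper mirrors the other module_*_driver() macros; a minimal sketch with assumed qux_* names and a placeholder PnP ID:

#include <linux/module.h>
#include <linux/pnp.h>

static int qux_pnp_probe(struct pnp_dev *dev,
			 const struct pnp_device_id *dev_id)
{
	return 0;
}

static const struct pnp_device_id qux_pnp_ids[] = {
	{ .id = "PNP0400" },		/* placeholder ID */
	{ }
};
MODULE_DEVICE_TABLE(pnp, qux_pnp_ids);

static struct pnp_driver qux_pnp_driver = {
	.name		= "qux",
	.id_table	= qux_pnp_ids,
	.probe		= qux_pnp_probe,
};

/* replaces the usual module_init()/module_exit() boilerplate */
module_pnp_driver(qux_pnp_driver);
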
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index e97fc65..eadf28c 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -17,6 +17,7 @@
#include <linux/power_supply.h>
#include <linux/extcon.h>
+#include <linux/alarmtimer.h>
enum data_source {
CM_BATTERY_PRESENT,
@@ -45,29 +46,6 @@ enum cm_event_types {
};
/**
- * struct charger_global_desc
- * @rtc_name: the name of RTC used to wake up the system from suspend.
- * @rtc_only_wakeup:
- *	If the system is woken up by wakeup sources other than the RTC or
- *	callbacks, Charger Manager should recognize this via
- *	rtc_only_wakeup() returning false.
- * If the RTC given to CM is the only wakeup reason,
- * rtc_only_wakeup should return true.
- * @assume_timer_stops_in_suspend:
- * Assume that the jiffy timer stops in suspend-to-RAM.
- * When enabled, CM does not rely on jiffies value in
- * suspend_again and assumes that jiffies value does not
- * change during suspend.
- */
-struct charger_global_desc {
- char *rtc_name;
-
- bool (*rtc_only_wakeup)(void);
-
- bool assume_timer_stops_in_suspend;
-};
-
-/**
* struct charger_cable
* @extcon_name: the name of extcon device.
* @name: the name of charger cable(external connector).
@@ -264,24 +242,17 @@ struct charger_manager {
int emergency_stop;
char psy_name_buf[PSY_NAME_MAX + 1];
- struct power_supply charger_psy;
-
- bool status_save_ext_pwr_inserted;
- bool status_save_batt;
+ struct power_supply_desc charger_psy_desc;
+ struct power_supply *charger_psy;
u64 charging_start_time;
u64 charging_end_time;
};
#ifdef CONFIG_CHARGER_MANAGER
-extern int setup_charger_manager(struct charger_global_desc *gd);
-extern bool cm_suspend_again(void);
extern void cm_notify_event(struct power_supply *psy,
enum cm_event_types type, char *msg);
#else
-static inline int setup_charger_manager(struct charger_global_desc *gd)
-{ return 0; }
-static inline bool cm_suspend_again(void) { return false; }
static inline void cm_notify_event(struct power_supply *psy,
enum cm_event_types type, char *msg) { }
#endif
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index 89dd84f..522757a 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -126,7 +126,14 @@ enum max17047_register {
MAX17047_QRTbl30 = 0x42,
};
-enum max170xx_chip_type {MAX17042, MAX17047};
+enum max170xx_chip_type {
+ MAXIM_DEVICE_TYPE_UNKNOWN = 0,
+ MAXIM_DEVICE_TYPE_MAX17042,
+ MAXIM_DEVICE_TYPE_MAX17047,
+ MAXIM_DEVICE_TYPE_MAX17050,
+
+ MAXIM_DEVICE_TYPE_NUM
+};
/*
* used for setting a register to a desired value
@@ -208,6 +215,10 @@ struct max17042_platform_data {
* the datasheet although it can be changed by board designers.
*/
unsigned int r_sns;
+ int vmin; /* in millivolts */
+ int vmax; /* in millivolts */
+ int temp_min; /* in tenths of degree Celsius */
+ int temp_max; /* in tenths of degree Celsius */
};
#endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 096dbce..ef9f159 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -13,6 +13,7 @@
#ifndef __LINUX_POWER_SUPPLY_H__
#define __LINUX_POWER_SUPPLY_H__
+#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
#include <linux/spinlock.h>
@@ -173,28 +174,43 @@ union power_supply_propval {
const char *strval;
};
-struct device;
struct device_node;
+struct power_supply;
-struct power_supply {
- const char *name;
- enum power_supply_type type;
- enum power_supply_property *properties;
- size_t num_properties;
+/* Run-time specific power supply configuration */
+struct power_supply_config {
+ struct device_node *of_node;
+ /* Driver private data */
+ void *drv_data;
char **supplied_to;
size_t num_supplicants;
+};
- char **supplied_from;
- size_t num_supplies;
- struct device_node *of_node;
+/* Description of power supply */
+struct power_supply_desc {
+ const char *name;
+ enum power_supply_type type;
+ enum power_supply_property *properties;
+ size_t num_properties;
+ /*
+ * Functions for drivers implementing power supply class.
+ * These shouldn't be called directly by other drivers for accessing
+ * this power supply. Instead use power_supply_*() functions (for
+ * example power_supply_get_property()).
+ */
int (*get_property)(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
int (*set_property)(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+ /*
+ * property_is_writeable() will be called during registration
+	 * of the power supply. If this happens during device probe then it
+	 * must not access the device's internal data (probe has not finished).
+ */
int (*property_is_writeable)(struct power_supply *psy,
enum power_supply_property psp);
void (*external_power_changed)(struct power_supply *psy);
@@ -208,12 +224,28 @@ struct power_supply {
bool no_thermal;
/* For APM emulation, think legacy userspace. */
int use_for_apm;
+};
+
+struct power_supply {
+ const struct power_supply_desc *desc;
+
+ char **supplied_to;
+ size_t num_supplicants;
+
+ char **supplied_from;
+ size_t num_supplies;
+ struct device_node *of_node;
+
+ /* Driver private data */
+ void *drv_data;
/* private */
- struct device *dev;
+ struct device dev;
struct work_struct changed_work;
+ struct delayed_work deferred_register_work;
spinlock_t changed_lock;
bool changed;
+ atomic_t use_cnt;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
@@ -256,13 +288,19 @@ extern struct atomic_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
extern struct power_supply *power_supply_get_by_name(const char *name);
+extern void power_supply_put(struct power_supply *psy);
#ifdef CONFIG_OF
extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
const char *property);
+extern struct power_supply *devm_power_supply_get_by_phandle(
+ struct device *dev, const char *property);
#else /* !CONFIG_OF */
static inline struct power_supply *
power_supply_get_by_phandle(struct device_node *np, const char *property)
{ return NULL; }
+static inline struct power_supply *
+devm_power_supply_get_by_phandle(struct device *dev, const char *property)
+{ return NULL; }
#endif /* CONFIG_OF */
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
@@ -274,13 +312,36 @@ extern int power_supply_is_system_supplied(void);
static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
#endif
-extern int power_supply_register(struct device *parent,
- struct power_supply *psy);
-extern int power_supply_register_no_ws(struct device *parent,
- struct power_supply *psy);
+extern int power_supply_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+extern int power_supply_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val);
+extern int power_supply_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp);
+extern void power_supply_external_power_changed(struct power_supply *psy);
+
+extern struct power_supply *__must_check
+power_supply_register(struct device *parent,
+ const struct power_supply_desc *desc,
+ const struct power_supply_config *cfg);
+extern struct power_supply *__must_check
+power_supply_register_no_ws(struct device *parent,
+ const struct power_supply_desc *desc,
+ const struct power_supply_config *cfg);
+extern struct power_supply *__must_check
+devm_power_supply_register(struct device *parent,
+ const struct power_supply_desc *desc,
+ const struct power_supply_config *cfg);
+extern struct power_supply *__must_check
+devm_power_supply_register_no_ws(struct device *parent,
+ const struct power_supply_desc *desc,
+ const struct power_supply_config *cfg);
extern void power_supply_unregister(struct power_supply *psy);
extern int power_supply_powers(struct power_supply *psy, struct device *dev);
+extern void *power_supply_get_drvdata(struct power_supply *psy);
/* For APM emulation, think legacy userspace. */
extern struct class *power_supply_class;
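
With the power_supply / power_supply_desc split above, registration now returns the allocated supply instead of filling in a caller-owned struct. A hedged sketch of the new pattern; all acme_* names are invented:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/power_supply.h>

struct acme_battery {
	struct power_supply *psy;
};

static enum power_supply_property acme_props[] = {
	POWER_SUPPLY_PROP_STATUS,
};

static int acme_get_property(struct power_supply *psy,
			     enum power_supply_property psp,
			     union power_supply_propval *val)
{
	struct acme_battery *bat = power_supply_get_drvdata(psy);

	if (!bat)
		return -EINVAL;

	/* a real driver would query hardware here */
	val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
	return 0;
}

static const struct power_supply_desc acme_desc = {
	.name		= "acme-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= acme_props,
	.num_properties	= ARRAY_SIZE(acme_props),
	.get_property	= acme_get_property,
};

static int acme_register(struct device *parent, struct acme_battery *bat)
{
	struct power_supply_config cfg = { .drv_data = bat };

	bat->psy = devm_power_supply_register(parent, &acme_desc, &cfg);
	return PTR_ERR_OR_ZERO(bat->psy);
}
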
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index de83b4e..84991f1 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,13 +10,117 @@
#include <linux/list.h>
/*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count could in theory be the same as the number of
+ * interrupts in the system, but we run all interrupt handlers with
+ * interrupts disabled, so we cannot have nesting interrupts. Though
+ * there are a few palaeontologic drivers which reenable interrupts in
+ * the handler, so we need more than one bit here.
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x000f0000
+ * NMI_MASK: 0x00100000
+ * PREEMPT_ACTIVE: 0x00200000
+ * PREEMPT_NEED_RESCHED: 0x80000000
*/
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
+#define NMI_BITS 1
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
+
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
+#define PREEMPT_ACTIVE_BITS 1
+#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+
+/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_DISABLE_OFFSET 1
+#else
+# define PREEMPT_DISABLE_OFFSET 0
+#endif
+
+/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+ *
+ * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ * spin_unlock();
+ * local_bh_enable();
+ *
+ * Work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic() (preempt_count() != 0)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() \
+ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
@@ -33,6 +137,18 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
+#define preempt_active_enter() \
+do { \
+ preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+ barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+ barrier(); \
+ preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
@@ -49,6 +165,8 @@ do { \
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
@@ -57,52 +175,46 @@ do { \
__preempt_schedule(); \
} while (0)
+#define preempt_enable_notrace() \
+do { \
+ barrier(); \
+ if (unlikely(__preempt_count_dec_and_test())) \
+ __preempt_schedule_notrace(); \
+} while (0)
+
#define preempt_check_resched() \
do { \
if (should_resched()) \
__preempt_schedule(); \
} while (0)
-#else
+#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)
-#define preempt_check_resched() do { } while (0)
-#endif
-
-#define preempt_disable_notrace() \
-do { \
- __preempt_count_inc(); \
- barrier(); \
-} while (0)
-#define preempt_enable_no_resched_notrace() \
+#define preempt_enable_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
-#ifdef CONFIG_PREEMPT
-
-#ifndef CONFIG_CONTEXT_TRACKING
-#define __preempt_schedule_context() __preempt_schedule()
-#endif
+#define preempt_check_resched() do { } while (0)
+#endif /* CONFIG_PREEMPT */
-#define preempt_enable_notrace() \
+#define preempt_disable_notrace() \
do { \
+ __preempt_count_inc(); \
barrier(); \
- if (unlikely(__preempt_count_dec_and_test())) \
- __preempt_schedule_context(); \
} while (0)
-#else
-#define preempt_enable_notrace() \
+
+#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
-#endif
#else /* !CONFIG_PREEMPT_COUNT */
@@ -121,6 +233,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
+#define preemptible() 0
#endif /* CONFIG_PREEMPT_COUNT */
@@ -180,6 +293,8 @@ struct preempt_notifier {
struct preempt_ops *ops;
};
+void preempt_notifier_inc(void);
+void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
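
A tiny illustrative consumer of the context-test macros that now live directly in <linux/preempt.h>; maybe_alloc() is a made-up helper, not part of the patch:

#include <linux/preempt.h>
#include <linux/slab.h>

static void *maybe_alloc(size_t len)
{
	/* hard/soft IRQ and NMI context must not sleep */
	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(len, flags);
}
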
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
deleted file mode 100644
index dbeec4d..0000000
--- a/include/linux/preempt_mask.h
+++ /dev/null
@@ -1,117 +0,0 @@
-#ifndef LINUX_PREEMPT_MASK_H
-#define LINUX_PREEMPT_MASK_H
-
-#include <linux/preempt.h>
-
-/*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- *
- * The hardirq count could in theory be the same as the number of
- * interrupts in the system, but we run all interrupt handlers with
- * interrupts disabled, so we cannot have nesting interrupts. Though
- * there are a few palaeontologic drivers which reenable interrupts in
- * the handler, so we need more than one bit here.
- *
- * PREEMPT_MASK: 0x000000ff
- * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x000f0000
- * NMI_MASK: 0x00100000
- * PREEMPT_ACTIVE: 0x00200000
- */
-#define PREEMPT_BITS 8
-#define SOFTIRQ_BITS 8
-#define HARDIRQ_BITS 4
-#define NMI_BITS 1
-
-#define PREEMPT_SHIFT 0
-#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
-
-#define __IRQ_MASK(x) ((1UL << (x))-1)
-
-#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
-
-#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-#define NMI_OFFSET (1UL << NMI_SHIFT)
-
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-
-#define PREEMPT_ACTIVE_BITS 1
-#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
-#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-
-#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
- */
-#define in_irq() (hardirq_count())
-#define in_softirq() (softirq_count())
-#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
-
-#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_CHECK_OFFSET 1
-#else
-# define PREEMPT_CHECK_OFFSET 0
-#endif
-
-/*
- * The preempt_count offset needed for things like:
- *
- * spin_lock_bh()
- *
- * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
- * softirqs, such that unlock sequences of:
- *
- * spin_unlock();
- * local_bh_enable();
- *
- * Work as expected.
- */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
-
-/*
- * Are we running in atomic context? WARNING: this macro cannot
- * always detect atomic context; in particular, it cannot know about
- * held spinlocks in non-preemptible kernels. Thus it should not be
- * used in the general case to determine whether sleeping is possible.
- * Do not use in_atomic() in driver code.
- */
-#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-
-/*
- * Check whether we were atomic before we did preempt_disable():
- * (used by the scheduler, *after* releasing the kernel lock)
- */
-#define in_atomic_preempt_off() \
- ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
-
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible() 0
-#endif
-
-#endif /* LINUX_PREEMPT_MASK_H */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 4d5bf57..a6298b2 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -30,6 +30,8 @@ static inline const char *printk_skip_level(const char *buffer)
return buffer;
}
+#define CONSOLE_EXT_LOG_MAX 8192
+
/* printk's without a loglevel use this.. */
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
@@ -120,7 +122,7 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
-typedef int(*printk_func_t)(const char *fmt, va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
#ifdef CONFIG_PRINTK
asmlinkage __printf(5, 0)
@@ -164,7 +166,7 @@ char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
void log_buf_kexec_setup(void);
void __init setup_log_buf(int early);
-void dump_stack_set_arch_desc(const char *fmt, ...);
+__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
#else
@@ -215,7 +217,7 @@ static inline void setup_log_buf(int early)
{
}
-static inline void dump_stack_set_arch_desc(const char *fmt, ...)
+static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
{
}
@@ -255,6 +257,11 @@ extern asmlinkage void dump_stack(void) __cold;
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/*
+ * Like KERN_CONT, pr_cont() should only be used when continuing
+ * a line with no newline ('\n') enclosed. Otherwise it defaults
+ * back to KERN_DEFAULT.
+ */
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
@@ -417,9 +424,9 @@ enum {
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
-extern void hex_dump_to_buffer(const void *buf, size_t len,
- int rowsize, int groupsize,
- char *linebuf, size_t linebuflen, bool ascii);
+extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, char *linebuf, size_t linebuflen,
+ bool ascii);
#ifdef CONFIG_PRINTK
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
diff --git a/include/linux/property.h b/include/linux/property.h
index a6a3d98..76ebde9 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -13,6 +13,7 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/fwnode.h>
#include <linux/types.h>
struct device;
@@ -40,16 +41,6 @@ int device_property_read_string_array(struct device *dev, const char *propname,
int device_property_read_string(struct device *dev, const char *propname,
const char **val);
-enum fwnode_type {
- FWNODE_INVALID = 0,
- FWNODE_OF,
- FWNODE_ACPI,
-};
-
-struct fwnode_handle {
- enum fwnode_type type;
-};
-
bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
const char *propname, u8 *val,
@@ -140,4 +131,39 @@ static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
return fwnode_property_read_u64_array(fwnode, propname, val, 1);
}
+/**
+ * struct property_entry - "Built-in" device property representation.
+ * @name: Name of the property.
+ * @type: Type of the property.
+ * @nval: Number of items of type @type making up the value.
+ * @value: Value of the property (an array of @nval items of type @type).
+ */
+struct property_entry {
+ const char *name;
+ enum dev_prop_type type;
+ size_t nval;
+ union {
+ void *raw_data;
+ u8 *u8_data;
+ u16 *u16_data;
+ u32 *u32_data;
+ u64 *u64_data;
+ const char **str;
+ } value;
+};
+
+/**
+ * struct property_set - Collection of "built-in" device properties.
+ * @fwnode: Handle to be pointed to by the fwnode field of struct device.
+ * @properties: Array of properties terminated with a null entry.
+ */
+struct property_set {
+ struct fwnode_handle fwnode;
+ struct property_entry *properties;
+};
+
+void device_add_property_set(struct device *dev, struct property_set *pset);
+
+bool device_dma_is_coherent(struct device *dev);
+
#endif /* _LINUX_PROPERTY_H_ */
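
A hedged sketch of describing built-in properties with the new property_entry / property_set types and attaching them to a device; the demo_* names and the "clock-frequency" value are invented for illustration:

#include <linux/property.h>

static u32 demo_clock_frequency[] = { 400000 };

static struct property_entry demo_properties[] = {
	{
		.name	= "clock-frequency",
		.type	= DEV_PROP_U32,
		.nval	= 1,
		.value	= { .u32_data = demo_clock_frequency },
	},
	{ }				/* sentinel */
};

static struct property_set demo_pset = {
	.properties = demo_properties,
};

static void demo_attach_props(struct device *dev)
{
	/* afterwards device_property_read_u32(dev, "clock-frequency", ...)
	 * resolves against this set */
	device_add_property_set(dev, &demo_pset);
}
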
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ece0c6b..8e7a25b 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -39,6 +39,8 @@ enum pstore_type_id {
PSTORE_TYPE_PPC_RTAS = 4,
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
+ PSTORE_TYPE_PMSG = 7,
+ PSTORE_TYPE_PPC_OPAL = 8,
PSTORE_TYPE_UNKNOWN = 255
};
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 4af3fdc..9c9d6c1 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -81,6 +81,7 @@ struct ramoops_platform_data {
unsigned long record_size;
unsigned long console_size;
unsigned long ftrace_size;
+ unsigned long pmsg_size;
int dump_oops;
struct persistent_ram_ecc_info ecc_info;
};
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 0d8ff3f..b8b7306 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -64,11 +64,11 @@ struct ptp_clock_request {
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
*
- * @gettime: Reads the current time from the hardware clock.
- * parameter ts: Holds the result.
+ * @gettime64: Reads the current time from the hardware clock.
+ * parameter ts: Holds the result.
*
- * @settime: Set the current time on the hardware clock.
- * parameter ts: Time value to set.
+ * @settime64: Set the current time on the hardware clock.
+ * parameter ts: Time value to set.
*
* @enable: Request driver to enable or disable an ancillary feature.
* parameter request: Desired resource to enable or disable.
@@ -104,8 +104,8 @@ struct ptp_clock_info {
struct ptp_pin_desc *pin_config;
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
- int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
- int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
+ int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
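
For the gettime/settime rename above, a driver conversion to the 64-bit callback might look roughly like this; the quux_* names and the counter read are placeholders:

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

static int quux_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	u64 ns = 0;	/* a real driver reads its hardware counter here */

	*ts = ns_to_timespec64(ns);
	return 0;
}

static struct ptp_clock_info quux_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "quux",
	.max_adj	= 1000000,
	.gettime64	= quux_gettime64,
};
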
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index e90628c..36262d0 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -182,6 +182,8 @@ struct pwm_chip {
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
+int pwmchip_add_with_polarity(struct pwm_chip *chip,
+ enum pwm_polarity polarity);
int pwmchip_add(struct pwm_chip *chip);
int pwmchip_remove(struct pwm_chip *chip);
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
@@ -217,6 +219,11 @@ static inline int pwmchip_add(struct pwm_chip *chip)
return -EINVAL;
}
+static inline int pwmchip_add_with_polarity(struct pwm_chip *chip,
+					     enum pwm_polarity polarity)
+{
+	return -EINVAL;
+}
+
static inline int pwmchip_remove(struct pwm_chip *chip)
{
return -EINVAL;
@@ -290,10 +297,15 @@ struct pwm_lookup {
#if IS_ENABLED(CONFIG_PWM)
void pwm_add_table(struct pwm_lookup *table, size_t num);
+void pwm_remove_table(struct pwm_lookup *table, size_t num);
#else
static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
{
}
+
+static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
+{
+}
#endif
#ifdef CONFIG_PWM_SYSFS
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 77aed9e..0485bab 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -37,6 +37,7 @@
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
+#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
@@ -193,8 +194,9 @@ enum pxa_ssp_type {
PXA168_SSP,
PXA910_SSP,
CE4100_SSP,
- LPSS_SSP,
QUARK_X1000_SSP,
+ LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_BYT_SSP,
};
struct ssp_device {
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
new file mode 100644
index 0000000..6e7d5ec
--- /dev/null
+++ b/include/linux/qcom_scm.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_SCM_H
+#define __QCOM_SCM_H
+
+extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
+extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+
+#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
+
+struct qcom_scm_hdcp_req {
+ u32 addr;
+ u32 val;
+};
+
+extern bool qcom_scm_hdcp_available(void);
+extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
+ u32 *resp);
+
+#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
+#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
+
+extern void qcom_scm_cpu_power_down(u32 flags);
+
+#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+
+extern u32 qcom_scm_get_version(void);
+
+#endif
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 50978b7..b2505ac 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -50,6 +50,7 @@
#undef USRQUOTA
#undef GRPQUOTA
+#undef PRJQUOTA
enum quota_type {
USRQUOTA = 0, /* element used for user quotas */
GRPQUOTA = 1, /* element used for group quotas */
@@ -216,19 +217,21 @@ struct mem_dqinfo {
unsigned long dqi_flags;
unsigned int dqi_bgrace;
unsigned int dqi_igrace;
- qsize_t dqi_maxblimit;
- qsize_t dqi_maxilimit;
+ qsize_t dqi_max_spc_limit;
+ qsize_t dqi_max_ino_limit;
void *dqi_priv;
};
struct super_block;
-#define DQF_MASK 0xffff /* Mask for format specific flags */
-#define DQF_GETINFO_MASK 0x1ffff /* Mask for flags passed to userspace */
-#define DQF_SETINFO_MASK 0xffff /* Mask for flags modifiable from userspace */
-#define DQF_SYS_FILE_B 16
-#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B) /* Quota file stored as system file */
-#define DQF_INFO_DIRTY_B 31
+/* Mask for flags passed to userspace */
+#define DQF_GETINFO_MASK (DQF_ROOT_SQUASH | DQF_SYS_FILE)
+/* Mask for flags modifiable from userspace */
+#define DQF_SETINFO_MASK DQF_ROOT_SQUASH
+
+enum {
+ DQF_INFO_DIRTY_B = DQF_PRIVATE,
+};
#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
extern void mark_info_dirty(struct super_block *sb, int type);
@@ -317,23 +320,113 @@ struct dquot_operations {
/* get reserved quota for delayed alloc, value returned is managed by
* quota code only */
qsize_t *(*get_reserved_space) (struct inode *);
+ int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
};
struct path;
+/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
+struct qc_dqblk {
+ int d_fieldmask; /* mask of fields to change in ->set_dqblk() */
+ u64 d_spc_hardlimit; /* absolute limit on used space */
+ u64 d_spc_softlimit; /* preferred limit on used space */
+ u64 d_ino_hardlimit; /* maximum # allocated inodes */
+ u64 d_ino_softlimit; /* preferred inode limit */
+ u64 d_space; /* Space owned by the user */
+ u64 d_ino_count; /* # inodes owned by the user */
+ s64 d_ino_timer; /* zero if within inode limits */
+ /* if not, we refuse service */
+ s64 d_spc_timer; /* similar to above; for space */
+ int d_ino_warns; /* # warnings issued wrt num inodes */
+ int d_spc_warns; /* # warnings issued wrt used space */
+ u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
+ u64 d_rt_spc_softlimit; /* preferred limit on RT space */
+ u64 d_rt_space; /* realtime space owned */
+ s64 d_rt_spc_timer; /* similar to above; for RT space */
+ int d_rt_spc_warns; /* # warnings issued wrt RT space */
+};
+
+/*
+ * Field specifiers for ->set_dqblk() in struct qc_dqblk and also for
+ * ->set_info() in struct qc_info
+ */
+#define QC_INO_SOFT (1<<0)
+#define QC_INO_HARD (1<<1)
+#define QC_SPC_SOFT (1<<2)
+#define QC_SPC_HARD (1<<3)
+#define QC_RT_SPC_SOFT (1<<4)
+#define QC_RT_SPC_HARD (1<<5)
+#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
+ QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
+#define QC_SPC_TIMER (1<<6)
+#define QC_INO_TIMER (1<<7)
+#define QC_RT_SPC_TIMER (1<<8)
+#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
+#define QC_SPC_WARNS (1<<9)
+#define QC_INO_WARNS (1<<10)
+#define QC_RT_SPC_WARNS (1<<11)
+#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
+#define QC_SPACE (1<<12)
+#define QC_INO_COUNT (1<<13)
+#define QC_RT_SPACE (1<<14)
+#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+#define QC_FLAGS (1<<15)
+
+#define QCI_SYSFILE (1 << 0) /* Quota file is hidden from userspace */
+#define QCI_ROOT_SQUASH (1 << 1) /* Root squash turned on */
+#define QCI_ACCT_ENABLED (1 << 2) /* Quota accounting enabled */
+#define QCI_LIMITS_ENFORCED (1 << 3) /* Quota limits enforced */
+
+/* Structures for communicating via ->get_state */
+struct qc_type_state {
+ unsigned int flags; /* Flags QCI_* */
+ unsigned int spc_timelimit; /* Time after which space softlimit is
+ * enforced */
+ unsigned int ino_timelimit; /* Ditto for inode softlimit */
+ unsigned int rt_spc_timelimit; /* Ditto for real-time space */
+ unsigned int spc_warnlimit; /* Limit for number of space warnings */
+ unsigned int ino_warnlimit; /* Ditto for inodes */
+ unsigned int rt_spc_warnlimit; /* Ditto for real-time space */
+ unsigned long long ino; /* Inode number of quota file */
+ blkcnt_t blocks; /* Number of 512-byte blocks in the file */
+ blkcnt_t nextents; /* Number of extents in the file */
+};
+
+struct qc_state {
+ unsigned int s_incoredqs; /* Number of dquots in core */
+ /*
+ * Per quota type information. The array should really have
+ * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
+ * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
+ * supports project quotas, this can be changed to MAXQUOTAS
+ */
+ struct qc_type_state s_state[XQM_MAXQUOTAS];
+};
+
+/* Structure for communicating via ->set_info */
+struct qc_info {
+ int i_fieldmask; /* mask of fields to change in ->set_info() */
+ unsigned int i_flags; /* Flags QCI_* */
+ unsigned int i_spc_timelimit; /* Time after which space softlimit is
+ * enforced */
+ unsigned int i_ino_timelimit; /* Ditto for inode softlimit */
+ unsigned int i_rt_spc_timelimit;/* Ditto for real-time space */
+ unsigned int i_spc_warnlimit; /* Limit for number of space warnings */
+ unsigned int i_ino_warnlimit; /* Limit for number of inode warnings */
+ unsigned int i_rt_spc_warnlimit; /* Ditto for real-time space */
+};
+
/* Operations handling requests from userspace */
struct quotactl_ops {
int (*quota_on)(struct super_block *, int, int, struct path *);
- int (*quota_on_meta)(struct super_block *, int, int);
int (*quota_off)(struct super_block *, int);
+ int (*quota_enable)(struct super_block *, unsigned int);
+ int (*quota_disable)(struct super_block *, unsigned int);
int (*quota_sync)(struct super_block *, int);
- int (*get_info)(struct super_block *, int, struct if_dqinfo *);
- int (*set_info)(struct super_block *, int, struct if_dqinfo *);
- int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
- int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
- int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
- int (*set_xstate)(struct super_block *, unsigned int, int);
- int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
+ int (*set_info)(struct super_block *, int, struct qc_info *);
+ int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*get_state)(struct super_block *, struct qc_state *);
int (*rm_xquota)(struct super_block *, unsigned int);
};
@@ -344,7 +437,19 @@ struct quota_format_type {
struct quota_format_type *qf_next;
};
-/* Quota state flags - they actually come in two flavors - for users and groups */
+/**
+ * Quota state flags - they actually come in two flavors - for users and groups.
+ *
+ * Actual typed flags layout:
+ * USRQUOTA GRPQUOTA
+ * DQUOT_USAGE_ENABLED 0x0001 0x0002
+ * DQUOT_LIMITS_ENABLED 0x0004 0x0008
+ * DQUOT_SUSPENDED 0x0010 0x0020
+ *
+ * Following bits are used for non-typed flags:
+ * DQUOT_QUOTA_SYS_FILE 0x0040
+ * DQUOT_NEGATIVE_USAGE 0x0080
+ */
enum {
_DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */
_DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */
@@ -353,9 +458,9 @@ enum {
* memory to turn them on */
_DQUOT_STATE_FLAGS
};
-#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED)
-#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED)
-#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED)
+#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED * MAXQUOTAS)
+#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED * MAXQUOTAS)
+#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED * MAXQUOTAS)
#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \
DQUOT_SUSPENDED)
/* Other quota flags */
@@ -369,15 +474,21 @@ enum {
*/
#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1))
/* Allow negative quota usage */
-
static inline unsigned int dquot_state_flag(unsigned int flags, int type)
{
- return flags << _DQUOT_STATE_FLAGS * type;
+ return flags << type;
}
static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
{
- return (flags >> _DQUOT_STATE_FLAGS * type) & DQUOT_STATE_FLAGS;
+ return (flags >> type) & DQUOT_STATE_FLAGS;
+}
+
+/* Bitmap of quota types where flag is set in flags */
+static __always_inline unsigned dquot_state_types(unsigned flags, unsigned flag)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(flag);
+ return (flags / flag) & ((1 << MAXQUOTAS) - 1);
}
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
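
For illustration only, a minimal sketch (not part of the patch) of how the reworked per-type flag helpers compose; the example_* name is hypothetical and MAXQUOTAS is assumed to be 2 (USRQUOTA, GRPQUOTA) as in the layout table above.

#include <linux/quota.h>

/* Hypothetical demonstration of the per-type flag arithmetic. */
static void example_quota_flags(struct quota_info *dqopt)
{
	/* DQUOT_USAGE_ENABLED shifted by GRPQUOTA sets bit 0x0002. */
	dqopt->flags |= dquot_state_flag(DQUOT_USAGE_ENABLED, GRPQUOTA);

	/*
	 * dquot_state_types() collapses the typed copies into a bitmap of
	 * quota types; here only the group bit (1 << GRPQUOTA) is set.
	 */
	WARN_ON(dquot_state_types(dqopt->flags, DQUOT_USAGE_ENABLED) !=
		(1 << GRPQUOTA));
}
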
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f23538a..77ca660 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -95,12 +95,12 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
int dquot_quota_off(struct super_block *sb, int type);
int dquot_writeback_dquots(struct super_block *sb, int type);
int dquot_quota_sync(struct super_block *sb, int type);
-int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
-int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
+int dquot_get_state(struct super_block *sb, struct qc_state *state);
+int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
int dquot_get_dqblk(struct super_block *sb, struct kqid id,
- struct fs_disk_quota *di);
+ struct qc_dqblk *di);
int dquot_set_dqblk(struct super_block *sb, struct kqid id,
- struct fs_disk_quota *di);
+ struct qc_dqblk *di);
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
int dquot_transfer(struct inode *inode, struct iattr *iattr);
@@ -134,10 +134,7 @@ static inline bool sb_has_quota_suspended(struct super_block *sb, int type)
static inline unsigned sb_any_quota_suspended(struct super_block *sb)
{
- unsigned type, tmsk = 0;
- for (type = 0; type < MAXQUOTAS; type++)
- tmsk |= sb_has_quota_suspended(sb, type) << type;
- return tmsk;
+ return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_SUSPENDED);
}
/* Does kernel know about any quota information for given sb + type? */
@@ -149,10 +146,7 @@ static inline bool sb_has_quota_loaded(struct super_block *sb, int type)
static inline unsigned sb_any_quota_loaded(struct super_block *sb)
{
- unsigned type, tmsk = 0;
- for (type = 0; type < MAXQUOTAS; type++)
- tmsk |= sb_has_quota_loaded(sb, type) << type;
- return tmsk;
+ return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_USAGE_ENABLED);
}
static inline bool sb_has_quota_active(struct super_block *sb, int type)
@@ -166,6 +160,7 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type)
*/
extern const struct dquot_operations dquot_operations;
extern const struct quotactl_ops dquot_quotactl_ops;
+extern const struct quotactl_ops dquot_quotactl_sysfile_ops;
#else
@@ -386,4 +381,6 @@ static inline void dquot_release_reservation_block(struct inode *inode,
__dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE);
}
+unsigned int qtype_enforce_flag(int type);
+
#endif /* _LINUX_QUOTAOPS_ */
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 73069cb..a7a06d1 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -72,6 +72,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
/* Routine choices */
struct raid6_calls {
void (*gen_syndrome)(int, size_t, void **);
+ void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
int prefer; /* Has special performance attribute */
diff --git a/include/linux/random.h b/include/linux/random.h
index b05856e..e651874 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -6,14 +6,23 @@
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/list.h>
#include <uapi/linux/random.h>
+struct random_ready_callback {
+ struct list_head list;
+ void (*func)(struct random_ready_callback *rdy);
+ struct module *owner;
+};
+
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
+extern int add_random_ready_callback(struct random_ready_callback *rdy);
+extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
extern int random_int_secret_init(void);
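
As a usage sketch (not from the patch): a hypothetical consumer that defers work until the nonblocking pool is initialized, assuming add_random_ready_callback() returns -EALREADY when the pool is already seeded, as the in-tree callers expect.

#include <linux/module.h>
#include <linux/random.h>

static void example_random_ready(struct random_ready_callback *rdy)
{
	/* Pool is now initialized; get_random_bytes() output is well seeded. */
	pr_info("example: reseeding from initialized pool\n");
}

static struct random_ready_callback example_rdy = {
	.func	= example_random_ready,
	.owner	= THIS_MODULE,
};

static int example_setup(void)
{
	int err = add_random_ready_callback(&example_rdy);

	if (err == -EALREADY)
		return 0;	/* pool already initialized, nothing to wait for */
	/*
	 * On success (0) the callback is queued; if it may still be pending
	 * at unload time it must be removed with del_random_ready_callback().
	 */
	return err;
}
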
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 57e75ae..830c499 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
+#include <linux/rcupdate.h>
struct rb_node {
unsigned long __rb_parent_color;
@@ -51,7 +52,7 @@ struct rb_root {
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
-/* 'empty' nodes are nodes that are known not to be inserted in an rbree */
+/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
@@ -73,11 +74,11 @@ extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
-extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
-static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
- struct rb_node ** rb_link)
+static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
@@ -85,6 +86,15 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
*rb_link = node;
}
+static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ node->__rb_parent_color = (unsigned long)parent;
+ node->rb_left = node->rb_right = NULL;
+
+ rcu_assign_pointer(*rb_link, node);
+}
+
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
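
A minimal writer-side sketch (not from the patch) of the new rb_link_node_rcu() helper; the example_* type and locking are hypothetical, and lockless readers still need extra machinery such as the latched trees introduced further down, because rebalancing remains non-atomic.

#include <linux/rbtree.h>
#include <linux/spinlock.h>

/* Hypothetical node keyed by an integer. */
struct example_node {
	struct rb_node	rb;
	int		key;
};

/* Writer side: insert under a lock, publishing the link with RCU semantics. */
static void example_insert(struct rb_root *root, struct example_node *new,
			   spinlock_t *lock)
{
	struct rb_node **link, *parent = NULL;

	spin_lock(lock);
	link = &root->rb_node;
	while (*link) {
		struct example_node *e = rb_entry(*link, struct example_node, rb);

		parent = *link;
		link = e->key > new->key ? &parent->rb_left : &parent->rb_right;
	}
	/* rcu_assign_pointer() inside makes the new node visible last. */
	rb_link_node_rcu(&new->rb, parent, link);
	rb_insert_color(&new->rb, root);
	spin_unlock(lock);
}
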
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 378c5ee..14d7b83 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -123,11 +123,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
{
if (parent) {
if (parent->rb_left == old)
- parent->rb_left = new;
+ WRITE_ONCE(parent->rb_left, new);
else
- parent->rb_right = new;
+ WRITE_ONCE(parent->rb_right, new);
} else
- root->rb_node = new;
+ WRITE_ONCE(root->rb_node, new);
}
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -137,7 +137,8 @@ static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+ struct rb_node *child = node->rb_right;
+ struct rb_node *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
@@ -167,6 +168,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
+
tmp = child->rb_left;
if (!tmp) {
/*
@@ -180,6 +182,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
*/
parent = successor;
child2 = successor->rb_right;
+
augment->copy(node, successor);
} else {
/*
@@ -201,19 +204,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
- parent->rb_left = child2 = successor->rb_right;
- successor->rb_right = child;
+ child2 = successor->rb_right;
+ WRITE_ONCE(parent->rb_left, child2);
+ WRITE_ONCE(successor->rb_right, child);
rb_set_parent(child, successor);
+
augment->copy(node, successor);
augment->propagate(parent, successor);
}
- successor->rb_left = tmp = node->rb_left;
+ tmp = node->rb_left;
+ WRITE_ONCE(successor->rb_left, tmp);
rb_set_parent(tmp, successor);
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
+
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
new file mode 100644
index 0000000..4f3432c
--- /dev/null
+++ b/include/linux/rbtree_latch.h
@@ -0,0 +1,212 @@
+/*
+ * Latched RB-trees
+ *
+ * Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org>
+ *
+ * Since RB-trees have non-atomic modifications they're not immediately suited
+ * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for
+ * lockless lookups, we cannot guarantee they return a correct result.
+ *
+ * The simplest solution is a seqlock + RB-tree, this will allow lockless
+ * lookups; but has the constraint (inherent to the seqlock) that read sides
+ * cannot nest in write sides.
+ *
+ * If we need to allow unconditional lookups (say as required for NMI context
+ * usage) we need a more complex setup; this data structure provides this by
+ * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * implement a latched RB-tree which does allow for unconditional lookups by
+ * virtue of always having (at least) one stable copy of the tree.
+ *
+ * However, while we have the guarantee that there is at all times one stable
+ * copy, this does not guarantee an iteration will not observe modifications.
+ * What might have been a stable copy at the start of the iteration need not
+ * remain so for the duration of the iteration.
+ *
+ * Therefore, this does require a lockless RB-tree iteration to be non-fatal;
+ * see the comment in lib/rbtree.c. Note however that we only require the first
+ * condition -- not seeing partial stores -- because the latch thing isolates
+ * us from loops. If we were to interrupt a modification the lookup would be
+ * pointed at the stable tree and complete while the modification was halted.
+ */
+
+#ifndef RB_TREE_LATCH_H
+#define RB_TREE_LATCH_H
+
+#include <linux/rbtree.h>
+#include <linux/seqlock.h>
+
+struct latch_tree_node {
+ struct rb_node node[2];
+};
+
+struct latch_tree_root {
+ seqcount_t seq;
+ struct rb_root tree[2];
+};
+
+/**
+ * latch_tree_ops - operators to define the tree order
+ * @less: used for insertion; provides the (partial) order between two elements.
+ * @comp: used for lookups; provides the order between the search key and an element.
+ *
+ * The operators are related like:
+ *
+ * comp(a->key,b) < 0 := less(a,b)
+ * comp(a->key,b) > 0 := less(b,a)
+ * comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
+ *
+ * If these operators define a partial order on the elements we make no
+ * guarantee on which of the elements matching the key is found. See
+ * latch_tree_find().
+ */
+struct latch_tree_ops {
+ bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b);
+ int (*comp)(void *key, struct latch_tree_node *b);
+};
+
+static __always_inline struct latch_tree_node *
+__lt_from_rb(struct rb_node *node, int idx)
+{
+ return container_of(node, struct latch_tree_node, node[idx]);
+}
+
+static __always_inline void
+__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
+ bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
+{
+ struct rb_root *root = &ltr->tree[idx];
+ struct rb_node **link = &root->rb_node;
+ struct rb_node *node = &ltn->node[idx];
+ struct rb_node *parent = NULL;
+ struct latch_tree_node *ltp;
+
+ while (*link) {
+ parent = *link;
+ ltp = __lt_from_rb(parent, idx);
+
+ if (less(ltn, ltp))
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node_rcu(node, parent, link);
+ rb_insert_color(node, root);
+}
+
+static __always_inline void
+__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
+{
+ rb_erase(&ltn->node[idx], &ltr->tree[idx]);
+}
+
+static __always_inline struct latch_tree_node *
+__lt_find(void *key, struct latch_tree_root *ltr, int idx,
+ int (*comp)(void *key, struct latch_tree_node *node))
+{
+ struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
+ struct latch_tree_node *ltn;
+ int c;
+
+ while (node) {
+ ltn = __lt_from_rb(node, idx);
+ c = comp(key, ltn);
+
+ if (c < 0)
+ node = rcu_dereference_raw(node->rb_left);
+ else if (c > 0)
+ node = rcu_dereference_raw(node->rb_right);
+ else
+ return ltn;
+ }
+
+ return NULL;
+}
+
+/**
+ * latch_tree_insert() - insert @node into the trees @root
+ * @node: nodes to insert
+ * @root: trees to insert @node into
+ * @ops: operators defining the node order
+ *
+ * It inserts @node into @root in an ordered fashion such that we can always
+ * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ *
+ * The inserts use rcu_assign_pointer() to publish the element such that the
+ * tree structure is stored before we can observe the new @node.
+ *
+ * All modifications (latch_tree_insert, latch_tree_erase) are assumed to be
+ * serialized.
+ */
+static __always_inline void
+latch_tree_insert(struct latch_tree_node *node,
+ struct latch_tree_root *root,
+ const struct latch_tree_ops *ops)
+{
+ raw_write_seqcount_latch(&root->seq);
+ __lt_insert(node, root, 0, ops->less);
+ raw_write_seqcount_latch(&root->seq);
+ __lt_insert(node, root, 1, ops->less);
+}
+
+/**
+ * latch_tree_erase() - removes @node from the trees @root
+ * @node: nodes to remove
+ * @root: trees to remove @node from
+ * @ops: operators defining the node order
+ *
+ * Removes @node from the trees @root in an ordered fashion such that we can
+ * always observe one complete tree. See the comment for
+ * raw_write_seqcount_latch().
+ *
+ * It is assumed that @node will observe one RCU quiescent state before being
+ * reused or freed.
+ *
+ * All modifications (latch_tree_insert, latch_tree_erase) are assumed to be
+ * serialized.
+ */
+static __always_inline void
+latch_tree_erase(struct latch_tree_node *node,
+ struct latch_tree_root *root,
+ const struct latch_tree_ops *ops)
+{
+ raw_write_seqcount_latch(&root->seq);
+ __lt_erase(node, root, 0);
+ raw_write_seqcount_latch(&root->seq);
+ __lt_erase(node, root, 1);
+}
+
+/**
+ * latch_tree_find() - find the node matching @key in the trees @root
+ * @key: search key
+ * @root: trees to search for @key
+ * @ops: operators defining the node order
+ *
+ * Does a lockless lookup in the trees @root for the node matching @key.
+ *
+ * It is assumed that this is called while holding the appropriate RCU read
+ * side lock.
+ *
+ * If the operators define a partial order on the elements (there are multiple
+ * elements which have the same key value) it is undefined which of these
+ * elements will be found. Nor is it possible to iterate the tree to find
+ * further elements with the same key value.
+ *
+ * Returns: a pointer to the node matching @key or NULL.
+ */
+static __always_inline struct latch_tree_node *
+latch_tree_find(void *key, struct latch_tree_root *root,
+ const struct latch_tree_ops *ops)
+{
+ struct latch_tree_node *node;
+ unsigned int seq;
+
+ do {
+ seq = raw_read_seqcount_latch(&root->seq);
+ node = __lt_find(key, root, seq & 1, ops->comp);
+ } while (read_seqcount_retry(&root->seq, seq));
+
+ return node;
+}
+
+#endif /* RB_TREE_LATCH_H */
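
By way of illustration (not part of the patch), a minimal latched-tree user keyed by an address; everything prefixed example_ is hypothetical, writers are assumed to be serialized by the caller, and lookups are assumed to run under an RCU read-side lock.

#include <linux/rbtree_latch.h>

/* Hypothetical object indexed by an address. */
struct example_obj {
	unsigned long		addr;
	struct latch_tree_node	lt;
};

static __always_inline struct example_obj *to_obj(struct latch_tree_node *n)
{
	return container_of(n, struct example_obj, lt);
}

static bool example_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return to_obj(a)->addr < to_obj(b)->addr;
}

static int example_comp(void *key, struct latch_tree_node *n)
{
	unsigned long addr = (unsigned long)key;
	struct example_obj *obj = to_obj(n);

	if (addr < obj->addr)
		return -1;
	if (addr > obj->addr)
		return 1;
	return 0;
}

static const struct latch_tree_ops example_ops = {
	.less = example_less,
	.comp = example_comp,
};

static struct latch_tree_root example_root;

/* Writers must be serialized externally (e.g. by a mutex). */
static void example_add(struct example_obj *obj)
{
	latch_tree_insert(&obj->lt, &example_root, &example_ops);
}

/* Lookup; the caller is assumed to hold an RCU read-side lock. */
static struct example_obj *example_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &example_root, &example_ops);
	return n ? to_obj(n) : NULL;
}
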
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 529bc94..17c6b1f 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -29,8 +29,8 @@
*/
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
- ACCESS_ONCE(list->next) = list;
- ACCESS_ONCE(list->prev) = list;
+ WRITE_ONCE(list->next, list);
+ WRITE_ONCE(list->prev, list);
}
/*
@@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
#define list_first_or_null_rcu(ptr, type, member) \
({ \
struct list_head *__ptr = (ptr); \
- struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+ struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu(pos, member) \
- for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
- typeof(*(pos)), member); \
+ for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member); \
pos; \
- pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
- typeof(*(pos)), member))
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
- for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
- typeof(*(pos)), member); \
+ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member); \
pos; \
- pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
- typeof(*(pos)), member))
+ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
/**
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
@@ -549,8 +549,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
*/
#define hlist_for_each_entry_from_rcu(pos, member) \
for (; pos; \
- pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
- typeof(*(pos)), member))
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ed4f593..4cf5f51 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,10 +44,32 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/ktime.h>
+
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
+#ifdef CONFIG_TINY_RCU
+/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */
+{
+ return false;
+}
+
+static inline void rcu_expedite_gp(void)
+{
+}
+
+static inline void rcu_unexpedite_gp(void)
+{
+}
+#else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_expedited(void); /* Internal RCU use. */
+void rcu_expedite_gp(void);
+void rcu_unexpedite_gp(void);
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
enum rcutorture_type {
RCU_FLAVOR,
RCU_BH_FLAVOR,
@@ -195,6 +217,15 @@ void call_rcu_sched(struct rcu_head *head,
void synchronize_sched(void);
+/*
+ * Structure allowing asynchronous waiting on RCU.
+ */
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
+void wakeme_after_rcu(struct rcu_head *head);
+
/**
* call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
* @head: structure to be used for queueing the RCU updates.
@@ -258,14 +289,13 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
+void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
+int rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -331,12 +361,13 @@ static inline void rcu_init_nohz(void)
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
- if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+ rcu_all_qs(); \
+ if (READ_ONCE((t)->rcu_tasks_holdout)) \
+ WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */
/**
@@ -576,17 +607,17 @@ static inline void rcu_preempt_sleep_check(void)
#define __rcu_access_pointer(p, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ /* Dependency order vs. p above. */ \
+ typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- ((typeof(*p) __force __kernel *)(_________p1)); \
+ ((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
@@ -595,21 +626,6 @@ static inline void rcu_preempt_sleep_check(void)
((typeof(*p) __force __kernel *)(p)); \
})
-#define __rcu_access_index(p, space) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_dereference_sparse(p, space); \
- (_________p1); \
-})
-#define __rcu_dereference_index_check(p, c) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_lockdep_assert(c, \
- "suspicious rcu_dereference_index_check() usage"); \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
-
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
@@ -617,21 +633,6 @@ static inline void rcu_preempt_sleep_check(void)
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
- */
-#define lockless_dereference(p) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
-
-/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
@@ -669,7 +670,7 @@ static inline void rcu_preempt_sleep_check(void)
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
@@ -719,7 +720,7 @@ static inline void rcu_preempt_sleep_check(void)
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
+ __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
@@ -729,7 +730,7 @@ static inline void rcu_preempt_sleep_check(void)
* This is the RCU-bh counterpart to rcu_dereference_check().
*/
#define rcu_dereference_bh_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
+ __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -739,7 +740,7 @@ static inline void rcu_preempt_sleep_check(void)
* This is the RCU-sched counterpart to rcu_dereference_check().
*/
#define rcu_dereference_sched_check(p, c) \
- __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+ __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu)
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
@@ -754,47 +755,12 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
/**
- * rcu_access_index() - fetch RCU index with no dereferencing
- * @p: The index to read
- *
- * Return the value of the specified RCU-protected index, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
- * when the value of this index is accessed, but the index is not
- * dereferenced, for example, when testing an RCU-protected index against
- * -1. Although rcu_access_index() may also be used in cases where
- * update-side locks prevent the value of the index from changing, you
- * should instead use rcu_dereference_index_protected() for this use case.
- */
-#define rcu_access_index(p) __rcu_access_index((p), __rcu)
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices. Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer. Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections. If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
- __rcu_dereference_index_check((p), (c))
-
-/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does -not-
* prevent the compiler from repeating this reference or combining it
@@ -932,9 +898,9 @@ static inline void rcu_read_unlock(void)
{
rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
- rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
+ rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
/**
@@ -1120,13 +1086,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
-#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
+#ifdef CONFIG_TINY_RCU
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
- *delta_jiffies = ULONG_MAX;
+ *nextevt = KTIME_MAX;
return 0;
}
-#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+#endif /* #ifdef CONFIG_TINY_RCU */
#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
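
The newly exposed struct rcu_synchronize / wakeme_after_rcu() pair lets a caller open-code the pattern that synchronize_rcu() uses internally and overlap the grace-period wait with other work; a hedged sketch, with the example_* name hypothetical:

#include <linux/completion.h>
#include <linux/rcupdate.h>

static void example_async_grace_period(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_rcu(&rs.head, wakeme_after_rcu);	/* queue the wakeup */

	/* ... unrelated work overlapping the grace period ... */

	wait_for_completion(&rs.completion);	/* grace period has elapsed */
	destroy_rcu_head_on_stack(&rs.head);
}
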
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 0e53662..3df6c1e 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
}
/*
- * Return the number of grace periods.
+ * Return the number of grace periods started.
*/
-static inline long rcu_batches_completed(void)
+static inline unsigned long rcu_batches_started(void)
{
return 0;
}
/*
- * Return the number of bottom-half grace periods.
+ * Return the number of bottom-half grace periods started.
*/
-static inline long rcu_batches_completed_bh(void)
+static inline unsigned long rcu_batches_started_bh(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_bh(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of sched grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_sched(void)
{
return 0;
}
@@ -127,6 +159,22 @@ static inline void rcu_cpu_stall_reset(void)
{
}
+static inline void rcu_idle_enter(void)
+{
+}
+
+static inline void rcu_idle_exit(void)
+{
+}
+
+static inline void rcu_irq_enter(void)
+{
+}
+
+static inline void rcu_irq_exit(void)
+{
+}
+
static inline void exit_rcu(void)
{
}
@@ -154,7 +202,10 @@ static inline bool rcu_is_watching(void)
return true;
}
-
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+static inline void rcu_all_qs(void)
+{
+}
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5295379..4568791 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -31,9 +31,7 @@
#define __LINUX_RCUTREE_H
void rcu_note_context_switch(void);
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *delta_jiffies);
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
+int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
/*
@@ -81,15 +79,23 @@ void cond_synchronize_rcu(unsigned long oldstate);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
-long rcu_batches_completed(void);
-long rcu_batches_completed_bh(void);
-long rcu_batches_completed_sched(void);
+unsigned long rcu_batches_started(void);
+unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_started_sched(void);
+unsigned long rcu_batches_completed(void);
+unsigned long rcu_batches_completed_bh(void);
+unsigned long rcu_batches_completed_sched(void);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
+
void exit_rcu(void);
void rcu_scheduler_starting(void);
@@ -97,4 +103,6 @@ extern int rcu_scheduler_active __read_mostly;
bool rcu_is_watching(void);
+void rcu_all_qs(void);
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 67fc8fc..a7ff409 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -70,7 +70,8 @@ void ctrl_alt_del(void);
#define POWEROFF_CMD_PATH_LEN 256
extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
-extern int orderly_poweroff(bool force);
+extern void orderly_poweroff(bool force);
+extern void orderly_reboot(void);
/*
* Emergency restart, callable from an interrupt handler.
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4419b99..59c55ea 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -433,6 +433,8 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
int regmap_get_val_bytes(struct regmap *map);
+int regmap_get_max_register(struct regmap *map);
+int regmap_get_reg_stride(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
@@ -468,7 +470,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
*
* @reg: Offset of the register within the regmap bank
* @lsb: lsb of the register field.
- * @reg: msb of the register field.
+ * @msb: msb of the register field.
* @id_size: port size if it has some ports
* @id_offset: address offset for each ports
*/
@@ -676,6 +678,18 @@ static inline int regmap_get_val_bytes(struct regmap *map)
return -EINVAL;
}
+static inline int regmap_get_max_register(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_reg_stride(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regcache_sync(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
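
A hypothetical use of the two new introspection helpers, e.g. for a generic register dump; example_dump_regs() and its callers are not part of the patch:

#include <linux/device.h>
#include <linux/regmap.h>

static void example_dump_regs(struct device *dev, struct regmap *map)
{
	int max = regmap_get_max_register(map);
	int stride = regmap_get_reg_stride(map);
	unsigned int reg, val;

	if (max < 0 || stride <= 0)
		return;		/* map not configured for this */

	for (reg = 0; reg <= max; reg += stride)
		if (!regmap_read(map, reg, &val))
			dev_info(dev, "%#x: %#x\n", reg, val);
}
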
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index b6c4909..15fa8f2 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -19,6 +19,19 @@
#include <linux/regulator/machine.h>
enum {
+ ACT8600_ID_DCDC1,
+ ACT8600_ID_DCDC2,
+ ACT8600_ID_DCDC3,
+ ACT8600_ID_SUDCDC4,
+ ACT8600_ID_LDO5,
+ ACT8600_ID_LDO6,
+ ACT8600_ID_LDO7,
+ ACT8600_ID_LDO8,
+ ACT8600_ID_LDO9,
+ ACT8600_ID_LDO10,
+};
+
+enum {
ACT8865_ID_DCDC1,
ACT8865_ID_DCDC2,
ACT8865_ID_DCDC3,
@@ -46,6 +59,7 @@ enum {
};
enum {
+ ACT8600,
ACT8865,
ACT8846,
};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index d17e1ff..f8a689e 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -114,7 +114,7 @@ struct regmap;
#define REGULATOR_EVENT_OVER_TEMP 0x10
#define REGULATOR_EVENT_FORCE_DISABLE 0x20
#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
-#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_DISABLE 0x80
#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
#define REGULATOR_EVENT_PRE_DISABLE 0x400
@@ -238,7 +238,7 @@ int regulator_get_current_limit(struct regulator *regulator);
int regulator_set_mode(struct regulator *regulator, unsigned int mode);
unsigned int regulator_get_mode(struct regulator *regulator);
-int regulator_set_optimum_mode(struct regulator *regulator, int load_uA);
+int regulator_set_load(struct regulator *regulator, int load_uA);
int regulator_allow_bypass(struct regulator *regulator, bool allow);
@@ -252,8 +252,12 @@ int regulator_list_hardware_vsel(struct regulator *regulator,
/* regulator notifier block */
int regulator_register_notifier(struct regulator *regulator,
struct notifier_block *nb);
+int devm_regulator_register_notifier(struct regulator *regulator,
+ struct notifier_block *nb);
int regulator_unregister_notifier(struct regulator *regulator,
struct notifier_block *nb);
+void devm_regulator_unregister_notifier(struct regulator *regulator,
+ struct notifier_block *nb);
/* driver data - core doesn't touch */
void *regulator_get_drvdata(struct regulator *regulator);
@@ -479,8 +483,7 @@ static inline unsigned int regulator_get_mode(struct regulator *regulator)
return REGULATOR_MODE_NORMAL;
}
-static inline int regulator_set_optimum_mode(struct regulator *regulator,
- int load_uA)
+static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
return REGULATOR_MODE_NORMAL;
}
@@ -515,12 +518,24 @@ static inline int regulator_register_notifier(struct regulator *regulator,
return 0;
}
+static inline int devm_regulator_register_notifier(struct regulator *regulator,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
static inline int regulator_unregister_notifier(struct regulator *regulator,
struct notifier_block *nb)
{
return 0;
}
+static inline int devm_regulator_unregister_notifier(struct regulator *regulator,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
static inline void *regulator_get_drvdata(struct regulator *regulator)
{
return NULL;
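
For consumers, the rename from regulator_set_optimum_mode() to regulator_set_load() keeps the calling convention of reporting an expected current draw; a hypothetical sketch (the example_* name and the 15 mA figure are made up):

#include <linux/regulator/consumer.h>

static int example_power_up(struct regulator *vdd)
{
	int ret;

	/* Tell the core how much current we expect to draw while active. */
	ret = regulator_set_load(vdd, 15000);	/* 15 mA, in uA */
	if (ret < 0)
		return ret;

	return regulator_enable(vdd);
}
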
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5479394..5dd65ac 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -32,6 +32,8 @@ struct da9211_pdata {
* 2 : 2 phase 2 buck
*/
int num_buck;
+ int gpio_ren[DA9211_MAX_REGULATORS];
+ struct device_node *reg_node[DA9211_MAX_REGULATORS];
struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
};
#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 5f1e9ca..4db9fbe 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -21,6 +21,7 @@
struct regmap;
struct regulator_dev;
+struct regulator_config;
struct regulator_init_data;
struct regulator_enable_gpio;
@@ -90,6 +91,7 @@ struct regulator_linear_range {
* @set_current_limit: Configure a limit for a current-limited regulator.
* The driver should select the current closest to max_uA.
* @get_current_limit: Get the configured limit for a current-limited regulator.
+ * @set_input_current_limit: Configure an input limit.
*
* @set_mode: Set the configured operating mode for the regulator.
* @get_mode: Get the configured operating mode for the regulator.
@@ -97,6 +99,7 @@ struct regulator_linear_range {
* REGULATOR_STATUS value (or negative errno)
* @get_optimum_mode: Get the most efficient operating mode for the regulator
* when running with the specified parameters.
+ * @set_load: Set the load for the regulator.
*
* @set_bypass: Set the regulator in bypass mode.
* @get_bypass: Get the regulator bypass mode state.
@@ -109,6 +112,7 @@ struct regulator_linear_range {
* to stabilise after being set to a new value, in microseconds.
* The function provides the from and to voltage selector, the
* function should return the worst case.
+ * @set_soft_start: Enable soft start for the regulator.
*
* @set_suspend_voltage: Set the voltage for the regulator when the system
* is suspended.
@@ -119,6 +123,9 @@ struct regulator_linear_range {
* @set_suspend_mode: Set the operating mode for the regulator when the
* system is suspended.
*
+ * @set_pull_down: Configure the regulator to pull down when the regulator
+ * is disabled.
+ *
* This struct describes regulator operations which can be implemented by
* regulator chip drivers.
*/
@@ -140,6 +147,8 @@ struct regulator_ops {
int min_uA, int max_uA);
int (*get_current_limit) (struct regulator_dev *);
+ int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
+
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
int (*disable) (struct regulator_dev *);
@@ -156,6 +165,8 @@ struct regulator_ops {
unsigned int old_selector,
unsigned int new_selector);
+ int (*set_soft_start) (struct regulator_dev *);
+
/* report regulator status ... most other accessors report
* control inputs, this reports results of combining inputs
* from Linux (and other sources) with the actual load.
@@ -166,6 +177,8 @@ struct regulator_ops {
/* get most efficient regulator operating mode for load */
unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV,
int output_uV, int load_uA);
+ /* set the load on the regulator */
+ int (*set_load)(struct regulator_dev *, int load_uA);
/* control and report on bypass mode */
int (*set_bypass)(struct regulator_dev *dev, bool enable);
@@ -183,6 +196,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+
+ int (*set_pull_down) (struct regulator_dev *);
};
/*
@@ -205,6 +220,15 @@ enum regulator_type {
* @supply_name: Identifying the regulator supply
* @of_match: Name used to identify regulator in DT.
* @regulators_node: Name of node containing regulator definitions in DT.
+ * @of_parse_cb: Optional callback called only if of_match is present.
+ * Will be called for each regulator parsed from DT, during
+ * init_data parsing.
+ * The regulator_config passed as argument to the callback will
+ * be a copy of config passed to regulator_register, valid only
+ * for this particular call. Callback may freely change the
+ * config but it cannot store it for later usage.
+ * Callback should return 0 on success or negative ERRNO
+ * indicating failure.
* @id: Numerical identifier for the regulator.
* @ops: Regulator operations table.
* @irq: Interrupt number for the regulator.
@@ -251,6 +275,9 @@ struct regulator_desc {
const char *supply_name;
const char *of_match;
const char *regulators_node;
+ int (*of_parse_cb)(struct device_node *,
+ const struct regulator_desc *,
+ struct regulator_config *);
int id;
bool continuous_voltage_range;
unsigned n_voltages;
@@ -303,7 +330,7 @@ struct regulator_desc {
* @driver_data: private regulator data
* @of_node: OpenFirmware node to parse for device tree bindings (may be
* NULL).
- * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is
+ * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
* insufficient.
* @ena_gpio_initialized: GPIO controlling regulator enable was properly
* initialized, meaning that >= 0 is a valid gpio
@@ -354,6 +381,7 @@ struct regulator_dev {
struct device dev;
struct regulation_constraints *constraints;
struct regulator *supply; /* for tree */
+ const char *supply_name;
struct regmap *regmap;
struct delayed_work disable_work;
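
A driver-side sketch (not from the patch) of the new of_parse_cb hook; the property names and example_* identifiers are hypothetical, and most of the descriptor (ops, voltage ranges) is omitted:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>

static int example_of_parse_cb(struct device_node *np,
			       const struct regulator_desc *desc,
			       struct regulator_config *config)
{
	/* Reject configurations this hypothetical chip cannot honour. */
	if (of_property_read_bool(np, "example,unsupported-mode"))
		return -EINVAL;
	return 0;
}

static const struct regulator_desc example_desc = {
	.name		 = "example-buck",
	.of_match	 = "buck",
	.regulators_node = "regulators",
	.of_parse_cb	 = example_of_parse_cb,
	.id		 = 0,
	.type		 = REGULATOR_VOLTAGE,
	.owner		 = THIS_MODULE,
	/* .ops, n_voltages, voltage ranges, etc. omitted for brevity */
};
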
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0b08d05..b11be12 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -75,6 +75,8 @@ struct regulator_state {
*
* @min_uA: Smallest current consumers may set.
* @max_uA: Largest current consumers may set.
+ * @ilim_uA: Maximum input current.
+ * @system_load: Load that isn't captured by any consumer requests.
*
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
@@ -86,6 +88,8 @@ struct regulator_state {
* applied.
* @apply_uV: Apply the voltage constraint when initialising.
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
+ * @soft_start: Enable soft start so that voltage ramps slowly.
+ * @pull_down: Enable pull down when regulator is disabled.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
@@ -111,6 +115,9 @@ struct regulation_constraints {
/* current output range (inclusive) - for current control */
int min_uA;
int max_uA;
+ int ilim_uA;
+
+ int system_load;
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
@@ -138,6 +145,8 @@ struct regulation_constraints {
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
unsigned apply_uV:1; /* apply uV constraint if min == max */
unsigned ramp_disable:1; /* disable ramp delay */
+ unsigned soft_start:1; /* ramp voltage slowly */
+ unsigned pull_down:1; /* pull down resistor when regulator off */
};
/**
@@ -191,15 +200,22 @@ struct regulator_init_data {
void *driver_data; /* core does not touch this */
};
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
-
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
+int regulator_suspend_prepare(suspend_state_t state);
+int regulator_suspend_finish(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
+static inline int regulator_suspend_prepare(suspend_state_t state)
+{
+ return 0;
+}
+static inline int regulator_suspend_finish(void)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index f8acc05..f6a8a16 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -58,6 +58,9 @@
* control signal from EN input pin. If it is false then
* voltage output will be enabled/disabled through EN bit of
* device register.
+ * @enable_gpio: Enable GPIO. If the EN pin is controlled through a GPIO from
+ *		the host, the GPIO number can be provided here. If no GPIO
+ *		controls the EN pin, it should be -1.
* @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic.
* @dvs_def_state: Default state of dvs. 1 if it is high else 0.
*/
@@ -65,6 +68,7 @@ struct max8973_regulator_platform_data {
struct regulator_init_data *reg_init_data;
unsigned long control_flags;
bool enable_ext_control;
+ int enable_gpio;
int dvs_gpio;
unsigned dvs_def_state:1;
};
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644
index 0000000..30cc596
--- /dev/null
+++ b/include/linux/regulator/mt6397-regulator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu <flora.fu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6397_H
+#define __LINUX_REGULATOR_MT6397_H
+
+enum {
+ MT6397_ID_VPCA15 = 0,
+ MT6397_ID_VPCA7,
+ MT6397_ID_VSRAMCA15,
+ MT6397_ID_VSRAMCA7,
+ MT6397_ID_VCORE,
+ MT6397_ID_VGPU,
+ MT6397_ID_VDRM,
+ MT6397_ID_VIO18 = 7,
+ MT6397_ID_VTCXO,
+ MT6397_ID_VA28,
+ MT6397_ID_VCAMA,
+ MT6397_ID_VIO28,
+ MT6397_ID_VUSB,
+ MT6397_ID_VMC,
+ MT6397_ID_VMCH,
+ MT6397_ID_VEMC3V3,
+ MT6397_ID_VGP1,
+ MT6397_ID_VGP2,
+ MT6397_ID_VGP3,
+ MT6397_ID_VGP4,
+ MT6397_ID_VGP5,
+ MT6397_ID_VGP6,
+ MT6397_ID_VIBR,
+ MT6397_ID_RG_MAX,
+};
+
+#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX
+#define MT6397_REGULATOR_ID97 0x97
+#define MT6397_REGULATOR_ID91 0x91
+
+#endif /* __LINUX_REGULATOR_MT6397_H */
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index 364f7a7..70c6c66 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -49,6 +49,20 @@
#define PFUZE200_VGEN5 11
#define PFUZE200_VGEN6 12
+#define PFUZE3000_SW1A 0
+#define PFUZE3000_SW1B 1
+#define PFUZE3000_SW2 2
+#define PFUZE3000_SW3 3
+#define PFUZE3000_SWBST 4
+#define PFUZE3000_VSNVS 5
+#define PFUZE3000_VREFDDR 6
+#define PFUZE3000_VLDO1 7
+#define PFUZE3000_VLDO2 8
+#define PFUZE3000_VCCSD 9
+#define PFUZE3000_V33 10
+#define PFUZE3000_VLDO3 11
+#define PFUZE3000_VLDO4 12
+
struct regulator_init_data;
struct pfuze_regulator_platform_data {
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9e7e745..9c4e138 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -36,11 +36,11 @@
#define REMOTEPROC_H
#include <linux/types.h>
-#include <linux/klist.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/completion.h>
#include <linux/idr.h>
+#include <linux/of.h>
/**
* struct resource_table - firmware resource table header
@@ -330,11 +330,13 @@ struct rproc;
* @start: power on the device and boot it
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
+ * @da_to_va: optional platform hook to perform address translations
*/
struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
+ void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
};
/**
@@ -375,7 +377,7 @@ enum rproc_crash_type {
/**
* struct rproc - represents a physical remote processor device
- * @node: klist node of this rproc object
+ * @node: list node of this rproc object
* @domain: iommu domain
* @name: human readable name of the rproc
* @firmware: name of firmware file to be loaded
@@ -404,9 +406,10 @@ enum rproc_crash_type {
* @table_ptr: pointer to the resource table in effect
* @cached_table: copy of the resource table
* @table_csum: checksum of the resource table
+ * @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
- struct klist_node node;
+ struct list_head node;
struct iommu_domain *domain;
const char *name;
const char *firmware;
@@ -435,6 +438,7 @@ struct rproc {
struct resource_table *table_ptr;
struct resource_table *cached_table;
u32 table_csum;
+ bool has_iommu;
};
/* we currently support only two vrings per rvdev */
@@ -479,6 +483,7 @@ struct rproc_vdev {
u32 rsc_offset;
};
+struct rproc *rproc_get_by_phandle(phandle phandle);
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len);
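
As a hypothetical client of the new rproc_get_by_phandle() lookup (the "example,rproc" property name is made up):

#include <linux/of.h>
#include <linux/remoteproc.h>

static int example_attach_rproc(struct device_node *np)
{
	struct rproc *rproc;
	phandle ph;

	if (of_property_read_u32(np, "example,rproc", &ph))
		return -EINVAL;

	rproc = rproc_get_by_phandle(ph);
	if (!rproc)
		return -EPROBE_DEFER;	/* remote processor not registered yet */

	return rproc_boot(rproc);
}
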
diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h
new file mode 100644
index 0000000..bb4af7b
--- /dev/null
+++ b/include/linux/reset/bcm63xx_pmb.h
@@ -0,0 +1,88 @@
+/*
+ * Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset)
+ *
+ * Copyright (C) 2015, Broadcom Corporation
+ * Author: Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __BCM63XX_PMB_H
+#define __BCM63XX_PMB_H
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+/* PMB Master controller register */
+#define PMB_CTRL 0x00
+#define PMC_PMBM_START (1 << 31)
+#define PMC_PMBM_TIMEOUT (1 << 30)
+#define PMC_PMBM_SLAVE_ERR (1 << 29)
+#define PMC_PMBM_BUSY (1 << 28)
+#define PMC_PMBM_READ (0 << 20)
+#define PMC_PMBM_WRITE (1 << 20)
+#define PMB_WR_DATA 0x04
+#define PMB_TIMEOUT 0x08
+#define PMB_RD_DATA 0x0C
+
+#define PMB_BUS_ID_SHIFT 8
+
+/* Perform the low-level PMB master operation, shared between reads and
+ * writes.
+ */
+static inline int __bpcm_do_op(void __iomem *master, unsigned int addr,
+ u32 off, u32 op)
+{
+ unsigned int timeout = 1000;
+ u32 cmd;
+
+ cmd = (PMC_PMBM_START | op | (addr & 0xff) << 12 | off);
+ writel(cmd, master + PMB_CTRL);
+ do {
+ cmd = readl(master + PMB_CTRL);
+ if (!(cmd & PMC_PMBM_START))
+ return 0;
+
+ if (cmd & PMC_PMBM_SLAVE_ERR)
+ return -EIO;
+
+ if (cmd & PMC_PMBM_TIMEOUT)
+ return -ETIMEDOUT;
+
+ udelay(1);
+ } while (timeout-- > 0);
+
+ return -ETIMEDOUT;
+}
+
+static inline int bpcm_rd(void __iomem *master, unsigned int addr,
+ u32 off, u32 *val)
+{
+ int ret = 0;
+
+ ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_READ);
+ *val = readl(master + PMB_RD_DATA);
+
+ return ret;
+}
+
+static inline int bpcm_wr(void __iomem *master, unsigned int addr,
+ u32 off, u32 val)
+{
+ int ret = 0;
+
+ writel(val, master + PMB_WR_DATA);
+ ret = __bpcm_do_op(master, addr, off >> 2, PMC_PMBM_WRITE);
+
+ return ret;
+}
+
+#endif /* __BCM63XX_PMB_H */
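
A small hedged example of combining the new bpcm_rd()/bpcm_wr() accessors into a read-modify-write; it assumes master has been ioremap()ed and that addr/offset come from the SoC documentation.

#include <linux/reset/bcm63xx_pmb.h>

static int example_pmb_set_bits(void __iomem *master, unsigned int addr,
				u32 offset, u32 bits)
{
	u32 val;
	int ret;

	ret = bpcm_rd(master, addr, offset, &val);
	if (ret)
		return ret;

	return bpcm_wr(master, addr, offset, val | bits);
}
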
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
new file mode 100644
index 0000000..e2bf63d
--- /dev/null
+++ b/include/linux/resource_ext.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Jiang Liu <jiang.liu@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _LINUX_RESOURCE_EXT_H
+#define _LINUX_RESOURCE_EXT_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+/* Represent resource window for bridge devices */
+struct resource_win {
+ struct resource res; /* In master (CPU) address space */
+ resource_size_t offset; /* Translation offset for bridge */
+};
+
+/*
+ * Common resource list management data structure and interfaces to support
+ * ACPI, PNP and PCI host bridge etc.
+ */
+struct resource_entry {
+ struct list_head node;
+ struct resource *res; /* In master (CPU) address space */
+ resource_size_t offset; /* Translation offset for bridge */
+ struct resource __res; /* Default storage for res */
+};
+
+extern struct resource_entry *
+resource_list_create_entry(struct resource *res, size_t extra_size);
+extern void resource_list_free(struct list_head *head);
+
+static inline void resource_list_add(struct resource_entry *entry,
+ struct list_head *head)
+{
+ list_add(&entry->node, head);
+}
+
+static inline void resource_list_add_tail(struct resource_entry *entry,
+ struct list_head *head)
+{
+ list_add_tail(&entry->node, head);
+}
+
+static inline void resource_list_del(struct resource_entry *entry)
+{
+ list_del(&entry->node);
+}
+
+static inline void resource_list_free_entry(struct resource_entry *entry)
+{
+ kfree(entry);
+}
+
+static inline void
+resource_list_destroy_entry(struct resource_entry *entry)
+{
+ resource_list_del(entry);
+ resource_list_free_entry(entry);
+}
+
+#define resource_list_for_each_entry(entry, list) \
+ list_for_each_entry((entry), (list), node)
+
+#define resource_list_for_each_entry_safe(entry, tmp, list) \
+ list_for_each_entry_safe((entry), (tmp), (list), node)
+
+#endif /* _LINUX_RESOURCE_EXT_H */
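
A host-bridge-style sketch (not part of the patch) of the resource list helpers; example_* names are hypothetical and resource_list_create_entry() is assumed to fall back to the embedded __res storage when no resource is passed.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/resource_ext.h>

static int example_add_window(struct list_head *resources,
			      resource_size_t start, resource_size_t size,
			      resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(NULL, 0);
	if (!entry)
		return -ENOMEM;

	entry->res->start = start;
	entry->res->end = start + size - 1;
	entry->res->flags = IORESOURCE_MEM;
	entry->offset = offset;
	resource_list_add_tail(entry, resources);
	return 0;
}

static void example_walk_and_free(struct list_head *resources)
{
	struct resource_entry *entry;

	resource_list_for_each_entry(entry, resources)
		pr_info("window %pR (offset %pa)\n", entry->res, &entry->offset);

	resource_list_free(resources);
}
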
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index b93fd89..843ceca 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,14 +1,13 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
- * Based on the following paper by Josh Triplett, Paul E. McKenney
- * and Jonathan Walpole:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
* Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,21 +17,81 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
-#include <linux/rculist.h>
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/jhash.h>
+#include <linux/list_nulls.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+
+/*
+ * The end of the chain is marked with a special nulls marks which has
+ * the following format:
+ *
+ * +-------+-----------------------------------------------------+-+
+ * | Base | Hash |1|
+ * +-------+-----------------------------------------------------+-+
+ *
+ * Base (4 bits) : Reserved to distinguish between multiple tables.
+ * Specified via &struct rhashtable_params.nulls_base.
+ * Hash (27 bits): Full hash (unmasked) of first element added to bucket
+ * 1 (1 bit) : Nulls marker (always set)
+ *
+ * The remaining bits of the next pointer remain unused for now.
+ */
+#define RHT_BASE_BITS 4
+#define RHT_HASH_BITS 27
+#define RHT_BASE_SHIFT RHT_HASH_BITS
+
+/* Base bits plus 1 bit for nulls marker */
+#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
struct rhash_head {
struct rhash_head __rcu *next;
};
-#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)
-
+/**
+ * struct bucket_table - Table of hash buckets
+ * @size: Number of hash buckets
+ * @rehash: Current bucket being rehashed
+ * @hash_rnd: Random seed to fold into hash
+ * @locks_mask: Mask to apply before accessing locks[]
+ * @locks: Array of spinlocks protecting individual buckets
+ * @walkers: List of active walkers
+ * @rcu: RCU structure for freeing the table
+ * @future_tbl: Table under construction during rehashing
+ * @buckets: size * hash buckets
+ */
struct bucket_table {
- size_t size;
- struct rhash_head __rcu *buckets[];
+ unsigned int size;
+ unsigned int rehash;
+ u32 hash_rnd;
+ unsigned int locks_mask;
+ spinlock_t *locks;
+ struct list_head walkers;
+ struct rcu_head rcu;
+
+ struct bucket_table __rcu *future_tbl;
+
+ struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+};
+
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
};
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
struct rhashtable;
@@ -42,79 +101,259 @@ struct rhashtable;
* @key_len: Length of key
* @key_offset: Offset of key in struct to be hashed
* @head_offset: Offset of rhash_head in struct to be hashed
- * @hash_rnd: Seed to use while hashing
- * @max_shift: Maximum number of shifts while expanding
- * @min_shift: Minimum number of shifts while shrinking
- * @hashfn: Function to hash key
+ * @insecure_max_entries: Maximum number of entries (may be exceeded)
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
+ * @nulls_base: Base value to generate nulls marker
+ * @insecure_elasticity: Set to true to disable chain length checks
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
- * @grow_decision: If defined, may return true if table should expand
- * @shrink_decision: If defined, may return true if table should shrink
- * @mutex_is_held: Must return true if protecting mutex is held
+ * @obj_cmpfn: Function to compare key with object
*/
struct rhashtable_params {
size_t nelem_hint;
size_t key_len;
size_t key_offset;
size_t head_offset;
- u32 hash_rnd;
- size_t max_shift;
- size_t min_shift;
+ unsigned int insecure_max_entries;
+ unsigned int max_size;
+ unsigned int min_size;
+ u32 nulls_base;
+ bool insecure_elasticity;
+ bool automatic_shrinking;
+ size_t locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
- bool (*grow_decision)(const struct rhashtable *ht,
- size_t new_size);
- bool (*shrink_decision)(const struct rhashtable *ht,
- size_t new_size);
-#ifdef CONFIG_PROVE_LOCKING
- int (*mutex_is_held)(void *parent);
- void *parent;
-#endif
+ rht_obj_cmpfn_t obj_cmpfn;
};
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
* @nelems: Number of elements in table
- * @shift: Current size (1 << shift)
+ * @key_len: Key length for hashfn
+ * @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- size_t nelems;
- size_t shift;
+ atomic_t nelems;
+ unsigned int key_len;
+ unsigned int elasticity;
struct rhashtable_params p;
+ struct work_struct run_work;
+ struct mutex mutex;
+ spinlock_t lock;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @tbl: The table that we were walking over
+ */
+struct rhashtable_walker {
+ struct list_head list;
+ struct bucket_table *tbl;
};
+/**
+ * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhashtable_walker *walker;
+ unsigned int slot;
+ unsigned int skip;
+};
+
+static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
+{
+ return NULLS_MARKER(ht->p.nulls_base + hash);
+}
+
+#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
+ ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+
+static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
+{
+ return ((unsigned long) ptr & 1);
+}
+
+static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
+{
+ return ((unsigned long) ptr) >> 1;
+}
+
+static inline void *rht_obj(const struct rhashtable *ht,
+ const struct rhash_head *he)
+{
+ return (char *)he - ht->p.head_offset;
+}
+
+static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+}
+
+static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+{
+ unsigned int hash;
+
+ /* params must be equal to ht->p if it isn't constant. */
+ if (!__builtin_constant_p(params.key_len))
+ hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+ else if (params.key_len) {
+ unsigned int key_len = params.key_len;
+
+ if (params.hashfn)
+ hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ else if (key_len & (sizeof(u32) - 1))
+ hash = jhash(key, key_len, tbl->hash_rnd);
+ else
+ hash = jhash2(key, key_len / sizeof(u32),
+ tbl->hash_rnd);
+ } else {
+ unsigned int key_len = ht->p.key_len;
+
+ if (params.hashfn)
+ hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ else
+ hash = jhash(key, key_len, tbl->hash_rnd);
+ }
+
+ return rht_bucket_index(tbl, hash);
+}
+
+static inline unsigned int rht_head_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const struct rhash_head *he, const struct rhashtable_params params)
+{
+ const char *ptr = rht_obj(ht, he);
+
+ return likely(params.obj_hashfn) ?
+ rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
+ ht->p.key_len,
+ tbl->hash_rnd)) :
+ rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_75(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Expand table when exceeding 75% load */
+ return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_shrink_below_30(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Shrink table beneath 30% load */
+ return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+ tbl->size > ht->p.min_size;
+}
+
+/**
+ * rht_grow_above_100 - returns true if nelems > table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_100(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ return atomic_read(&ht->nelems) > tbl->size &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_grow_above_max - returns true if table is above maximum
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_max(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ return ht->p.insecure_max_entries &&
+ atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+}
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets that may contain entries
+ * which link to the same bucket of the old table during resizing.
+ * This simplifies the locking, as locking the bucket in both
+ * tables during a resize always guarantees protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return &tbl->locks[hash & tbl->locks_mask];
+}
+
#ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
+int lockdep_rht_mutex_is_held(struct rhashtable *ht);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
-static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
return 1;
}
-#endif /* CONFIG_PROVE_LOCKING */
-
-int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
-u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
-u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
-
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
-void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
- struct rhash_head __rcu **pprev);
+static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
+ u32 hash)
+{
+ return 1;
+}
+#endif /* CONFIG_PROVE_LOCKING */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
-int rhashtable_expand(struct rhashtable *ht);
-int rhashtable_shrink(struct rhashtable *ht);
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht);
-void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
-void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
- bool (*compare)(void *, void *), void *arg);
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+void rhashtable_walk_exit(struct rhashtable_iter *iter);
+int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
-void rhashtable_destroy(const struct rhashtable *ht);
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg);
+void rhashtable_destroy(struct rhashtable *ht);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -122,92 +361,462 @@ void rhashtable_destroy(const struct rhashtable *ht);
#define rht_dereference_rcu(p, ht) \
rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
-#define rht_entry(ptr, type, member) container_of(ptr, type, member)
-#define rht_entry_safe(ptr, type, member) \
-({ \
- typeof(ptr) __ptr = (ptr); \
- __ptr ? rht_entry(__ptr, type, member) : NULL; \
-})
+#define rht_dereference_bucket(p, tbl, hash) \
+ rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
+
+#define rht_dereference_bucket_rcu(p, tbl, hash) \
+ rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
-#define rht_next_entry_safe(pos, ht, member) \
-({ \
- pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \
- typeof(*(pos)), member) : NULL; \
-})
+#define rht_entry(tpos, pos, member) \
+ ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
+
+/**
+ * rht_for_each_continue - continue iterating over hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ */
+#define rht_for_each_continue(pos, head, tbl, hash) \
+ for (pos = rht_dereference_bucket(head, tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
* rht_for_each - iterate over hash chain
- * @pos: &struct rhash_head to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
*/
-#define rht_for_each(pos, head, ht) \
- for (pos = rht_dereference(head, ht); \
- pos; \
- pos = rht_dereference((pos)->next, ht))
+#define rht_for_each(pos, tbl, hash) \
+ rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_continue - continue iterating over hash chain
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ */
+#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
+ for (pos = rht_dereference_bucket(head, tbl, hash); \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
* rht_for_each_entry - iterate over hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*/
-#define rht_for_each_entry(pos, head, ht, member) \
- for (pos = rht_entry_safe(rht_dereference(head, ht), \
- typeof(*(pos)), member); \
- pos; \
- pos = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
+ tbl, hash, member)
/**
* rht_for_each_entry_safe - safely iterate over hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @n: type * to use for temporary next object storage
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @next: the &struct rhash_head to use as next in loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive allows for the looped code to
* remove the loop cursor from the list.
*/
-#define rht_for_each_entry_safe(pos, n, head, ht, member) \
- for (pos = rht_entry_safe(rht_dereference(head, ht), \
- typeof(*(pos)), member), \
- n = rht_next_entry_safe(pos, ht, member); \
- pos; \
- pos = n, \
- n = rht_next_entry_safe(pos, ht, member))
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
+ for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = next, \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL)
+
+/**
+ * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
+ for (({barrier(); }), \
+ pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rcu_dereference_raw(pos->next))
/**
* rht_for_each_rcu - iterate over rcu hash chain
- * @pos: &struct rhash_head to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @ht: pointer to your struct rhashtable
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
*
* This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_rcu(pos, head, ht) \
- for (pos = rht_dereference_rcu(head, ht); \
- pos; \
- pos = rht_dereference_rcu((pos)->next, ht))
+#define rht_for_each_rcu(pos, tbl, hash) \
+ rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+ for (({barrier(); }), \
+ pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
* rht_for_each_entry_rcu - iterate over rcu hash chain of given type
- * @pos: type * to use as a loop cursor.
- * @head: head of the hash chain (struct rhash_head *)
- * @member: name of the rhash_head within the hashable struct.
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
*
* This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu fkht mutation primitives such as rht_insert() as long as the
+ * the _rcu mutation primitives such as rhashtable_insert_fast() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_entry_rcu(pos, head, member) \
- for (pos = rht_entry_safe(rcu_dereference_raw(head), \
- typeof(*(pos)), member); \
- pos; \
- pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \
- typeof(*(pos)), member))
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+ tbl, hash, member)
+
+static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ struct rhashtable *ht = arg->ht;
+ const char *ptr = obj;
+
+ return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, inlined version
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Returns the first matching entry, or NULL if no entry with the given key exists.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ const struct bucket_table *tbl;
+ struct rhash_head *he;
+ unsigned int hash;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+ hash = rht_key_hashfn(ht, tbl, key, params);
+ rht_for_each_rcu(he, tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ rcu_read_unlock();
+ return rht_obj(ht, he);
+ }
+
+ /* Ensure we see any new tables. */
+ smp_rmb();
+
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(tbl))
+ goto restart;
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead */
+static inline int __rhashtable_insert_fast(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ struct bucket_table *tbl, *new_tbl;
+ struct rhash_head *head;
+ spinlock_t *lock;
+ unsigned int elasticity;
+ unsigned int hash;
+ int err;
+
+restart:
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* All insertions must grab the oldest table containing
+ * the hashed bucket that is yet to be rehashed.
+ */
+ for (;;) {
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
+
+ if (tbl->rehash <= hash)
+ break;
+
+ spin_unlock_bh(lock);
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ }
+
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(new_tbl)) {
+ err = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ if (err == -EAGAIN)
+ goto slow_path;
+ goto out;
+ }
+
+ err = -E2BIG;
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto out;
+
+ if (unlikely(rht_grow_above_100(ht, tbl))) {
+slow_path:
+ spin_unlock_bh(lock);
+ err = rhashtable_insert_rehash(ht);
+ rcu_read_unlock();
+ if (err)
+ return err;
+
+ goto restart;
+ }
+
+ err = -EEXIST;
+ elasticity = ht->elasticity;
+ rht_for_each(head, tbl, hash) {
+ if (key &&
+ unlikely(!(params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head)))))
+ goto out;
+ if (!--elasticity)
+ goto slow_path;
+ }
+
+ err = 0;
+
+ head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+ RCU_INIT_POINTER(obj->next, head);
+
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+
+ atomic_inc(&ht->nelems);
+ if (rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
+
+out:
+ spin_unlock_bh(lock);
+ rcu_read_unlock();
+
+ return err;
+}
+
+/**
+ * rhashtable_insert_fast - insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per-bucket spinlock to protect against concurrent mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% watermark checked by rht_grow_above_75(), bounded by
+ * the max_size parameter passed to rhashtable_init().
+ */
+static inline int rhashtable_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_insert_fast(ht, NULL, obj, params);
+}
+
+/**
+ * rhashtable_lookup_insert_fast - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed-key hash tables (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% watermark checked by rht_grow_above_75(), bounded by
+ * the max_size parameter passed to rhashtable_init().
+ */
+static inline int rhashtable_lookup_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(ht, obj);
+
+ BUG_ON(ht->p.obj_hashfn);
+
+ return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
+ params);
+}
+
+/**
+ * rhashtable_lookup_insert_key - search and insert object to hash table
+ * with explicit key
+ * @ht: hash table
+ * @key: key
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the 75% watermark checked by rht_grow_above_75(), bounded by
+ * the max_size parameter passed to rhashtable_init().
+ *
+ * Returns zero on success.
+ */
+static inline int rhashtable_lookup_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ return __rhashtable_insert_fast(ht, key, obj, params);
+}
+
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
+ struct rhashtable *ht, struct bucket_table *tbl,
+ struct rhash_head *obj, const struct rhashtable_params params)
+{
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ spinlock_t *lock;
+ unsigned int hash;
+ int err = -ENOENT;
+
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+
+ spin_lock_bh(lock);
+
+ pprev = &tbl->buckets[hash];
+ rht_for_each(he, tbl, hash) {
+ if (he != obj) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rcu_assign_pointer(*pprev, obj->next);
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(lock);
+
+ return err;
+}
+
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table if the automatic_shrinking option
+ * passed to rhashtable_init() is set and the load drops below 30%.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct bucket_table *tbl;
+ int err;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* Because we have already taken (and released) the bucket
+ * lock in old_tbl, if we find that future_tbl is not yet
+ * visible then that guarantees that the entry is still in
+ * the old tbl if it exists.
+ */
+ while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+ ;
+
+ if (err)
+ goto out;
+
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+
+out:
+ rcu_read_unlock();
+
+ return err;
+}
#endif /* _LINUX_RHASHTABLE_H */
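
The rewritten rhashtable API above drops the old grow/shrink callbacks in favour of a mostly-constant struct rhashtable_params and the inlined *_fast helpers. A minimal usage sketch, based only on the declarations in this hunk, follows; struct test_obj, test_params and test_rht() are illustrative assumptions, not anything added by this patch.

/* Hedged sketch of the new fixed-parameter rhashtable API. */
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct test_obj {
	u32			key;	/* value hashed via key_len/key_offset */
	struct rhash_head	node;	/* linkage located via head_offset */
};

static const struct rhashtable_params test_params = {
	.key_len	     = sizeof(u32),
	.key_offset	     = offsetof(struct test_obj, key),
	.head_offset	     = offsetof(struct test_obj, node),
	.automatic_shrinking = true,
};

static int test_rht(void)
{
	struct rhashtable ht;
	struct test_obj *obj;
	u32 key = 42;
	int err;

	err = rhashtable_init(&ht, &test_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		err = -ENOMEM;
		goto out;
	}
	obj->key = key;

	/* Passing the constant params lets the compiler resolve the hash
	 * function at build time (jhash2 here, as key_len is 4 bytes).
	 */
	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
	if (err) {
		kfree(obj);
		goto out;
	}

	if (rhashtable_lookup_fast(&ht, &key, test_params) != obj)
		err = -ENOENT;

	if (!rhashtable_remove_fast(&ht, &obj->node, test_params))
		kfree(obj);
out:
	rhashtable_destroy(&ht);
	return err;
}

When entries may still be present at teardown, rhashtable_free_and_destroy() with a free callback could replace the plain rhashtable_destroy() above.
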
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 6bda06f..cde976e 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -298,7 +298,7 @@ struct rio_id_table {
* struct rio_net - RIO network info
* @node: Node in global list of RIO networks
* @devices: List of devices in this network
- * @switches: List of switches in this netowrk
+ * @switches: List of switches in this network
* @mports: List of master ports accessing this network
* @hport: Default port for accessing this network
* @id: RIO network ID
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d9d7e7e..c89c53a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -105,14 +105,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
__put_anon_vma(anon_vma);
}
-static inline struct anon_vma *page_anon_vma(struct page *page)
-{
- if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
- PAGE_MAPPING_ANON)
- return NULL;
- return page_rmapping(page);
-}
-
static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
@@ -198,7 +190,7 @@ int page_referenced(struct page *, int is_locked,
int try_to_unmap(struct page *, enum ttu_flags flags);
/*
- * Called from mm/filemap_xip.c to unmap empty zero page
+ * Used by uprobes to replace a userspace page safely
*/
pte_t *__page_check_address(struct page *, struct mm_struct *,
unsigned long, spinlock_t **, int);
@@ -246,7 +238,6 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
* arg: passed to rmap_one() and invalid_vma()
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
- * file_nonlinear: for handling file nonlinear mapping
* anon_lock: for getting anon_lock by optimized way rather than default
* invalid_vma: for skipping uninterested vma
*/
@@ -255,7 +246,6 @@ struct rmap_walk_control {
int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
- int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
struct anon_vma *(*anon_lock)(struct page *page);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 6d6be09..3359f04 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -24,6 +24,14 @@ extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm);
ktime_t rtc_tm_to_ktime(struct rtc_time tm);
struct rtc_time rtc_ktime_to_tm(ktime_t kt);
+/*
+ * rtc_tm_sub - Return the difference in seconds.
+ */
+static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
+{
+ return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
+}
+
/**
* Deprecated. Use rtc_time64_to_tm().
*/
@@ -77,6 +85,7 @@ struct rtc_class_ops {
int (*read_alarm)(struct device *, struct rtc_wkalrm *);
int (*set_alarm)(struct device *, struct rtc_wkalrm *);
int (*proc)(struct device *, struct seq_file *);
+ int (*set_mmss64)(struct device *, time64_t secs);
int (*set_mmss)(struct device *, unsigned long secs);
int (*read_callback)(struct device *, int data);
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
@@ -100,8 +109,7 @@ struct rtc_timer {
/* flags */
#define RTC_DEV_BUSY 0
-struct rtc_device
-{
+struct rtc_device {
struct device dev;
struct module *owner;
@@ -160,8 +168,7 @@ extern void devm_rtc_device_unregister(struct device *dev,
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
-extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
-extern int rtc_set_ntp_time(struct timespec now);
+extern int rtc_set_ntp_time(struct timespec64 now);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
@@ -197,10 +204,10 @@ int rtc_register(rtc_task_t *task);
int rtc_unregister(rtc_task_t *task);
int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
-void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
-int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
- ktime_t expires, ktime_t period);
-int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer);
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
+ ktime_t expires, ktime_t period);
+void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
void rtc_timer_do_work(struct work_struct *work);
static inline bool is_leap_year(unsigned int year)
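
The rtc.h hunk above introduces a 64-bit set_mmss64 callback next to the 32-bit set_mmss, plus the rtc_tm_sub() helper for computing a difference in seconds. Below is a minimal sketch of how a driver might wire the new callback; the foo_rtc_* names and the low-level counter write are hypothetical and only illustrate the wiring.

/* Hedged sketch: a driver advertising the 64-bit set_mmss64 callback. */
#include <linux/rtc.h>

/* hypothetical low-level helper writing the hardware seconds counter */
static int foo_rtc_write_counter(struct device *dev, u64 secs)
{
	/* program the hardware counter with secs here */
	return 0;
}

static int foo_rtc_set_mmss64(struct device *dev, time64_t secs)
{
	/* secs arrives untruncated, unlike the unsigned long set_mmss path */
	return foo_rtc_write_counter(dev, (u64)secs);
}

static const struct rtc_class_ops foo_rtc_ops = {
	/* .read_time, .set_time and alarm callbacks omitted from this sketch */
	.set_mmss64 = foo_rtc_set_mmss64,
};

/* rtc_tm_sub() from the hunk above: seconds elapsed between two readings */
static time64_t foo_rtc_elapsed(struct rtc_time *now, struct rtc_time *then)
{
	return rtc_tm_sub(now, then);
}
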
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
new file mode 100644
index 0000000..e6337a5
--- /dev/null
+++ b/include/linux/rtc/ds1685.h
@@ -0,0 +1,375 @@
+/*
+ * Definitions for the registers, addresses, and platform data of the
+ * DS1685/DS1687-series RTC chips.
+ *
+ * This driver also works for the DS17X85/DS17X87 RTC chips. Functionally
+ * similar to the DS1685/DS1687, they support a few extra features which
+ * include larger, battery-backed NV-SRAM, burst-mode access, and an RTC
+ * write counter.
+ *
+ * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>.
+ * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>.
+ *
+ * References:
+ * DS1685/DS1687 3V/5V Real-Time Clocks, 19-5215, Rev 4/10.
+ * DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10.
+ * DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105.
+ * Application Note 90, Using the Multiplex Bus RTC Extended Features.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_RTC_DS1685_H_
+#define _LINUX_RTC_DS1685_H_
+
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct ds1685_priv - DS1685 private data structure.
+ * @dev: pointer to the rtc_device structure.
+ * @regs: iomapped base address pointer of the RTC registers.
+ * @regstep: padding/step size between registers (optional).
+ * @baseaddr: base address of the RTC device.
+ * @size: resource size.
+ * @lock: private lock variable for spin locking/unlocking.
+ * @work: private workqueue.
+ * @irq_num: IRQ number assigned to the RTC device.
+ * @prepare_poweroff: pointer to platform pre-poweroff function.
+ * @wake_alarm: pointer to platform wake alarm function.
+ * @post_ram_clear: pointer to platform post ram-clear function.
+ */
+struct ds1685_priv {
+ struct rtc_device *dev;
+ void __iomem *regs;
+ u32 regstep;
+ resource_size_t baseaddr;
+ size_t size;
+ spinlock_t lock;
+ struct work_struct work;
+ int irq_num;
+ bool bcd_mode;
+ bool no_irq;
+ bool uie_unsupported;
+ bool alloc_io_resources;
+ u8 (*read)(struct ds1685_priv *, int);
+ void (*write)(struct ds1685_priv *, int, u8);
+ void (*prepare_poweroff)(void);
+ void (*wake_alarm)(void);
+ void (*post_ram_clear)(void);
+};
+
+
+/**
+ * struct ds1685_rtc_platform_data - platform data structure.
+ * @plat_prepare_poweroff: platform-specific pre-poweroff function.
+ * @plat_wake_alarm: platform-specific wake alarm function.
+ * @plat_post_ram_clear: platform-specific post ram-clear function.
+ *
+ * If your platform needs to use a custom padding/step size between
+ * registers, or uses one or more of the extended interrupts and needs special
+ * handling, then include this header file in your platform definition and
+ * set regstep and the plat_* pointers as appropriate.
+ */
+struct ds1685_rtc_platform_data {
+ const u32 regstep;
+ const bool bcd_mode;
+ const bool no_irq;
+ const bool uie_unsupported;
+ const bool alloc_io_resources;
+ u8 (*plat_read)(struct ds1685_priv *, int);
+ void (*plat_write)(struct ds1685_priv *, int, u8);
+ void (*plat_prepare_poweroff)(void);
+ void (*plat_wake_alarm)(void);
+ void (*plat_post_ram_clear)(void);
+};
+
+
+/*
+ * Time Registers.
+ */
+#define RTC_SECS 0x00 /* Seconds 00-59 */
+#define RTC_SECS_ALARM 0x01 /* Alarm Seconds 00-59 */
+#define RTC_MINS 0x02 /* Minutes 00-59 */
+#define RTC_MINS_ALARM 0x03 /* Alarm Minutes 00-59 */
+#define RTC_HRS 0x04 /* Hours 01-12 AM/PM || 00-23 */
+#define RTC_HRS_ALARM 0x05 /* Alarm Hours 01-12 AM/PM || 00-23 */
+#define RTC_WDAY 0x06 /* Day of Week 01-07 */
+#define RTC_MDAY 0x07 /* Day of Month 01-31 */
+#define RTC_MONTH 0x08 /* Month 01-12 */
+#define RTC_YEAR 0x09 /* Year 00-99 */
+#define RTC_CENTURY 0x48 /* Century 00-99 */
+#define RTC_MDAY_ALARM 0x49 /* Alarm Day of Month 01-31 */
+
+
+/*
+ * Bit masks for the Time registers in BCD Mode (DM = 0).
+ */
+#define RTC_SECS_BCD_MASK 0x7f /* - x x x x x x x */
+#define RTC_MINS_BCD_MASK 0x7f /* - x x x x x x x */
+#define RTC_HRS_12_BCD_MASK 0x1f /* - - - x x x x x */
+#define RTC_HRS_24_BCD_MASK 0x3f /* - - x x x x x x */
+#define RTC_MDAY_BCD_MASK 0x3f /* - - x x x x x x */
+#define RTC_MONTH_BCD_MASK 0x1f /* - - - x x x x x */
+#define RTC_YEAR_BCD_MASK 0xff /* x x x x x x x x */
+
+/*
+ * Bit masks for the Time registers in BIN Mode (DM = 1).
+ */
+#define RTC_SECS_BIN_MASK 0x3f /* - - x x x x x x */
+#define RTC_MINS_BIN_MASK 0x3f /* - - x x x x x x */
+#define RTC_HRS_12_BIN_MASK 0x0f /* - - - - x x x x */
+#define RTC_HRS_24_BIN_MASK 0x1f /* - - - x x x x x */
+#define RTC_MDAY_BIN_MASK 0x1f /* - - - x x x x x */
+#define RTC_MONTH_BIN_MASK 0x0f /* - - - - x x x x */
+#define RTC_YEAR_BIN_MASK 0x7f /* - x x x x x x x */
+
+/*
+ * Bit masks common for the Time registers in BCD or BIN Mode.
+ */
+#define RTC_WDAY_MASK 0x07 /* - - - - - x x x */
+#define RTC_CENTURY_MASK 0xff /* x x x x x x x x */
+#define RTC_MDAY_ALARM_MASK 0xff /* x x x x x x x x */
+#define RTC_HRS_AMPM_MASK BIT(7) /* Mask for the AM/PM bit */
+
+
+
+/*
+ * Control Registers.
+ */
+#define RTC_CTRL_A 0x0a /* Control Register A */
+#define RTC_CTRL_B 0x0b /* Control Register B */
+#define RTC_CTRL_C 0x0c /* Control Register C */
+#define RTC_CTRL_D 0x0d /* Control Register D */
+#define RTC_EXT_CTRL_4A 0x4a /* Extended Control Register 4A */
+#define RTC_EXT_CTRL_4B 0x4b /* Extended Control Register 4B */
+
+
+/*
+ * Bit names in Control Register A.
+ */
+#define RTC_CTRL_A_UIP BIT(7) /* Update In Progress */
+#define RTC_CTRL_A_DV2 BIT(6) /* Countdown Chain */
+#define RTC_CTRL_A_DV1 BIT(5) /* Oscillator Enable */
+#define RTC_CTRL_A_DV0 BIT(4) /* Bank Select */
+#define RTC_CTRL_A_RS2 BIT(2) /* Rate-Selection Bit 2 */
+#define RTC_CTRL_A_RS3 BIT(3) /* Rate-Selection Bit 3 */
+#define RTC_CTRL_A_RS1 BIT(1) /* Rate-Selection Bit 1 */
+#define RTC_CTRL_A_RS0 BIT(0) /* Rate-Selection Bit 0 */
+#define RTC_CTRL_A_RS_MASK 0x0f /* RS3 + RS2 + RS1 + RS0 */
+
+/*
+ * Bit names in Control Register B.
+ */
+#define RTC_CTRL_B_SET BIT(7) /* SET Bit */
+#define RTC_CTRL_B_PIE BIT(6) /* Periodic-Interrupt Enable */
+#define RTC_CTRL_B_AIE BIT(5) /* Alarm-Interrupt Enable */
+#define RTC_CTRL_B_UIE BIT(4) /* Update-Ended Interrupt-Enable */
+#define RTC_CTRL_B_SQWE BIT(3) /* Square-Wave Enable */
+#define RTC_CTRL_B_DM BIT(2) /* Data Mode */
+#define RTC_CTRL_B_2412 BIT(1) /* 12-Hr/24-Hr Mode */
+#define RTC_CTRL_B_DSE BIT(0) /* Daylight Savings Enable */
+#define RTC_CTRL_B_PAU_MASK 0x70 /* PIE + AIE + UIE */
+
+
+/*
+ * Bit names in Control Register C.
+ *
+ * BIT(0), BIT(1), BIT(2), & BIT(3) are unused, always return 0, and cannot
+ * be written to.
+ */
+#define RTC_CTRL_C_IRQF BIT(7) /* Interrupt-Request Flag */
+#define RTC_CTRL_C_PF BIT(6) /* Periodic-Interrupt Flag */
+#define RTC_CTRL_C_AF BIT(5) /* Alarm-Interrupt Flag */
+#define RTC_CTRL_C_UF BIT(4) /* Update-Ended Interrupt Flag */
+#define RTC_CTRL_C_PAU_MASK 0x70 /* PF + AF + UF */
+
+
+/*
+ * Bit names in Control Register D.
+ *
+ * BIT(0) through BIT(6) are unused, always return 0, and cannot
+ * be written to.
+ */
+#define RTC_CTRL_D_VRT BIT(7) /* Valid RAM and Time */
+
+
+/*
+ * Bit names in Extended Control Register 4A.
+ *
+ * On the DS1685/DS1687/DS1689/DS1693, BIT(4) and BIT(5) are reserved for
+ * future use. They can be read from and written to, but have no effect
+ * on the RTC's operation.
+ *
+ * On the DS17x85/DS17x87, BIT(5) is Burst-Mode Enable (BME), and allows
+ * access to the extended NV-SRAM by automatically incrementing the address
+ * register when they are read from or written to.
+ */
+#define RTC_CTRL_4A_VRT2 BIT(7) /* Auxiliary Battery Status */
+#define RTC_CTRL_4A_INCR BIT(6) /* Increment-in-Progress Status */
+#define RTC_CTRL_4A_PAB BIT(3) /* Power-Active Bar Control */
+#define RTC_CTRL_4A_RF BIT(2) /* RAM-Clear Flag */
+#define RTC_CTRL_4A_WF BIT(1) /* Wake-Up Alarm Flag */
+#define RTC_CTRL_4A_KF BIT(0) /* Kickstart Flag */
+#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
+#define RTC_CTRL_4A_BME BIT(5) /* Burst-Mode Enable */
+#endif
+#define RTC_CTRL_4A_RWK_MASK 0x07 /* RF + WF + KF */
+
+
+/*
+ * Bit names in Extended Control Register 4B.
+ */
+#define RTC_CTRL_4B_ABE BIT(7) /* Auxiliary Battery Enable */
+#define RTC_CTRL_4B_E32K BIT(6) /* Enable 32.768kHz on SQW Pin */
+#define RTC_CTRL_4B_CS BIT(5) /* Crystal Select */
+#define RTC_CTRL_4B_RCE BIT(4) /* RAM Clear-Enable */
+#define RTC_CTRL_4B_PRS BIT(3) /* PAB Reset-Select */
+#define RTC_CTRL_4B_RIE BIT(2) /* RAM Clear-Interrupt Enable */
+#define RTC_CTRL_4B_WIE BIT(1) /* Wake-Up Alarm-Interrupt Enable */
+#define RTC_CTRL_4B_KSE BIT(0) /* Kickstart Interrupt-Enable */
+#define RTC_CTRL_4B_RWK_MASK 0x07 /* RIE + WIE + KSE */
+
+
+/*
+ * Misc register names in Bank 1.
+ *
+ * The DV0 bit in Control Register A must be set to 1 for these registers
+ * to become available, including Extended Control Registers 4A & 4B.
+ */
+#define RTC_BANK1_SSN_MODEL 0x40 /* Model Number */
+#define RTC_BANK1_SSN_BYTE_1 0x41 /* 1st Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_2 0x42 /* 2nd Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_3 0x43 /* 3rd Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_4 0x44 /* 4th Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_5 0x45 /* 5th Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_6 0x46 /* 6th Byte of Serial Number */
+#define RTC_BANK1_SSN_CRC 0x47 /* Serial CRC Byte */
+#define RTC_BANK1_RAM_DATA_PORT 0x53 /* Extended RAM Data Port */
+
+
+/*
+ * Model-specific registers in Bank 1.
+ *
+ * The addresses below differ depending on the model of the RTC chip
+ * selected in the kernel configuration. Not all of these features are
+ * supported in the main driver at present.
+ *
+ * DS1685/DS1687 - Extended NV-SRAM address (LSB only).
+ * DS1689/DS1693 - Vcc, Vbat, Pwr Cycle Counters & Customer-specific S/N.
+ * DS17x85/DS17x87 - Extended NV-SRAM addresses (MSB & LSB) & Write counter.
+ */
+#if defined(CONFIG_RTC_DRV_DS1685)
+#define RTC_BANK1_RAM_ADDR 0x50 /* NV-SRAM Addr */
+#elif defined(CONFIG_RTC_DRV_DS1689)
+#define RTC_BANK1_VCC_CTR_LSB 0x54 /* Vcc Counter Addr (LSB) */
+#define RTC_BANK1_VCC_CTR_MSB 0x57 /* Vcc Counter Addr (MSB) */
+#define RTC_BANK1_VBAT_CTR_LSB 0x58 /* Vbat Counter Addr (LSB) */
+#define RTC_BANK1_VBAT_CTR_MSB 0x5b /* Vbat Counter Addr (MSB) */
+#define RTC_BANK1_PWR_CTR_LSB 0x5c /* Pwr Cycle Counter Addr (LSB) */
+#define RTC_BANK1_PWR_CTR_MSB 0x5d /* Pwr Cycle Counter Addr (MSB) */
+#define RTC_BANK1_UNIQ_SN 0x60 /* Customer-specific S/N */
+#else /* DS17x85/DS17x87 */
+#define RTC_BANK1_RAM_ADDR_LSB 0x50 /* NV-SRAM Addr (LSB) */
+#define RTC_BANK1_RAM_ADDR_MSB 0x51 /* NV-SRAM Addr (MSB) */
+#define RTC_BANK1_WRITE_CTR 0x5e /* RTC Write Counter */
+#endif
+
+
+/*
+ * Model numbers.
+ *
+ * The DS1688/DS1691 and DS1689/DS1693 chips share the same model number
+ * and the manual doesn't indicate any major differences. As such, they
+ * are regarded as the same chip in this driver.
+ */
+#define RTC_MODEL_DS1685 0x71 /* DS1685/DS1687 */
+#define RTC_MODEL_DS17285 0x72 /* DS17285/DS17287 */
+#define RTC_MODEL_DS1689 0x73 /* DS1688/DS1691/DS1689/DS1693 */
+#define RTC_MODEL_DS17485 0x74 /* DS17485/DS17487 */
+#define RTC_MODEL_DS17885 0x78 /* DS17885/DS17887 */
+
+
+/*
+ * Periodic Interrupt Rates / Square-Wave Output Frequency
+ *
+ * Periodic rates are selected by setting the RS3-RS0 bits in Control
+ * Register A and enabled via either the E32K bit in Extended Control
+ * Register 4B or the SQWE bit in Control Register B.
+ *
+ * E32K overrides the settings of RS3-RS0 and outputs a frequency of 32768Hz
+ * on the SQW pin of the RTC chip. While there are 16 possible selections,
+ * the 1-of-16 decoder is only able to divide the base 32768Hz signal into 13
+ * smaller frequencies. The values 0x01 and 0x02 are not used and are
+ * synonymous with 0x08 and 0x09, respectively.
+ *
+ * When E32K is set to a logic 1, periodic interrupts are disabled and reading
+ * /dev/rtc will return -EINVAL. This also applies if the periodic interrupt
+ * frequency is set to 0Hz.
+ *
+ * Not currently used by the rtc-ds1685 driver because the RTC core removed
+ * support for hardware-generated periodic-interrupts in favour of
+ * hrtimer-generated interrupts. But these defines are kept around for use
+ * in userland, as documentation of the hardware, and possible future use if
+ * hardware-generated periodic interrupts are ever added back.
+ */
+ /* E32K RS3 RS2 RS1 RS0 */
+#define RTC_SQW_8192HZ 0x03 /* 0 0 0 1 1 */
+#define RTC_SQW_4096HZ 0x04 /* 0 0 1 0 0 */
+#define RTC_SQW_2048HZ 0x05 /* 0 0 1 0 1 */
+#define RTC_SQW_1024HZ 0x06 /* 0 0 1 1 0 */
+#define RTC_SQW_512HZ 0x07 /* 0 0 1 1 1 */
+#define RTC_SQW_256HZ 0x08 /* 0 1 0 0 0 */
+#define RTC_SQW_128HZ 0x09 /* 0 1 0 0 1 */
+#define RTC_SQW_64HZ 0x0a /* 0 1 0 1 0 */
+#define RTC_SQW_32HZ 0x0b /* 0 1 0 1 1 */
+#define RTC_SQW_16HZ 0x0c /* 0 1 1 0 0 */
+#define RTC_SQW_8HZ 0x0d /* 0 1 1 0 1 */
+#define RTC_SQW_4HZ 0x0e /* 0 1 1 1 0 */
+#define RTC_SQW_2HZ 0x0f /* 0 1 1 1 1 */
+#define RTC_SQW_0HZ 0x00 /* 0 0 0 0 0 */
+#define RTC_SQW_32768HZ 32768 /* 1 - - - - */
+#define RTC_MAX_USER_FREQ 8192
+
+
+/*
+ * NVRAM data & addresses:
+ * - 50 bytes of NVRAM are available just past the clock registers.
+ * - 64 additional bytes are available in Bank0.
+ *
+ * Extended, battery-backed NV-SRAM:
+ * - DS1685/DS1687 - 128 bytes.
+ * - DS1689/DS1693 - 0 bytes.
+ * - DS17285/DS17287 - 2048 bytes.
+ * - DS17485/DS17487 - 4096 bytes.
+ * - DS17885/DS17887 - 8192 bytes.
+ */
+#define NVRAM_TIME_BASE 0x0e /* NVRAM Addr in Time regs */
+#define NVRAM_BANK0_BASE 0x40 /* NVRAM Addr in Bank0 regs */
+#define NVRAM_SZ_TIME 50
+#define NVRAM_SZ_BANK0 64
+#if defined(CONFIG_RTC_DRV_DS1685)
+# define NVRAM_SZ_EXTND 128
+#elif defined(CONFIG_RTC_DRV_DS1689)
+# define NVRAM_SZ_EXTND 0
+#elif defined(CONFIG_RTC_DRV_DS17285)
+# define NVRAM_SZ_EXTND 2048
+#elif defined(CONFIG_RTC_DRV_DS17485)
+# define NVRAM_SZ_EXTND 4096
+#elif defined(CONFIG_RTC_DRV_DS17885)
+# define NVRAM_SZ_EXTND 8192
+#endif
+#define NVRAM_TOTAL_SZ_BANK0 (NVRAM_SZ_TIME + NVRAM_SZ_BANK0)
+#define NVRAM_TOTAL_SZ (NVRAM_TOTAL_SZ_BANK0 + NVRAM_SZ_EXTND)
+
+
+/*
+ * Function Prototypes.
+ */
+extern void __noreturn
+ds1685_rtc_poweroff(struct platform_device *pdev);
+
+#endif /* _LINUX_RTC_DS1685_H_ */
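
Since the new ds1685.h above exposes both the platform-data hooks and the per-mode bit masks, a short board-side sketch may help; the board_* names, the 4-byte register stride and the poweroff hook body are assumptions for illustration, not derived from any real platform.

/* Hedged sketch: board platform data plus BCD decoding for the ds1685. */
#include <linux/bcd.h>
#include <linux/rtc/ds1685.h>

static void board_rtc_prepare_poweroff(void)
{
	/* quiesce board-specific hardware before the RTC drops power */
}

static struct ds1685_rtc_platform_data board_rtc_pdata = {
	.regstep		= 4,	/* registers laid out on a 4-byte stride */
	.bcd_mode		= true,	/* firmware leaves the chip in BCD mode */
	.plat_prepare_poweroff	= board_rtc_prepare_poweroff,
};

/* decode a raw RTC_SECS value read while DM = 0 (BCD mode) */
static inline int board_rtc_seconds(u8 raw)
{
	return bcd2bin(raw & RTC_SECS_BCD_MASK);
}
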
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
index 2c92e1c..aefd997 100644
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ b/include/linux/rtc/sirfsoc_rtciobrg.h
@@ -9,10 +9,14 @@
#ifndef _SIRFSOC_RTC_IOBRG_H_
#define _SIRFSOC_RTC_IOBRG_H_
+struct regmap_config;
+
extern void sirfsoc_rtc_iobrg_besyncing(void);
extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+ const struct regmap_config *config);
#endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 5db76a3..39adaa9 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -77,7 +77,12 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
return rtnl_dereference(dev->ingress_queue);
}
-extern struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
+struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
+
+#ifdef CONFIG_NET_INGRESS
+void net_inc_ingress_queue(void);
+void net_dec_ingress_queue(void);
+#endif
extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
@@ -109,5 +114,9 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode,
- u32 flags, u32 mask);
+ u32 flags, u32 mask, int nlflags,
+ u32 filter_mask,
+ int (*vlan_fill)(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 filter_mask));
#endif /* __LINUX_RTNETLINK_H */
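
ndo_dflt_bridge_getlink() above gains nlflags, filter_mask and an optional vlan_fill callback. The wrapper below is a hedged sketch of forwarding a request to the extended helper with no per-VLAN attributes; the foo_bridge_getlink() name and its argument list are assumptions for illustration, not an existing ndo signature.

/* Hedged sketch: calling the extended default bridge getlink helper. */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int foo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			      struct net_device *dev, u16 mode,
			      u32 filter_mask, int nlflags)
{
	/* no VLAN attributes to report: pass a NULL vlan_fill callback */
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
				       nlflags, filter_mask, NULL);
}
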
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index ed8f9e7..9b1ef0c 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -2,13 +2,39 @@
#define _LINUX_SCATTERLIST_H
#include <linux/string.h>
+#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
-
-#include <asm/types.h>
-#include <asm/scatterlist.h>
#include <asm/io.h>
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+ unsigned long sg_magic;
+#endif
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+ dma_addr_t dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ unsigned int dma_length;
+#endif
+};
+
+/*
+ * These macros should be used after a dma_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries dma_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+#define sg_dma_len(sg) ((sg)->dma_length)
+#else
+#define sg_dma_len(sg) ((sg)->length)
+#endif
+
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
@@ -18,10 +44,9 @@ struct sg_table {
/*
* Notes on SG table design.
*
- * Architectures must provide an unsigned long page_link field in the
- * scatterlist struct. We use that to place the page pointer AND encode
- * information about the sg table as well. The two lower bits are reserved
- * for this information.
+ * We use the unsigned long page_link field in the scatterlist struct to place
+ * the page pointer AND encode information about the sg table as well. The two
+ * lower bits are reserved for this information.
*
* If bit 0 is set, then the page_link contains a pointer to the next sg
* table list. Otherwise the next entry is at sg + 1.
@@ -221,6 +246,7 @@ static inline void *sg_virt(struct scatterlist *sg)
}
int sg_nents(struct scatterlist *sg);
+int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
@@ -239,13 +265,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
+size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+ size_t buflen, off_t skip, bool to_buffer);
+
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen);
+ const void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, off_t skip);
+ const void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
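
As the comment above the sg_dma_*() macros notes, only the entry count returned by dma_map_sg() is meaningful after mapping. The sketch below walks exactly that many entries; foo_program_desc() stands in for a hypothetical device-specific descriptor writer and is not part of this header.

/* Hedged sketch: consuming only the entries dma_map_sg() reports as mapped. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* hypothetical hook programming one DMA descriptor */
static void foo_program_desc(dma_addr_t addr, unsigned int len)
{
	/* write one hardware descriptor covering addr/len here */
}

static int foo_map_and_program(struct device *dev, struct scatterlist *sgl,
			       int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* iterate over the mapped count, not the original nents */
	for_each_sg(sgl, sg, mapped, i)
		foo_program_desc(sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
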
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8db31ef..04b5ada 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -25,7 +25,7 @@ struct sched_param {
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -58,6 +58,7 @@ struct sched_param {
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
+#include <linux/cgroup-defs.h>
#include <asm/processor.h>
@@ -125,7 +126,6 @@ struct sched_attr {
u64 sched_period;
};
-struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
@@ -133,6 +133,7 @@ struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
+struct nameidata;
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
@@ -174,7 +175,12 @@ extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void update_cpu_load_nohz(void);
+#else
+static inline void update_cpu_load_nohz(void) { }
+#endif
extern unsigned long get_parent_ip(unsigned long addr);
@@ -186,8 +192,6 @@ struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
-extern void
-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif
/*
@@ -214,9 +218,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
-#define TASK_STATE_MAX 1024
+#define TASK_NOLOAD 1024
+#define TASK_STATE_MAX 2048
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -226,6 +231,8 @@ extern char ___assert_task_state[1 - 2*!!(
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
+
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
@@ -241,7 +248,8 @@ extern char ___assert_task_state[1 - 2*!!(
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0)
+ (task->flags & PF_FROZEN) == 0 && \
+ (task->state & TASK_NOLOAD) == 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -253,7 +261,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
- set_mb((tsk)->state, (state_value)); \
+ smp_store_mb((tsk)->state, (state_value)); \
} while (0)
/*
@@ -275,7 +283,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
- set_mb(current->state, (state_value)); \
+ smp_store_mb(current->state, (state_value)); \
} while (0)
#else
@@ -283,7 +291,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
- set_mb((tsk)->state, (state_value))
+ smp_store_mb((tsk)->state, (state_value))
/*
* set_current_state() includes a barrier so that the write of current->state
@@ -299,7 +307,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
- set_mb(current->state, (state_value))
+ smp_store_mb(current->state, (state_value))
#endif
@@ -329,19 +337,17 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
+extern cpumask_var_t cpu_isolated_map;
+
extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(int pinned);
+extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
-static inline int get_nohz_timer_target(int pinned)
-{
- return smp_processor_id();
-}
#endif
/*
@@ -363,9 +369,6 @@ extern void show_regs(struct pt_regs *);
*/
extern void show_stack(struct task_struct *task, unsigned long *sp);
-void io_schedule(void);
-long io_schedule_timeout(long timeout);
-
extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
@@ -422,6 +425,13 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
+extern long io_schedule_timeout(long timeout);
+
+static inline void io_schedule(void)
+{
+ io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+}
+
struct nsproxy;
struct user_namespace;
@@ -562,6 +572,23 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
+/*
+ * This is the atomic variant of task_cputime, which can be used for
+ * storing and updating task_cputime statistics without locking.
+ */
+struct task_cputime_atomic {
+ atomic64_t utime;
+ atomic64_t stime;
+ atomic64_t sum_exec_runtime;
+};
+
+#define INIT_CPUTIME_ATOMIC \
+ (struct task_cputime_atomic) { \
+ .utime = ATOMIC64_INIT(0), \
+ .stime = ATOMIC64_INIT(0), \
+ .sum_exec_runtime = ATOMIC64_INIT(0), \
+ }
+
#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
#else
@@ -579,18 +606,16 @@ struct task_cputime {
/**
* struct thread_group_cputimer - thread group interval timer counts
- * @cputime: thread group interval timers.
+ * @cputime_atomic: atomic thread group interval timers.
* @running: non-zero when there are timers running and
* @cputime receives updates.
- * @lock: lock for fields in this struct.
*
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU timer calculations.
*/
struct thread_group_cputimer {
- struct task_cputime cputime;
+ struct task_cputime_atomic cputime_atomic;
int running;
- raw_spinlock_t lock;
};
#include <linux/rwsem.h>
@@ -729,18 +754,6 @@ struct signal_struct {
unsigned audit_tty_log_passwd;
struct tty_audit_buf *tty_audit_buf;
#endif
-#ifdef CONFIG_CGROUPS
- /*
- * group_rwsem prevents new tasks from entering the threadgroup and
- * member tasks from exiting,a more specifically, setting of
- * PF_EXITING. fork and exit paths are protected with this rwsem
- * using threadgroup_change_begin/end(). Users which require
- * threadgroup to remain stable should use threadgroup_[un]lock()
- * which also takes care of exec path. Currently, cgroup is the
- * only user.
- */
- struct rw_semaphore group_rwsem;
-#endif
oom_flags_t oom_flags;
short oom_score_adj; /* OOM kill score adjustment */
@@ -823,7 +836,7 @@ extern struct user_struct root_user;
struct backing_dev_info;
struct reclaim_state;
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
struct sched_info {
/* cumulative counters */
unsigned long pcount; /* # of times run on this cpu */
@@ -833,7 +846,7 @@ struct sched_info {
unsigned long long last_arrival,/* when we last ran on a cpu */
last_queued; /* when we were last queued to run */
};
-#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+#endif /* CONFIG_SCHED_INFO */
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
@@ -895,6 +908,50 @@ enum cpu_idle_type {
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
/*
+ * Wake-queues are lists of tasks with a pending wakeup, whose
+ * callers have already marked the task as woken internally,
+ * and can thus carry on. A common use case is being able to
+ * do the wakeups once the corresponding user lock has been
+ * released.
+ *
+ * We hold reference to each task in the list across the wakeup,
+ * thus guaranteeing that the memory is still valid by the time
+ * the actual wakeups are performed in wake_up_q().
+ *
+ * One per task suffices, because there's never a need for a task to be
+ * in two wake queues simultaneously; it is forbidden to abandon a task
+ * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
+ * already in a wake queue, the wakeup will happen soon and the second
+ * waker can just skip it.
+ *
+ * The WAKE_Q macro declares and initializes the list head.
+ * wake_up_q() does NOT reinitialize the list; it's expected to be
+ * called near the end of a function, where the fact that the queue is
+ * not used again will be easy to see by inspection.
+ *
+ * Note that this can cause spurious wakeups. schedule() callers
+ * must ensure the call is done inside a loop, confirming that the
+ * wakeup condition has in fact occurred.
+ */
+struct wake_q_node {
+ struct wake_q_node *next;
+};
+
+struct wake_q_head {
+ struct wake_q_node *first;
+ struct wake_q_node **lastp;
+};
+
+#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
+
+#define WAKE_Q(name) \
+ struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+
+extern void wake_q_add(struct wake_q_head *head,
+ struct task_struct *task);
+extern void wake_up_q(struct wake_q_head *head);
+
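+ As a usage illustration (a sketch only, with a hypothetical lock type; not
+ part of this patch), the intended pattern is to queue wakeups while holding
+ a lock and to issue them after the lock has been released:

	struct demo_lock {
		spinlock_t		lock;
		struct list_head	waiters;	/* of struct demo_waiter */
	};

	struct demo_waiter {
		struct list_head	node;
		struct task_struct	*task;
	};

	static void demo_unlock_and_wake(struct demo_lock *dl)
	{
		WAKE_Q(wake_q);			/* on-stack head, initialized empty */
		struct demo_waiter *w;

		spin_lock(&dl->lock);
		list_for_each_entry(w, &dl->waiters, node)
			wake_q_add(&wake_q, w->task);	/* takes a task reference */
		spin_unlock(&dl->lock);

		wake_up_q(&wake_q);		/* wakeups happen outside the lock */
	}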
+/*
* sched-domains (multiprocessor balancing) declarations:
*/
#ifdef CONFIG_SMP
@@ -1111,15 +1168,28 @@ struct load_weight {
};
struct sched_avg {
+ u64 last_runnable_update;
+ s64 decay_count;
+ /*
+ * utilization_avg_contrib describes the amount of time that a
+ * sched_entity is running on a CPU. It is based on running_avg_sum
+ * and is scaled in the range [0..SCHED_LOAD_SCALE].
+ * load_avg_contrib describes the amount of time that a sched_entity
+ * is runnable on a rq. It is based on both runnable_avg_sum and the
+ * weight of the task.
+ */
+ unsigned long load_avg_contrib, utilization_avg_contrib;
/*
* These sums represent an infinite geometric series and so are bound
* above by 1024/(1-y). Thus we only need a u32 to store them for all
* choices of y < 1-2^(-32)*1024.
+ * running_avg_sum reflects the time that the sched_entity is
+ * effectively running on the CPU.
+ * runnable_avg_sum represents the amount of time a sched_entity is on
+ * a runqueue which includes the running time that is monitored by
+ * running_avg_sum.
*/
- u32 runnable_avg_sum, runnable_avg_period;
- u64 last_runnable_update;
- s64 decay_count;
- unsigned long load_avg_contrib;
+ u32 runnable_avg_sum, avg_period, running_avg_sum;
};
#ifdef CONFIG_SCHEDSTATS
@@ -1316,8 +1386,6 @@ struct task_struct {
int rcu_read_lock_nesting;
union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_PREEMPT_RCU
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
@@ -1327,7 +1395,7 @@ struct task_struct {
int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
struct sched_info sched_info;
#endif
@@ -1338,9 +1406,6 @@ struct task_struct {
#endif
struct mm_struct *mm, *active_mm;
-#ifdef CONFIG_COMPAT_BRK
- unsigned brk_randomized:1;
-#endif
/* per-thread vma caching */
u32 vmacache_seqnum;
struct vm_area_struct *vmacache[VMACACHE_SIZE];
@@ -1351,7 +1416,7 @@ struct task_struct {
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
- unsigned int jobctl; /* JOBCTL_*, siglock protected */
+ unsigned long jobctl; /* JOBCTL_*, siglock protected */
/* Used for emulating ABI behavior of previous Linux versions */
unsigned int personality;
@@ -1363,13 +1428,19 @@ struct task_struct {
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+ unsigned sched_migrated:1;
#ifdef CONFIG_MEMCG_KMEM
unsigned memcg_kmem_skip_account:1;
#endif
+#ifdef CONFIG_COMPAT_BRK
+ unsigned brk_randomized:1;
+#endif
unsigned long atomic_flags; /* Flags needing atomic access. */
+ struct restart_block restart_block;
+
pid_t pid;
pid_t tgid;
@@ -1441,7 +1512,7 @@ struct task_struct {
it with task_lock())
- initialized normally by setup_new_exec */
/* file system info */
- int link_count, total_link_count;
+ struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
struct sysv_sem sysvsem;
@@ -1451,8 +1522,6 @@ struct task_struct {
/* hung task detection */
unsigned long last_switch_count;
#endif
-/* CPU-specific state of this task */
- struct thread_struct thread;
/* filesystem information */
struct fs_struct *fs;
/* open file information */
@@ -1491,6 +1560,8 @@ struct task_struct {
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
+ struct wake_q_node wake_q;
+
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
struct rb_root pi_waiters;
@@ -1619,11 +1690,11 @@ struct task_struct {
/*
* numa_faults_locality tracks if faults recorded during the last
- * scan window were remote/local. The task scan period is adapted
- * based on the locality of the faults with different weights
- * depending on whether they were shared or private faults
+ * scan window were remote/local or failed to migrate. The task scan
+ * period is adapted based on the locality of the faults with different
+ * weights depending on whether they were shared or private faults
*/
- unsigned long numa_faults_locality[2];
+ unsigned long numa_faults_locality[3];
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
@@ -1662,6 +1733,9 @@ struct task_struct {
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
+#ifdef CONFIG_KASAN
+ unsigned int kasan_depth;
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
@@ -1701,8 +1775,23 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
+ int pagefault_disabled;
+/* CPU-specific state of this task */
+ struct thread_struct thread;
+/*
+ * WARNING: on x86, 'thread_struct' contains a variable-sized
+ * structure. It *MUST* be at the end of 'task_struct'.
+ *
+ * Do not put anything below here!
+ */
};
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+extern int arch_task_struct_size __read_mostly;
+#else
+# define arch_task_struct_size (sizeof(struct task_struct))
+#endif
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -1710,6 +1799,7 @@ struct task_struct {
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
#define TNF_FAULT_LOCAL 0x08
+#define TNF_MIGRATE_FAIL 0x10
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
@@ -2053,22 +2143,22 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
-#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
-#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
-#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
-#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
-#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
-#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
-#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
+#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
extern bool task_set_jobctl_pending(struct task_struct *task,
- unsigned int mask);
+ unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
- unsigned int mask);
+ unsigned long mask);
static inline void rcu_copy_process(struct task_struct *p)
{
@@ -2145,6 +2235,7 @@ extern unsigned long long notrace sched_clock(void);
*/
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
+extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
@@ -2277,11 +2368,6 @@ extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
-/*
- * The default (Linux) execution domain.
- */
-extern struct exec_domain default_exec_domain;
-
union thread_union {
struct thread_info thread_info;
unsigned long stack[THREAD_SIZE/sizeof(long)];
@@ -2345,7 +2431,6 @@ extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
-extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@ -2469,8 +2554,22 @@ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
+#ifdef CONFIG_HAVE_COPY_THREAD_TLS
+extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
+ struct task_struct *, unsigned long);
+#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
struct task_struct *);
+
+/* Architectures that haven't opted into copy_thread_tls get the tls argument
+ * via pt_regs, so ignore the tls argument passed via C. */
+static inline int copy_thread_tls(
+ unsigned long clone_flags, unsigned long sp, unsigned long arg,
+ struct task_struct *p, unsigned long tls)
+{
+ return copy_thread(clone_flags, sp, arg, p);
+}
+#endif
extern void flush_thread(void);
extern void exit_thread(void);
@@ -2489,6 +2588,7 @@ extern int do_execveat(int, struct filename *,
const char __user * const __user *,
const char __user * const __user *,
int);
+extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
@@ -2512,6 +2612,9 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
}
#endif
+#define tasklist_empty() \
+ list_empty(&init_task.tasks)
+
#define next_task(p) \
list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
@@ -2620,53 +2723,33 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
-#ifdef CONFIG_CGROUPS
-static inline void threadgroup_change_begin(struct task_struct *tsk)
-{
- down_read(&tsk->signal->group_rwsem);
-}
-static inline void threadgroup_change_end(struct task_struct *tsk)
-{
- up_read(&tsk->signal->group_rwsem);
-}
-
/**
- * threadgroup_lock - lock threadgroup
- * @tsk: member task of the threadgroup to lock
- *
- * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
- * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * change ->group_leader/pid. This is useful for cases where the threadgroup
- * needs to stay stable across blockable operations.
+ * threadgroup_change_begin - mark the beginning of changes to a threadgroup
+ * @tsk: task causing the changes
*
- * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
- * synchronization. While held, no new task will be added to threadgroup
- * and no existing live task will have its PF_EXITING set.
- *
- * de_thread() does threadgroup_change_{begin|end}() when a non-leader
- * sub-thread becomes a new leader.
+ * All operations which modify a threadgroup - a new thread joining the
+ * group, death of a member thread (the assertion of PF_EXITING) and
+ * exec(2) dethreading the process and replacing the leader - are wrapped
+ * by threadgroup_change_{begin|end}(). This is to provide a place which
+ * subsystems needing threadgroup stability can hook into for
+ * synchronization.
*/
-static inline void threadgroup_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
{
- down_write(&tsk->signal->group_rwsem);
+ might_sleep();
+ cgroup_threadgroup_change_begin(tsk);
}
/**
- * threadgroup_unlock - unlock threadgroup
- * @tsk: member task of the threadgroup to unlock
+ * threadgroup_change_end - mark the end of changes to a threadgroup
+ * @tsk: task causing the changes
*
- * Reverse threadgroup_lock().
+ * See threadgroup_change_begin().
*/
-static inline void threadgroup_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
{
- up_write(&tsk->signal->group_rwsem);
+ cgroup_threadgroup_change_end(tsk);
}
-#else
-static inline void threadgroup_change_begin(struct task_struct *tsk) {}
-static inline void threadgroup_change_end(struct task_struct *tsk) {}
-static inline void threadgroup_lock(struct task_struct *tsk) {}
-static inline void threadgroup_unlock(struct task_struct *tsk) {}
-#endif
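 For illustration (a sketch of the expected calling pattern, not code from
 this patch), paths that modify a threadgroup are expected to bracket the
 change like this:

	static void demo_threadgroup_update(struct task_struct *tsk)
	{
		threadgroup_change_begin(tsk);	/* may sleep; cgroup stability hook */
		/* ... add a thread, set PF_EXITING, or dethread on exec ... */
		threadgroup_change_end(tsk);
	}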
#ifndef __HAVE_THREAD_FUNCTIONS
@@ -2942,11 +3025,6 @@ static __always_inline bool need_resched(void)
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
-static inline void thread_group_cputime_init(struct signal_struct *sig)
-{
- raw_spin_lock_init(&sig->cputimer.lock);
-}
-
/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
@@ -3060,13 +3138,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
static inline unsigned long task_rlimit(const struct task_struct *tsk,
unsigned int limit)
{
- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+ return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}
static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
unsigned int limit)
{
- return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+ return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}
static inline unsigned long rlimit(unsigned int limit)
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 6341f5b..a30b172 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
+extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
@@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
return p->normal_prio;
}
-static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
+static inline int rt_mutex_get_effective_prio(struct task_struct *task,
+ int newprio)
{
- return 0;
+ return newprio;
}
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 596a0e0..c9e4731 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -57,24 +57,12 @@ extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
- return 1;
-}
-#endif
/*
* control realtime throttling:
diff --git a/include/linux/scif.h b/include/linux/scif.h
new file mode 100644
index 0000000..44f4f38
--- /dev/null
+++ b/include/linux/scif.h
@@ -0,0 +1,993 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ *
+ */
+#ifndef __SCIF_H__
+#define __SCIF_H__
+
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <linux/scif_ioctl.h>
+
+#define SCIF_ACCEPT_SYNC 1
+#define SCIF_SEND_BLOCK 1
+#define SCIF_RECV_BLOCK 1
+
+enum {
+ SCIF_PROT_READ = (1 << 0),
+ SCIF_PROT_WRITE = (1 << 1)
+};
+
+enum {
+ SCIF_MAP_FIXED = 0x10,
+ SCIF_MAP_KERNEL = 0x20,
+};
+
+enum {
+ SCIF_FENCE_INIT_SELF = (1 << 0),
+ SCIF_FENCE_INIT_PEER = (1 << 1),
+ SCIF_SIGNAL_LOCAL = (1 << 4),
+ SCIF_SIGNAL_REMOTE = (1 << 5)
+};
+
+enum {
+ SCIF_RMA_USECPU = (1 << 0),
+ SCIF_RMA_USECACHE = (1 << 1),
+ SCIF_RMA_SYNC = (1 << 2),
+ SCIF_RMA_ORDERED = (1 << 3)
+};
+
+/* End of SCIF Admin Reserved Ports */
+#define SCIF_ADMIN_PORT_END 1024
+
+/* End of SCIF Reserved Ports */
+#define SCIF_PORT_RSVD 1088
+
+typedef struct scif_endpt *scif_epd_t;
+
+#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
+#define SCIF_REGISTER_FAILED ((off_t)-1)
+#define SCIF_MMAP_FAILED ((void *)-1)
+
+/**
+ * scif_open() - Create an endpoint
+ *
+ * Return:
+ * Upon successful completion, scif_open() returns an endpoint descriptor to
+ * be used in subsequent SCIF functions calls to refer to that endpoint;
+ * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is
+ * returned and errno is set to indicate the error; in kernel mode a NULL
+ * scif_epd_t is returned.
+ *
+ * Errors:
+ * ENOMEM - Insufficient kernel memory was available
+ */
+scif_epd_t scif_open(void);
+
+/**
+ * scif_bind() - Bind an endpoint to a port
+ * @epd: endpoint descriptor
+ * @pn: port number
+ *
+ * scif_bind() binds endpoint epd to port pn, where pn is a port number on the
+ * local node. If pn is zero, a port number greater than or equal to
+ * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to
+ * exactly one local port. Ports less than 1024, when requested, can only be
+ * bound by system (or root) processes or by processes executed by privileged
+ * users.
+ *
+ * Return:
+ * Upon successful completion, scif_bind() returns the port number to which epd
+ * is bound; otherwise in user mode -1 is returned and errno is set to
+ * indicate the error; in kernel mode the negative of one of the following
+ * errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINVAL - the endpoint or the port is already bound
+ * EISCONN - The endpoint is already connected
+ * ENOSPC - No port number available for assignment
+ * EACCES - The port requested is protected and the user is not the superuser
+ */
+int scif_bind(scif_epd_t epd, u16 pn);
+
+/**
+ * scif_listen() - Listen for connections on an endpoint
+ * @epd: endpoint descriptor
+ * @backlog: maximum pending connection requests
+ *
+ * scif_listen() marks the endpoint epd as a listening endpoint - that is, as
+ * an endpoint that will be used to accept incoming connection requests. Once
+ * so marked, the endpoint is said to be in the listening state and may not be
+ * used as the endpoint of a connection.
+ *
+ * The endpoint, epd, must have been bound to a port.
+ *
+ * The backlog argument defines the maximum length to which the queue of
+ * pending connections for epd may grow. If a connection request arrives when
+ * the queue is full, the client may receive an error with an indication that
+ * the connection was refused.
+ *
+ * Return:
+ * Upon successful completion, scif_listen() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINVAL - the endpoint is not bound to a port
+ * EISCONN - The endpoint is already connected or listening
+ */
+int scif_listen(scif_epd_t epd, int backlog);
+
+/**
+ * scif_connect() - Initiate a connection on a port
+ * @epd: endpoint descriptor
+ * @dst: global id of port to which to connect
+ *
+ * The scif_connect() function requests the connection of endpoint epd to remote
+ * port dst. If the connection is successful, a peer endpoint, bound to dst, is
+ * created on node dst.node. On successful return, the connection is complete.
+ *
+ * If the endpoint epd has not already been bound to a port, scif_connect()
+ * will bind it to an unused local port.
+ *
+ * A connection is terminated when an endpoint of the connection is closed,
+ * either explicitly by scif_close(), or when a process that owns one of the
+ * endpoints of the connection is terminated.
+ *
+ * In user space, scif_connect() supports an asynchronous connection mode
+ * if the application has set the O_NONBLOCK flag on the endpoint via the
+ * fcntl() system call. Setting this flag results in the calling process
+ * not waiting during scif_connect().
+ *
+ * Return:
+ * Upon successful completion, scif_connect() returns the port ID to which the
+ * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is
+ * set to indicate the error; in kernel mode the negative of one of the
+ * following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNREFUSED - The destination was not listening for connections or refused
+ * the connection request
+ * EINVAL - dst.port is not a valid port ID
+ * EISCONN - The endpoint is already connected
+ * ENOMEM - No buffer space is available
+ * ENODEV - The destination node does not exist, or the node is lost or existed,
+ * but is not currently in the network since it may have crashed
+ * ENOSPC - No port number available for assignment
+ * EOPNOTSUPP - The endpoint is listening and cannot be connected
+ */
+int scif_connect(scif_epd_t epd, struct scif_port_id *dst);
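+
+ For illustration, a minimal kernel-mode sketch of a client connecting to a
+ listening peer; the node/port values are assumptions, and struct
+ scif_port_id is taken to expose node and port members as in scif_ioctl.h:

	static int demo_scif_client(void)
	{
		struct scif_port_id dst = { .node = 0, .port = 2000 };
		scif_epd_t epd;
		int ret;

		epd = scif_open();		/* NULL on failure in kernel mode */
		if (!epd)
			return -ENOMEM;

		ret = scif_connect(epd, &dst);	/* binds epd to a free local port */
		if (ret < 0) {
			scif_close(epd);
			return ret;
		}
		return 0;			/* epd is now connected to dst */
	}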
+
+/**
+ * scif_accept() - Accept a connection on an endpoint
+ * @epd: endpoint descriptor
+ * @peer: global id of port to which connected
+ * @newepd: new connected endpoint descriptor
+ * @flags: flags
+ *
+ * The scif_accept() call extracts the first connection request from the queue
+ * of pending connections for the port on which epd is listening. scif_accept()
+ * creates a new endpoint, bound to the same port as epd, and allocates a new
+ * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new
+ * endpoint is connected to the endpoint through which the connection was
+ * requested. epd is unaffected by this call, and remains in the listening
+ * state.
+ *
+ * On successful return, peer holds the global port identifier (node id and
+ * local port number) of the port which requested the connection.
+ *
+ * A connection is terminated when an endpoint of the connection is closed,
+ * either explicitly by scif_close(), or when a process that owns one of the
+ * endpoints of the connection is terminated.
+ *
+ * The number of connections that can (subsequently) be accepted on epd is only
+ * limited by system resources (memory).
+ *
+ * The flags argument is formed by OR'ing together zero or more of the
+ * following values.
+ * SCIF_ACCEPT_SYNC - block until a connection request is presented. If
+ * SCIF_ACCEPT_SYNC is not in flags, and no pending
+ * connections are present on the queue, scif_accept()
+ * fails with an EAGAIN error
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when there is a connection request. In kernel mode, the scif_poll()
+ * function may be used for this purpose. A readable event will be delivered
+ * when a connection is requested.
+ *
+ * Return:
+ * Upon successful completion, scif_accept() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be
+ * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete
+ * its connection request
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * EINTR - Interrupted function
+ * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is
+ * NULL, or newepd is NULL
+ * ENODEV - The requesting node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOMEM - Not enough space
+ * ENOENT - Secondary part of epd registration failed
+ */
+int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
+		scif_epd_t *newepd, int flags);
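+
+ A matching kernel-mode server sketch (illustrative only; the port number and
+ backlog are assumptions) that binds, listens and blocks in scif_accept():

	static int demo_scif_server(scif_epd_t *newepd)
	{
		struct scif_port_id peer;
		scif_epd_t lep;
		int ret;

		lep = scif_open();
		if (!lep)
			return -ENOMEM;

		ret = scif_bind(lep, 2000);	/* well-known port >= SCIF_ADMIN_PORT_END */
		if (ret < 0)
			goto out;

		ret = scif_listen(lep, 16);
		if (ret < 0)
			goto out;

		/* SCIF_ACCEPT_SYNC: block until a connection request arrives. */
		ret = scif_accept(lep, &peer, newepd, SCIF_ACCEPT_SYNC);
	out:
		scif_close(lep);		/* the accepted endpoint stays open */
		return ret;
	}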
+
+/**
+ * scif_close() - Close an endpoint
+ * @epd: endpoint descriptor
+ *
+ * scif_close() closes an endpoint and performs necessary teardown of
+ * facilities associated with that endpoint.
+ *
+ * If epd is a listening endpoint then it will no longer accept connection
+ * requests on the port to which it is bound. Any pending connection requests
+ * are rejected.
+ *
+ * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs
+ * which are in-process through epd or its peer endpoint will complete before
+ * scif_close() returns. Registered windows of the local and peer endpoints are
+ * released as if scif_unregister() was called against each window.
+ *
+ * Closing a SCIF endpoint does not affect local registered memory mapped by
+ * a SCIF endpoint on a remote node. The local memory remains mapped by the peer
+ * SCIF endpoint until explicitly removed by calling munmap(..) by the peer.
+ *
+ * If the peer endpoint's receive queue is not empty at the time that epd is
+ * closed, then the peer endpoint can be passed as the endpoint parameter to
+ * scif_recv() until the receive queue is empty.
+ *
+ * epd is freed and may no longer be accessed.
+ *
+ * Return:
+ * Upon successful completion, scif_close() returns 0; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode the
+ * negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ */
+int scif_close(scif_epd_t epd);
+
+/**
+ * scif_send() - Send a message
+ * @epd: endpoint descriptor
+ * @msg: message buffer address
+ * @len: message length
+ * @flags: blocking mode flags
+ *
+ * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data
+ * are copied from memory starting at address msg. On successful execution the
+ * return value of scif_send() is the number of bytes that were sent, and is
+ * zero if no bytes were sent because len was zero. scif_send() may be called
+ * only when the endpoint is in a connected state.
+ *
+ * If a scif_send() call is non-blocking, then it sends only those bytes which
+ * can be sent without waiting, up to a maximum of len bytes.
+ *
+ * If a scif_send() call is blocking, then it normally returns after sending
+ * all len bytes. If a blocking call is interrupted or the connection is
+ * reset, the call is considered successful if some bytes were sent or len is
+ * zero, otherwise the call is considered unsuccessful.
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when the send queue is not full. In kernel mode, the scif_poll() function
+ * may be used for this purpose.
+ *
+ * It is recommended that scif_send()/scif_recv() only be used for short
+ * control-type message communication between SCIF endpoints. The SCIF RMA
+ * APIs are expected to provide better performance for transfer sizes of
+ * 1024 bytes or longer for the current MIC hardware and software
+ * implementation.
+ *
+ * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK
+ * is passed as the flags argument.
+ *
+ * Return:
+ * Upon successful completion, scif_send() returns the number of bytes sent;
+ * otherwise in user mode -1 is returned and errno is set to indicate the
+ * error; in kernel mode the negative of one of the following errors is
+ * returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - An invalid address was specified for a parameter
+ * EINVAL - flags is invalid, or len is negative
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+int scif_send(scif_epd_t epd, void *msg, int len, int flags);
+
+/**
+ * scif_recv() - Receive a message
+ * @epd: endpoint descriptor
+ * @msg: message buffer address
+ * @len: message buffer length
+ * @flags: blocking mode flags
+ *
+ * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of
+ * data are copied to memory starting at address msg. On successful execution
+ * the return value of scif_recv() is the number of bytes that were received,
+ * and is zero if no bytes were received because len was zero. scif_recv() may
+ * be called only when the endpoint is in a connected state.
+ *
+ * If a scif_recv() call is non-blocking, then it receives only those bytes
+ * which can be received without waiting, up to a maximum of len bytes.
+ *
+ * If a scif_recv() call is blocking, then it normally returns after receiving
+ * all len bytes. If the blocking call was interrupted due to a disconnection,
+ * subsequent calls to scif_recv() will copy all bytes received up to the point
+ * of disconnection.
+ *
+ * In user mode, the select() and poll() functions can be used to determine
+ * when data is available to be received. In kernel mode, the scif_poll()
+ * function may be used for this purpose.
+ *
+ * It is recommended that scif_send()/scif_recv() only be used for short
+ * control-type message communication between SCIF endpoints. The SCIF RMA
+ * APIs are expected to provide better performance for transfer sizes of
+ * 1024 bytes or longer for the current MIC hardware and software
+ * implementation.
+ *
+ * scif_recv() will block until the entire message is received if
+ * SCIF_RECV_BLOCK is passed as the flags argument.
+ *
+ * Return:
+ * Upon successful completion, scif_recv() returns the number of bytes
+ * received; otherwise in user mode -1 is returned and errno is set to
+ * indicate the error; in kernel mode the negative of one of the following
+ * errors is returned.
+ *
+ * Errors:
+ * EAGAIN - The destination node is returning from a low power state
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - An invalid address was specified for a parameter
+ * EINVAL - flags is invalid, or len is negative
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
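+
+ For illustration, a short blocking exchange of a fixed-size control message
+ over a connected endpoint (a sketch; the payload and its size are arbitrary):

	static int demo_scif_ping(scif_epd_t epd)
	{
		char buf[64] = "ping";
		int ret;

		ret = scif_send(epd, buf, sizeof(buf), SCIF_SEND_BLOCK);
		if (ret < 0)
			return ret;

		/* Blocks until all sizeof(buf) bytes have been received. */
		ret = scif_recv(epd, buf, sizeof(buf), SCIF_RECV_BLOCK);
		return ret < 0 ? ret : 0;
	}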
+
+/**
+ * scif_register() - Mark a memory region for remote access.
+ * @epd: endpoint descriptor
+ * @addr: starting virtual address
+ * @len: length of range
+ * @offset: offset of window
+ * @prot_flags: read/write protection flags
+ * @map_flags: mapping flags
+ *
+ * The scif_register() function opens a window, a range of whole pages of the
+ * registered address space of the endpoint epd, starting at offset po and
+ * continuing for len bytes. The value of po, further described below, is a
+ * function of the parameters offset and len, and the value of map_flags. Each
+ * page of the window represents the physical memory page which backs the
+ * corresponding page of the range of virtual address pages starting at addr
+ * and continuing for len bytes. addr and len are constrained to be multiples
+ * of the page size. A successful scif_register() call returns po.
+ *
+ * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
+ * exactly, and offset is constrained to be a multiple of the page size. The
+ * mapping established by scif_register() will not replace any existing
+ * registration; an error is returned if any page within the range [offset,
+ * offset + len - 1] intersects an existing window.
+ *
+ * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
+ * implementation-defined manner to arrive at po. The po value so chosen will
+ * be an area of the registered address space that the implementation deems
+ * suitable for a mapping of len bytes. An offset value of 0 is interpreted as
+ * granting the implementation complete freedom in selecting po, subject to
+ * constraints described below. A non-zero value of offset is taken to be a
+ * suggestion of an offset near which the mapping should be placed. When the
+ * implementation selects a value for po, it does not replace any extant
+ * window. In all cases, po will be a multiple of the page size.
+ *
+ * The physical pages which are so represented by a window are available for
+ * access in calls to mmap(), scif_readfrom(), scif_writeto(),
+ * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
+ * physical pages represented by the window will not be reused by the memory
+ * subsystem for any other purpose. Note that the same physical page may be
+ * represented by multiple windows.
+ *
+ * Subsequent operations which change the memory pages to which virtual
+ * addresses are mapped (such as mmap(), munmap()) have no effect on
+ * existing windows.
+ *
+ * If the process will fork(), it is recommended that the registered
+ * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
+ * problems due to copy-on-write semantics.
+ *
+ * The prot_flags argument is formed by OR'ing together one or more of the
+ * following values.
+ * SCIF_PROT_READ - allow read operations from the window
+ * SCIF_PROT_WRITE - allow write operations to the window
+ *
+ * The map_flags argument can be set to SCIF_MAP_FIXED, which requests that
+ * the window be placed at exactly the offset given.
+ *
+ * Return:
+ * Upon successful completion, scif_register() returns the offset at which the
+ * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
+ * is (off_t)-1) is returned and errno is set to indicate the error; in
+ * kernel mode the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
+ * [offset, offset + len -1] are already registered
+ * EAGAIN - The mapping could not be performed due to lack of resources
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
+ * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is
+ * set in flags, and offset is not a multiple of the page size, or addr is not a
+ * multiple of the page size, or len is not a multiple of the page size, or is
+ * 0, or offset is negative
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOMEM - Not enough space
+ * ENOTCONN - The endpoint is not connected
+ */
+off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
+ int prot_flags, int map_flags);
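+
+ For illustration, a sketch that registers one page of page-aligned kernel
+ memory at a fixed offset; the offset 0x100000 and the buffer are assumptions,
+ and the window would later be torn down with scif_unregister():

	static off_t demo_scif_expose_page(scif_epd_t epd, void *page_buf)
	{
		off_t po;

		/* addr, len and offset must all be multiples of the page size. */
		po = scif_register(epd, page_buf, PAGE_SIZE, 0x100000,
				   SCIF_PROT_READ | SCIF_PROT_WRITE, SCIF_MAP_FIXED);
		/* Negative errno in kernel mode; equals 0x100000 on success here. */
		return po;
	}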
+
+/**
+ * scif_unregister() - Remove a memory region previously marked for remote access.
+ * @epd: endpoint descriptor
+ * @offset: start of range to unregister
+ * @len: length of range to unregister
+ *
+ * The scif_unregister() function closes those previously registered windows
+ * which are entirely within the range [offset, offset + len - 1]. It is an
+ * error to specify a range which intersects only a subrange of a window.
+ *
+ * On a successful return, pages within the window may no longer be specified
+ * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
+ * scif_vwriteto(), scif_get_pages, and scif_fence_signal(). The window,
+ * however, continues to exist until all previous references against it are
+ * removed. A window is referenced if there is a mapping to it created by
+ * mmap(), or if scif_get_pages() was called against the window
+ * (and the pages have not been returned via scif_put_pages()). A window is
+ * also referenced while an RMA, in which some range of the window is a source
+ * or destination, is in progress. Finally a window is referenced while some
+ * offset in that window was specified to scif_fence_signal(), and the RMAs
+ * marked by that call to scif_fence_signal() have not completed. While a
+ * window is in this state, its registered address space pages are not
+ * available for use in a new registered window.
+ *
+ * When all such references to the window have been removed, its references to
+ * all the physical pages which it represents are removed. Similarly, the
+ * registered address space pages of the window become available for
+ * registration in a new window.
+ *
+ * Return:
+ * Upon successful completion, scif_unregister() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned. In the event of an
+ * error, no windows are unregistered.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a
+ * window, or offset is negative
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the
+ * registered address space of epd
+ */
+int scif_unregister(scif_epd_t epd, off_t offset, size_t len);
+
+/**
+ * scif_readfrom() - Copy from a remote address space
+ * @epd: endpoint descriptor
+ * @loffset: offset in local registered address space to
+ * which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space
+ * from which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_readfrom() copies len bytes from the remote registered address space of
+ * the peer of endpoint epd, starting at the offset roffset to the local
+ * registered address space of epd, starting at the offset loffset.
+ *
+ * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
+ * roffset + len - 1] must be within some registered window or windows of the
+ * local and remote nodes. A range may intersect multiple registered windows,
+ * but only if those windows are contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations complete
+ * is non-deterministic. The synchronization functions, scif_fence_mark()/
+ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
+ * the completion of asynchronous RMA operations on the same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * The optimal DMA performance will likely be realized if both
+ * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if loffset and roffset are not
+ * cacheline aligned but are separated by some multiple of 64. The lowest level
+ * of performance is likely if loffset and roffset are not separated by a
+ * multiple of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag results in the
+ * current implementation busy waiting and consuming CPU cycles
+ * while the DMA transfer is in progress for best performance by
+ * avoiding the interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_readfrom() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
+ * address space of epd, or the range [roffset, roffset + len - 1] is invalid
+ * for the registered address space of the peer of epd, or loffset or roffset
+ * is negative
+ */
+int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
+		  off_t roffset, int rma_flags);
+
+/**
+ * scif_writeto() - Copy to a remote address space
+ * @epd: endpoint descriptor
+ * @loffset: offset in local registered address space
+ * from which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space to
+ * which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_writeto() copies len bytes from the local registered address space of
+ * epd, starting at the offset loffset to the remote registered address space
+ * of the peer of endpoint epd, starting at the offset roffset.
+ *
+ * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
+ * roffset + len - 1] must be within some registered window or windows of the
+ * local and remote nodes. A range may intersect multiple registered windows,
+ * but only if those windows are contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_writeto() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations complete
+ * is non-deterministic. The synchronization functions, scif_fence_mark()/
+ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
+ * the completion of asynchronous RMA operations on the same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * The optimal DMA performance will likely be realized if both
+ * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if loffset and roffset are not cacheline
+ * aligned but are separated by some multiple of 64. The lowest level of
+ * performance is likely if loffset and roffset are not separated by a multiple
+ * of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag results in the
+ * current implementation busy waiting and consuming CPU cycles
+ * while the DMA transfer is in progress for best performance by
+ * avoiding the interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_writeto() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
+ * address space of epd, or the range [roffset, roffset + len - 1] is invalid
+ * for the registered address space of the peer of epd, or loffset or roffset
+ * is negative
+ */
+int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
+		 off_t roffset, int rma_flags);
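+
+ For illustration, a sketch that pushes one page from a local registered
+ window to the peer's window and waits for the DMA to complete; loff and roff
+ are assumed to fall inside windows registered beforehand:

	static int demo_scif_push_page(scif_epd_t epd, off_t loff, off_t roff)
	{
		/* SCIF_RMA_SYNC: return only after the transfer has completed. */
		return scif_writeto(epd, loff, PAGE_SIZE, roff, SCIF_RMA_SYNC);
	}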
+
+/**
+ * scif_vreadfrom() - Copy from a remote address space
+ * @epd: endpoint descriptor
+ * @addr: address to which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space
+ * from which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_vreadfrom() copies len bytes from the remote registered address
+ * space of the peer of endpoint epd, starting at the offset roffset, to local
+ * memory, starting at addr.
+ *
+ * The specified range [roffset, roffset + len - 1] must be within some
+ * registered window or windows of the remote nodes. The range may
+ * intersect multiple registered windows, but only if those windows are
+ * contiguous in the registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations complete
+ * is non-deterministic. The synchronization functions, scif_fence_mark()/
+ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
+ * the completion of asynchronous RMA operations on the same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
+ * the specified local memory range may remain in a pinned state even after
+ * the specified transfer completes. This may reduce overhead if some or all of
+ * the same virtual address range is referenced in a subsequent call of
+ * scif_vreadfrom() or scif_vwriteto().
+ *
+ * The optimal DMA performance will likely be realized if both
+ * addr and roffset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if addr and roffset are not
+ * cacheline aligned but are separated by some multiple of 64. The lowest level
+ * of performance is likely if addr and roffset are not separated by a
+ * multiple of 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_USECACHE - enable registration caching
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. Passing this flag results in the
+ * current implementation busy waiting and consuming CPU cycles
+ * while the DMA transfer is in progress for best performance by
+ * avoiding the interrupt latency.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost or existed, but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
+ * registered address space of epd
+ */
+int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
+ int rma_flags);
+
+/**
+ * scif_vwriteto() - Copy to a remote address space
+ * @epd: endpoint descriptor
+ * @addr: address from which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space to
+ * which to copy
+ * @rma_flags: transfer mode flags
+ *
+ * scif_vwriteto() copies len bytes from the local memory, starting at addr, to
+ * the remote registered address space of the peer of endpoint epd, starting at
+ * the offset roffset.
+ *
+ * The specified range [roffset, roffset + len - 1] must be within some
+ * registered window or windows of the remote nodes. The range may intersect
+ * multiple registered windows, but only if those windows are contiguous in the
+ * registered address space.
+ *
+ * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
+ * programmed read/writes. Otherwise the data is copied using DMA. If
+ * rma_flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after
+ * the transfer is complete. Otherwise, the transfer may be performed
+ * asynchronously. The order in which any two asynchronous RMA operations complete
+ * is non-deterministic. The synchronization functions, scif_fence_mark()/
+ * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
+ * the completion of asynchronous RMA operations on the same endpoint.
+ *
+ * The DMA transfer of individual bytes is not guaranteed to complete in
+ * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
+ * cacheline or partial cacheline of the source range will become visible on
+ * the destination node after all other transferred data in the source
+ * range has become visible on the destination node.
+ *
+ * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
+ * the specified local memory range may remain in a pinned state even after
+ * the specified transfer completes. This may reduce overhead if some or all of
+ * the same virtual address range is referenced in a subsequent call of
+ * scif_vreadfrom() or scif_vwriteto().
+ *
+ * The optimal DMA performance will likely be realized if both
+ * addr and offset are cacheline aligned (are a multiple of 64). Lower
+ * performance will likely be realized if addr and offset are not cacheline
+ * aligned but are separated by some multiple of 64. The lowest level of
+ * performance is likely if addr and offset are not separated by a multiple of
+ * 64.
+ *
+ * The rma_flags argument is formed by ORing together zero or more of the
+ * following values.
+ * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
+ * engine.
+ * SCIF_RMA_USECACHE - allow registration caching
+ * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
+ * transfer has completed. In the current implementation, passing this flag
+ * causes the call to busy wait, consuming CPU cycles while the DMA transfer
+ * is in progress, in order to avoid interrupt latency and obtain the best
+ * performance.
+ * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
+ * the source range becomes visible on the destination node
+ * after all other transferred data in the source range has
+ * become visible on the destination
+ *
+ * Return:
+ * Upon successful completion, scif_vwriteto() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EACCES - Attempt to write to a read-only range
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
+ * EINVAL - rma_flags is invalid
+ * ENODEV - The remote node is lost, or existed but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
+ * registered address space of epd
+ */
+int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset,
+ int rma_flags);
+
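The ordered, asynchronous variant can be sketched along the same lines; the helper below is illustrative only and relies on the fence calls documented next to await local completion:

    #include <linux/scif.h>

    /* Illustrative helper: asynchronous DMA write. With SCIF_RMA_ORDERED the
     * last (partial) cacheline of the range becomes visible on the peer only
     * after the rest of the data, so the peer may poll the tail of its window
     * as a completion flag. Local completion must still be awaited with
     * scif_fence_mark()/scif_fence_wait().
     */
    static int push_remote_buffer(scif_epd_t epd, void *buf, size_t len,
                                  off_t roffset)
    {
            return scif_vwriteto(epd, buf, len, roffset, SCIF_RMA_ORDERED);
    }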
+/**
+ * scif_fence_mark() - Mark previously issued RMAs
+ * @epd: endpoint descriptor
+ * @flags: control flags
+ * @mark: marked value returned as output.
+ *
+ * scif_fence_mark() returns after marking the current set of all uncompleted
+ * RMAs initiated through the endpoint epd or the current set of all
+ * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are
+ * marked with a value returned at mark. The application may subsequently call
+ * scif_fence_wait(), passing the value returned at mark, to await completion
+ * of all RMAs so marked.
+ *
+ * The flags argument has exactly one of the following values.
+ * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
+ * epd are marked
+ * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
+ * of endpoint epd are marked
+ *
+ * Return:
+ * Upon successful completion, scif_fence_mark() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - flags is invalid
+ * ENODEV - The remote node is lost, or existed but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENOMEM - Insufficient kernel memory was available
+ */
+int scif_fence_mark(scif_epd_t epd, int flags, int *mark);
+
+/**
+ * scif_fence_wait() - Wait for completion of marked RMAs
+ * @epd: endpoint descriptor
+ * @mark: mark value returned by scif_fence_mark()
+ *
+ * scif_fence_wait() returns after all RMAs marked with mark have completed.
+ * The value passed in mark must have been obtained in a previous call to
+ * scif_fence_mark().
+ *
+ * Return:
+ * Upon successful completion, scif_fence_wait() returns 0; otherwise in user
+ * mode -1 is returned and errno is set to indicate the error; in kernel mode
+ * the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * ENODEV - The remote node is lost, or existed but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENOMEM - Insufficient kernel memory was available
+ */
+int scif_fence_wait(scif_epd_t epd, int mark);
+
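Taken together, scif_fence_mark() and scif_fence_wait() give the usual drain pattern for asynchronous RMAs such as the write above; a minimal sketch (helper name illustrative):

    #include <linux/scif.h>

    /* Illustrative helper: wait for every RMA previously issued through epd. */
    static int drain_local_rmas(scif_epd_t epd)
    {
            int mark, err;

            /* Mark the set of currently outstanding RMAs initiated via epd. */
            err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
            if (err < 0)
                    return err;

            /* Block until every RMA in the marked set has completed. */
            return scif_fence_wait(epd, mark);
    }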
+/**
+ * scif_fence_signal() - Request a memory update on completion of RMAs
+ * @epd: endpoint descriptor
+ * @loff: local offset
+ * @lval: local value to write at offset loff
+ * @roff: remote offset
+ * @rval: remote value to write at offset roff
+ * @flags: flags
+ *
+ * scif_fence_signal() returns after marking the current set of all uncompleted
+ * RMAs initiated through the endpoint epd or marking the current set of all
+ * uncompleted RMAs initiated through the peer of endpoint epd.
+ *
+ * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the
+ * marked set, lval is written to memory at the address corresponding to offset
+ * loff in the local registered address space of epd. loff must be within a
+ * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion
+ * of the RMAs in the marked set, rval is written to memory at the address
+ * corresponding to offset roff in the remote registered address space of epd.
+ * roff must be within a remote registered window of the peer of epd. Note
+ * that any specified offset must be DWORD (4 byte / 32 bit) aligned.
+ *
+ * The flags argument is formed by OR'ing together the following.
+ * Exactly one of the following values.
+ * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
+ * epd are marked
+ * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
+ * of endpoint epd are marked
+ * One or more of the following values.
+ * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to
+ * memory at the address corresponding to offset loff in the local
+ * registered address space of epd.
+ * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to
+ * memory at the address corresponding to offset roff in the remote
+ * registered address space of epd.
+ *
+ * Return:
+ * Upon successful completion, scif_fence_signal() returns 0; otherwise in
+ * user mode -1 is returned and errno is set to indicate the error; in kernel
+ * mode the negative of one of the following errors is returned.
+ *
+ * Errors:
+ * EBADF, ENOTTY - epd is not a valid endpoint descriptor
+ * ECONNRESET - Connection reset by peer
+ * EINVAL - flags is invalid, or loff or roff are not DWORD aligned
+ * ENODEV - The remote node is lost, or existed but is not currently in the
+ * network since it may have crashed
+ * ENOTCONN - The endpoint is not connected
+ * ENXIO - loff is invalid for the registered address space of epd, or roff
+ * is invalid for the registered address space of the peer of epd
+ */
+int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
+ u64 rval, int flags);
+
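One common use of scif_fence_signal() is a doorbell: once the outstanding RMAs have landed, a token is written into the peer's registered window for the peer to poll. The sketch below is illustrative; it assumes loff/lval may be passed as zero when SCIF_SIGNAL_LOCAL is not requested:

    #include <linux/scif.h>

    /* Illustrative helper: doorbell_roff must be DWORD aligned and fall
     * within a window registered by the peer of epd.
     */
    static int ring_remote_doorbell(scif_epd_t epd, off_t doorbell_roff,
                                    u64 token)
    {
            /* loff/lval are passed as zero on the assumption that they are
             * ignored when only the remote update is requested.
             */
            return scif_fence_signal(epd, 0, 0, doorbell_roff, token,
                                     SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_REMOTE);
    }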
+/**
+ * scif_get_node_ids() - Return information about online nodes
+ * @nodes: array in which to return online node IDs
+ * @len: number of entries in the nodes array
+ * @self: address to place the node ID of the local node
+ *
+ * scif_get_node_ids() fills in the nodes array with up to len node IDs of the
+ * nodes in the SCIF network. If there is not enough space in nodes, as
+ * indicated by the len parameter, only len node IDs are returned in nodes. The
+ * return value of scif_get_node_ids() is the total number of nodes currently in
+ * the SCIF network. By checking the return value against the len parameter,
+ * the user may determine if enough space for nodes was allocated.
+ *
+ * The node ID of the local node is returned at self.
+ *
+ * Return:
+ * Upon successful completion, scif_get_node_ids() returns the actual number of
+ * online nodes in the SCIF network including 'self'; otherwise in user mode
+ * -1 is returned and errno is set to indicate the error; in kernel mode no
+ * errors are returned.
+ *
+ * Errors:
+ * EFAULT - Bad address
+ */
+int scif_get_node_ids(u16 *nodes, int len, u16 *self);
+
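A short kernel-mode sketch of the contract described above (the array is filled with up to len IDs while the total count is returned); MAX_SCIF_NODES and the helper name are made up for the example:

    #include <linux/kernel.h>
    #include <linux/scif.h>

    #define MAX_SCIF_NODES 16 /* illustrative bound, not part of the API */

    /* Illustrative helper: log the online SCIF nodes and the local node ID. */
    static void log_scif_nodes(void)
    {
            u16 nodes[MAX_SCIF_NODES];
            u16 self;
            int total, shown, i;

            total = scif_get_node_ids(nodes, MAX_SCIF_NODES, &self);
            shown = total < MAX_SCIF_NODES ? total : MAX_SCIF_NODES;

            pr_info("SCIF: %d node(s) online, local node %u\n", total, self);
            for (i = 0; i < shown; i++)
                    pr_info("SCIF: node id %u\n", nodes[i]);
    }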
+#endif /* __SCIF_H__ */
diff --git a/include/linux/security.h b/include/linux/security.h
index ba96471..79d85dd 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/mm.h>
struct linux_binprm;
struct cred;
@@ -43,7 +44,6 @@ struct file;
struct vfsmount;
struct path;
struct qstr;
-struct nameidata;
struct iattr;
struct fown_struct;
struct file_operations;
@@ -54,9 +54,6 @@ struct xattr;
struct xfrm_sec_ctx;
struct mm_struct;
-/* Maximum number of letters for an LSM name string */
-#define SECURITY_NAME_MAX 10
-
/* If capable should audit the security request */
#define SECURITY_CAP_NOAUDIT 0
#define SECURITY_CAP_AUDIT 1
@@ -69,10 +66,7 @@ struct audit_krule;
struct user_namespace;
struct timezone;
-/*
- * These functions are in security/capability.c and are used
- * as the default capabilities functions
- */
+/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
int cap, int audit);
extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
@@ -114,10 +108,6 @@ struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
-extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
-
-void reset_security_ops(void);
-
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
@@ -188,1557 +178,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
opts->num_mnt_opts = 0;
}
-/**
- * struct security_operations - main security structure
- *
- * Security module identifier.
- *
- * @name:
- * A string that acts as a unique identifier for the LSM with max number
- * of characters = SECURITY_NAME_MAX.
- *
- * Security hooks for program execution operations.
- *
- * @bprm_set_creds:
- * Save security information in the bprm->security field, typically based
- * on information about the bprm->file, for later use by the apply_creds
- * hook. This hook may also optionally check permissions (e.g. for
- * transitions between security domains).
- * This hook may be called multiple times during a single execve, e.g. for
- * interpreters. The hook can tell whether it has already been called by
- * checking to see if @bprm->security is non-NULL. If so, then the hook
- * may decide either to retain the security information saved earlier or
- * to replace it.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_check_security:
- * This hook mediates the point when a search for a binary handler will
- * begin. It allows a check the @bprm->security value which is set in the
- * preceding set_creds call. The primary difference from set_creds is
- * that the argv list and envp list are reliably available in @bprm. This
- * hook may be called multiple times during a single execve; and in each
- * pass set_creds is called first.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_committing_creds:
- * Prepare to install the new security attributes of a process being
- * transformed by an execve operation, based on the old credentials
- * pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
- * This hook is a good place to perform state changes on the process such
- * as closing open file descriptors to which access will no longer be
- * granted when the attributes are changed. This is called immediately
- * before commit_creds().
- * @bprm_committed_creds:
- * Tidy up after the installation of the new security attributes of a
- * process being transformed by an execve operation. The new credentials
- * have, by this point, been set to @current->cred. @bprm points to the
- * linux_binprm structure. This hook is a good place to perform state
- * changes on the process such as clearing out non-inheritable signal
- * state. This is called immediately after commit_creds().
- * @bprm_secureexec:
- * Return a boolean value (0 or 1) indicating whether a "secure exec"
- * is required. The flag is passed in the auxiliary table
- * on the initial stack to the ELF interpreter to indicate whether libc
- * should enable secure mode.
- * @bprm contains the linux_binprm structure.
- *
- * Security hooks for filesystem operations.
- *
- * @sb_alloc_security:
- * Allocate and attach a security structure to the sb->s_security field.
- * The s_security field is initialized to NULL when the structure is
- * allocated.
- * @sb contains the super_block structure to be modified.
- * Return 0 if operation was successful.
- * @sb_free_security:
- * Deallocate and clear the sb->s_security field.
- * @sb contains the super_block structure to be modified.
- * @sb_statfs:
- * Check permission before obtaining filesystem statistics for the @mnt
- * mountpoint.
- * @dentry is a handle on the superblock for the filesystem.
- * Return 0 if permission is granted.
- * @sb_mount:
- * Check permission before an object specified by @dev_name is mounted on
- * the mount point named by @nd. For an ordinary mount, @dev_name
- * identifies a device if the file system type requires a device. For a
- * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
- * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
- * pathname of the object being mounted.
- * @dev_name contains the name for object being mounted.
- * @path contains the path for mount point object.
- * @type contains the filesystem type.
- * @flags contains the mount flags.
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_copy_data:
- * Allow mount option data to be copied prior to parsing by the filesystem,
- * so that the security module can extract security-specific mount
- * options cleanly (a filesystem may modify the data e.g. with strsep()).
- * This also allows the original mount data to be stripped of security-
- * specific options to avoid having to make filesystems aware of them.
- * @type the type of filesystem being mounted.
- * @orig the original mount data copied from userspace.
- * @copy copied data which will be passed to the security module.
- * Returns 0 if the copy was successful.
- * @sb_remount:
- * Extracts security system specific mount options and verifies no changes
- * are being made to those options.
- * @sb superblock being remounted
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_umount:
- * Check permission before the @mnt file system is unmounted.
- * @mnt contains the mounted file system.
- * @flags contains the unmount flags, e.g. MNT_FORCE.
- * Return 0 if permission is granted.
- * @sb_pivotroot:
- * Check permission before pivoting the root filesystem.
- * @old_path contains the path for the new location of the current root (put_old).
- * @new_path contains the path for the new root (new_root).
- * Return 0 if permission is granted.
- * @sb_set_mnt_opts:
- * Set the security relevant mount options used for a superblock
- * @sb the superblock to set security mount options for
- * @opts binary data structure containing all lsm mount data
- * @sb_clone_mnt_opts:
- * Copy all security options from a given superblock to another
- * @oldsb old superblock which contain information to clone
- * @newsb new superblock which needs filled in
- * @sb_parse_opts_str:
- * Parse a string of security data filling in the opts structure
- * @options string containing all mount options known by the LSM
- * @opts binary data structure usable by the LSM
- * @dentry_init_security:
- * Compute a context for a dentry as the inode is not yet available
- * since NFSv4 has no label backed by an EA anyway.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @ctx pointer to place the pointer to the resulting context in.
- * @ctxlen point to place the length of the resulting context.
- *
- *
- * Security hooks for inode operations.
- *
- * @inode_alloc_security:
- * Allocate and attach a security structure to @inode->i_security. The
- * i_security field is initialized to NULL when the inode structure is
- * allocated.
- * @inode contains the inode structure.
- * Return 0 if operation was successful.
- * @inode_free_security:
- * @inode contains the inode structure.
- * Deallocate the inode security structure and set @inode->i_security to
- * NULL.
- * @inode_init_security:
- * Obtain the security attribute name suffix and value to set on a newly
- * created inode and set up the incore security field for the new inode.
- * This hook is called by the fs code as part of the inode creation
- * transaction and provides for atomic labeling of the inode, unlike
- * the post_create/mkdir/... hooks called by the VFS. The hook function
- * is expected to allocate the name and value via kmalloc, with the caller
- * being responsible for calling kfree after using them.
- * If the security module does not use security attributes or does
- * not wish to put a security attribute on this particular inode,
- * then it should return -EOPNOTSUPP to skip this processing.
- * @inode contains the inode structure of the newly created inode.
- * @dir contains the inode structure of the parent directory.
- * @qstr contains the last path component of the new object
- * @name will be set to the allocated name suffix (e.g. selinux).
- * @value will be set to the allocated attribute value.
- * @len will be set to the length of the value.
- * Returns 0 if @name and @value have been successfully set,
- * -EOPNOTSUPP if no security attribute is needed, or
- * -ENOMEM on memory allocation failure.
- * @inode_create:
- * Check permission to create a regular file.
- * @dir contains inode structure of the parent of the new file.
- * @dentry contains the dentry structure for the file to be created.
- * @mode contains the file mode of the file to be created.
- * Return 0 if permission is granted.
- * @inode_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing link to the file.
- * @dir contains the inode structure of the parent directory of the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @path_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing link
- * to the file.
- * @new_dir contains the path structure of the parent directory of
- * the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @inode_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the inode structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @path_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the path structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @inode_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the inode structure of parent directory of the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @path_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the path structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @inode_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with inode structure @dir.
- * @dir contains the inode structure of parent of the directory to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @path_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with path structure @path.
- * @dir contains the path structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @inode_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the inode structure of parent of the directory to be removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @path_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the path structure of parent of the directory to be
- * removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @inode_mknod:
- * Check permissions when creating a special file (or a socket or a fifo
- * file created via the mknod system call). Note that if mknod operation
- * is being done for a regular file, then the create hook will be called
- * and not this hook.
- * @dir contains the inode structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the device number.
- * Return 0 if permission is granted.
- * @path_mknod:
- * Check permissions when creating a file. Note that this hook is called
- * even if mknod operation is being done for a regular file.
- * @dir contains the path structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the undecoded device number. Use new_decode_dev() to get
- * the decoded device number.
- * Return 0 if permission is granted.
- * @inode_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the inode structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the inode structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the path structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the path structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_chmod:
- * Check for permission to change DAC's permission of a file or directory.
- * @dentry contains the dentry structure.
- * @mnt contains the vfsmnt structure.
- * @mode contains DAC's mode.
- * Return 0 if permission is granted.
- * @path_chown:
- * Check for permission to change owner/group of a file or directory.
- * @path contains the path structure.
- * @uid contains new owner's ID.
- * @gid contains new group's ID.
- * Return 0 if permission is granted.
- * @path_chroot:
- * Check for permission to change root directory.
- * @path contains the path structure.
- * Return 0 if permission is granted.
- * @inode_readlink:
- * Check the permission to read the symbolic link.
- * @dentry contains the dentry structure for the file link.
- * Return 0 if permission is granted.
- * @inode_follow_link:
- * Check permission to follow a symbolic link when looking up a pathname.
- * @dentry contains the dentry structure for the link.
- * @nd contains the nameidata structure for the parent directory.
- * Return 0 if permission is granted.
- * @inode_permission:
- * Check permission before accessing an inode. This hook is called by the
- * existing Linux permission function, so a security module can use it to
- * provide additional checking for existing Linux permission checks.
- * Notice that this hook is called when a file is opened (as well as many
- * other operations), whereas the file_security_ops permission hook is
- * called when the actual read/write operations are performed.
- * @inode contains the inode structure to check.
- * @mask contains the permission mask.
- * Return 0 if permission is granted.
- * @inode_setattr:
- * Check permission before setting file attributes. Note that the kernel
- * call to notify_change is performed from several locations, whenever
- * file attributes change (such as when a file is truncated, chown/chmod
- * operations, transferring disk quotas, etc).
- * @dentry contains the dentry structure for the file.
- * @attr is the iattr structure containing the new file attributes.
- * Return 0 if permission is granted.
- * @path_truncate:
- * Check permission before truncating a file.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_getattr:
- * Check permission before obtaining file attributes.
- * @mnt is the vfsmount where the dentry was looked up
- * @dentry contains the dentry structure for the file.
- * Return 0 if permission is granted.
- * @inode_setxattr:
- * Check permission before setting the extended attributes
- * @value identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_post_setxattr:
- * Update inode security field after successful setxattr operation.
- * @value identified by @name for @dentry.
- * @inode_getxattr:
- * Check permission before obtaining the extended attributes
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_listxattr:
- * Check permission before obtaining the list of extended attribute
- * names for @dentry.
- * Return 0 if permission is granted.
- * @inode_removexattr:
- * Check permission before removing the extended attribute
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_getsecurity:
- * Retrieve a copy of the extended attribute representation of the
- * security label associated with @name for @inode via @buffer. Note that
- * @name is the remainder of the attribute name after the security prefix
- * has been removed. @alloc is used to specify of the call should return a
- * value via the buffer or just the value length Return size of buffer on
- * success.
- * @inode_setsecurity:
- * Set the security label associated with @name for @inode from the
- * extended attribute value @value. @size indicates the size of the
- * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
- * Note that @name is the remainder of the attribute name after the
- * security. prefix has been removed.
- * Return 0 on success.
- * @inode_listsecurity:
- * Copy the extended attribute names for the security labels
- * associated with @inode into @buffer. The maximum size of @buffer
- * is specified by @buffer_size. @buffer may be NULL to request
- * the size of the buffer required.
- * Returns number of bytes used/required on success.
- * @inode_need_killpriv:
- * Called when an inode has been changed.
- * @dentry is the dentry being changed.
- * Return <0 on error to abort the inode change operation.
- * Return 0 if inode_killpriv does not need to be called.
- * Return >0 if inode_killpriv does need to be called.
- * @inode_killpriv:
- * The setuid bit is being removed. Remove similar security labels.
- * Called with the dentry->d_inode->i_mutex held.
- * @dentry is the dentry being changed.
- * Return 0 on success. If error is returned, then the operation
- * causing setuid bit removal is failed.
- * @inode_getsecid:
- * Get the secid associated with the node.
- * @inode contains a pointer to the inode.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- *
- * Security hooks for file operations
- *
- * @file_permission:
- * Check file permissions before accessing an open file. This hook is
- * called by various operations that read or write files. A security
- * module can use this hook to perform additional checking on these
- * operations, e.g. to revalidate permissions on use to support privilege
- * bracketing or policy changes. Notice that this hook is used when the
- * actual read/write operations are performed, whereas the
- * inode_security_ops hook is called when a file is opened (as well as
- * many other operations).
- * Caveat: Although this hook can be used to revalidate permissions for
- * various system call operations that read or write files, it does not
- * address the revalidation of permissions for memory-mapped files.
- * Security modules must handle this separately if they need such
- * revalidation.
- * @file contains the file structure being accessed.
- * @mask contains the requested permissions.
- * Return 0 if permission is granted.
- * @file_alloc_security:
- * Allocate and attach a security structure to the file->f_security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @file contains the file structure to secure.
- * Return 0 if the hook is successful and permission is granted.
- * @file_free_security:
- * Deallocate and free any security structures stored in file->f_security.
- * @file contains the file structure being modified.
- * @file_ioctl:
- * @file contains the file structure.
- * @cmd contains the operation to perform.
- * @arg contains the operational arguments.
- * Check permission for an ioctl operation on @file. Note that @arg
- * sometimes represents a user space pointer; in other cases, it may be a
- * simple integer value. When @arg represents a user space pointer, it
- * should never be used by the security module.
- * Return 0 if permission is granted.
- * @mmap_addr :
- * Check permissions for a mmap operation at @addr.
- * @addr contains virtual address that will be used for the operation.
- * Return 0 if permission is granted.
- * @mmap_file :
- * Check permissions for a mmap operation. The @file may be NULL, e.g.
- * if mapping anonymous memory.
- * @file contains the file structure for file to map (may be NULL).
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @file_mprotect:
- * Check permissions before changing memory access permissions.
- * @vma contains the memory region to modify.
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * Return 0 if permission is granted.
- * @file_lock:
- * Check permission before performing file locking operations.
- * Note: this hook mediates both flock and fcntl style locks.
- * @file contains the file structure.
- * @cmd contains the posix-translated lock operation to perform
- * (e.g. F_RDLCK, F_WRLCK).
- * Return 0 if permission is granted.
- * @file_fcntl:
- * Check permission before allowing the file operation specified by @cmd
- * from being performed on the file @file. Note that @arg sometimes
- * represents a user space pointer; in other cases, it may be a simple
- * integer value. When @arg represents a user space pointer, it should
- * never be used by the security module.
- * @file contains the file structure.
- * @cmd contains the operation to be performed.
- * @arg contains the operational arguments.
- * Return 0 if permission is granted.
- * @file_set_fowner:
- * Save owner security information (typically from current->security) in
- * file->f_security for later use by the send_sigiotask hook.
- * @file contains the file structure to update.
- * Return 0 on success.
- * @file_send_sigiotask:
- * Check permission for the file owner @fown to send SIGIO or SIGURG to the
- * process @tsk. Note that this hook is sometimes called from interrupt.
- * Note that the fown_struct, @fown, is never outside the context of a
- * struct file, so the file structure (and associated security information)
- * can always be obtained:
- * container_of(fown, struct file, f_owner)
- * @tsk contains the structure of task receiving signal.
- * @fown contains the file owner information.
- * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
- * Return 0 if permission is granted.
- * @file_receive:
- * This hook allows security modules to control the ability of a process
- * to receive an open file descriptor via socket IPC.
- * @file contains the file structure being received.
- * Return 0 if permission is granted.
- * @file_open
- * Save open-time permission checking state for later use upon
- * file_permission, and recheck access if anything has changed
- * since inode_permission.
- *
- * Security hooks for task operations.
- *
- * @task_create:
- * Check permission before creating a child process. See the clone(2)
- * manual page for definitions of the @clone_flags.
- * @clone_flags contains the flags indicating what should be shared.
- * Return 0 if permission is granted.
- * @task_free:
- * @task task being freed
- * Handle release of task-related resources. (Note that this can be called
- * from interrupt context.)
- * @cred_alloc_blank:
- * @cred points to the credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Only allocate sufficient memory and attach to @cred such that
- * cred_transfer() will not get ENOMEM.
- * @cred_free:
- * @cred points to the credentials.
- * Deallocate and clear the cred->security field in a set of credentials.
- * @cred_prepare:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Prepare a new set of credentials by copying the data from the old set.
- * @cred_transfer:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * Transfer data from original creds to new creds
- * @kernel_act_as:
- * Set the credentials for a kernel service to act as (subjective context).
- * @new points to the credentials to be modified.
- * @secid specifies the security ID to be set
- * The current task must be the one that nominated @secid.
- * Return 0 if successful.
- * @kernel_create_files_as:
- * Set the file creation context in a set of credentials to be the same as
- * the objective context of the specified inode.
- * @new points to the credentials to be modified.
- * @inode points to the inode to use as a reference.
- * The current task must be the one that nominated @inode.
- * Return 0 if successful.
- * @kernel_fw_from_file:
- * Load firmware from userspace (not called for built-in firmware).
- * @file contains the file structure pointing to the file containing
- * the firmware to load. This argument will be NULL if the firmware
- * was loaded via the uevent-triggered blob-based interface exposed
- * by CONFIG_FW_LOADER_USER_HELPER.
- * @buf pointer to buffer containing firmware contents.
- * @size length of the firmware contents.
- * Return 0 if permission is granted.
- * @kernel_module_request:
- * Ability to trigger the kernel to automatically upcall to userspace for
- * userspace to load a kernel module with the given name.
- * @kmod_name name of the module requested by the kernel
- * Return 0 if successful.
- * @kernel_module_from_file:
- * Load a kernel module from userspace.
- * @file contains the file structure pointing to the file containing
- * the kernel module to load. If the module is being loaded from a blob,
- * this argument will be NULL.
- * Return 0 if permission is granted.
- * @task_fix_setuid:
- * Update the module's state after setting one or more of the user
- * identity attributes of the current process. The @flags parameter
- * indicates which of the set*uid system calls invoked this hook. If
- * @new is the set of credentials that will be installed. Modifications
- * should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaces
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 on success.
- * @task_setpgid:
- * Check permission before setting the process group identifier of the
- * process @p to @pgid.
- * @p contains the task_struct for process being modified.
- * @pgid contains the new pgid.
- * Return 0 if permission is granted.
- * @task_getpgid:
- * Check permission before getting the process group identifier of the
- * process @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsid:
- * Check permission before getting the session identifier of the process
- * @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsecid:
- * Retrieve the security identifier of the process @p.
- * @p contains the task_struct for the process and place is into @secid.
- * In case of failure, @secid will be set to zero.
- *
- * @task_setnice:
- * Check permission before setting the nice value of @p to @nice.
- * @p contains the task_struct of process.
- * @nice contains the new nice value.
- * Return 0 if permission is granted.
- * @task_setioprio
- * Check permission before setting the ioprio value of @p to @ioprio.
- * @p contains the task_struct of process.
- * @ioprio contains the new ioprio value
- * Return 0 if permission is granted.
- * @task_getioprio
- * Check permission before getting the ioprio value of @p.
- * @p contains the task_struct of process.
- * Return 0 if permission is granted.
- * @task_setrlimit:
- * Check permission before setting the resource limits of the current
- * process for @resource to @new_rlim. The old resource limit values can
- * be examined by dereferencing (current->signal->rlim + resource).
- * @resource contains the resource whose limit is being set.
- * @new_rlim contains the new limits for @resource.
- * Return 0 if permission is granted.
- * @task_setscheduler:
- * Check permission before setting scheduling policy and/or parameters of
- * process @p based on @policy and @lp.
- * @p contains the task_struct for process.
- * @policy contains the scheduling policy.
- * @lp contains the scheduling parameters.
- * Return 0 if permission is granted.
- * @task_getscheduler:
- * Check permission before obtaining scheduling information for process
- * @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_movememory
- * Check permission before moving memory owned by process @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_kill:
- * Check permission before sending signal @sig to @p. @info can be NULL,
- * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
- * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
- * from the kernel and should typically be permitted.
- * SIGIO signals are handled separately by the send_sigiotask hook in
- * file_security_ops.
- * @p contains the task_struct for process.
- * @info contains the signal information.
- * @sig contains the signal value.
- * @secid contains the sid of the process where the signal originated
- * Return 0 if permission is granted.
- * @task_wait:
- * Check permission before allowing a process to reap a child process @p
- * and collect its status information.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_prctl:
- * Check permission before performing a process control operation on the
- * current process.
- * @option contains the operation.
- * @arg2 contains a argument.
- * @arg3 contains a argument.
- * @arg4 contains a argument.
- * @arg5 contains a argument.
- * Return -ENOSYS if no-one wanted to handle this op, any other value to
- * cause prctl() to return immediately with that value.
- * @task_to_inode:
- * Set the security attributes for an inode based on an associated task's
- * security attributes, e.g. for /proc/pid inodes.
- * @p contains the task_struct for the task.
- * @inode contains the inode structure for the inode.
- *
- * Security hooks for Netlink messaging.
- *
- * @netlink_send:
- * Save security information for a netlink message so that permission
- * checking can be performed when the message is processed. The security
- * information can be saved using the eff_cap field of the
- * netlink_skb_parms structure. Also may be used to provide fine
- * grained control over message transmission.
- * @sk associated sock of task sending the message.
- * @skb contains the sk_buff structure for the netlink message.
- * Return 0 if the information was successfully saved and message
- * is allowed to be transmitted.
- *
- * Security hooks for Unix domain networking.
- *
- * @unix_stream_connect:
- * Check permissions before establishing a Unix domain stream connection
- * between @sock and @other.
- * @sock contains the sock structure.
- * @other contains the peer sock structure.
- * @newsk contains the new sock structure.
- * Return 0 if permission is granted.
- * @unix_may_send:
- * Check permissions before connecting or sending datagrams from @sock to
- * @other.
- * @sock contains the socket structure.
- * @other contains the peer socket structure.
- * Return 0 if permission is granted.
- *
- * The @unix_stream_connect and @unix_may_send hooks were necessary because
- * Linux provides an alternative to the conventional file name space for Unix
- * domain sockets. Whereas binding and connecting to sockets in the file name
- * space is mediated by the typical file permissions (and caught by the mknod
- * and permission hooks in inode_security_ops), binding and connecting to
- * sockets in the abstract name space is completely unmediated. Sufficient
- * control of Unix domain sockets in the abstract name space isn't possible
- * using only the socket layer hooks, since we need to know the actual target
- * socket, which is not looked up until we are inside the af_unix code.
- *
- * Security hooks for socket operations.
- *
- * @socket_create:
- * Check permissions prior to creating a new socket.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * Return 0 if permission is granted.
- * @socket_post_create:
- * This hook allows a module to update or allocate a per-socket security
- * structure. Note that the security field was not added directly to the
- * socket structure, but rather, the socket security information is stored
- * in the associated inode. Typically, the inode alloc_security hook will
- * allocate and and attach security information to
- * sock->inode->i_security. This hook may be used to update the
- * sock->inode->i_security field with additional information that wasn't
- * available when the inode was allocated.
- * @sock contains the newly created socket structure.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * @socket_bind:
- * Check permission before socket protocol layer bind operation is
- * performed and the socket @sock is bound to the address specified in the
- * @address parameter.
- * @sock contains the socket structure.
- * @address contains the address to bind to.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_connect:
- * Check permission before socket protocol layer connect operation
- * attempts to connect socket @sock to a remote address, @address.
- * @sock contains the socket structure.
- * @address contains the address of remote endpoint.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_listen:
- * Check permission before socket protocol layer listen operation.
- * @sock contains the socket structure.
- * @backlog contains the maximum length for the pending connection queue.
- * Return 0 if permission is granted.
- * @socket_accept:
- * Check permission before accepting a new connection. Note that the new
- * socket, @newsock, has been created and some information copied to it,
- * but the accept operation has not actually been performed.
- * @sock contains the listening socket structure.
- * @newsock contains the newly created server socket for connection.
- * Return 0 if permission is granted.
- * @socket_sendmsg:
- * Check permission before transmitting a message to another socket.
- * @sock contains the socket structure.
- * @msg contains the message to be transmitted.
- * @size contains the size of message.
- * Return 0 if permission is granted.
- * @socket_recvmsg:
- * Check permission before receiving a message from a socket.
- * @sock contains the socket structure.
- * @msg contains the message structure.
- * @size contains the size of message structure.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @socket_getsockname:
- * Check permission before the local address (name) of the socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getpeername:
- * Check permission before the remote address (name) of a socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getsockopt:
- * Check permissions before retrieving the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to retrieve option from.
- * @optname contains the name of option to retrieve.
- * Return 0 if permission is granted.
- * @socket_setsockopt:
- * Check permissions before setting the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to set options for.
- * @optname contains the name of the option to set.
- * Return 0 if permission is granted.
- * @socket_shutdown:
- * Checks permission before all or part of a connection on the socket
- * @sock is shut down.
- * @sock contains the socket structure.
- * @how contains the flag indicating how future sends and receives are handled.
- * Return 0 if permission is granted.
- * @socket_sock_rcv_skb:
- * Check permissions on incoming network packets. This hook is distinct
- * from Netfilter's IP input hooks since it is the first time that the
- * incoming sk_buff @skb has been associated with a particular socket, @sk.
- * Must not sleep inside this hook because some callers hold spinlocks.
- * @sk contains the sock (not socket) associated with the incoming sk_buff.
- * @skb contains the incoming network data.
- * @socket_getpeersec_stream:
- * This hook allows the security module to provide peer socket security
- * state for unix or connected tcp sockets to userspace via getsockopt
- * SO_GETPEERSEC. For tcp sockets this can be meaningful if the
- * socket is associated with an ipsec SA.
- * @sock is the local socket.
- * @optval userspace memory where the security state is to be copied.
- * @optlen userspace int where the module should copy the actual length
- * of the security state.
- * @len as input is the maximum length to copy to userspace provided
- * by the caller.
- * Return 0 if all is well, otherwise, typical getsockopt return
- * values.
- * @socket_getpeersec_dgram:
- * This hook allows the security module to provide peer socket security
- * state for udp sockets on a per-packet basis to userspace via
- * getsockopt SO_GETPEERSEC. The application must first have indicated
- * the IP_PASSSEC option via getsockopt. It can then retrieve the
- * security state returned by this hook for a packet via the SCM_SECURITY
- * ancillary message type.
- * @skb is the skbuff for the packet being queried
- * @secdata is a pointer to a buffer in which to copy the security data
- * @seclen is the maximum length for @secdata
- * Return 0 on success, error on failure.
- * @sk_alloc_security:
- * Allocate and attach a security structure to the sk->sk_security field,
- * which is used to copy security attributes between local stream sockets.
- * @sk_free_security:
- * Deallocate security structure.
- * @sk_clone_security:
- * Clone/copy security structure.
- * @sk_getsecid:
- * Retrieve the LSM-specific secid for the sock to enable caching of network
- * authorizations.
- * @sock_graft:
- * Sets the socket's isec sid to the sock's sid.
- * @inet_conn_request:
- * Sets the openreq's sid to socket's sid with MLS portion taken from peer sid.
- * @inet_csk_clone:
- * Sets the new child socket's sid to the openreq sid.
- * @inet_conn_established:
- * Sets the connection's peersid to the secmark on skb.
- * @secmark_relabel_packet:
- * check if the process should be allowed to relabel packets to the given secid
- * @security_secmark_refcount_inc
- * tells the LSM to increment the number of secmark labeling rules loaded
- * @security_secmark_refcount_dec
- * tells the LSM to decrement the number of secmark labeling rules loaded
- * @req_classify_flow:
- * Sets the flow's sid to the openreq sid.
- * @tun_dev_alloc_security:
- * This hook allows a module to allocate a security structure for a TUN
- * device.
- * @security pointer to a security structure pointer.
- * Returns a zero on success, negative values on failure.
- * @tun_dev_free_security:
- * This hook allows a module to free the security structure for a TUN
- * device.
- * @security pointer to the TUN device's security structure
- * @tun_dev_create:
- * Check permissions prior to creating a new TUN device.
- * @tun_dev_attach_queue:
- * Check permissions prior to attaching to a TUN device queue.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_attach:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's sock structure.
- * @sk contains the existing sock structure.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_open:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's security structure.
- * @security pointer to the TUN devices's security structure.
- * @skb_owned_by:
- * This hook sets the packet's owning sock.
- * @skb is the packet.
- * @sk the sock which owns the packet.
- *
- * Security hooks for XFRM operations.
- *
- * @xfrm_policy_alloc_security:
- * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
- * Database used by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level policy update program (e.g., setkey).
- * Allocate a security structure to the xp->security field; the security
- * field is initialized to NULL when the xfrm_policy is allocated.
- * Return 0 if operation was successful (memory to allocate, legal context)
- * @gfp is to specify the context for the allocation
- * @xfrm_policy_clone_security:
- * @old_ctx contains an existing xfrm_sec_ctx.
- * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
- * Allocate a security structure in new_ctxp that contains the
- * information from the old_ctx structure.
- * Return 0 if operation was successful (memory to allocate).
- * @xfrm_policy_free_security:
- * @ctx contains the xfrm_sec_ctx
- * Deallocate xp->security.
- * @xfrm_policy_delete_security:
- * @ctx contains the xfrm_sec_ctx.
- * Authorize deletion of xp->security.
- * @xfrm_state_alloc:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level SA generation program (e.g., setkey or racoon).
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to sec_ctx. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_alloc_acquire:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @polsec contains the policy's security context.
- * @secid contains the secid from which to take the mls portion of the
- * context.
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to secid. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_free_security:
- * @x contains the xfrm_state.
- * Deallocate x->security.
- * @xfrm_state_delete_security:
- * @x contains the xfrm_state.
- * Authorize deletion of x->security.
- * @xfrm_policy_lookup:
- * @ctx contains the xfrm_sec_ctx for which the access control is being
- * checked.
- * @fl_secid contains the flow security label that is used to authorize
- * access to the policy xp.
- * @dir contains the direction of the flow (input or output).
- * Check permission when a flow selects a xfrm_policy for processing
- * XFRMs on a packet. The hook is called when selecting either a
- * per-socket policy or a generic xfrm policy.
- * Return 0 if permission is granted, -ESRCH otherwise, or -errno
- * on other errors.
- * @xfrm_state_pol_flow_match:
- * @x contains the state to match.
- * @xp contains the policy to check for a match.
- * @fl contains the flow to check for a match.
- * Return 1 if there is a match.
- * @xfrm_decode_session:
- * @skb points to skb to decode.
- * @secid points to the flow key secid to set.
- * @ckall says if all xfrms used should be checked for same secid.
- * Return 0 if ckall is zero or all xfrms used have the same secid.
- *
- * Security hooks affecting all Key Management operations
- *
- * @key_alloc:
- * Permit allocation of a key and assign security data. Note that key does
- * not have a serial number assigned at this point.
- * @key points to the key.
- * @flags is the allocation flags
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_free:
- * Notification of destruction; free security data.
- * @key points to the key.
- * No return value.
- * @key_permission:
- * See whether a specific operational right is granted to a process on a
- * key.
- * @key_ref refers to the key (key pointer + possession attribute bit).
- * @cred points to the credentials to provide the context against which to
- * evaluate the security data on the key.
- * @perm describes the combination of permissions required of this key.
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_getsecurity:
- * Get a textual representation of the security context attached to a key
- * for the purposes of honouring KEYCTL_GETSECURITY. This function
- * allocates the storage for the NUL-terminated string and the caller
- * should free it.
- * @key points to the key to be queried.
- * @_buffer points to a pointer that should be set to point to the
- * resulting string (if no label or an error occurs).
- * Return the length of the string (including terminating NUL) or -ve if
- * an error.
- * May also return 0 (and a NULL buffer pointer) if there is no label.
- *
- * Security hooks affecting all System V IPC operations.
- *
- * @ipc_permission:
- * Check permissions for access to IPC
- * @ipcp contains the kernel IPC permission structure
- * @flag contains the desired (requested) permission set
- * Return 0 if permission is granted.
- * @ipc_getsecid:
- * Get the secid associated with the ipc object.
- * @ipcp contains the kernel IPC permission structure.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- *
- * Security hooks for individual messages held in System V IPC message queues
- * @msg_msg_alloc_security:
- * Allocate and attach a security structure to the msg->security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @msg contains the message structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_msg_free_security:
- * Deallocate the security structure for this message.
- * @msg contains the message structure to be modified.
- *
- * Security hooks for System V IPC Message Queues
- *
- * @msg_queue_alloc_security:
- * Allocate and attach a security structure to the
- * msq->q_perm.security field. The security field is initialized to
- * NULL when the structure is first created.
- * @msq contains the message queue structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_queue_free_security:
- * Deallocate security structure for this message queue.
- * @msq contains the message queue structure to be modified.
- * @msg_queue_associate:
- * Check permission when a message queue is requested through the
- * msgget system call. This hook is only called when returning the
- * message queue identifier for an existing message queue, not when a
- * new message queue is created.
- * @msq contains the message queue to act upon.
- * @msqflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgctl:
- * Check permission when a message control operation specified by @cmd
- * is to be performed on the message queue @msq.
- * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
- * @msq contains the message queue to act upon. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @msg_queue_msgsnd:
- * Check permission before a message, @msg, is enqueued on the message
- * queue, @msq.
- * @msq contains the message queue to send message to.
- * @msg contains the message to be enqueued.
- * @msqflg contains operational flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgrcv:
- * Check permission before a message, @msg, is removed from the message
- * queue, @msq. The @target task structure contains a pointer to the
- * process that will be receiving the message (not equal to the current
- * process when inline receives are being performed).
- * @msq contains the message queue to retrieve message from.
- * @msg contains the message destination.
- * @target contains the task structure for recipient process.
- * @type contains the type of message requested.
- * @mode contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Shared Memory Segments
- *
- * @shm_alloc_security:
- * Allocate and attach a security structure to the shp->shm_perm.security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @shp contains the shared memory structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @shm_free_security:
- * Deallocate the security struct for this memory segment.
- * @shp contains the shared memory structure to be modified.
- * @shm_associate:
- * Check permission when a shared memory region is requested through the
- * shmget system call. This hook is only called when returning the shared
- * memory region identifier for an existing region, not when a new shared
- * memory region is created.
- * @shp contains the shared memory structure to be modified.
- * @shmflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @shm_shmctl:
- * Check permission when a shared memory control operation specified by
- * @cmd is to be performed on the shared memory region @shp.
- * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
- * @shp contains shared memory structure to be modified.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @shm_shmat:
- * Check permissions prior to allowing the shmat system call to attach the
- * shared memory segment @shp to the data segment of the calling process.
- * The attaching address is specified by @shmaddr.
- * @shp contains the shared memory structure to be modified.
- * @shmaddr contains the address to attach memory region to.
- * @shmflg contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Semaphores
- *
- * @sem_alloc_security:
- * Allocate and attach a security structure to the sma->sem_perm.security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @sma contains the semaphore structure
- * Return 0 if operation was successful and permission is granted.
- * @sem_free_security:
- * Deallocate the security structure for this semaphore.
- * @sma contains the semaphore structure.
- * @sem_associate:
- * Check permission when a semaphore is requested through the semget
- * system call. This hook is only called when returning the semaphore
- * identifier for an existing semaphore, not when a new one must be
- * created.
- * @sma contains the semaphore structure.
- * @semflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @sem_semctl:
- * Check permission when a semaphore operation specified by @cmd is to be
- * performed on the semaphore @sma. The @sma may be NULL, e.g. for
- * IPC_INFO or SEM_INFO.
- * @sma contains the semaphore structure. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @sem_semop:
- * Check permissions before performing operations on members of the
- * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
- * may be modified.
- * @sma contains the semaphore structure.
- * @sops contains the operations to perform.
- * @nsops contains the number of operations to perform.
- * @alter contains the flag indicating whether changes are to be made.
- * Return 0 if permission is granted.
- *
- * @ptrace_access_check:
- * Check permission before allowing the current process to trace the
- * @child process.
- * Security modules may also want to perform a process tracing check
- * during an execve in the bprm_set_creds hook of binprm_security_ops
- * if the process is being traced and its security attributes would be
- * changed by the execve.
- * @child contains the task_struct structure for the target process.
- * @mode contains the PTRACE_MODE flags indicating the form of access.
- * Return 0 if permission is granted.
- * @ptrace_traceme:
- * Check that the @parent process has sufficient permission to trace the
- * current process before allowing the current process to present itself
- * to the @parent process for tracing.
- * @parent contains the task_struct structure for debugger process.
- * Return 0 if permission is granted.
- * @capget:
- * Get the @effective, @inheritable, and @permitted capability sets for
- * the @target process. The hook may also perform permission checking to
- * determine if the current process is allowed to see the capability sets
- * of the @target process.
- * @target contains the task_struct structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 if the capability sets were successfully obtained.
- * @capset:
- * Set the @effective, @inheritable, and @permitted capability sets for
- * the current process.
- * @new contains the new credentials structure for target process.
- * @old contains the current credentials structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 and update @new if permission is granted.
- * @capable:
- * Check whether the process with the given credentials has the @cap
- * capability in the indicated user namespace.
- * @cred contains the credentials to use.
- * @ns contains the user namespace we want the capability in
- * @cap contains the capability <include/linux/capability.h>.
- * @audit: Whether to write an audit message or not
- * Return 0 if the capability is granted.
- * @syslog:
- * Check permission before accessing the kernel message ring or changing
- * logging to the console.
- * See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the type of action.
- * @from_file indicates the context of action (if it came from /proc).
- * Return 0 if permission is granted.
- * @settime:
- * Check permission to change the system time.
- * struct timespec and timezone are defined in include/linux/time.h
- * @ts contains new time
- * @tz contains new timezone
- * Return 0 if permission is granted.
- * @vm_enough_memory:
- * Check permissions for allocating a new virtual mapping.
- * @mm contains the mm struct it is being added to.
- * @pages contains the number of pages.
- * Return 0 if permission is granted.
- *
- * @ismaclabel:
- * Check if the extended attribute specified by @name
- * represents a MAC label. Returns 1 if @name is a MAC
- * attribute, otherwise returns 0.
- * @name full extended attribute name to check against
- * LSM as a MAC label.
- *
- * @secid_to_secctx:
- * Convert secid to security context. If @secdata is NULL, only the length
- * of the result is returned in @seclen; no secdata is returned. Note that
- * the length may therefore change between a call that only queries the
- * length and a later call that actually allocates and returns the secdata.
- * @secid contains the security ID.
- * @secdata contains the pointer that stores the converted security context.
- * @seclen pointer which contains the length of the data
- * @secctx_to_secid:
- * Convert security context to secid.
- * @secid contains the pointer to the generated security ID.
- * @secdata contains the security context.
- *
- * @release_secctx:
- * Release the security context.
- * @secdata contains the security context.
- * @seclen contains the length of the security context.
- *
- * Security hooks for Audit
- *
- * @audit_rule_init:
- * Allocate and initialize an LSM audit rule structure.
- * @field contains the required Audit action. Field flags are defined in include/linux/audit.h.
- * @op contains the operator the rule uses.
- * @rulestr contains the context to which the rule will be applied.
- * @lsmrule contains a pointer to receive the result.
- * Return 0 if @lsmrule has been successfully set,
- * -EINVAL in case of an invalid rule.
- *
- * @audit_rule_known:
- * Specifies whether the given @rule contains any fields related to the current LSM.
- * @rule contains the audit rule of interest.
- * Return 1 in case of relation found, 0 otherwise.
- *
- * @audit_rule_match:
- * Determine if the given @secid matches a rule previously approved
- * by @audit_rule_known.
- * @secid contains the security id in question.
- * @field contains the field which relates to current LSM.
- * @op contains the operator that will be used for matching.
- * @rule points to the audit rule that will be checked against.
- * @actx points to the audit context associated with the check.
- * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
- *
- * @audit_rule_free:
- * Deallocate the LSM audit rule structure previously allocated by
- * audit_rule_init.
- * @rule contains the allocated rule
- *
- * @inode_notifysecctx:
- * Notify the security module of what the security context of an inode
- * should be. Initializes the incore security context managed by the
- * security module for this inode. Example usage: NFS client invokes
- * this hook to initialize the security context in its incore inode to the
- * value provided by the server for the file when the server returned the
- * file's attributes to the client.
- *
- * Must be called with inode->i_mutex locked.
- *
- * @inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_setsecctx:
- * Change the security context of an inode. Updates the
- * incore security context managed by the security module and invokes the
- * fs code as needed (via __vfs_setxattr_noperm) to update any backing
- * xattrs that represent the context. Example usage: NFS server invokes
- * this hook to change the security context in its incore inode and on the
- * backing filesystem to a value provided by the client on a SETATTR
- * operation.
- *
- * Must be called with inode->i_mutex locked.
- *
- * @dentry contains the inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_getsecctx:
- * On success, returns 0 and fills out @ctx and @ctxlen with the security
- * context for the given @inode.
- *
- * @inode we wish to get the security context of.
- * @ctx is a pointer in which to place the allocated security context.
- * @ctxlen points to the place to put the length of @ctx.
- *
- * This is the main security structure.
- */
-struct security_operations {
- char name[SECURITY_NAME_MAX + 1];
-
- int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
- int (*ptrace_traceme) (struct task_struct *parent);
- int (*capget) (struct task_struct *target,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable, kernel_cap_t *permitted);
- int (*capset) (struct cred *new,
- const struct cred *old,
- const kernel_cap_t *effective,
- const kernel_cap_t *inheritable,
- const kernel_cap_t *permitted);
- int (*capable) (const struct cred *cred, struct user_namespace *ns,
- int cap, int audit);
- int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
- int (*quota_on) (struct dentry *dentry);
- int (*syslog) (int type);
- int (*settime) (const struct timespec *ts, const struct timezone *tz);
- int (*vm_enough_memory) (struct mm_struct *mm, long pages);
-
- int (*bprm_set_creds) (struct linux_binprm *bprm);
- int (*bprm_check_security) (struct linux_binprm *bprm);
- int (*bprm_secureexec) (struct linux_binprm *bprm);
- void (*bprm_committing_creds) (struct linux_binprm *bprm);
- void (*bprm_committed_creds) (struct linux_binprm *bprm);
-
- int (*sb_alloc_security) (struct super_block *sb);
- void (*sb_free_security) (struct super_block *sb);
- int (*sb_copy_data) (char *orig, char *copy);
- int (*sb_remount) (struct super_block *sb, void *data);
- int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
- int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
- int (*sb_statfs) (struct dentry *dentry);
- int (*sb_mount) (const char *dev_name, struct path *path,
- const char *type, unsigned long flags, void *data);
- int (*sb_umount) (struct vfsmount *mnt, int flags);
- int (*sb_pivotroot) (struct path *old_path,
- struct path *new_path);
- int (*sb_set_mnt_opts) (struct super_block *sb,
- struct security_mnt_opts *opts,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_clone_mnt_opts) (const struct super_block *oldsb,
- struct super_block *newsb);
- int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts);
- int (*dentry_init_security) (struct dentry *dentry, int mode,
- struct qstr *name, void **ctx,
- u32 *ctxlen);
-
-
-#ifdef CONFIG_SECURITY_PATH
- int (*path_unlink) (struct path *dir, struct dentry *dentry);
- int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode);
- int (*path_rmdir) (struct path *dir, struct dentry *dentry);
- int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode,
- unsigned int dev);
- int (*path_truncate) (struct path *path);
- int (*path_symlink) (struct path *dir, struct dentry *dentry,
- const char *old_name);
- int (*path_link) (struct dentry *old_dentry, struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir, struct dentry *new_dentry);
- int (*path_chmod) (struct path *path, umode_t mode);
- int (*path_chown) (struct path *path, kuid_t uid, kgid_t gid);
- int (*path_chroot) (struct path *path);
-#endif
-
- int (*inode_alloc_security) (struct inode *inode);
- void (*inode_free_security) (struct inode *inode);
- int (*inode_init_security) (struct inode *inode, struct inode *dir,
- const struct qstr *qstr, const char **name,
- void **value, size_t *len);
- int (*inode_create) (struct inode *dir,
- struct dentry *dentry, umode_t mode);
- int (*inode_link) (struct dentry *old_dentry,
- struct inode *dir, struct dentry *new_dentry);
- int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
- int (*inode_symlink) (struct inode *dir,
- struct dentry *dentry, const char *old_name);
- int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode);
- int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
- int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t dev);
- int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry);
- int (*inode_readlink) (struct dentry *dentry);
- int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
- int (*inode_permission) (struct inode *inode, int mask);
- int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
- int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
- int (*inode_setxattr) (struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
- void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
- int (*inode_getxattr) (struct dentry *dentry, const char *name);
- int (*inode_listxattr) (struct dentry *dentry);
- int (*inode_removexattr) (struct dentry *dentry, const char *name);
- int (*inode_need_killpriv) (struct dentry *dentry);
- int (*inode_killpriv) (struct dentry *dentry);
- int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
- int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags);
- int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size);
- void (*inode_getsecid) (const struct inode *inode, u32 *secid);
-
- int (*file_permission) (struct file *file, int mask);
- int (*file_alloc_security) (struct file *file);
- void (*file_free_security) (struct file *file);
- int (*file_ioctl) (struct file *file, unsigned int cmd,
- unsigned long arg);
- int (*mmap_addr) (unsigned long addr);
- int (*mmap_file) (struct file *file,
- unsigned long reqprot, unsigned long prot,
- unsigned long flags);
- int (*file_mprotect) (struct vm_area_struct *vma,
- unsigned long reqprot,
- unsigned long prot);
- int (*file_lock) (struct file *file, unsigned int cmd);
- int (*file_fcntl) (struct file *file, unsigned int cmd,
- unsigned long arg);
- void (*file_set_fowner) (struct file *file);
- int (*file_send_sigiotask) (struct task_struct *tsk,
- struct fown_struct *fown, int sig);
- int (*file_receive) (struct file *file);
- int (*file_open) (struct file *file, const struct cred *cred);
-
- int (*task_create) (unsigned long clone_flags);
- void (*task_free) (struct task_struct *task);
- int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
- void (*cred_free) (struct cred *cred);
- int (*cred_prepare)(struct cred *new, const struct cred *old,
- gfp_t gfp);
- void (*cred_transfer)(struct cred *new, const struct cred *old);
- int (*kernel_act_as)(struct cred *new, u32 secid);
- int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
- int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
- int (*kernel_module_request)(char *kmod_name);
- int (*kernel_module_from_file)(struct file *file);
- int (*task_fix_setuid) (struct cred *new, const struct cred *old,
- int flags);
- int (*task_setpgid) (struct task_struct *p, pid_t pgid);
- int (*task_getpgid) (struct task_struct *p);
- int (*task_getsid) (struct task_struct *p);
- void (*task_getsecid) (struct task_struct *p, u32 *secid);
- int (*task_setnice) (struct task_struct *p, int nice);
- int (*task_setioprio) (struct task_struct *p, int ioprio);
- int (*task_getioprio) (struct task_struct *p);
- int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
- struct rlimit *new_rlim);
- int (*task_setscheduler) (struct task_struct *p);
- int (*task_getscheduler) (struct task_struct *p);
- int (*task_movememory) (struct task_struct *p);
- int (*task_kill) (struct task_struct *p,
- struct siginfo *info, int sig, u32 secid);
- int (*task_wait) (struct task_struct *p);
- int (*task_prctl) (int option, unsigned long arg2,
- unsigned long arg3, unsigned long arg4,
- unsigned long arg5);
- void (*task_to_inode) (struct task_struct *p, struct inode *inode);
-
- int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
- void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid);
-
- int (*msg_msg_alloc_security) (struct msg_msg *msg);
- void (*msg_msg_free_security) (struct msg_msg *msg);
-
- int (*msg_queue_alloc_security) (struct msg_queue *msq);
- void (*msg_queue_free_security) (struct msg_queue *msq);
- int (*msg_queue_associate) (struct msg_queue *msq, int msqflg);
- int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd);
- int (*msg_queue_msgsnd) (struct msg_queue *msq,
- struct msg_msg *msg, int msqflg);
- int (*msg_queue_msgrcv) (struct msg_queue *msq,
- struct msg_msg *msg,
- struct task_struct *target,
- long type, int mode);
-
- int (*shm_alloc_security) (struct shmid_kernel *shp);
- void (*shm_free_security) (struct shmid_kernel *shp);
- int (*shm_associate) (struct shmid_kernel *shp, int shmflg);
- int (*shm_shmctl) (struct shmid_kernel *shp, int cmd);
- int (*shm_shmat) (struct shmid_kernel *shp,
- char __user *shmaddr, int shmflg);
-
- int (*sem_alloc_security) (struct sem_array *sma);
- void (*sem_free_security) (struct sem_array *sma);
- int (*sem_associate) (struct sem_array *sma, int semflg);
- int (*sem_semctl) (struct sem_array *sma, int cmd);
- int (*sem_semop) (struct sem_array *sma,
- struct sembuf *sops, unsigned nsops, int alter);
-
- int (*netlink_send) (struct sock *sk, struct sk_buff *skb);
-
- void (*d_instantiate) (struct dentry *dentry, struct inode *inode);
-
- int (*getprocattr) (struct task_struct *p, char *name, char **value);
- int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size);
- int (*ismaclabel) (const char *name);
- int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen);
- int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
- void (*release_secctx) (char *secdata, u32 seclen);
-
- int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
- int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
- int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
-
-#ifdef CONFIG_SECURITY_NETWORK
- int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk);
- int (*unix_may_send) (struct socket *sock, struct socket *other);
-
- int (*socket_create) (int family, int type, int protocol, int kern);
- int (*socket_post_create) (struct socket *sock, int family,
- int type, int protocol, int kern);
- int (*socket_bind) (struct socket *sock,
- struct sockaddr *address, int addrlen);
- int (*socket_connect) (struct socket *sock,
- struct sockaddr *address, int addrlen);
- int (*socket_listen) (struct socket *sock, int backlog);
- int (*socket_accept) (struct socket *sock, struct socket *newsock);
- int (*socket_sendmsg) (struct socket *sock,
- struct msghdr *msg, int size);
- int (*socket_recvmsg) (struct socket *sock,
- struct msghdr *msg, int size, int flags);
- int (*socket_getsockname) (struct socket *sock);
- int (*socket_getpeername) (struct socket *sock);
- int (*socket_getsockopt) (struct socket *sock, int level, int optname);
- int (*socket_setsockopt) (struct socket *sock, int level, int optname);
- int (*socket_shutdown) (struct socket *sock, int how);
- int (*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb);
- int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
- int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
- int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
- void (*sk_free_security) (struct sock *sk);
- void (*sk_clone_security) (const struct sock *sk, struct sock *newsk);
- void (*sk_getsecid) (struct sock *sk, u32 *secid);
- void (*sock_graft) (struct sock *sk, struct socket *parent);
- int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb,
- struct request_sock *req);
- void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
- void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
- int (*secmark_relabel_packet) (u32 secid);
- void (*secmark_refcount_inc) (void);
- void (*secmark_refcount_dec) (void);
- void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
- int (*tun_dev_alloc_security) (void **security);
- void (*tun_dev_free_security) (void *security);
- int (*tun_dev_create) (void);
- int (*tun_dev_attach_queue) (void *security);
- int (*tun_dev_attach) (struct sock *sk, void *security);
- int (*tun_dev_open) (void *security);
- void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
-#endif /* CONFIG_SECURITY_NETWORK */
-
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
- int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
- void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
- int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc) (struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx);
- int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
- struct xfrm_sec_ctx *polsec,
- u32 secid);
- void (*xfrm_state_free_security) (struct xfrm_state *x);
- int (*xfrm_state_delete_security) (struct xfrm_state *x);
- int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
- int (*xfrm_state_pol_flow_match) (struct xfrm_state *x,
- struct xfrm_policy *xp,
- const struct flowi *fl);
- int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall);
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-
- /* key management security hooks */
-#ifdef CONFIG_KEYS
- int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags);
- void (*key_free) (struct key *key);
- int (*key_permission) (key_ref_t key_ref,
- const struct cred *cred,
- unsigned perm);
- int (*key_getsecurity)(struct key *key, char **_buffer);
-#endif /* CONFIG_KEYS */
-
-#ifdef CONFIG_AUDIT
- int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule);
- int (*audit_rule_known) (struct audit_krule *krule);
- int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule,
- struct audit_context *actx);
- void (*audit_rule_free) (void *lsmrule);
-#endif /* CONFIG_AUDIT */
-};
-
/* prototypes */
extern int security_init(void);
-extern int security_module_enable(struct security_operations *ops);
-extern int register_security(struct security_operations *ops);
-extern void __init security_fixup_ops(struct security_operations *ops);
-
/* Security operations */
+int security_binder_set_context_mgr(struct task_struct *mgr);
+int security_binder_transaction(struct task_struct *from,
+ struct task_struct *to);
+int security_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to);
+int security_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to, struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
@@ -1806,10 +256,11 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
int security_inode_readlink(struct dentry *dentry);
-int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
+ bool rcu);
int security_inode_permission(struct inode *inode, int mask);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
-int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
+int security_inode_getattr(const struct path *path);
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -1927,6 +378,30 @@ static inline int security_init(void)
return 0;
}
+static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+ return 0;
+}
+
+static inline int security_binder_transaction(struct task_struct *from,
+ struct task_struct *to)
+{
+ return 0;
+}
+
+static inline int security_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to)
+{
+ return 0;
+}
+
+static inline int security_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to,
+ struct file *file)
+{
+ return 0;
+}
+
static inline int security_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
@@ -1990,7 +465,7 @@ static inline int security_settime(const struct timespec *ts,
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- return cap_vm_enough_memory(mm, pages);
+ return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
}
static inline int security_bprm_set_creds(struct linux_binprm *bprm)
@@ -2185,7 +660,8 @@ static inline int security_inode_readlink(struct dentry *dentry)
}
static inline int security_inode_follow_link(struct dentry *dentry,
- struct nameidata *nd)
+ struct inode *inode,
+ bool rcu)
{
return 0;
}
@@ -2201,8 +677,7 @@ static inline int security_inode_setattr(struct dentry *dentry,
return 0;
}
-static inline int security_inode_getattr(struct vfsmount *mnt,
- struct dentry *dentry)
+static inline int security_inode_getattr(const struct path *path)
{
return 0;
}
@@ -2594,7 +1069,7 @@ static inline int security_setprocattr(struct task_struct *p, char *name, void *
static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
- return cap_netlink_send(sk, skb);
+ return 0;
}
static inline int security_ismaclabel(const char *name)
@@ -2677,8 +1152,6 @@ int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
-
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
@@ -2870,11 +1343,6 @@ static inline int security_tun_dev_open(void *security)
{
return 0;
}
-
-static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-}
-
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -3169,36 +1637,5 @@ static inline void free_secdata(void *secdata)
{ }
#endif /* CONFIG_SECURITY */
-#ifdef CONFIG_SECURITY_YAMA
-extern int yama_ptrace_access_check(struct task_struct *child,
- unsigned int mode);
-extern int yama_ptrace_traceme(struct task_struct *parent);
-extern void yama_task_free(struct task_struct *task);
-extern int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
-#else
-static inline int yama_ptrace_access_check(struct task_struct *child,
- unsigned int mode)
-{
- return 0;
-}
-
-static inline int yama_ptrace_traceme(struct task_struct *parent)
-{
- return 0;
-}
-
-static inline void yama_task_free(struct task_struct *task)
-{
-}
-
-static inline int yama_task_prctl(int option, unsigned long arg2,
- unsigned long arg3, unsigned long arg4,
- unsigned long arg5)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_SECURITY_YAMA */
-
#endif /* ! __LINUX_SECURITY_H */
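The security_binder_*() hooks declared above are plain permission checks: the caller passes the tasks involved (and, for the file variant, the struct file) and proceeds only on a zero return. A minimal caller sketch, assuming only the prototypes added in this patch; the function name and surrounding logic are illustrative, not part of the patch:

/* Illustrative only: gate a file-descriptor transfer on the new LSM hook.
 * Assumes #include <linux/security.h>. */
static int example_transfer_fd(struct task_struct *from,
			       struct task_struct *to,
			       struct file *file)
{
	int ret;

	ret = security_binder_transfer_file(from, to, file);
	if (ret)
		return ret;	/* an LSM denied the transfer */

	/* ... install the descriptor in the target task ... */
	return 0;
}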
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index 9aafe0e..fb7eb9c 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -125,9 +125,6 @@ extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len);
extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
-extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
- int nmaskbits);
-
#ifdef CONFIG_BINARY_PRINTF
extern int
seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index cf6a9da..912a7c4 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -123,34 +123,10 @@ __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
int seq_path(struct seq_file *, const struct path *, const char *);
+int seq_file_path(struct seq_file *, struct file *, const char *);
int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);
-int seq_bitmap(struct seq_file *m, const unsigned long *bits,
- unsigned int nr_bits);
-static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
-{
- return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids);
-}
-
-static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
-{
- return seq_bitmap(m, mask->bits, MAX_NUMNODES);
-}
-
-int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
- unsigned int nr_bits);
-
-static inline int seq_cpumask_list(struct seq_file *m,
- const struct cpumask *mask)
-{
- return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids);
-}
-
-static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
-{
- return seq_bitmap_list(m, mask->bits, MAX_NUMNODES);
-}
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f5df8f6..e058210 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
+#include <linux/compiler.h>
#include <asm/processor.h>
/*
@@ -108,7 +109,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
unsigned ret;
repeat:
- ret = ACCESS_ONCE(s->sequence);
+ ret = READ_ONCE(s->sequence);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
@@ -127,7 +128,7 @@ repeat:
*/
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
- unsigned ret = ACCESS_ONCE(s->sequence);
+ unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret;
}
@@ -179,7 +180,7 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
*/
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
- unsigned ret = ACCESS_ONCE(s->sequence);
+ unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret & ~1;
}
@@ -233,9 +234,128 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
s->sequence++;
}
-/*
+/**
+ * raw_write_seqcount_barrier - do a seq write barrier
+ * @s: pointer to seqcount_t
+ *
+ * This can be used to provide an ordering guarantee instead of the
+ * usual consistency guarantee. It is one wmb cheaper, because we can
+ * collapse the two back-to-back wmb()s.
+ *
+ * seqcount_t seq;
+ * bool X = true, Y = false;
+ *
+ * void read(void)
+ * {
+ * bool x, y;
+ *
+ * do {
+ * int s = read_seqcount_begin(&seq);
+ *
+ * x = X; y = Y;
+ *
+ * } while (read_seqcount_retry(&seq, s));
+ *
+ * BUG_ON(!x && !y);
+ * }
+ *
+ * void write(void)
+ * {
+ * Y = true;
+ *
+ * raw_write_seqcount_barrier(&seq);
+ *
+ * X = false;
+ * }
+ */
+static inline void raw_write_seqcount_barrier(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+ s->sequence++;
+}
+
+static inline int raw_read_seqcount_latch(seqcount_t *s)
+{
+ return lockless_dereference(s->sequence);
+}
+
+/**
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
+ *
+ * The latch technique is a multiversion concurrency control method that allows
+ * queries during non-atomic modifications. If you can guarantee queries never
+ * interrupt the modification -- e.g. the concurrency is strictly between CPUs
+ * -- you most likely do not need this.
+ *
+ * Where the traditional RCU/lockless data structures rely on atomic
+ * modifications to ensure queries observe either the old or the new state the
+ * latch allows the same for non-atomic updates. The trade-off is doubling the
+ * cost of storage; we have to maintain two copies of the entire data
+ * structure.
+ *
+ * Very simply put: we first modify one copy and then the other. This ensures
+ * there is always one copy in a stable state, ready to give us an answer.
+ *
+ * The basic form is a data structure like:
+ *
+ * struct latch_struct {
+ * seqcount_t seq;
+ * struct data_struct data[2];
+ * };
+ *
+ * Where a modification, which is assumed to be externally serialized, does the
+ * following:
+ *
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ * smp_wmb(); <- Ensure that the last data[1] update is visible
+ * latch->seq++;
+ * smp_wmb(); <- Ensure that the seqcount update is visible
+ *
+ * modify(latch->data[0], ...);
+ *
+ * smp_wmb(); <- Ensure that the data[0] update is visible
+ * latch->seq++;
+ * smp_wmb(); <- Ensure that the seqcount update is visible
+ *
+ * modify(latch->data[1], ...);
+ * }
+ *
+ * The query will have a form like:
+ *
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ * struct entry *entry;
+ * unsigned seq, idx;
+ *
+ * do {
+ * seq = lockless_dereference(latch->seq);
+ *
+ * idx = seq & 0x01;
+ * entry = data_query(latch->data[idx], ...);
+ *
+ * smp_rmb();
+ * } while (seq != latch->seq);
+ *
+ * return entry;
+ * }
+ *
+ * So during the modification, queries are first redirected to data[1]. Then we
+ * modify data[0]. When that is complete, we redirect queries back to data[0]
+ * and we can modify data[1].
+ *
+ * NOTE: The non-requirement for atomic modifications does _NOT_ include
+ * the publishing of new entries in the case where data is a dynamic
+ * data structure.
+ *
+ * An iteration might start in data[0] and get suspended long enough
+ * to miss an entire modification sequence; once it resumes, it might
+ * observe the new entry.
+ *
+ * NOTE: When data is a dynamic data structure, one should use regular RCU
+ * patterns to manage the lifetimes of the objects within.
*/
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
@@ -266,13 +386,13 @@ static inline void write_seqcount_end(seqcount_t *s)
}
/**
- * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * write_seqcount_invalidate - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
*
- * After write_seqcount_barrier, no read-side seq operations will complete
+ * After write_seqcount_invalidate, no read-side seq operations will complete
* successfully and see data older than this.
*/
-static inline void write_seqcount_barrier(seqcount_t *s)
+static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
s->sequence+=2;
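For readers following the latch description above, here is a compact sketch of the same read and modify paths written with the helpers added in this hunk instead of open-coded barriers. struct data_struct, struct entry, data_query() and modify() are the placeholder names from the comment; read_seqcount_retry() is the existing seqlock re-check helper, and raw_write_seqcount_latch() is assumed to perform the wmb/increment step shown in latch_modify() above (its body is not part of this hunk):

/* Sketch only: latch usage with the new helpers (placeholder types/functions). */
struct latch_struct {
	seqcount_t		seq;
	struct data_struct	data[2];
};

/* Modification, externally serialized. */
static void latch_modify(struct latch_struct *latch)
{
	raw_write_seqcount_latch(&latch->seq);	/* readers now use data[1] */
	modify(&latch->data[0]);

	raw_write_seqcount_latch(&latch->seq);	/* readers now use data[0] */
	modify(&latch->data[1]);
}

/* Query, may run concurrently with latch_modify(). */
static struct entry *latch_query(struct latch_struct *latch)
{
	struct entry *entry;
	unsigned int seq, idx;

	do {
		seq = raw_read_seqcount_latch(&latch->seq);
		idx = seq & 0x01;
		entry = data_query(&latch->data[idx]);
	} while (read_seqcount_retry(&latch->seq, seq));

	return entry;
}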
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index e02acf0..ba82c07 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -12,6 +12,7 @@
#define _LINUX_SERIAL_8250_H
#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
#include <linux/platform_device.h>
/*
@@ -60,6 +61,20 @@ enum {
};
struct uart_8250_dma;
+struct uart_8250_port;
+
+/**
+ * 8250 core driver operations
+ *
+ * @setup_irq() Set up irq handling. The universal 8250 driver links this
+ * port to the irq chain. Other drivers may @request_irq().
+ * @release_irq() Undo irq handling. The universal 8250 driver unlinks
+ * the port from the irq chain.
+ */
+struct uart_8250_ops {
+ int (*setup_irq)(struct uart_8250_port *);
+ void (*release_irq)(struct uart_8250_port *);
+};
/*
* This should be used by drivers which want to register
@@ -85,6 +100,11 @@ struct uart_8250_port {
unsigned char mcr_force; /* mask of forced bits */
unsigned char cur_iotype; /* Running I/O type */
unsigned int rpm_tx_active;
+ unsigned char canary; /* non-zero during system sleep
+ * if no_console_suspend
+ */
+ unsigned char probe;
+#define UART_PROBE_RSA (1 << 0)
/*
* Some bits in registers are cleared on a read, so they must
@@ -97,6 +117,7 @@ struct uart_8250_port {
unsigned char msr_saved_flags;
struct uart_8250_dma *dma;
+ const struct uart_8250_ops *ops;
/* 8250 specific callbacks */
int (*dl_read)(struct uart_8250_port *);
@@ -115,17 +136,17 @@ void serial8250_resume_port(int line);
extern int early_serial_setup(struct uart_port *port);
-extern int serial8250_find_port(struct uart_port *p);
-extern int serial8250_find_port_for_earlycon(void);
extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
extern void serial8250_early_out(struct uart_port *port, int offset, int value);
-extern int setup_early_serial8250_console(char *cmdline);
+extern int early_serial8250_setup(struct earlycon_device *device,
+ const char *options);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern int serial8250_do_startup(struct uart_port *port);
extern void serial8250_do_shutdown(struct uart_port *port);
extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
+extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
extern int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
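The new struct uart_8250_ops lets an 8250-derived driver take over IRQ setup and teardown instead of relying on the universal driver's shared IRQ chain. A hedged sketch of what a driver-side implementation could look like; the driver name and bodies are hypothetical, only the struct and the port's ->ops pointer come from this patch:

/* Hypothetical driver: provide private IRQ handling via uart_8250_ops. */
static int myserial_setup_irq(struct uart_8250_port *up)
{
	/* e.g. request_irq() on up->port.irq instead of the shared chain */
	return 0;
}

static void myserial_release_irq(struct uart_8250_port *up)
{
	/* e.g. free_irq(up->port.irq, up) */
}

static const struct uart_8250_ops myserial_8250_ops = {
	.setup_irq	= myserial_setup_irq,
	.release_irq	= myserial_release_irq,
};

/* during probe: up->ops = &myserial_8250_ops; */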
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 057038c..297d4fa 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -35,7 +35,7 @@
#define uart_console(port) \
((port)->cons && (port)->cons->index == (port)->line)
#else
-#define uart_console(port) (0)
+#define uart_console(port) ({ (void)port; 0; })
#endif
struct uart_port;
@@ -123,6 +123,7 @@ struct uart_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ void (*set_mctrl)(struct uart_port *, unsigned int);
int (*startup)(struct uart_port *port);
void (*shutdown)(struct uart_port *port);
void (*throttle)(struct uart_port *port);
@@ -142,13 +143,13 @@ struct uart_port {
unsigned char iotype; /* io access style */
unsigned char unused1;
-#define UPIO_PORT (0) /* 8b I/O port access */
-#define UPIO_HUB6 (1) /* Hub6 ISA card */
-#define UPIO_MEM (2) /* 8b MMIO access */
-#define UPIO_MEM32 (3) /* 32b little endian */
-#define UPIO_MEM32BE (4) /* 32b big endian */
-#define UPIO_AU (5) /* Au1x00 and RT288x type IO */
-#define UPIO_TSI (6) /* Tsi108/109 type IO */
+#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
+#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
+#define UPIO_MEM (SERIAL_IO_MEM) /* 8b MMIO access */
+#define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */
+#define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */
+#define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */
+#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
@@ -190,8 +191,10 @@ struct uart_port {
#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
-/* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */
-#define UPF_HARD_FLOW ((__force upf_t) (1 << 21))
+/* Port has hardware-assisted h/w flow control */
+#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
+#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
+#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
/* Port has hardware-assisted s/w flow control */
#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
@@ -213,11 +216,17 @@ struct uart_port {
#error Change mask not equivalent to userspace-visible bit defines
#endif
- /* status must be updated while holding port lock */
+ /*
+ * Must hold termios_rwsem, port mutex and port lock to change;
+ * can hold any one lock to read.
+ */
upstat_t status;
#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0))
#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1))
+#define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2))
+#define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3))
+#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
int hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
@@ -226,7 +235,9 @@ struct uart_port {
const struct uart_ops *ops;
unsigned int custom_divisor;
unsigned int line; /* port index */
+ unsigned int minor;
resource_size_t mapbase; /* for ioremap */
+ resource_size_t mapsize;
struct device *dev; /* parent device */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
@@ -327,24 +338,29 @@ struct earlycon_device {
char options[16]; /* e.g., 115200n8 */
unsigned int baud;
};
-int setup_earlycon(char *buf, const char *match,
- int (*setup)(struct earlycon_device *, const char *));
+struct earlycon_id {
+ char name[16];
+ int (*setup)(struct earlycon_device *, const char *options);
+} __aligned(32);
+
+extern int setup_earlycon(char *buf);
extern int of_setup_earlycon(unsigned long addr,
int (*setup)(struct earlycon_device *, const char *));
-#define EARLYCON_DECLARE(name, func) \
-static int __init name ## _setup_earlycon(char *buf) \
-{ \
- return setup_earlycon(buf, __stringify(name), func); \
-} \
-early_param("earlycon", name ## _setup_earlycon);
+#define EARLYCON_DECLARE(_name, func) \
+ static const struct earlycon_id __earlycon_##_name \
+ __used __section(__earlycon_table) \
+ = { .name = __stringify(_name), \
+ .setup = func }
#define OF_EARLYCON_DECLARE(name, compat, fn) \
_OF_DECLARE(earlycon, name, compat, fn, void *)
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
+int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
+ char **options);
void uart_parse_options(char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
@@ -391,6 +407,13 @@ static inline bool uart_cts_enabled(struct uart_port *uport)
return !!(uport->status & UPSTAT_CTS_ENABLE);
}
+static inline bool uart_softcts_mode(struct uart_port *uport)
+{
+ upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS;
+
+ return ((uport->status & mask) == UPSTAT_CTS_ENABLE);
+}
+
/*
* The following are helper functions for the low level drivers.
*/
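With the table-based EARLYCON_DECLARE above, a driver registers its early console setup once and the core matches the "earlycon=" boot parameter against the __earlycon_table entries via setup_earlycon(). A minimal sketch, assuming a hypothetical driver name and setup function (only the macro and the earlycon_device type come from this header):

/* Hypothetical earlycon registration with the new table-based macro. */
static int __init myuart_early_setup(struct earlycon_device *device,
				     const char *options)
{
	/* program device->port registers and install a console write op */
	return 0;
}
EARLYCON_DECLARE(myuart, myuart_early_setup);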
diff --git a/include/linux/serial_mfd.h b/include/linux/serial_mfd.h
deleted file mode 100644
index 2b071e0..0000000
--- a/include/linux/serial_mfd.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef _SERIAL_MFD_H_
-#define _SERIAL_MFD_H_
-
-/* HW register offset definition */
-#define UART_FOR 0x08
-#define UART_PS 0x0C
-#define UART_MUL 0x0D
-#define UART_DIV 0x0E
-
-#define HSU_GBL_IEN 0x0
-#define HSU_GBL_IST 0x4
-
-#define HSU_GBL_INT_BIT_PORT0 0x0
-#define HSU_GBL_INT_BIT_PORT1 0x1
-#define HSU_GBL_INT_BIT_PORT2 0x2
-#define HSU_GBL_INT_BIT_IRI 0x3
-#define HSU_GBL_INT_BIT_HDLC 0x4
-#define HSU_GBL_INT_BIT_DMA 0x5
-
-#define HSU_GBL_ISR 0x8
-#define HSU_GBL_DMASR 0x400
-#define HSU_GBL_DMAISR 0x404
-
-#define HSU_PORT_REG_OFFSET 0x80
-#define HSU_PORT0_REG_OFFSET 0x80
-#define HSU_PORT1_REG_OFFSET 0x100
-#define HSU_PORT2_REG_OFFSET 0x180
-#define HSU_PORT_REG_LENGTH 0x80
-
-#define HSU_DMA_CHANS_REG_OFFSET 0x500
-#define HSU_DMA_CHANS_REG_LENGTH 0x40
-
-#define HSU_CH_SR 0x0 /* channel status reg */
-#define HSU_CH_CR 0x4 /* control reg */
-#define HSU_CH_DCR 0x8 /* descriptor control reg */
-#define HSU_CH_BSR 0x10 /* max fifo buffer size reg */
-#define HSU_CH_MOTSR 0x14 /* minimum ocp transfer size */
-#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */
-#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */
-#define HSU_CH_D1SAR 0x28
-#define HSU_CH_D1TSR 0x2C
-#define HSU_CH_D2SAR 0x30
-#define HSU_CH_D2TSR 0x34
-#define HSU_CH_D3SAR 0x38
-#define HSU_CH_D3TSR 0x3C
-
-#endif
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index e6fc956..a7f004a 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -104,6 +104,31 @@
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI)
+#define S3C64XX_UCON_TXBURST_1 (0<<20)
+#define S3C64XX_UCON_TXBURST_4 (1<<20)
+#define S3C64XX_UCON_TXBURST_8 (2<<20)
+#define S3C64XX_UCON_TXBURST_16 (3<<20)
+#define S3C64XX_UCON_TXBURST_MASK (0xf<<20)
+#define S3C64XX_UCON_RXBURST_1 (0<<16)
+#define S3C64XX_UCON_RXBURST_4 (1<<16)
+#define S3C64XX_UCON_RXBURST_8 (2<<16)
+#define S3C64XX_UCON_RXBURST_16 (3<<16)
+#define S3C64XX_UCON_RXBURST_MASK (0xf<<16)
+#define S3C64XX_UCON_TIMEOUT_SHIFT (12)
+#define S3C64XX_UCON_TIMEOUT_MASK (0xf<<12)
+#define S3C64XX_UCON_EMPTYINT_EN (1<<11)
+#define S3C64XX_UCON_DMASUS_EN (1<<10)
+#define S3C64XX_UCON_TXINT_LEVEL (1<<9)
+#define S3C64XX_UCON_RXINT_LEVEL (1<<8)
+#define S3C64XX_UCON_TIMEOUT_EN (1<<7)
+#define S3C64XX_UCON_ERRINT_EN (1<<6)
+#define S3C64XX_UCON_TXMODE_DMA (2<<2)
+#define S3C64XX_UCON_TXMODE_CPU (1<<2)
+#define S3C64XX_UCON_TXMODE_MASK (3<<2)
+#define S3C64XX_UCON_RXMODE_DMA (2<<0)
+#define S3C64XX_UCON_RXMODE_CPU (1<<0)
+#define S3C64XX_UCON_RXMODE_MASK (3<<0)
+
#define S3C2410_UFCON_FIFOMODE (1<<0)
#define S3C2410_UFCON_TXTRIG0 (0<<6)
#define S3C2410_UFCON_RXTRIG8 (1<<4)
@@ -155,6 +180,7 @@
#define S3C2440_UFSTAT_TXMASK (63<<8)
#define S3C2440_UFSTAT_RXMASK (63)
+#define S3C2410_UTRSTAT_TIMEOUT (1<<3)
#define S3C2410_UTRSTAT_TXE (1<<2)
#define S3C2410_UTRSTAT_TXFE (1<<1)
#define S3C2410_UTRSTAT_RXDR (1<<0)
@@ -179,8 +205,10 @@
#define S3C64XX_UINTM 0x38
#define S3C64XX_UINTM_RXD (0)
+#define S3C64XX_UINTM_ERROR (1)
#define S3C64XX_UINTM_TXD (2)
#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD)
+#define S3C64XX_UINTM_ERR_MSK (1 << S3C64XX_UINTM_ERROR)
#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD)
/* Following are specific to S5PV210 */
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 6c5e3bb..7c536ac 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -1,6 +1,7 @@
#ifndef __LINUX_SERIAL_SCI_H
#define __LINUX_SERIAL_SCI_H
+#include <linux/bitops.h>
#include <linux/serial_core.h>
#include <linux/sh_dma.h>
@@ -10,59 +11,16 @@
#define SCIx_NOT_SUPPORTED (-1)
-/* SCSMR (Serial Mode Register) */
-#define SCSMR_CHR (1 << 6) /* 7-bit Character Length */
-#define SCSMR_PE (1 << 5) /* Parity Enable */
-#define SCSMR_ODD (1 << 4) /* Odd Parity */
-#define SCSMR_STOP (1 << 3) /* Stop Bit Length */
-#define SCSMR_CKS 0x0003 /* Clock Select */
-
/* Serial Control Register (@ = not supported by all parts) */
-#define SCSCR_TIE (1 << 7) /* Transmit Interrupt Enable */
-#define SCSCR_RIE (1 << 6) /* Receive Interrupt Enable */
-#define SCSCR_TE (1 << 5) /* Transmit Enable */
-#define SCSCR_RE (1 << 4) /* Receive Enable */
-#define SCSCR_REIE (1 << 3) /* Receive Error Interrupt Enable @ */
-#define SCSCR_TOIE (1 << 2) /* Timeout Interrupt Enable @ */
-#define SCSCR_CKE1 (1 << 1) /* Clock Enable 1 */
-#define SCSCR_CKE0 (1 << 0) /* Clock Enable 0 */
-/* SCIFA/SCIFB only */
-#define SCSCR_TDRQE (1 << 15) /* Tx Data Transfer Request Enable */
-#define SCSCR_RDRQE (1 << 14) /* Rx Data Transfer Request Enable */
-
-/* SCxSR (Serial Status Register) on SCI */
-#define SCI_TDRE 0x80 /* Transmit Data Register Empty */
-#define SCI_RDRF 0x40 /* Receive Data Register Full */
-#define SCI_ORER 0x20 /* Overrun Error */
-#define SCI_FER 0x10 /* Framing Error */
-#define SCI_PER 0x08 /* Parity Error */
-#define SCI_TEND 0x04 /* Transmit End */
-
-#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER)
-
-/* SCxSR (Serial Status Register) on SCIF, HSCIF */
-#define SCIF_ER 0x0080 /* Receive Error */
-#define SCIF_TEND 0x0040 /* Transmission End */
-#define SCIF_TDFE 0x0020 /* Transmit FIFO Data Empty */
-#define SCIF_BRK 0x0010 /* Break Detect */
-#define SCIF_FER 0x0008 /* Framing Error */
-#define SCIF_PER 0x0004 /* Parity Error */
-#define SCIF_RDF 0x0002 /* Receive FIFO Data Full */
-#define SCIF_DR 0x0001 /* Receive Data Ready */
-
-#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
-
-/* SCFCR (FIFO Control Register) */
-#define SCFCR_LOOP (1 << 0) /* Loopback Test */
-
-/* SCSPTR (Serial Port Register), optional */
-#define SCSPTR_RTSIO (1 << 7) /* Serial Port RTS Pin Input/Output */
-#define SCSPTR_CTSIO (1 << 5) /* Serial Port CTS Pin Input/Output */
-#define SCSPTR_SPB2IO (1 << 1) /* Serial Port Break Input/Output */
-#define SCSPTR_SPB2DT (1 << 0) /* Serial Port Break Data */
-
-/* HSSRR HSCIF */
-#define HSCIF_SRE 0x8000 /* Sampling Rate Register Enable */
+#define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */
+#define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */
+#define SCSCR_TE BIT(5) /* Transmit Enable */
+#define SCSCR_RE BIT(4) /* Receive Enable */
+#define SCSCR_REIE BIT(3) /* Receive Error Interrupt Enable @ */
+#define SCSCR_TOIE BIT(2) /* Timeout Interrupt Enable @ */
+#define SCSCR_CKE1 BIT(1) /* Clock Enable 1 */
+#define SCSCR_CKE0 BIT(0) /* Clock Enable 0 */
+
enum {
SCIx_PROBE_REGTYPE,
@@ -82,28 +40,6 @@ enum {
SCIx_NR_REGTYPES,
};
-/*
- * SCI register subset common for all port types.
- * Not all registers will exist on all parts.
- */
-enum {
- SCSMR, /* Serial Mode Register */
- SCBRR, /* Bit Rate Register */
- SCSCR, /* Serial Control Register */
- SCxSR, /* Serial Status Register */
- SCFCR, /* FIFO Control Register */
- SCFDR, /* FIFO Data Count Register */
- SCxTDR, /* Transmit (FIFO) Data Register */
- SCxRDR, /* Receive (FIFO) Data Register */
- SCLSR, /* Line Status Register */
- SCTFDR, /* Transmit FIFO Data Count Register */
- SCRFDR, /* Receive FIFO Data Count Register */
- SCSPTR, /* Serial Port Register */
- HSSRR, /* Sampling Rate Register */
-
- SCIx_NR_REGS,
-};
-
struct device;
struct plat_sci_port_ops {
@@ -113,7 +49,7 @@ struct plat_sci_port_ops {
/*
* Port-specific capabilities
*/
-#define SCIx_HAVE_RTSCTS (1 << 0)
+#define SCIx_HAVE_RTSCTS BIT(0)
/*
* Platform device specific platform_data struct
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index abdf1f2..dd0ba50 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -69,6 +69,7 @@ struct shdma_chan {
int id; /* Raw id of this channel */
int irq; /* Channel IRQ */
int slave_id; /* Client ID for slave DMA */
+ int real_slave_id; /* argument passed to filter function */
int hw_req; /* DMA request line for slave DMA - same
* as MID/RID, used with DT */
enum shdma_pm_state pm_state;
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index f4aee75..4fcacd9 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -20,6 +20,9 @@ struct shrink_control {
/* current node being shrunk (for NUMA aware shrinkers) */
int nid;
+
+ /* current memcg being shrunk (for memcg aware shrinkers) */
+ struct mem_cgroup *memcg;
};
#define SHRINK_STOP (~0UL)
@@ -61,7 +64,8 @@ struct shrinker {
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
/* Flags */
-#define SHRINKER_NUMA_AWARE (1 << 0)
+#define SHRINKER_NUMA_AWARE (1 << 0)
+#define SHRINKER_MEMCG_AWARE (1 << 1)
extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 85ab7d7..d6cdd6e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,7 +34,9 @@
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
+#include <linux/splice.h>
+#include <linux/in6.h>
/* A. Checksumming of received packets by device.
*
@@ -83,11 +85,15 @@
*
* CHECKSUM_PARTIAL:
*
- * This is identical to the case for output below. This may occur on a packet
+ * A checksum is set up to be offloaded to a device as described in the
+ * output description for CHECKSUM_PARTIAL. This may occur on a packet
* received directly from another Linux OS, e.g., a virtualized Linux kernel
- * on the same host. The packet can be treated in the same way as
- * CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the
- * checksum must be filled in by the OS or the hardware.
+ * on the same host, or it may be set in the input path in GRO or remote
+ * checksum offload. For the purposes of checksum verification, the checksum
+ * referred to by skb->csum_start + skb->csum_offset and any preceding
+ * checksums in the packet are considered verified. Any checksums in the
+ * packet that are after the checksum being offloaded are not considered to
+ * be verified.
*
* B. Checksumming on output.
*
@@ -162,10 +168,23 @@ struct nf_conntrack {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
atomic_t use;
+ enum {
+ BRNF_PROTO_UNCHANGED,
+ BRNF_PROTO_8021Q,
+ BRNF_PROTO_PPPOE
+ } orig_proto:8;
+ bool pkt_otherhost;
+ __u16 frag_max_size;
unsigned int mask;
struct net_device *physindev;
- struct net_device *physoutdev;
- unsigned long data[32 / sizeof(unsigned long)];
+ union {
+ struct net_device *physoutdev;
+ char neigh_header[8];
+ };
+ union {
+ __be32 ipv4_daddr;
+ struct in6_addr ipv6_daddr;
+ };
};
#endif
@@ -488,7 +507,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
- * @dropcount: total number of sk_receive_queue overflows
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
@@ -626,15 +644,17 @@ struct sk_buff {
__u32 hash;
__be16 vlan_proto;
__u16 vlan_tci;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int napi_id;
+#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
+ union {
+ unsigned int napi_id;
+ unsigned int sender_cpu;
+ };
#endif
#ifdef CONFIG_NETWORK_SECMARK
__u32 secmark;
#endif
union {
__u32 mark;
- __u32 dropcount;
__u32 reserved_tailroom;
};
@@ -762,6 +782,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
@@ -846,6 +867,9 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int len, int odd, struct sk_buff *skb),
void *from, int length);
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+ int offset, size_t size);
+
struct skb_seq_state {
__u32 lower_offset;
__u32 upper_offset;
@@ -863,8 +887,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
void skb_abort_seq_read(struct skb_seq_state *st);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
- unsigned int to, struct ts_config *config,
- struct ts_state *state);
+ unsigned int to, struct ts_config *config);
/*
* Packet hash types specify the type of hash in skb_set_hash.
@@ -907,7 +930,6 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
skb->hash = hash;
}
-void __skb_get_hash(struct sk_buff *skb);
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -916,6 +938,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
return skb->hash;
@@ -941,6 +965,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
to->l4_hash = from->l4_hash;
};
+static inline void skb_sender_cpu_clear(struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ skb->sender_cpu = 0;
+#endif
+}
+
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
@@ -1916,8 +1947,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- else if (skb_flow_dissect(skb, &keys))
- skb_set_transport_header(skb, keys.thoff);
+ else if (skb_flow_dissect_flow_keys(skb, &keys))
+ skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
}
@@ -2108,10 +2139,6 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
-
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2166,6 +2193,11 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
+static inline void skb_free_frag(void *addr)
+{
+ __free_page_frag(addr);
+}
+
void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
@@ -2484,19 +2516,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
}
static inline int skb_add_data(struct sk_buff *skb,
- char __user *from, int copy)
+ struct iov_iter *from, int copy)
{
const int off = skb->len;
if (skb->ip_summed == CHECKSUM_NONE) {
- int err = 0;
- __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
- copy, 0, &err);
- if (!err) {
+ __wsum csum = 0;
+ if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
+ &csum, from) == copy) {
skb->csum = csum_block_add(skb->csum, csum, off);
return 0;
}
- } else if (!copy_from_user(skb_put(skb, copy), from, copy))
+ } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
return 0;
__skb_trim(skb, off);
@@ -2674,9 +2705,15 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ssize_t skb_socket_splice(struct sock *sk,
+ struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
- unsigned int flags);
+ unsigned int flags,
+ ssize_t (*splice_cb)(struct sock *,
+ struct pipe_inode_info *,
+ struct splice_pipe_desc *));
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -2693,8 +2730,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
- /* XXX: stripping const */
- return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
+ return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
@@ -2712,8 +2748,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
-static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *data, int hlen, void *buffer)
+static inline void * __must_check
+__skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *data, int hlen, void *buffer)
{
if (hlen - offset >= len)
return data + offset;
@@ -2725,8 +2762,8 @@ static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
return buffer;
}
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *buffer)
+static inline void * __must_check
+skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
return __skb_header_pointer(skb, offset, len, skb->data,
skb_headlen(skb), buffer);
@@ -2914,7 +2951,10 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb);
static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
- return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid);
+ return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
+ skb->csum_valid ||
+ (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) >= 0));
}
/**
@@ -2998,6 +3038,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
*/
#define CHECKSUM_BREAK 76
+/* Unset checksum-complete
+ *
+ * Unsetting checksum complete can be done when a packet is being modified
+ * (uncompressed, for instance) and the checksum-complete value is
+ * invalidated.
+ */
+static inline void skb_checksum_complete_unset(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
/* Validate (init) checksum based on checksum complete.
*
* Return values:
@@ -3018,7 +3070,7 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
}
} else if (skb->csum_bad) {
/* ip_summed == CHECKSUM_NONE in this case */
- return 1;
+ return (__force __sum16)1;
}
skb->csum = psum;
@@ -3071,7 +3123,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
#define skb_checksum_validate_zero_check(skb, proto, check, \
compute_pseudo) \
- __skb_checksum_validate_(skb, proto, true, true, check, compute_pseudo)
+ __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
#define skb_checksum_simple_validate(skb) \
__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
@@ -3096,6 +3148,40 @@ do { \
compute_pseudo(skb, proto)); \
} while (0)
+static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
+ u16 start, u16 offset)
+{
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
+ skb->csum_offset = offset - start;
+}
+
+/* Update the skb and packet to reflect the remote checksum offload operation.
+ * When called, ptr indicates the starting point for skb->csum when
+ * ip_summed is CHECKSUM_COMPLETE. If we need to create a checksum-complete
+ * value here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
+ */
+static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
+ int start, int offset, bool nopartial)
+{
+ __wsum delta;
+
+ if (!nopartial) {
+ skb_remcsum_adjust_partial(skb, ptr, start, offset);
+ return;
+ }
+
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+ __skb_checksum_complete(skb);
+ skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
+ }
+
+ delta = remcsum_adjust(ptr, skb->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+}
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -3232,9 +3318,6 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
- unsigned int num_tx_queues);
-
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
@@ -3289,15 +3372,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
- skb_transport_offset(skb);
- __u16 csum;
+ skb_transport_offset(skb);
+ __wsum partial;
- csum = csum_fold(csum_partial(skb_transport_header(skb),
- plen, skb->csum));
+ partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
skb->csum = res;
SKB_GSO_CB(skb)->csum_start -= plen;
- return csum;
+ return csum_fold(partial);
}
static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3352,10 +3434,9 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
-
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
- const struct flow_keys *keys, int hlen);
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+ unsigned int transport_len,
+ __sum16(*skb_chkf)(struct sk_buff *skb));
/**
* skb_head_is_locked - Determine if the skb->head is locked down
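With __must_check added, callers of skb_header_pointer() and __skb_header_pointer() have to test the return value before dereferencing it; a typical (illustrative) call site looks like this:

	struct udphdr _uh;
	const struct udphdr *uh;

	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
	if (!uh)
		return -EINVAL;		/* packet too short for a UDP header */
	/* safe to read uh->source, uh->dest, ... */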
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9a139b6..a99f0e5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -18,7 +18,7 @@
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
+ * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
@@ -104,6 +104,7 @@
(unsigned long)ZERO_SIZE_PTR)
#include <linux/kmemleak.h>
+#include <linux/kasan.h>
struct mem_cgroup;
/*
@@ -115,14 +116,12 @@ int slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
-#ifdef CONFIG_MEMCG_KMEM
-struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *,
- struct kmem_cache *,
- const char *);
-#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
-void kmem_cache_free(struct kmem_cache *, void *);
+
+void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
+void memcg_deactivate_kmem_caches(struct mem_cgroup *);
+void memcg_destroy_kmem_caches(struct mem_cgroup *);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -241,8 +240,8 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
* belongs to.
* 0 = zero alloc
* 1 = 65 .. 96 bytes
- * 2 = 120 .. 192 bytes
- * n = 2^(n-1) .. 2^n -1
+ * 2 = 129 .. 192 bytes
+ * n = 2^(n-1)+1 .. 2^n
*/
static __always_inline int kmalloc_index(size_t size)
{
@@ -289,6 +288,7 @@ static __always_inline int kmalloc_index(size_t size)
void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void kmem_cache_free(struct kmem_cache *, void *);
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
@@ -326,7 +326,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
gfp_t flags, size_t size)
{
- return kmem_cache_alloc(s, flags);
+ void *ret = kmem_cache_alloc(s, flags);
+
+ kasan_kmalloc(s, ret, size);
+ return ret;
}
static __always_inline void *
@@ -334,7 +337,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node, size_t size)
{
- return kmem_cache_alloc_node(s, gfpflags, node);
+ void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+
+ kasan_kmalloc(s, ret, size);
+ return ret;
}
#endif /* CONFIG_TRACING */
@@ -474,14 +480,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
+
+struct memcg_cache_array {
+ struct rcu_head rcu;
+ struct kmem_cache *entries[0];
+};
+
/*
* This is the main placeholder for memcg-related information in kmem caches.
- * struct kmem_cache will hold a pointer to it, so the memory cost while
- * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
- * would otherwise be if that would be bundled in kmem_cache: we'll need an
- * extra pointer chase. But the trade off clearly lays in favor of not
- * penalizing non-users.
- *
* Both the root cache and the child caches will have it. For the root cache,
* this will hold a dynamically allocated array large enough to hold
* information about the currently limited memcgs in the system. To allow the
@@ -491,19 +497,18 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* Child caches will hold extra metadata needed for its operation. Fields are:
*
* @memcg: pointer to the memcg this cache belongs to
- * @list: list_head for the list of all caches in this memcg
* @root_cache: pointer to the global, root cache, this cache was derived from
+ *
+ * Both root and child caches of the same kind are linked into a list chained
+ * through @list.
*/
struct memcg_cache_params {
bool is_root_cache;
+ struct list_head list;
union {
- struct {
- struct rcu_head rcu_head;
- struct kmem_cache *memcg_caches[0];
- };
+ struct memcg_cache_array __rcu *memcg_caches;
struct {
struct mem_cgroup *memcg;
- struct list_head list;
struct kmem_cache *root_cache;
};
};
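The corrected size-class comment can be checked against kmalloc_index() itself; assuming the common small KMALLOC_MIN_SIZE (the SLUB default), a few sample sizes map as follows (illustrative):

	/* kmalloc_index(96)  == 1  ->  96-byte cache (65 .. 96)   */
	/* kmalloc_index(128) == 7  -> 128-byte cache (97 .. 128)  */
	/* kmalloc_index(129) == 2  -> 192-byte cache (129 .. 192) */
	/* kmalloc_index(256) == 8  -> 256-byte cache (193 .. 256) */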
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index b869d16..33d0490 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -70,7 +70,7 @@ struct kmem_cache {
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
- struct memcg_cache_params *memcg_params;
+ struct memcg_cache_params memcg_params;
#endif
struct kmem_cache_node *node[MAX_NUMNODES];
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d82abd4..3388511 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -85,7 +85,7 @@ struct kmem_cache {
struct kobject kobj; /* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
- struct memcg_cache_params *memcg_params;
+ struct memcg_cache_params memcg_params;
int max_attr_size; /* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
struct kset *memcg_kset;
@@ -110,4 +110,23 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
}
#endif
+
+/**
+ * virt_to_obj - return the address of the beginning of an object.
+ * @s: object's kmem_cache
+ * @slab_page: address of the slab page
+ * @x: address within the object's memory range
+ *
+ * Returns the address of the beginning of the object.
+ */
+static inline void *virt_to_obj(struct kmem_cache *s,
+ const void *slab_page,
+ const void *x)
+{
+ return (void *)x - ((x - slab_page) % s->size);
+}
+
+void object_err(struct kmem_cache *s, struct page *page,
+ u8 *object, char *reason);
+
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 93dff5f..c441407 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -18,7 +18,7 @@ struct call_single_data {
struct llist_node llist;
smp_call_func_t func;
void *info;
- u16 flags;
+ unsigned int flags;
};
/* total number of cpus in this system (may exceed NR_CPUS) */
@@ -151,6 +151,13 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
+#ifdef CONFIG_UP_LATE_INIT
+extern void __init up_late_init(void);
+static inline void smp_init(void) { up_late_init(); }
+#else
+static inline void smp_init(void) { }
+#endif
+
#endif /* !SMP */
/*
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 13e9296..da3c593 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -27,6 +27,8 @@ struct smpboot_thread_data;
* @pre_unpark: Optional unpark function, called before the thread is
* unparked (cpu online). This is not guaranteed to be
* called on the target cpu of the thread. Careful!
+ * @cpumask: Internal state. To update which threads are unparked,
+ * call smpboot_update_cpumask_percpu_thread().
* @selfparking: Thread is not parked by the park function.
* @thread_comm: The base name of the thread
*/
@@ -41,12 +43,14 @@ struct smp_hotplug_thread {
void (*park)(unsigned int cpu);
void (*unpark)(unsigned int cpu);
void (*pre_unpark)(unsigned int cpu);
+ cpumask_var_t cpumask;
bool selfparking;
const char *thread_comm;
};
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_thread_schedule(void);
+int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *);
#endif
diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h
new file mode 100644
index 0000000..c5f663b
--- /dev/null
+++ b/include/linux/soc/sunxi/sunxi_sram.h
@@ -0,0 +1,19 @@
+/*
+ * Allwinner SoCs SRAM Controller Driver
+ *
+ * Copyright (C) 2015 Maxime Ripard
+ *
+ * Author: Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _SUNXI_SRAM_H_
+#define _SUNXI_SRAM_H_
+
+int sunxi_sram_claim(struct device *dev);
+int sunxi_sram_release(struct device *dev);
+
+#endif /* _SUNXI_SRAM_H_ */
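The two exported helpers are meant to be called from a consumer driver's probe/remove paths; a minimal, hypothetical usage sketch:

static int my_probe(struct platform_device *pdev)
{
	int ret;

	/* map the SRAM section described in DT to this device */
	ret = sunxi_sram_claim(&pdev->dev);
	if (ret)
		return ret;

	/* ... set up and use the hardware ... */
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	sunxi_sram_release(&pdev->dev);
	return 0;
}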
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 46cca4c..fddebc6 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -1,7 +1,10 @@
#ifndef __SOCK_DIAG_H__
#define __SOCK_DIAG_H__
+#include <linux/netlink.h>
#include <linux/user_namespace.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
struct sk_buff;
@@ -11,6 +14,7 @@ struct sock;
struct sock_diag_handler {
__u8 family;
int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+ int (*get_info)(struct sk_buff *skb, struct sock *sk);
};
int sock_diag_register(const struct sock_diag_handler *h);
@@ -19,11 +23,49 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
-int sock_diag_check_cookie(void *sk, __u32 *cookie);
-void sock_diag_save_cookie(void *sk, __u32 *cookie);
+int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
+void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
struct sk_buff *skb, int attrtype);
+static inline
+enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
+{
+ switch (sk->sk_family) {
+ case AF_INET:
+ switch (sk->sk_protocol) {
+ case IPPROTO_TCP:
+ return SKNLGRP_INET_TCP_DESTROY;
+ case IPPROTO_UDP:
+ return SKNLGRP_INET_UDP_DESTROY;
+ default:
+ return SKNLGRP_NONE;
+ }
+ case AF_INET6:
+ switch (sk->sk_protocol) {
+ case IPPROTO_TCP:
+ return SKNLGRP_INET6_TCP_DESTROY;
+ case IPPROTO_UDP:
+ return SKNLGRP_INET6_UDP_DESTROY;
+ default:
+ return SKNLGRP_NONE;
+ }
+ default:
+ return SKNLGRP_NONE;
+ }
+}
+
+static inline
+bool sock_diag_has_destroy_listeners(const struct sock *sk)
+{
+ const struct net *n = sock_net(sk);
+ const enum sknetlink_groups group = sock_diag_destroy_group(sk);
+
+ return group != SKNLGRP_NONE && n->diag_nlsk &&
+ netlink_has_listeners(n->diag_nlsk, group);
+}
+void sock_diag_broadcast_destroy(struct sock *sk);
+
#endif
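sock_diag_has_destroy_listeners() lets the (comparatively expensive) broadcast be skipped when nobody is subscribed to the new destroy groups; an illustrative guard at a socket-destruction site:

	if (sock_diag_has_destroy_listeners(sk))
		sock_diag_broadcast_destroy(sk);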
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6e49a14..5bf59c8 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -51,6 +51,7 @@ struct msghdr {
void *msg_control; /* ancillary data */
__kernel_size_t msg_controllen; /* ancillary data buffer length */
unsigned int msg_flags; /* flags on received message */
+ struct kiocb *msg_iocb; /* ptr to iocb for async requests */
};
struct user_msghdr {
@@ -138,6 +139,11 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
+static inline size_t msg_data_left(struct msghdr *msg)
+{
+ return iov_iter_count(&msg->msg_iter);
+}
+
/* "Socket"-level control message types: */
#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
@@ -181,6 +187,7 @@ struct ucred {
#define AF_WANPIPE 25 /* Wanpipe API Sockets */
#define AF_LLC 26 /* Linux LLC */
#define AF_IB 27 /* Native InfiniBand address */
+#define AF_MPLS 28 /* MPLS */
#define AF_CAN 29 /* Controller Area Network */
#define AF_TIPC 30 /* TIPC sockets */
#define AF_BLUETOOTH 31 /* Bluetooth sockets */
@@ -226,6 +233,7 @@ struct ucred {
#define PF_WANPIPE AF_WANPIPE
#define PF_LLC AF_LLC
#define PF_IB AF_IB
+#define PF_MPLS AF_MPLS
#define PF_CAN AF_CAN
#define PF_TIPC AF_TIPC
#define PF_BLUETOOTH AF_BLUETOOTH
@@ -318,13 +326,6 @@ struct ucred {
/* IPX options */
#define IPX_TYPE 1
-extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
- struct iovec *iov,
- int offset,
- unsigned int len, __wsum *csump);
-extern unsigned long iov_pages(const struct iovec *iov, int offset,
- unsigned long nr_segs);
-
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
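msg_data_left() is just iov_iter_count() on msg->msg_iter; a hypothetical sendmsg-style loop that drains the iterator chunk by chunk might read:

	while (msg_data_left(msg)) {
		size_t chunk = min(msg_data_left(msg), max_chunk);	/* max_chunk is illustrative */

		if (copy_from_iter(buf, chunk, &msg->msg_iter) != chunk)
			return -EFAULT;
		/* ... hand buf off to the transmit path ... */
	}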
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index b2b1afb..b63fe6f 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Written by:
* Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
*/
@@ -26,6 +22,7 @@ struct at86rf230_platform_data {
int rstn;
int slp_tr;
int dig2;
+ u8 xtal_trim;
};
#endif
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
index bc8677c..e69e9b5 100644
--- a/include/linux/spi/l4f00242t03.h
+++ b/include/linux/spi/l4f00242t03.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
index 555d254..fdd1d1d 100644
--- a/include/linux/spi/lms283gf05.h
+++ b/include/linux/spi/lms283gf05.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h
index 4835486..381d368 100644
--- a/include/linux/spi/mxs-spi.h
+++ b/include/linux/spi/mxs-spi.h
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __LINUX_SPI_MXS_SPI_H__
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index d5a3165..6d36dac 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __linux_pxa2xx_spi_h
#define __linux_pxa2xx_spi_h
@@ -57,7 +53,6 @@ struct pxa2xx_spi_chip {
#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
#include <linux/clk.h>
-#include <mach/dma.h>
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index e546b2c..a693188 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -11,11 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_SPI_RENESAS_SPI_H__
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
index a1121f8..aa0d440 100644
--- a/include/linux/spi/sh_hspi.h
+++ b/include/linux/spi/sh_hspi.h
@@ -9,10 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef SH_HSPI_H
#define SH_HSPI_H
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index 88a14d8..b087a85 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -7,6 +7,8 @@ struct sh_msiof_spi_info {
u16 num_chipselect;
unsigned int dma_tx_id;
unsigned int dma_rx_id;
+ u32 dtdl;
+ u32 syncdl;
};
#endif /* __SPI_SH_MSIOF_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a6ef2a8..d673072 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_SPI_H
@@ -166,8 +162,6 @@ struct spi_transfer;
* @remove: Unbinds this driver from the spi device
* @shutdown: Standard shutdown callback used during system state
* transitions such as powerdown/halt and kexec
- * @suspend: Standard suspend callback used during system state transitions
- * @resume: Standard resume callback used during system state transitions
* @driver: SPI device drivers should initialize the name and owner
* field of this structure.
*
@@ -188,8 +182,6 @@ struct spi_driver {
int (*probe)(struct spi_device *spi);
int (*remove)(struct spi_device *spi);
void (*shutdown)(struct spi_device *spi);
- int (*suspend)(struct spi_device *spi, pm_message_t mesg);
- int (*resume)(struct spi_device *spi);
struct device_driver driver;
};
@@ -260,6 +252,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @pump_messages: work struct for scheduling work to the message pump
* @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
+ * @idling: the device is entering idle state
* @cur_msg: the currently in-flight message
* @cur_msg_prepared: spi_prepare_message was called for the currently
* in-flight message
@@ -297,6 +290,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* transfer_one_message are mutually exclusive; when both
* are set, the generic subsystem does not call your
* transfer_one callback.
+ * @handle_err: the subsystem calls the driver to handle an error that occurs
+ * in the generic implementation of transfer_one_message().
* @unprepare_message: undo any work done by prepare_message().
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
@@ -425,6 +420,7 @@ struct spi_master {
spinlock_t queue_lock;
struct list_head queue;
struct spi_message *cur_msg;
+ bool idling;
bool busy;
bool running;
bool rt;
@@ -450,6 +446,8 @@ struct spi_master {
void (*set_cs)(struct spi_device *spi, bool enable);
int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *transfer);
+ void (*handle_err)(struct spi_master *master,
+ struct spi_message *message);
/* gpio chip select */
int *cs_gpios;
@@ -651,7 +649,7 @@ struct spi_transfer {
* sequence completes. On some systems, many such sequences can execute as
* a single programmed DMA transfer. On all systems, these messages are
* queued, and might complete after transactions to other devices. Messages
- * sent to a given spi_device are alway executed in FIFO order.
+ * sent to a given spi_device are always executed in FIFO order.
*
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
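Controller drivers that use the transfer_one() path can now also provide handle_err() for cleanup after a failed transfer; a minimal sketch with illustrative foo_ names:

static int foo_transfer_one(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* program FIFOs/DMA for one transfer; 0 = done, <0 = error */
	return 0;
}

static void foo_handle_err(struct spi_master *master, struct spi_message *msg)
{
	/* the core reported a failure: reset FIFOs, terminate DMA, etc. */
}

/* in the controller's probe(): */
	master->transfer_one = foo_transfer_one;
	master->handle_err = foo_handle_err;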
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
index 60b5918..414c6fd 100644
--- a/include/linux/spi/tle62x0.h
+++ b/include/linux/spi/tle62x0.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
struct tle62x0_pdata {
diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
index 8f721e4..563b3b1 100644
--- a/include/linux/spi/tsc2005.h
+++ b/include/linux/spi/tsc2005.h
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _LINUX_SPI_TSC2005_H
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 262ba4e..0063b24 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -120,7 +120,7 @@ do { \
/*
* Despite its name it doesn't necessarily have to be a full barrier.
* It should only guarantee that a STORE before the critical section
- * can not be reordered with a LOAD inside this section.
+ * can not be reordered with LOADs and STOREs inside this section.
* spin_lock() is the one-way barrier, this LOAD can not escape out
* of the region. So the default implementation simply ensures that
* a STORE can not move into the critical section, smp_wmb() should
@@ -190,6 +190,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
+# define raw_spin_lock_bh_nested(lock, subclass) \
+ _raw_spin_lock_bh_nested(lock, subclass)
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
@@ -205,6 +207,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
+# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -324,6 +327,11 @@ do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
+#define spin_lock_bh_nested(lock, subclass) \
+do { \
+ raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
+} while (0)
+
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab8..5344268 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
__acquires(lock);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d1888..d3afef9 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -57,6 +57,7 @@
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a2783cb..bdeb456 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -45,7 +45,7 @@ struct rcu_batch {
#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
struct srcu_struct {
- unsigned completed;
+ unsigned long completed;
struct srcu_struct_array __percpu *per_cpu_ref;
spinlock_t queue_lock; /* protect ->batch_queue, ->running */
bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
* define and init a srcu struct at build time.
* don't call init_srcu_struct() or cleanup_srcu_struct() on it.
*/
-#define DEFINE_SRCU(name) \
+#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- struct srcu_struct name = __SRCU_STRUCT_INIT(name);
-
-#define DEFINE_STATIC_SRCU(name) \
- static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
-long srcu_batches_completed(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
void srcu_barrier(struct srcu_struct *sp);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -184,7 +182,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
* lockdep_is_held() calls.
*/
#define srcu_dereference_check(p, sp, c) \
- __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)
+ __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
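DEFINE_SRCU() and DEFINE_STATIC_SRCU() now share one helper; typical build-time usage pairs a reader section with an updater that waits for a grace period (illustrative sketch, with gp an __rcu pointer and idx an int):

DEFINE_STATIC_SRCU(my_srcu);

/* reader */
	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(gp, &my_srcu);
	/* ... use p ... */
	srcu_read_unlock(&my_srcu, idx);

/* updater */
	rcu_assign_pointer(gp, new_p);
	synchronize_srcu(&my_srcu);	/* wait for all pre-existing readers */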
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 4568a5c..c3d1a52 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -29,10 +29,13 @@ struct ssb_sprom {
u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */
u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */
u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */
+ u8 et2mac[6] __aligned(sizeof(u16)); /* MAC address for extra Ethernet */
u8 et0phyaddr; /* MII address for enet0 */
u8 et1phyaddr; /* MII address for enet1 */
+ u8 et2phyaddr; /* MII address for enet2 */
u8 et0mdcport; /* MDIO for enet0 */
u8 et1mdcport; /* MDIO for enet1 */
+ u8 et2mdcport; /* MDIO for enet2 */
u16 dev_id; /* Device ID overriding e.g. PCI ID */
u16 board_rev; /* Board revision number from SPROM. */
u16 board_num; /* Board number from SPROM. */
@@ -88,11 +91,14 @@ struct ssb_sprom {
u32 ofdm5glpo; /* 5.2GHz OFDM power offset */
u32 ofdm5gpo; /* 5.3GHz OFDM power offset */
u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */
+ u32 boardflags;
+ u32 boardflags2;
+ u32 boardflags3;
+ /* TODO: Switch all drivers to new u32 fields and drop below ones */
u16 boardflags_lo; /* Board flags (bits 0-15) */
u16 boardflags_hi; /* Board flags (bits 16-31) */
u16 boardflags2_lo; /* Board flags (bits 32-47) */
u16 boardflags2_hi; /* Board flags (bits 48-63) */
- /* TODO store board flags in a single u64 */
struct ssb_sprom_core_pwr_info core_pwr_info[4];
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index f7b9100..c0f707a 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -173,6 +173,7 @@
#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
#define SSB_SPROMSIZE_WORDS_R10 230
+#define SSB_SPROMSIZE_WORDS_R11 234
#define SSB_SPROM_BASE1 0x1000
#define SSB_SPROM_BASE31 0x0800
#define SSB_SPROM_REVISION 0x007E
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 669045a..0a34489 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -7,8 +7,6 @@ struct task_struct;
struct pt_regs;
#ifdef CONFIG_STACKTRACE
-struct task_struct;
-
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index f4aec0e..9c61c7c 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -3,7 +3,6 @@
#include <uapi/linux/stddef.h>
-
#undef NULL
#define NULL ((void *)0)
@@ -14,8 +13,18 @@ enum {
#undef offsetof
#ifdef __compiler_offsetof
-#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
+#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
#else
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#endif
+
+/**
+ * offsetofend(TYPE, MEMBER)
+ *
+ * @TYPE: The type of the structure
+ * @MEMBER: The member within the structure to get the end offset of
+ */
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
+
#endif
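offsetofend() yields the offset of the first byte past a member, which is convenient for partial-structure copies and input validation; a small illustrative example:

struct hdr {
	u32 magic;
	u16 version;
	u16 flags;
};

	/* bytes that must be present to read everything up to 'version' */
	size_t need = offsetofend(struct hdr, version);	/* 4 + 2 == 6 */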
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index cd63851..c735f5c 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -99,6 +99,7 @@ struct plat_stmmacenet_data {
int phy_addr;
int interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
+ struct device_node *phy_node;
struct stmmac_dma_cfg *dma_cfg;
int clk_csr;
int has_gmac;
@@ -114,6 +115,8 @@ struct plat_stmmacenet_data {
int maxmtu;
int multicast_filter_bins;
int unicast_filter_entries;
+ int tx_fifo_size;
+ int rx_fifo_size;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
void *(*setup)(struct platform_device *pdev);
diff --git a/include/linux/string.h b/include/linux/string.h
index 2e22a2e..a8d90db 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -40,9 +40,6 @@ extern int strcmp(const char *,const char *);
#ifndef __HAVE_ARCH_STRNCMP
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_STRNICMP
-#define strnicmp strncasecmp
-#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
#endif
@@ -114,8 +111,12 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
#endif
void *memchr_inv(const void *s, int c, size_t n);
+char *strreplace(char *s, char old, char new);
+
+extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp);
+extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
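strreplace() swaps every occurrence of one character in place, while kstrdup_const()/kfree_const() avoid copying strings that already live in rodata; an illustrative use of the former when sanitising a name ('path' is illustrative):

	char *name = kstrdup(path, GFP_KERNEL);

	if (!name)
		return -ENOMEM;
	strreplace(name, '/', '!');	/* '/' is not valid in, e.g., a kobject name */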
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 6eb567a..71f711d 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -10,8 +10,8 @@ enum string_size_units {
STRING_UNITS_2, /* use binary powers of 2^10 */
};
-int string_get_size(u64 size, enum string_size_units units,
- char *buf, int len);
+void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
+ char *buf, int len);
#define UNESCAPE_SPACE 0x01
#define UNESCAPE_OCTAL 0x02
@@ -47,22 +47,22 @@ static inline int string_unescape_any_inplace(char *buf)
#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
#define ESCAPE_HEX 0x20
-int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
+int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *esc);
static inline int string_escape_mem_any_np(const char *src, size_t isz,
- char **dst, size_t osz, const char *esc)
+ char *dst, size_t osz, const char *esc)
{
return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
}
-static inline int string_escape_str(const char *src, char **dst, size_t sz,
+static inline int string_escape_str(const char *src, char *dst, size_t sz,
unsigned int flags, const char *esc)
{
return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
}
-static inline int string_escape_str_any_np(const char *src, char **dst,
+static inline int string_escape_str_any_np(const char *src, char *dst,
size_t sz, const char *esc)
{
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 2ca67b5..8df43c9f 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -37,7 +37,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
-int bc_send(struct rpc_rqst *req);
/*
* Determine if a shared backchannel is in use
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index d86acc6..131032f 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -56,8 +56,9 @@ struct rpc_clnt {
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
+ atomic_t cl_swapper; /* swapfile count */
int cl_nodelen; /* nodename length */
- char cl_nodename[UNX_MAXNODENAME];
+ char cl_nodename[UNX_MAXNODENAME+1];
struct rpc_pipe_dir_head cl_pipedir_objects;
struct rpc_clnt * cl_parent; /* Points to parent of clones */
struct rpc_rtt cl_rtt_default;
@@ -112,6 +113,7 @@ struct rpc_create_args {
struct sockaddr *saddress;
const struct rpc_timeout *timeout;
const char *servername;
+ const char *nodename;
const struct rpc_program *program;
u32 prognumber; /* overrides program->number */
u32 version;
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index c57d8ea..59a7889 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -60,17 +60,17 @@ struct rpc_xprt;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
void rpc_register_sysctl(void);
void rpc_unregister_sysctl(void);
-int sunrpc_debugfs_init(void);
+void sunrpc_debugfs_init(void);
void sunrpc_debugfs_exit(void);
-int rpc_clnt_debugfs_register(struct rpc_clnt *);
+void rpc_clnt_debugfs_register(struct rpc_clnt *);
void rpc_clnt_debugfs_unregister(struct rpc_clnt *);
-int rpc_xprt_debugfs_register(struct rpc_xprt *);
+void rpc_xprt_debugfs_register(struct rpc_xprt *);
void rpc_xprt_debugfs_unregister(struct rpc_xprt *);
#else
-static inline int
+static inline void
sunrpc_debugfs_init(void)
{
- return 0;
+ return;
}
static inline void
@@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void)
return;
}
-static inline int
+static inline void
rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
{
- return 0;
+ return;
}
static inline void
@@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
return;
}
-static inline int
+static inline void
rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
{
- return 0;
+ return;
}
static inline void
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index eecb5a7..694eecb 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -79,6 +79,8 @@ struct rpc_clnt;
struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *);
void rpc_count_iostats(const struct rpc_task *,
struct rpc_iostats *);
+void rpc_count_iostats_metrics(const struct rpc_task *,
+ struct rpc_iostats *);
void rpc_print_iostats(struct seq_file *, struct rpc_clnt *);
void rpc_free_iostats(struct rpc_iostats *);
@@ -87,6 +89,11 @@ void rpc_free_iostats(struct rpc_iostats *);
static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
static inline void rpc_count_iostats(const struct rpc_task *task,
struct rpc_iostats *stats) {}
+static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
+ struct rpc_iostats *stats)
+{
+}
+
static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index aadc6a0..8073713 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -142,12 +142,18 @@ typedef __be32 rpc_fraghdr;
(RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4))
/*
- * RFC1833/RFC3530 rpcbind (v3+) well-known netid's.
+ * Well-known netids. See:
+ *
+ * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
*/
#define RPCBIND_NETID_UDP "udp"
#define RPCBIND_NETID_TCP "tcp"
+#define RPCBIND_NETID_RDMA "rdma"
+#define RPCBIND_NETID_SCTP "sctp"
#define RPCBIND_NETID_UDP6 "udp6"
#define RPCBIND_NETID_TCP6 "tcp6"
+#define RPCBIND_NETID_RDMA6 "rdma6"
+#define RPCBIND_NETID_SCTP6 "sctp6"
#define RPCBIND_NETID_LOCAL "local"
/*
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index b78f16b..f33c5a4 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -42,6 +42,9 @@
#include <linux/types.h>
+#define RPCRDMA_VERSION 1
+#define rpcrdma_version cpu_to_be32(RPCRDMA_VERSION)
+
struct rpcrdma_segment {
__be32 rs_handle; /* Registered memory handle */
__be32 rs_length; /* Length of the chunk in bytes */
@@ -95,7 +98,10 @@ struct rpcrdma_msg {
} rm_body;
};
-#define RPCRDMA_HDRLEN_MIN 28
+/*
+ * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
+ */
+#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
enum rpcrdma_errcode {
ERR_VERS = 1,
@@ -115,4 +121,10 @@ enum rpcrdma_proc {
RDMA_ERROR = 4 /* An RPC RDMA encoding error */
};
+#define rdma_msg cpu_to_be32(RDMA_MSG)
+#define rdma_nomsg cpu_to_be32(RDMA_NOMSG)
+#define rdma_msgp cpu_to_be32(RDMA_MSGP)
+#define rdma_done cpu_to_be32(RDMA_DONE)
+#define rdma_error cpu_to_be32(RDMA_ERROR)
+
#endif /* _LINUX_SUNRPC_RPC_RDMA_H */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 5f1e6bd..d703f0e 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -205,8 +205,7 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
- const struct rpc_call_ops *ops);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
@@ -269,4 +268,20 @@ static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
}
#endif
+#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
+int rpc_clnt_swap_activate(struct rpc_clnt *clnt);
+void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt);
+#else
+static inline int
+rpc_clnt_swap_activate(struct rpc_clnt *clnt)
+{
+ return -EINVAL;
+}
+
+static inline void
+rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
+{
+}
+#endif /* CONFIG_SUNRPC_SWAP */
+
#endif /* _LINUX_SUNRPC_SCHED_H_ */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 6f22cfe..fae6fb9 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -110,7 +110,7 @@ struct svc_serv {
* We use sv_nrthreads as a reference count. svc_destroy() drops
* this refcount, so we need to bump it up around operations that
* change the number of threads. Horrible, but there it is.
- * Should be called with the BKL held.
+ * Should be called with the "service mutex" held.
*/
static inline void svc_get(struct svc_serv *serv)
{
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 975da75..cb94ee4 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -63,8 +63,6 @@ extern atomic_t rdma_stat_rq_prod;
extern atomic_t rdma_stat_sq_poll;
extern atomic_t rdma_stat_sq_prod;
-#define RPCRDMA_VERSION 1
-
/*
* Contexts are built when an RDMA request is created and are a
* record of the resources that can be recovered when the request
@@ -79,6 +77,7 @@ struct svc_rdma_op_ctxt {
enum ib_wr_opcode wr_op;
enum ib_wc_status wc_status;
u32 byte_len;
+ u32 position;
struct svcxprt_rdma *xprt;
unsigned long flags;
enum dma_data_direction direction;
@@ -150,6 +149,10 @@ struct svcxprt_rdma {
struct ib_cq *sc_rq_cq;
struct ib_cq *sc_sq_cq;
struct ib_mr *sc_phys_mr; /* MR for server memory */
+ int (*sc_reader)(struct svcxprt_rdma *,
+ struct svc_rqst *,
+ struct svc_rdma_op_ctxt *,
+ int *, u32 *, u32, u32, u64, bool);
u32 sc_dev_caps; /* distilled device caps */
u32 sc_dma_lkey; /* local dma key */
unsigned int sc_frmr_pg_list_len;
@@ -169,6 +172,13 @@ struct svcxprt_rdma {
#define RDMAXPRT_SQ_PENDING 2
#define RDMAXPRT_CONN_PENDING 3
+#define RPCRDMA_MAX_SVC_SEGS (64) /* server max scatter/gather */
+#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
+#define RPCRDMA_MAXPAYLOAD RPCSVC_MAXPAYLOAD
+#else
+#define RPCRDMA_MAXPAYLOAD (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
+#endif
+
#define RPCRDMA_LISTEN_BACKLOG 10
/* The default ORD value is based on two outstanding full-size writes with a
* page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
@@ -178,13 +188,10 @@ struct svcxprt_rdma {
#define RPCRDMA_MAX_REQ_SIZE 4096
/* svc_rdma_marshal.c */
-extern void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *,
- int *, int *);
extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
-extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
struct rpcrdma_msg *,
- enum rpcrdma_errcode, u32 *);
+ enum rpcrdma_errcode, __be32 *);
extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
@@ -197,6 +204,12 @@ extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *);
/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);
+extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
+ struct svc_rdma_op_ctxt *, int *, u32 *,
+ u32, u32, u64, bool);
+extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
+ struct svc_rdma_op_ctxt *, int *, u32 *,
+ u32, u32, u64, bool);
/* svc_rdma_sendto.c */
extern int svc_rdma_sendto(struct svc_rqst *);
@@ -205,7 +218,6 @@ extern int svc_rdma_sendto(struct svc_rqst *);
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
enum rpcrdma_errcode);
-struct page *svc_rdma_get_page(void);
extern int svc_rdma_post_recv(struct svcxprt_rdma *);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 9d27ac4..0fb9acb 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -133,6 +133,9 @@ struct rpc_xprt_ops {
void (*close)(struct rpc_xprt *xprt);
void (*destroy)(struct rpc_xprt *xprt);
void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
+ int (*enable_swap)(struct rpc_xprt *xprt);
+ void (*disable_swap)(struct rpc_xprt *xprt);
+ void (*inject_disconnect)(struct rpc_xprt *xprt);
};
/*
@@ -180,7 +183,7 @@ struct rpc_xprt {
atomic_t num_reqs; /* total slots */
unsigned long state; /* transport state */
unsigned char resvport : 1; /* use a reserved port */
- unsigned int swapper; /* we're swapping over this
+ atomic_t swapper; /* we're swapping over this
transport */
unsigned int bind_index; /* bind function index */
@@ -212,7 +215,8 @@ struct rpc_xprt {
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct svc_serv *bc_serv; /* The RPC service which will */
/* process the callback */
- unsigned int bc_alloc_count; /* Total number of preallocs */
+ int bc_alloc_count; /* Total number of preallocs */
+ atomic_t bc_free_slots;
spinlock_t bc_pa_lock; /* Protects the preallocated
* items */
struct list_head bc_pa_list; /* List of preallocated
@@ -241,6 +245,7 @@ struct rpc_xprt {
const char *address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *debugfs; /* debugfs directory */
+ atomic_t inject_disconnect;
#endif
};
@@ -327,6 +332,18 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *
return p + xprt->tsh_size;
}
+static inline int
+xprt_enable_swap(struct rpc_xprt *xprt)
+{
+ return xprt->ops->enable_swap(xprt);
+}
+
+static inline void
+xprt_disable_swap(struct rpc_xprt *xprt)
+{
+ xprt->ops->disable_swap(xprt);
+}
+
/*
* Transport switch helper functions
*/
@@ -345,7 +362,9 @@ void xprt_release_rqst_cong(struct rpc_task *task);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
-int xs_swapper(struct rpc_xprt *xprt, int enable);
+
+bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
+void xprt_unlock_connect(struct rpc_xprt *, void *);
/*
* Reserved bit positions in xprt->state
@@ -357,10 +376,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
#define XPRT_BOUND (4)
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
-#define XPRT_CONNECTION_ABORT (7)
-#define XPRT_CONNECTION_CLOSE (8)
#define XPRT_CONGESTED (9)
-#define XPRT_CONNECTION_REUSE (10)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
@@ -431,6 +447,23 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
return test_and_set_bit(XPRT_BINDING, &xprt->state);
}
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+extern unsigned int rpc_inject_disconnect;
+static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
+{
+ if (!rpc_inject_disconnect)
+ return;
+ if (atomic_dec_return(&xprt->inject_disconnect))
+ return;
+ atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
+ xprt->ops->inject_disconnect(xprt);
+}
+#else
+static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
+{
+}
+#endif
+
#endif /* __KERNEL__*/
#endif /* _LINUX_SUNRPC_XPRT_H */
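The three new rpc_xprt_ops callbacks above (enable_swap, disable_swap, inject_disconnect) are reached through the xprt_enable_swap()/xprt_disable_swap() wrappers and the debug-only xprt_inject_disconnect() helper. A minimal hedged sketch of how a transport might fill them in follows; the foo_* names are invented for illustration, and only xprt_force_disconnect() comes from the declarations in this header.

/* Hedged sketch, not part of this patch: a hypothetical transport wiring
 * up the new rpc_xprt_ops callbacks. All foo_* names are illustrative. */
static int foo_enable_swap(struct rpc_xprt *xprt)
{
        /* e.g. pin whatever resources are needed to service swap I/O */
        return 0;
}

static void foo_disable_swap(struct rpc_xprt *xprt)
{
        /* release whatever foo_enable_swap() pinned */
}

static void foo_inject_disconnect(struct rpc_xprt *xprt)
{
        xprt_force_disconnect(xprt);    /* declared earlier in this header */
}

static struct rpc_xprt_ops foo_ops = {
        /* ...existing connect/close/destroy callbacks... */
        .enable_swap            = foo_enable_swap,
        .disable_swap           = foo_disable_swap,
        .inject_disconnect      = foo_inject_disconnect,
};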
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 64a0a0a..b176130 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -41,11 +41,6 @@
#define _LINUX_SUNRPC_XPRTRDMA_H
/*
- * rpcbind (v3+) RDMA netid.
- */
-#define RPCBIND_NETID_RDMA "rdma"
-
-/*
* Constants. Max RPC/NFS header is big enough to account for
* additional marshaling buffers passed down by Linux client.
*
@@ -61,7 +56,8 @@
#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
-/* memory registration strategies */
+/* Memory registration strategies, by number.
+ * This is part of a kernel / user space API. Do not remove. */
enum rpcrdma_memreg {
RPCRDMA_BOUNCEBUFFERS = 0,
RPCRDMA_REGISTER,
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 3388c1b..5efe743 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -201,6 +201,21 @@ struct platform_freeze_ops {
*/
extern void suspend_set_ops(const struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);
+
+/* Suspend-to-idle state machine. */
+enum freeze_state {
+ FREEZE_STATE_NONE, /* Not suspended/suspending. */
+ FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */
+ FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */
+};
+
+extern enum freeze_state __read_mostly suspend_freeze_state;
+
+static inline bool idle_should_freeze(void)
+{
+ return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
+}
+
extern void freeze_set_ops(const struct platform_freeze_ops *ops);
extern void freeze_wake(void);
@@ -228,6 +243,7 @@ extern int pm_suspend(suspend_state_t state);
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline bool idle_should_freeze(void) { return false; }
static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
static inline void freeze_wake(void) {}
#endif /* !CONFIG_SUSPEND */
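The new idle_should_freeze() test is intended to be polled from the idle loop, so a CPU entering idle can notice that a suspend-to-idle transition is in progress and pick its deepest state. A rough hedged sketch; foo_deepest_idle() and foo_normal_idle() are purely hypothetical entry points.

/* Hedged sketch: consult the suspend-to-idle state from an idle loop.
 * foo_deepest_idle() and foo_normal_idle() are hypothetical helpers. */
static void foo_idle_iteration(void)
{
        if (idle_should_freeze()) {
                /* FREEZE_STATE_ENTER: go as deep as the hardware allows */
                foo_deepest_idle();
                return;
        }
        foo_normal_idle();
}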
diff --git a/include/linux/sw842.h b/include/linux/sw842.h
new file mode 100644
index 0000000..109ba04
--- /dev/null
+++ b/include/linux/sw842.h
@@ -0,0 +1,12 @@
+#ifndef __SW842_H__
+#define __SW842_H__
+
+#define SW842_MEM_COMPRESS (0xf000)
+
+int sw842_compress(const u8 *src, unsigned int srclen,
+ u8 *dst, unsigned int *destlen, void *wmem);
+
+int sw842_decompress(const u8 *src, unsigned int srclen,
+ u8 *dst, unsigned int *destlen);
+
+#endif
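As a usage note for the new header: sw842_compress() needs a scratch buffer of SW842_MEM_COMPRESS bytes, while decompression needs none. A hedged round-trip sketch; the output-buffer sizing and error handling are illustrative only.

/* Hedged sketch: compress and decompress a buffer with the new sw842 API.
 * Buffer headroom is a guess; 842 output can exceed the input size. */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/sw842.h>

static int foo_sw842_roundtrip(const u8 *src, unsigned int srclen)
{
        unsigned int clen = srclen * 2;         /* illustrative headroom */
        unsigned int dlen = srclen;
        u8 *comp, *decomp;
        void *wmem;
        int ret = -ENOMEM;

        comp = kmalloc(clen, GFP_KERNEL);
        decomp = kmalloc(dlen, GFP_KERNEL);
        wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL); /* compress scratch */
        if (!comp || !decomp || !wmem)
                goto out;

        ret = sw842_compress(src, srclen, comp, &clen, wmem);
        if (!ret)
                ret = sw842_decompress(comp, clen, decomp, &dlen);
out:
        kfree(wmem);
        kfree(decomp);
        kfree(comp);
        return ret;
}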
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 34e8b60..3887472 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -307,7 +307,7 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_page(struct page *page);
+extern void deactivate_file_page(struct page *page);
extern void swap_setup(void);
extern void add_page_to_unevictable_list(struct page *page);
@@ -377,7 +377,6 @@ extern void end_swap_bio_write(struct bio *bio, int err);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
-extern void end_swap_bio_read(struct bio *bio, int err);
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);
@@ -437,16 +436,6 @@ extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-#ifdef CONFIG_MEMCG
-extern void
-mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
-#else
-static inline void
-mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
-{
-}
-#endif
-
#else /* CONFIG_SWAP */
#define swap_address_space(entry) (NULL)
@@ -547,11 +536,6 @@ static inline swp_entry_t get_swap_page(void)
return entry;
}
-static inline void
-mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
-{
-}
-
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 6adfb7b..cedf3d3 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -54,7 +54,7 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
- return !pte_none(pte) && !pte_present_nonuma(pte) && !pte_file(pte);
+ return !pte_none(pte) && !pte_present(pte);
}
#endif
@@ -66,7 +66,6 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
swp_entry_t arch_entry;
- BUG_ON(pte_file(pte));
if (pte_swp_soft_dirty(pte))
pte = pte_swp_clear_soft_dirty(pte);
arch_entry = __pte_to_swp_entry(pte);
@@ -82,7 +81,6 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
swp_entry_t arch_entry;
arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
- BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
return __swp_entry_to_pte(arch_entry);
}
@@ -137,6 +135,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}
+extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
@@ -150,6 +150,8 @@ static inline int is_migration_entry(swp_entry_t swp)
}
#define migration_entry_to_page(swp) NULL
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 85893d7..b45c45b 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -111,14 +111,14 @@ union bpf_attr;
#define __SC_STR_ADECL(t, a) #a
#define __SC_STR_TDECL(t, a) #t
-extern struct ftrace_event_class event_class_syscall_enter;
-extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_class event_class_syscall_enter;
+extern struct trace_event_class event_class_syscall_exit;
extern struct trace_event_functions enter_syscall_print_funcs;
extern struct trace_event_functions exit_syscall_print_funcs;
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
event_enter_##sname = { \
.class = &event_class_syscall_enter, \
{ \
@@ -128,13 +128,13 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_enter_##sname = &event_enter_##sname;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
event_exit_##sname = { \
.class = &event_class_syscall_exit, \
{ \
@@ -144,7 +144,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_exit_##sname = &event_exit_##sname;
@@ -410,12 +410,16 @@ asmlinkage long sys_newlstat(const char __user *filename,
struct stat __user *statbuf);
asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf);
asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf);
-#if BITS_PER_LONG == 32
+#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
asmlinkage long sys_stat64(const char __user *filename,
struct stat64 __user *statbuf);
asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
asmlinkage long sys_lstat64(const char __user *filename,
struct stat64 __user *statbuf);
+asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
+ struct stat64 __user *statbuf, int flag);
+#endif
+#if BITS_PER_LONG == 32
asmlinkage long sys_truncate64(const char __user *path, loff_t length);
asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
#endif
@@ -771,8 +775,6 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
umode_t mode);
asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
struct stat __user *statbuf, int flag);
-asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
- struct stat64 __user *statbuf, int flag);
asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
int bufsiz);
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
@@ -825,15 +827,15 @@ asmlinkage long sys_syncfs(int fd);
asmlinkage long sys_fork(void);
asmlinkage long sys_vfork(void);
#ifdef CONFIG_CLONE_BACKWARDS
-asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
+asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long,
int __user *);
#else
#ifdef CONFIG_CLONE_BACKWARDS3
asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
- int __user *, int);
+ int __user *, unsigned long);
#else
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
- int __user *, int);
+ int __user *, unsigned long);
#endif
#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b7361f8..fa7bc29 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -188,6 +188,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
void unregister_sysctl_table(struct ctl_table_header * table);
extern int sysctl_init(void);
+
+extern struct ctl_table sysctl_mount_point[];
+
#else /* CONFIG_SYSCTL */
static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
{
@@ -212,4 +215,7 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
#endif /* CONFIG_SYSCTL */
+int sysctl_max_threads(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#endif /* _LINUX_SYSCTL_H */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index ddad161..9f65758 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -57,6 +57,21 @@ do { \
#define sysfs_attr_init(attr) do {} while (0)
#endif
+/**
+ * struct attribute_group - data structure used to declare an attribute group.
+ * @name: Optional: Attribute group name
+ * If specified, the attribute group will be created in
+ * a new subdirectory with this name.
+ * @is_visible: Optional: Function to return permissions associated with an
+ * attribute of the group. Will be called repeatedly for each
+ * attribute in the group. Only read/write permissions as well as
+ * SYSFS_PREALLOC are accepted. Must return 0 if an attribute is
+ * not visible. The returned value will replace static permissions
+ * defined in struct attribute or struct bin_attribute.
+ * @attrs: Pointer to NULL terminated list of attributes.
+ * @bin_attrs: Pointer to NULL terminated list of binary attributes.
+ * Either attrs or bin_attrs or both must be provided.
+ */
struct attribute_group {
const char *name;
umode_t (*is_visible)(struct kobject *,
@@ -195,6 +210,10 @@ int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
int __must_check sysfs_move_dir_ns(struct kobject *kobj,
struct kobject *new_parent_kobj,
const void *new_ns);
+int __must_check sysfs_create_mount_point(struct kobject *parent_kobj,
+ const char *name);
+void sysfs_remove_mount_point(struct kobject *parent_kobj,
+ const char *name);
int __must_check sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
@@ -283,6 +302,17 @@ static inline int sysfs_move_dir_ns(struct kobject *kobj,
return 0;
}
+static inline int sysfs_create_mount_point(struct kobject *parent_kobj,
+ const char *name)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_mount_point(struct kobject *parent_kobj,
+ const char *name)
+{
+}
+
static inline int sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
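The kernel-doc added above for struct attribute_group reads more easily next to a short sketch. The example below is hypothetical: it assumes DEVICE_ATTR_RO(alpha) and DEVICE_ATTR_RW(beta) are defined elsewhere in the driver (via linux/device.h), and foo_has_feature() stands in for whatever capability check the driver actually uses.

/* Hedged sketch of an attribute group using @is_visible as documented
 * above. dev_attr_alpha/dev_attr_beta and foo_has_feature() are assumed
 * to exist elsewhere; nothing here is from this patch. */
static bool foo_has_feature(struct kobject *kobj);    /* hypothetical */

static struct attribute *foo_attrs[] = {
        &dev_attr_alpha.attr,
        &dev_attr_beta.attr,
        NULL,                           /* list must be NULL terminated */
};

static umode_t foo_attr_visible(struct kobject *kobj,
                                struct attribute *attr, int idx)
{
        if (attr == &dev_attr_beta.attr && !foo_has_feature(kobj))
                return 0;               /* hide "beta" on this device */
        return attr->mode;              /* keep the static permissions */
}

static const struct attribute_group foo_group = {
        .name           = "foo",        /* optional subdirectory */
        .is_visible     = foo_attr_visible,
        .attrs          = foo_attrs,
};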
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 4b7b875..c3a7f0c 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -47,12 +47,12 @@
#define SYSLOG_FROM_READER 0
#define SYSLOG_FROM_PROC 1
-int do_syslog(int type, char __user *buf, int count, bool from_file);
+int do_syslog(int type, char __user *buf, int count, int source);
#ifdef CONFIG_PRINTK
-int check_syslog_permissions(int type, bool from_file);
+int check_syslog_permissions(int type, int source);
#else
-static inline int check_syslog_permissions(int type, bool from_file)
+static inline int check_syslog_permissions(int type, int source)
{
return 0;
}
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 67309ec..48c3696 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -58,6 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
struct tcp_fastopen_cookie {
s8 len;
u8 val[TCP_FASTOPEN_COOKIE_MAX];
+ bool exp; /* In RFC6994 experimental option format */
};
/* This defines a selective acknowledgement block. */
@@ -111,10 +112,11 @@ struct tcp_request_sock_ops;
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
- struct sock *listener; /* needed for TFO */
+ bool tfo_listener;
u32 rcv_isn;
u32 snt_isn;
u32 snt_synack; /* synack sent time */
+ u32 last_oow_ack_time; /* last SYNACK */
u32 rcv_nxt; /* the ack # by SYNACK. For
* FastOpen it's the seq#
* after data-in-SYN.
@@ -143,15 +145,31 @@ struct tcp_sock {
* read the code and the spec side by side (and laugh ...)
* See RFC793 and RFC1122. The RFC writes these in capitals.
*/
+ u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+					 * were received.
+ */
+ u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
+ */
u32 rcv_nxt; /* What we want to receive next */
u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
u32 snd_nxt; /* Next sequence we send */
+ u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
+ */
+ struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
u32 snd_una; /* First byte we want an ack for */
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
+ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 tsoffset; /* timestamp offset */
@@ -186,7 +204,9 @@ struct tcp_sock {
u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
+ syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+ save_syn:1, /* Save headers of SYN packet */
is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
@@ -234,7 +254,6 @@ struct tcp_sock {
u32 lost_out; /* Lost packets */
u32 sacked_out; /* SACK'd packets */
u32 fackets_out; /* FACK'd packets */
- u32 tso_deferred;
/* from STCP, retrans queue hinting */
struct sk_buff* lost_skb_hint;
@@ -315,6 +334,7 @@ struct tcp_sock {
* socket. Used to retransmit SYNACKs etc.
*/
struct request_sock *fastopen_rsk;
+ u32 *saved_syn;
};
enum tsq_flags {
@@ -340,6 +360,10 @@ struct tcp_timewait_sock {
u32 tw_rcv_wnd;
u32 tw_ts_offset;
u32 tw_ts_recent;
+
+ /* The time we sent the last out-of-window ACK: */
+ u32 tw_last_oow_ack_time;
+
long tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
@@ -378,4 +402,10 @@ static inline int fastopen_init_queue(struct sock *sk, int backlog)
return 0;
}
+static inline void tcp_saved_syn_free(struct tcp_sock *tp)
+{
+ kfree(tp->saved_syn);
+ tp->saved_syn = NULL;
+}
+
#endif /* _LINUX_TCP_H */
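tcp_saved_syn_free() pairs with the new save_syn/saved_syn fields: the SYN headers are kept only until some consumer has copied them out. A tiny hedged sketch of the release side; tcp_sk() is the usual sock-to-tcp_sock accessor, and foo_done_with_saved_syn() is hypothetical.

/* Hedged sketch: drop a saved SYN once it is no longer needed. */
static void foo_done_with_saved_syn(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->saved_syn)              /* only present when save_syn was set */
                tcp_saved_syn_free(tp); /* kfree()s and clears the pointer */
}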
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index fc52e30..037e9df 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -40,6 +40,9 @@
/* No upper/lower limit requirement */
#define THERMAL_NO_LIMIT ((u32)~0)
+/* Default weight of a bound cooling device */
+#define THERMAL_WEIGHT_DEFAULT 0
+
/* Unit conversion macros */
#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
((long)t-2732+5)/10 : ((long)t-2732-5)/10)
@@ -56,10 +59,13 @@
#define DEFAULT_THERMAL_GOVERNOR "fair_share"
#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE)
#define DEFAULT_THERMAL_GOVERNOR "user_space"
+#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR)
+#define DEFAULT_THERMAL_GOVERNOR "power_allocator"
#endif
struct thermal_zone_device;
struct thermal_cooling_device;
+struct thermal_instance;
enum thermal_device_mode {
THERMAL_DEVICE_DISABLED = 0,
@@ -113,6 +119,12 @@ struct thermal_cooling_device_ops {
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
+ int (*get_requested_power)(struct thermal_cooling_device *,
+ struct thermal_zone_device *, u32 *);
+ int (*state2power)(struct thermal_cooling_device *,
+ struct thermal_zone_device *, unsigned long, u32 *);
+ int (*power2state)(struct thermal_cooling_device *,
+ struct thermal_zone_device *, u32, unsigned long *);
};
struct thermal_cooling_device {
@@ -144,8 +156,7 @@ struct thermal_attr {
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
* @passive_delay: number of milliseconds to wait between polls when
- * performing passive cooling. Currenty only used by the
- * step-wise governor
+ * performing passive cooling.
* @polling_delay: number of milliseconds to wait between polls when
* checking whether trip points have been crossed (0 for
* interrupt driven systems)
@@ -155,13 +166,13 @@ struct thermal_attr {
* @last_temperature: previous temperature read
* @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION
* @passive: 1 if you've crossed a passive trip point, 0 otherwise.
- * Currenty only used by the step-wise governor.
* @forced_passive: If > 0, temperature at which to switch on all ACPI
* processor cooling devices. Currently only used by the
* step-wise governor.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
* @governor: pointer to the governor for this thermal zone
+ * @governor_data: private pointer for governor data
* @thermal_instances: list of &struct thermal_instance of this thermal zone
* @idr: &struct idr to generate unique id for this zone's cooling
* devices
@@ -186,8 +197,9 @@ struct thermal_zone_device {
int passive;
unsigned int forced_passive;
struct thermal_zone_device_ops *ops;
- const struct thermal_zone_params *tzp;
+ struct thermal_zone_params *tzp;
struct thermal_governor *governor;
+ void *governor_data;
struct list_head thermal_instances;
struct idr idr;
struct mutex lock;
@@ -198,12 +210,19 @@ struct thermal_zone_device {
/**
* struct thermal_governor - structure that holds thermal governor information
* @name: name of the governor
+ * @bind_to_tz: callback called when binding to a thermal zone. If it
+ * returns 0, the governor is bound to the thermal zone,
+ * otherwise it fails.
+ * @unbind_from_tz: callback called when a governor is unbound from a
+ * thermal zone.
* @throttle: callback called for every trip point even if temperature is
* below the trip point temperature
* @governor_list: node in thermal_governor_list (in thermal_core.c)
*/
struct thermal_governor {
char name[THERMAL_NAME_LENGTH];
+ int (*bind_to_tz)(struct thermal_zone_device *tz);
+ void (*unbind_from_tz)(struct thermal_zone_device *tz);
int (*throttle)(struct thermal_zone_device *tz, int trip);
struct list_head governor_list;
};
@@ -214,9 +233,12 @@ struct thermal_bind_params {
/*
* This is a measure of 'how effectively these devices can
- * cool 'this' thermal zone. The shall be determined by platform
- * characterization. This is on a 'percentage' scale.
- * See Documentation/thermal/sysfs-api.txt for more information.
+ * cool 'this' thermal zone. It shall be determined by
+ * platform characterization. This value is relative to the
+ * rest of the weights so a cooling device whose weight is
+ * double that of another cooling device is twice as
+ * effective. See Documentation/thermal/sysfs-api.txt for more
+ * information.
*/
int weight;
@@ -253,6 +275,44 @@ struct thermal_zone_params {
int num_tbps; /* Number of tbp entries */
struct thermal_bind_params *tbp;
+
+ /*
+ * Sustainable power (heat) that this thermal zone can dissipate in
+ * mW
+ */
+ u32 sustainable_power;
+
+ /*
+ * Proportional parameter of the PID controller when
+ * overshooting (i.e., when temperature is below the target)
+ */
+ s32 k_po;
+
+ /*
+ * Proportional parameter of the PID controller when
+ * undershooting
+ */
+ s32 k_pu;
+
+ /* Integral parameter of the PID controller */
+ s32 k_i;
+
+ /* Derivative parameter of the PID controller */
+ s32 k_d;
+
+ /* threshold below which the error is no longer accumulated */
+ s32 integral_cutoff;
+
+ /*
+ * @slope: slope of a linear temperature adjustment curve.
+ * Used by thermal zone drivers.
+ */
+ int slope;
+ /*
+ * @offset: offset of a linear temperature adjustment curve.
+ * Used by thermal zone drivers (default 0).
+ */
+ int offset;
};
struct thermal_genl_event {
@@ -314,14 +374,27 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
}
#endif
+
+#if IS_ENABLED(CONFIG_THERMAL)
+static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
+{
+ return cdev->ops->get_requested_power && cdev->ops->state2power &&
+ cdev->ops->power2state;
+}
+
+int power_actor_get_max_power(struct thermal_cooling_device *,
+ struct thermal_zone_device *tz, u32 *max_power);
+int power_actor_set_power(struct thermal_cooling_device *,
+ struct thermal_instance *, u32);
struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
+ struct thermal_zone_params *, int, int);
void thermal_zone_device_unregister(struct thermal_zone_device *);
int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *,
- unsigned long, unsigned long);
+ unsigned long, unsigned long,
+ unsigned int);
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *);
@@ -340,8 +413,66 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
struct thermal_cooling_device *, int);
void thermal_cdev_update(struct thermal_cooling_device *);
void thermal_notify_framework(struct thermal_zone_device *, int);
-
-#ifdef CONFIG_NET
+#else
+static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
+{ return false; }
+static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz, u32 *max_power)
+{ return 0; }
+static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
+ struct thermal_instance *tz, u32 power)
+{ return 0; }
+static inline struct thermal_zone_device *thermal_zone_device_register(
+ const char *type, int trips, int mask, void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_zone_device_unregister(
+ struct thermal_zone_device *tz)
+{ }
+static inline int thermal_zone_bind_cooling_device(
+ struct thermal_zone_device *tz, int trip,
+ struct thermal_cooling_device *cdev,
+ unsigned long upper, unsigned long lower)
+{ return -ENODEV; }
+static inline int thermal_zone_unbind_cooling_device(
+ struct thermal_zone_device *tz, int trip,
+ struct thermal_cooling_device *cdev)
+{ return -ENODEV; }
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz)
+{ }
+static inline struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline struct thermal_cooling_device *
+thermal_of_cooling_device_register(struct device_node *np,
+ char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cooling_device_unregister(
+ struct thermal_cooling_device *cdev)
+{ }
+static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
+ const char *name)
+{ return ERR_PTR(-ENODEV); }
+static inline int thermal_zone_get_temp(
+ struct thermal_zone_device *tz, unsigned long *temp)
+{ return -ENODEV; }
+static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
+{ return -ENODEV; }
+static inline struct thermal_instance *
+get_thermal_instance(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev, int trip)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
+{ }
+static inline void thermal_notify_framework(struct thermal_zone_device *tz,
+ int trip)
+{ }
+#endif /* CONFIG_THERMAL */
+
+#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL)
extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
enum events event);
#else
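A cooling device becomes a "power actor" (cdev_is_power_actor() returns true) once it provides all three of the new power callbacks. Below is a hedged sketch with a deliberately naive linear power model; every foo_* name and the milliwatt figures are invented for illustration, and the ordinary get/set state callbacks are omitted.

/* Hedged sketch: a hypothetical cooling device exposing the new
 * power-actor callbacks. The linear state<->power model is illustrative. */
#define FOO_MAX_STATE           4
#define FOO_MW_PER_STATE        250

static unsigned long foo_cur_state;     /* updated by the omitted set_cur_state */

static int foo_get_requested_power(struct thermal_cooling_device *cdev,
                                   struct thermal_zone_device *tz, u32 *power)
{
        *power = (FOO_MAX_STATE - foo_cur_state) * FOO_MW_PER_STATE;
        return 0;
}

static int foo_state2power(struct thermal_cooling_device *cdev,
                           struct thermal_zone_device *tz,
                           unsigned long state, u32 *power)
{
        *power = (FOO_MAX_STATE - state) * FOO_MW_PER_STATE;
        return 0;
}

static int foo_power2state(struct thermal_cooling_device *cdev,
                           struct thermal_zone_device *tz,
                           u32 power, unsigned long *state)
{
        *state = FOO_MAX_STATE - min_t(u32, power / FOO_MW_PER_STATE,
                                       FOO_MAX_STATE);
        return 0;
}

static const struct thermal_cooling_device_ops foo_cooling_ops = {
        /* .get_max_state / .get_cur_state / .set_cur_state omitted */
        .get_requested_power    = foo_get_requested_power,
        .state2power            = foo_state2power,
        .power2state            = foo_power2state,
};

When binding such a device, note that thermal_zone_bind_cooling_device() now also takes a weight argument; THERMAL_WEIGHT_DEFAULT is the value to pass when no platform-specific weighting is known.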
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 884d626..c78dcfe 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,6 +86,7 @@ struct st_proto_s {
extern long st_register(struct st_proto_s *);
extern long st_unregister(struct st_proto_s *);
+extern struct ti_st_plat_data *dt_pdata;
/*
* header information used by st_core.c
@@ -261,7 +262,7 @@ struct kim_data_s {
struct completion kim_rcvd, ldisc_installed;
char resp_buffer[30];
const struct firmware *fw_entry;
- long nshutdown;
+ unsigned nshutdown;
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
@@ -269,8 +270,8 @@ struct kim_data_s {
struct chip_version version;
unsigned char ldisc_install;
unsigned char dev_name[UART_DEV_NAME_LEN + 1];
- unsigned char flow_cntrl;
- unsigned long baud_rate;
+ unsigned flow_cntrl;
+ unsigned baud_rate;
};
/**
@@ -436,10 +437,10 @@ struct gps_event_hdr {
*
*/
struct ti_st_plat_data {
- long nshutdown_gpio;
+ u32 nshutdown_gpio;
unsigned char dev_name[UART_DEV_NAME_LEN]; /* uart name */
- unsigned char flow_cntrl; /* flow control flag */
- unsigned long baud_rate;
+ u32 flow_cntrl; /* flow control flag */
+ u32 baud_rate;
int (*suspend)(struct platform_device *, pm_message_t);
int (*resume)(struct platform_device *);
int (*chip_enable) (struct kim_data_s *);
diff --git a/include/linux/tick.h b/include/linux/tick.h
index eda850c..edbfc9a 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -1,7 +1,5 @@
-/* linux/include/linux/tick.h
- *
- * This file contains the structure definitions for tick related functions
- *
+/*
+ * Tick related global functions
*/
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H
@@ -9,144 +7,106 @@
#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
-#include <linux/hrtimer.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
-
-enum tick_device_mode {
- TICKDEV_MODE_PERIODIC,
- TICKDEV_MODE_ONESHOT,
-};
-
-struct tick_device {
- struct clock_event_device *evtdev;
- enum tick_device_mode mode;
-};
-
-enum tick_nohz_mode {
- NOHZ_MODE_INACTIVE,
- NOHZ_MODE_LOWRES,
- NOHZ_MODE_HIGHRES,
-};
-
-/**
- * struct tick_sched - sched tick emulation and no idle tick control/stats
- * @sched_timer: hrtimer to schedule the periodic tick in high
- * resolution mode
- * @last_tick: Store the last tick expiry time when the tick
- * timer is modified for nohz sleeps. This is necessary
- * to resume the tick timer operation in the timeline
- * when the CPU returns from nohz sleep.
- * @tick_stopped: Indicator that the idle tick has been stopped
- * @idle_jiffies: jiffies at the entry to idle for idle time accounting
- * @idle_calls: Total number of idle calls
- * @idle_sleeps: Number of idle calls, where the sched tick was stopped
- * @idle_entrytime: Time when the idle call was entered
- * @idle_waketime: Time when the idle was interrupted
- * @idle_exittime: Time when the idle state was left
- * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
- * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
- * @sleep_length: Duration of the current idle sleep
- * @do_timer_lst: CPU was the last one doing do_timer before going idle
- */
-struct tick_sched {
- struct hrtimer sched_timer;
- unsigned long check_clocks;
- enum tick_nohz_mode nohz_mode;
- ktime_t last_tick;
- int inidle;
- int tick_stopped;
- unsigned long idle_jiffies;
- unsigned long idle_calls;
- unsigned long idle_sleeps;
- int idle_active;
- ktime_t idle_entrytime;
- ktime_t idle_waketime;
- ktime_t idle_exittime;
- ktime_t idle_sleeptime;
- ktime_t iowait_sleeptime;
- ktime_t sleep_length;
- unsigned long last_jiffies;
- unsigned long next_jiffies;
- ktime_t idle_expires;
- int do_timer_last;
-};
-
extern void __init tick_init(void);
-extern int tick_is_oneshot_available(void);
-extern struct tick_device *tick_get_device(int cpu);
-
-# ifdef CONFIG_HIGH_RES_TIMERS
-extern int tick_init_highres(void);
-extern int tick_program_event(ktime_t expires, int force);
-extern void tick_setup_sched_timer(void);
-# endif
-
-# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
-extern void tick_cancel_sched_timer(int cpu);
-# else
-static inline void tick_cancel_sched_timer(int cpu) { }
-# endif
-
-# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-extern struct tick_device *tick_get_broadcast_device(void);
-extern struct cpumask *tick_get_broadcast_mask(void);
-
-# ifdef CONFIG_TICK_ONESHOT
-extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
-# endif
+/* Should be core only, but ARM BL switcher requires it */
+extern void tick_suspend_local(void);
+/* Should be core only, but XEN resume magic and ARM BL switcher require it */
+extern void tick_resume_local(void);
+extern void tick_handover_do_timer(void);
+extern void tick_cleanup_dead_cpu(int cpu);
+#else /* CONFIG_GENERIC_CLOCKEVENTS */
+static inline void tick_init(void) { }
+static inline void tick_suspend_local(void) { }
+static inline void tick_resume_local(void) { }
+static inline void tick_handover_do_timer(void) { }
+static inline void tick_cleanup_dead_cpu(int cpu) { }
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
-# endif /* BROADCAST */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
+extern void tick_freeze(void);
+extern void tick_unfreeze(void);
+#else
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
+#endif
-# ifdef CONFIG_TICK_ONESHOT
-extern void tick_clock_notify(void);
-extern int tick_check_oneshot_change(int allow_nohz);
-extern struct tick_sched *tick_get_tick_sched(int cpu);
+#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
-extern int tick_oneshot_mode_active(void);
# ifndef arch_needs_cpu
# define arch_needs_cpu() (0)
# endif
# else
-static inline void tick_clock_notify(void) { }
-static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_irq_enter(void) { }
-static inline int tick_oneshot_mode_active(void) { return 0; }
-# endif
+#endif
-#else /* CONFIG_GENERIC_CLOCKEVENTS */
-static inline void tick_init(void) { }
-static inline void tick_cancel_sched_timer(int cpu) { }
-static inline void tick_clock_notify(void) { }
-static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_irq_enter(void) { }
-static inline int tick_oneshot_mode_active(void) { return 0; }
-#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
+#else
+static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
+#endif
+
+enum tick_broadcast_mode {
+ TICK_BROADCAST_OFF,
+ TICK_BROADCAST_ON,
+ TICK_BROADCAST_FORCE,
+};
+
+enum tick_broadcast_state {
+ TICK_BROADCAST_EXIT,
+ TICK_BROADCAST_ENTER,
+};
-# ifdef CONFIG_NO_HZ_COMMON
-DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern void tick_broadcast_control(enum tick_broadcast_mode mode);
+#else
+static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
+#endif /* BROADCAST */
-static inline int tick_nohz_tick_stopped(void)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
- return __this_cpu_read(tick_cpu_sched.tick_stopped);
+ return 0;
}
+#endif
+static inline void tick_broadcast_enable(void)
+{
+ tick_broadcast_control(TICK_BROADCAST_ON);
+}
+static inline void tick_broadcast_disable(void)
+{
+ tick_broadcast_control(TICK_BROADCAST_OFF);
+}
+static inline void tick_broadcast_force(void)
+{
+ tick_broadcast_control(TICK_BROADCAST_FORCE);
+}
+static inline int tick_broadcast_enter(void)
+{
+ return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
+}
+static inline void tick_broadcast_exit(void)
+{
+ tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
+}
+
+#ifdef CONFIG_NO_HZ_COMMON
+extern int tick_nohz_tick_stopped(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
-
-# else /* !CONFIG_NO_HZ_COMMON */
-static inline int tick_nohz_tick_stopped(void)
-{
- return 0;
-}
-
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
@@ -158,7 +118,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
-# endif /* !CONFIG_NO_HZ_COMMON */
+#endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
@@ -181,6 +141,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
+static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
+{
+ if (tick_nohz_full_enabled())
+ cpumask_or(mask, mask, tick_nohz_full_mask);
+}
+
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
@@ -189,6 +155,7 @@ extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
+static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
@@ -226,5 +193,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk)
__tick_nohz_task_switch(tsk);
}
-
#endif
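The broadcast helpers are meant to bracket idle states that stop the CPU-local timer: tick_broadcast_enter() hands pending wakeups to the broadcast device and tick_broadcast_exit() takes them back. A hedged sketch from a hypothetical cpuidle-style enter path; foo_low_level_idle() is invented.

/* Hedged sketch: deep idle entry that loses the local timer. */
static int foo_enter_deep_idle(void)
{
        if (tick_broadcast_enter())
                return -EBUSY;          /* no broadcast device, stay shallow */

        foo_low_level_idle();           /* hypothetical low-level idle entry */

        tick_broadcast_exit();
        return 0;
}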
diff --git a/include/linux/time64.h b/include/linux/time64.h
index a383147..77b5df2 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -2,6 +2,7 @@
#define _LINUX_TIME64_H
#include <uapi/linux/time.h>
+#include <linux/math64.h>
typedef __s64 time64_t;
@@ -28,6 +29,7 @@ struct timespec64 {
#define FSEC_PER_SEC 1000000000000000LL
/* Located here for timespec[64]_valid_strict */
+#define TIME64_MAX ((s64)~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
new file mode 100644
index 0000000..4382035
--- /dev/null
+++ b/include/linux/timecounter.h
@@ -0,0 +1,139 @@
+/*
+ * linux/include/linux/timecounter.h
+ *
+ * based on code that migrated away from
+ * linux/include/linux/clocksource.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_TIMECOUNTER_H
+#define _LINUX_TIMECOUNTER_H
+
+#include <linux/types.h>
+
+/* simplify initialization of mask field */
+#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+
+/**
+ * struct cyclecounter - hardware abstraction for a free running counter
+ * Provides completely state-free accessors to the underlying hardware.
+ * Depending on which hardware it reads, the cycle counter may wrap
+ * around quickly. Locking rules (if necessary) have to be defined
+ * by the implementor and user of specific instances of this API.
+ *
+ * @read: returns the current cycle value
+ * @mask: bitmask for two's complement
+ * subtraction of non 64 bit counters,
+ * see CYCLECOUNTER_MASK() helper macro
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ */
+struct cyclecounter {
+ cycle_t (*read)(const struct cyclecounter *cc);
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+};
+
+/**
+ * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ * Contains the state needed by timecounter_read() to detect
+ * cycle counter wrap around. Initialize with
+ * timecounter_init(). Also used to convert cycle counts into the
+ * corresponding nanosecond counts with timecounter_cyc2time(). Users
+ * of this code are responsible for initializing the underlying
+ * cycle counter hardware, locking issues and reading the time
+ * more often than the cycle counter wraps around. The nanosecond
+ * counter will only wrap around after ~585 years.
+ *
+ * @cc: the cycle counter used by this instance
+ * @cycle_last: most recent cycle counter value seen by
+ * timecounter_read()
+ * @nsec: continuously increasing count
+ * @mask: bit mask for maintaining the 'frac' field
+ * @frac: accumulated fractional nanoseconds
+ */
+struct timecounter {
+ const struct cyclecounter *cc;
+ cycle_t cycle_last;
+ u64 nsec;
+ u64 mask;
+ u64 frac;
+};
+
+/**
+ * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
+ * @cc: Pointer to cycle counter.
+ * @cycles: Cycles
+ * @mask: bit mask for maintaining the 'frac' field
+ * @frac: pointer to storage for the fractional nanoseconds.
+ */
+static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+ cycle_t cycles, u64 mask, u64 *frac)
+{
+ u64 ns = (u64) cycles;
+
+ ns = (ns * cc->mult) + *frac;
+ *frac = ns & mask;
+ return ns >> cc->shift;
+}
+
+/**
+ * timecounter_adjtime - Shifts the time of the clock.
+ * @delta: Desired change in nanoseconds.
+ */
+static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+ tc->nsec += delta;
+}
+
+/**
+ * timecounter_init - initialize a time counter
+ * @tc: Pointer to time counter which is to be initialized/reset
+ * @cc: A cycle counter, ready to be used.
+ * @start_tstamp: Arbitrary initial time stamp.
+ *
+ * After this call the current cycle register (roughly) corresponds to
+ * the initial time stamp. Every call to timecounter_read() increments
+ * the time stamp counter by the number of elapsed nanoseconds.
+ */
+extern void timecounter_init(struct timecounter *tc,
+ const struct cyclecounter *cc,
+ u64 start_tstamp);
+
+/**
+ * timecounter_read - return nanoseconds elapsed since timecounter_init()
+ * plus the initial time stamp
+ * @tc: Pointer to time counter.
+ *
+ * In other words, keeps track of time since the same epoch as
+ * the function which generated the initial time stamp.
+ */
+extern u64 timecounter_read(struct timecounter *tc);
+
+/**
+ * timecounter_cyc2time - convert a cycle counter to same
+ * time base as values returned by
+ * timecounter_read()
+ * @tc: Pointer to time counter.
+ * @cycle_tstamp: a value returned by tc->cc->read()
+ *
+ * Cycle counts are converted correctly as long as they
+ * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
+ * with "max cycle count" == cs->mask+1.
+ *
+ * This allows conversion of cycle counter values which were generated
+ * in the past.
+ */
+extern u64 timecounter_cyc2time(struct timecounter *tc,
+ cycle_t cycle_tstamp);
+
+#endif
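A hedged usage sketch for the relocated API: a driver wraps its free-running hardware counter in a cyclecounter, then lets a timecounter turn raw cycle stamps into nanoseconds. foo_read_hw_counter() and the mult/shift pair (8 ns per cycle, i.e. a 125 MHz counter) are assumptions for illustration; ktime_get_real_ns() is only used as a convenient start timestamp.

/* Hedged sketch: hardware timestamping with cyclecounter + timecounter. */
static u64 foo_read_hw_counter(void);   /* hypothetical register read */

static cycle_t foo_cc_read(const struct cyclecounter *cc)
{
        return foo_read_hw_counter();
}

static const struct cyclecounter foo_cc = {
        .read   = foo_cc_read,
        .mask   = CYCLECOUNTER_MASK(48),        /* 48-bit hardware counter */
        .mult   = 8,                            /* illustrative: 8 ns/cycle */
        .shift  = 0,
};

static struct timecounter foo_tc;

static void foo_timecounter_setup(void)
{
        timecounter_init(&foo_tc, &foo_cc, ktime_get_real_ns());
}

static u64 foo_hw_timestamp_to_ns(cycle_t hw_stamp)
{
        return timecounter_cyc2time(&foo_tc, hw_stamp);
}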
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 05af9a3..2524722 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -16,16 +16,16 @@
* @read: Read function of @clock
* @mask: Bitmask for two's complement subtraction of non 64bit clocks
* @cycle_last: @clock cycle value at last update
- * @mult: NTP adjusted multiplier for scaled math conversion
+ * @mult: (NTP adjusted) multiplier for scaled math conversion
* @shift: Shift value for scaled math conversion
* @xtime_nsec: Shifted (fractional) nano seconds offset for readout
- * @base_mono: ktime_t (nanoseconds) base time for readout
+ * @base: ktime_t (nanoseconds) base time for readout
*
* This struct has size 56 byte on 64 bit. Together with a seqcount it
* occupies a single 64byte cache line.
*
* The struct is separate from struct timekeeper as it is also used
- * for a fast NMI safe accessor to clock monotonic.
+ * for the fast NMI safe accessors.
*/
struct tk_read_base {
struct clocksource *clock;
@@ -35,12 +35,13 @@ struct tk_read_base {
u32 mult;
u32 shift;
u64 xtime_nsec;
- ktime_t base_mono;
+ ktime_t base;
};
/**
* struct timekeeper - Structure holding internal timekeeping values.
- * @tkr: The readout base structure
+ * @tkr_mono: The readout base structure for CLOCK_MONOTONIC
+ * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
* @xtime_sec: Current CLOCK_REALTIME time in seconds
* @ktime_sec: Current CLOCK_MONOTONIC time in seconds
* @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
@@ -48,7 +49,8 @@ struct tk_read_base {
* @offs_boot: Offset clock monotonic -> clock boottime
* @offs_tai: Offset clock monotonic -> clock tai
* @tai_offset: The current UTC to TAI offset in seconds
- * @base_raw: Monotonic raw base time in ktime_t format
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
* @raw_time: Monotonic raw base time in timespec64 format
* @cycle_interval: Number of clock cycles in one NTP interval
* @xtime_interval: Number of clock shifted nano seconds in one NTP
@@ -60,6 +62,9 @@ struct tk_read_base {
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
* ntp shifted nano seconds.
+ * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING)
+ * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING)
+ * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
* we need to add to xtime (or xtime corrected for sub jiffie times)
@@ -76,7 +81,8 @@ struct tk_read_base {
* used instead.
*/
struct timekeeper {
- struct tk_read_base tkr;
+ struct tk_read_base tkr_mono;
+ struct tk_read_base tkr_raw;
u64 xtime_sec;
unsigned long ktime_sec;
struct timespec64 wall_to_monotonic;
@@ -84,7 +90,8 @@ struct timekeeper {
ktime_t offs_boot;
ktime_t offs_tai;
s32 tai_offset;
- ktime_t base_raw;
+ unsigned int clock_was_set_seq;
+ ktime_t next_leap_ktime;
struct timespec64 raw_time;
/* The following members are for timekeeping internal use */
@@ -104,6 +111,18 @@ struct timekeeper {
s64 ntp_error;
u32 ntp_error_shift;
u32 ntp_err_mult;
+#ifdef CONFIG_DEBUG_TIMEKEEPING
+ long last_warning;
+ /*
+ * These simple flag variables are managed
+ * without locks, which is racy, but they are
+ * ok since we don't really care about being
+ * super precise about how many events were
+ * seen, just that a problem was observed.
+ */
+ int underflow_seen;
+ int overflow_seen;
+#endif
};
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 9b63d13..6e191e4 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -33,6 +33,7 @@ extern time64_t ktime_get_real_seconds(void);
extern int __getnstimeofday64(struct timespec64 *tv);
extern void getnstimeofday64(struct timespec64 *tv);
+extern void getboottime64(struct timespec64 *ts);
#if BITS_PER_LONG == 64
/**
@@ -72,6 +73,11 @@ static inline struct timespec get_monotonic_coarse(void)
{
return get_monotonic_coarse64();
}
+
+static inline void getboottime(struct timespec *ts)
+{
+ return getboottime64(ts);
+}
#else
/**
* Deprecated. Use do_settimeofday64().
@@ -129,11 +135,16 @@ static inline struct timespec get_monotonic_coarse(void)
{
return timespec64_to_timespec(get_monotonic_coarse64());
}
-#endif
-extern void getboottime(struct timespec *ts);
+static inline void getboottime(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getboottime64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+#endif
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
/*
@@ -151,6 +162,7 @@ extern ktime_t ktime_get(void);
extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
extern ktime_t ktime_get_raw(void);
+extern u32 ktime_get_resolution_ns(void);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
@@ -202,12 +214,18 @@ static inline u64 ktime_get_boot_ns(void)
return ktime_to_ns(ktime_get_boottime());
}
+static inline u64 ktime_get_tai_ns(void)
+{
+ return ktime_to_ns(ktime_get_clocktai());
+}
+
static inline u64 ktime_get_raw_ns(void)
{
return ktime_to_ns(ktime_get_raw());
}
extern u64 ktime_get_mono_fast_ns(void);
+extern u64 ktime_get_raw_fast_ns(void);
/*
* Timespec interfaces utilizing the ktime based ones
@@ -217,6 +235,11 @@ static inline void get_monotonic_boottime(struct timespec *ts)
*ts = ktime_to_timespec(ktime_get_boottime());
}
+static inline void get_monotonic_boottime64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_boottime());
+}
+
static inline void timekeeping_clocktai(struct timespec *ts)
{
*ts = ktime_to_timespec(ktime_get_clocktai());
@@ -225,6 +248,9 @@ static inline void timekeeping_clocktai(struct timespec *ts)
/*
* RTC specific
*/
+extern bool timekeeping_rtc_skipsuspend(void);
+extern bool timekeeping_rtc_skipresume(void);
+
extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
/*
@@ -236,17 +262,13 @@ extern void getnstime_raw_and_real(struct timespec *ts_raw,
/*
* Persistent clock related interfaces
*/
-extern bool persistent_clock_exist;
extern int persistent_clock_is_local;
-static inline bool has_persistent_clock(void)
-{
- return persistent_clock_exist;
-}
-
extern void read_persistent_clock(struct timespec *ts);
-extern void read_boot_clock(struct timespec *ts);
+extern void read_persistent_clock64(struct timespec64 *ts);
+extern void read_boot_clock64(struct timespec64 *ts);
extern int update_persistent_clock(struct timespec now);
+extern int update_persistent_clock64(struct timespec64 now);
#endif
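The 64-bit persistent-clock and sleeptime helpers above fit together roughly as sketched below for an RTC driver's suspend/resume path; the foo_* functions and the stored foo_sleep_start are hypothetical, and timekeeping_rtc_skipresume() is checked so the sleep delta is not injected twice.

/* Hedged sketch: account suspend time via the 64-bit persistent clock. */
static struct timespec64 foo_sleep_start;

static void foo_rtc_suspend(void)
{
        read_persistent_clock64(&foo_sleep_start);
}

static void foo_rtc_resume(void)
{
        struct timespec64 now, delta;

        if (timekeeping_rtc_skipresume())
                return;                 /* core already accounted the sleep */

        read_persistent_clock64(&now);
        delta = timespec64_sub(now, foo_sleep_start);
        timekeeping_inject_sleeptime64(&delta);
}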
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 8c5a197..61aa61d 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -14,27 +14,23 @@ struct timer_list {
* All fields that change during normal runtime grouped to the
* same cacheline
*/
- struct list_head entry;
- unsigned long expires;
- struct tvec_base *base;
-
- void (*function)(unsigned long);
- unsigned long data;
-
- int slack;
+ struct hlist_node entry;
+ unsigned long expires;
+ void (*function)(unsigned long);
+ unsigned long data;
+ u32 flags;
+ int slack;
#ifdef CONFIG_TIMER_STATS
- int start_pid;
- void *start_site;
- char start_comm[16];
+ int start_pid;
+ void *start_site;
+ char start_comm[16];
#endif
#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
+ struct lockdep_map lockdep_map;
#endif
};
-extern struct tvec_base boot_tvec_bases;
-
#ifdef CONFIG_LOCKDEP
/*
* NB: because we have to copy the lockdep_map, setting the lockdep_map key
@@ -49,9 +45,6 @@ extern struct tvec_base boot_tvec_bases;
#endif
/*
- * Note that all tvec_bases are at least 4 byte aligned and lower two bits
- * of base in timer_list is guaranteed to be zero. Use them for flags.
- *
* A deferrable timer will work normally when the system is busy, but
* will not cause a CPU to come out of idle just to service it; instead,
* the timer will be serviced when the CPU eventually wakes up with a
@@ -65,17 +58,18 @@ extern struct tvec_base boot_tvec_bases;
* workqueue locking issues. It's not meant for executing random crap
* with interrupts disabled. Abuse is monitored!
*/
-#define TIMER_DEFERRABLE 0x1LU
-#define TIMER_IRQSAFE 0x2LU
-
-#define TIMER_FLAG_MASK 0x3LU
+#define TIMER_CPUMASK 0x0007FFFF
+#define TIMER_MIGRATING 0x00080000
+#define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING)
+#define TIMER_DEFERRABLE 0x00100000
+#define TIMER_IRQSAFE 0x00200000
#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
- .entry = { .prev = TIMER_ENTRY_STATIC }, \
+ .entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.expires = (_expires), \
.data = (_data), \
- .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
+ .flags = (_flags), \
.slack = -1, \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
@@ -168,7 +162,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
*/
static inline int timer_pending(const struct timer_list * timer)
{
- return timer->entry.next != NULL;
+ return timer->entry.pprev != NULL;
}
extern void add_timer_on(struct timer_list *timer, int cpu);
@@ -188,26 +182,16 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
/*
- * Return when the next timer-wheel timeout occurs (in absolute jiffies),
- * locks the timer base and does the comparison against the given
- * jiffie.
- */
-extern unsigned long get_next_timer_interrupt(unsigned long now);
-
-/*
* Timer-statistics info:
*/
#ifdef CONFIG_TIMER_STATS
extern int timer_stats_active;
-#define TIMER_STATS_FLAG_DEFERRABLE 0x1
-
extern void init_timer_stats(void);
extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm,
- unsigned int timer_flag);
+ void *timerf, char *comm, u32 flags);
extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
void *addr);
@@ -254,6 +238,15 @@ extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#include <linux/sysctl.h>
+
+extern unsigned int sysctl_timer_migration;
+int timer_migration_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
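With the flags now stored directly in the timer (the low TIMER_CPUMASK bits carry the base CPU), a deferrable timer can be set up statically via the reworked __TIMER_INITIALIZER(). A hedged sketch; foo_timeout() and the one-second period are illustrative.

/* Hedged sketch: a statically initialized deferrable timer. */
static void foo_timeout(unsigned long data);    /* hypothetical callback */

static struct timer_list foo_timer =
        __TIMER_INITIALIZER(foo_timeout, 0, 0, TIMER_DEFERRABLE);

static void foo_arm(void)
{
        /* fire roughly one second from now; may be deferred while idle */
        mod_timer(&foo_timer, jiffies + HZ);
}

static void foo_timeout(unsigned long data)
{
        /* timer->flags also encodes the base CPU in the TIMER_CPUMASK bits */
}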
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index a520fd7..7eec17a 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -16,10 +16,10 @@ struct timerqueue_head {
};
-extern void timerqueue_add(struct timerqueue_head *head,
- struct timerqueue_node *node);
-extern void timerqueue_del(struct timerqueue_head *head,
- struct timerqueue_node *node);
+extern bool timerqueue_add(struct timerqueue_head *head,
+ struct timerqueue_node *node);
+extern bool timerqueue_del(struct timerqueue_head *head,
+ struct timerqueue_node *node);
extern struct timerqueue_node *timerqueue_iterate_next(
struct timerqueue_node *node);
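The switch to a bool return lets callers skip reprogramming when the head of the queue did not change. A hedged sketch; foo_program_event() is a hypothetical hardware reprogram helper.

/* Hedged sketch: only touch the hardware when the earliest deadline moved. */
static void foo_program_event(ktime_t expires); /* hypothetical */

static void foo_enqueue(struct timerqueue_head *head,
                        struct timerqueue_node *node)
{
        if (timerqueue_add(head, node))
                foo_program_event(node->expires); /* new earliest deadline */
}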
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 909b6e4..73ddad1 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -191,8 +191,8 @@ static inline int cpu_to_mem(int cpu)
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
-#ifndef topology_thread_cpumask
-#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#ifndef topology_sibling_cpumask
+#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
@@ -201,7 +201,7 @@ static inline int cpu_to_mem(int cpu)
#ifdef CONFIG_SCHED_SMT
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
- return topology_thread_cpumask(cpu);
+ return topology_sibling_cpumask(cpu);
}
#endif
diff --git a/include/linux/ftrace_event.h b/include/linux/trace_events.h
index 0bebb5c..1063c85 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/trace_events.h
@@ -1,6 +1,6 @@
-#ifndef _LINUX_FTRACE_EVENT_H
-#define _LINUX_FTRACE_EVENT_H
+#ifndef _LINUX_TRACE_EVENT_H
+#define _LINUX_TRACE_EVENT_H
#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
@@ -13,6 +13,7 @@ struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
+struct bpf_prog;
struct trace_print_flags {
unsigned long mask;
@@ -24,31 +25,35 @@ struct trace_print_flags_u64 {
const char *name;
};
-const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
- unsigned long flags,
- const struct trace_print_flags *flag_array);
+const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
+ unsigned long flags,
+ const struct trace_print_flags *flag_array);
-const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
- const struct trace_print_flags *symbol_array);
+const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+ const struct trace_print_flags *symbol_array);
#if BITS_PER_LONG == 32
-const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
- unsigned long long val,
- const struct trace_print_flags_u64
+const char *trace_print_symbols_seq_u64(struct trace_seq *p,
+ unsigned long long val,
+ const struct trace_print_flags_u64
*symbol_array);
#endif
-const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
- unsigned int bitmask_size);
+const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+ unsigned int bitmask_size);
+
+const char *trace_print_hex_seq(struct trace_seq *p,
+ const unsigned char *buf, int len);
-const char *ftrace_print_hex_seq(struct trace_seq *p,
- const unsigned char *buf, int len);
+const char *trace_print_array_seq(struct trace_seq *p,
+ const void *buf, int count,
+ size_t el_size);
struct trace_iterator;
struct trace_event;
-int ftrace_raw_output_prep(struct trace_iterator *iter,
- struct trace_event *event);
+int trace_raw_output_prep(struct trace_iterator *iter,
+ struct trace_event *event);
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -63,7 +68,7 @@ struct trace_entry {
int pid;
};
-#define FTRACE_MAX_EVENT \
+#define TRACE_EVENT_TYPE_MAX \
((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
/*
@@ -127,8 +132,8 @@ struct trace_event {
struct trace_event_functions *funcs;
};
-extern int register_ftrace_event(struct trace_event *event);
-extern int unregister_ftrace_event(struct trace_event *event);
+extern int register_trace_event(struct trace_event *event);
+extern int unregister_trace_event(struct trace_event *event);
/* Return values for print_line callback */
enum print_line_t {
@@ -152,11 +157,11 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s)
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
-struct ftrace_event_file;
+struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc);
struct ring_buffer_event *
@@ -178,7 +183,7 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
void tracing_record_cmdline(struct task_struct *tsk);
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
struct event_filter;
@@ -195,50 +200,39 @@ enum trace_reg {
#endif
};
-struct ftrace_event_call;
+struct trace_event_call;
-struct ftrace_event_class {
- char *system;
+struct trace_event_class {
+ const char *system;
void *probe;
#ifdef CONFIG_PERF_EVENTS
void *perf_probe;
#endif
- int (*reg)(struct ftrace_event_call *event,
+ int (*reg)(struct trace_event_call *event,
enum trace_reg type, void *data);
- int (*define_fields)(struct ftrace_event_call *);
- struct list_head *(*get_fields)(struct ftrace_event_call *);
+ int (*define_fields)(struct trace_event_call *);
+ struct list_head *(*get_fields)(struct trace_event_call *);
struct list_head fields;
- int (*raw_init)(struct ftrace_event_call *);
+ int (*raw_init)(struct trace_event_call *);
};
-extern int ftrace_event_reg(struct ftrace_event_call *event,
+extern int trace_event_reg(struct trace_event_call *event,
enum trace_reg type, void *data);
-int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
- char *fmt, ...);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
- char *type, int len, char *item, int offset,
- int field_size, int sign, int filter);
-
-struct ftrace_event_buffer {
+struct trace_event_buffer {
struct ring_buffer *buffer;
struct ring_buffer_event *event;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
void *entry;
unsigned long flags;
int pc;
};
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
- struct ftrace_event_file *ftrace_file,
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+ struct trace_event_file *trace_file,
unsigned long len);
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
- char *type, int len, char *item, int offset,
- int field_size, int sign, int filter);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
enum {
TRACE_EVENT_FL_FILTERED_BIT,
@@ -248,6 +242,7 @@ enum {
TRACE_EVENT_FL_WAS_ENABLED_BIT,
TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
+ TRACE_EVENT_FL_KPROBE_BIT,
};
/*
@@ -255,12 +250,13 @@ enum {
* FILTERED - The event has a filter attached
* CAP_ANY - Any user can enable for perf
* NO_SET_FILTER - Set when filter has error and is to be ignored
- * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
* WAS_ENABLED - Set and stays set when an event was ever enabled
* (used for module unloading, if a module event is enabled,
* it is best to clear the buffers that used it).
- * USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ * USE_CALL_FILTER - For trace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
+ * KPROBE - Event is a kprobe
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -270,18 +266,19 @@ enum {
TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
+ TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
};
-struct ftrace_event_call {
+struct trace_event_call {
struct list_head list;
- struct ftrace_event_class *class;
+ struct trace_event_class *class;
union {
char *name;
/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
struct tracepoint *tp;
};
struct trace_event event;
- const char *print_fmt;
+ char *print_fmt;
struct event_filter *filter;
void *mod;
void *data;
@@ -289,7 +286,7 @@ struct ftrace_event_call {
* bit 0: filter_active
* bit 1: allow trace by non root (cap any)
* bit 2: failed to apply filter
- * bit 3: ftrace internal event (do not enable)
+ * bit 3: trace internal event (do not enable)
* bit 4: Event was enabled by module
* bit 5: use call filter rather than file filter
* bit 6: Event is a tracepoint
@@ -299,14 +296,15 @@ struct ftrace_event_call {
#ifdef CONFIG_PERF_EVENTS
int perf_refcount;
struct hlist_head __percpu *perf_events;
+ struct bpf_prog *prog;
- int (*perf_perm)(struct ftrace_event_call *,
+ int (*perf_perm)(struct trace_event_call *,
struct perf_event *);
#endif
};
static inline const char *
-ftrace_event_name(struct ftrace_event_call *call)
+trace_event_name(struct trace_event_call *call)
{
if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
return call->tp ? call->tp->name : NULL;
@@ -315,21 +313,21 @@ ftrace_event_name(struct ftrace_event_call *call)
}
struct trace_array;
-struct ftrace_subsystem_dir;
+struct trace_subsystem_dir;
enum {
- FTRACE_EVENT_FL_ENABLED_BIT,
- FTRACE_EVENT_FL_RECORDED_CMD_BIT,
- FTRACE_EVENT_FL_FILTERED_BIT,
- FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
- FTRACE_EVENT_FL_SOFT_MODE_BIT,
- FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
- FTRACE_EVENT_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_ENABLED_BIT,
+ EVENT_FILE_FL_RECORDED_CMD_BIT,
+ EVENT_FILE_FL_FILTERED_BIT,
+ EVENT_FILE_FL_NO_SET_FILTER_BIT,
+ EVENT_FILE_FL_SOFT_MODE_BIT,
+ EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ EVENT_FILE_FL_TRIGGER_MODE_BIT,
+ EVENT_FILE_FL_TRIGGER_COND_BIT,
};
/*
- * Ftrace event file flags:
+ * Event file flags:
* ENABLED - The event is enabled
* RECORDED_CMD - The comms should be recorded at sched_switch
* FILTERED - The event has a filter attached
@@ -341,23 +339,23 @@ enum {
* TRIGGER_COND - When set, one or more triggers has an associated filter
*/
enum {
- FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
- FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
- FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
- FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
- FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
- FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
- FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
- FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
+ EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
+ EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
+ EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
+ EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
+ EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
+ EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
+ EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
};
-struct ftrace_event_file {
+struct trace_event_file {
struct list_head list;
- struct ftrace_event_call *event_call;
+ struct trace_event_call *event_call;
struct event_filter *filter;
struct dentry *dir;
struct trace_array *tr;
- struct ftrace_subsystem_dir *system;
+ struct trace_subsystem_dir *system;
struct list_head triggers;
/*
@@ -390,7 +388,7 @@ struct ftrace_event_file {
early_initcall(trace_init_flags_##name);
#define __TRACE_EVENT_PERF_PERM(name, expr...) \
- static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+ static int perf_perm_##name(struct trace_event_call *tp_event, \
struct perf_event *p_event) \
{ \
return ({ expr; }); \
@@ -416,19 +414,19 @@ enum event_trigger_type {
extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+extern int filter_check_discard(struct trace_event_file *file, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event);
-extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
-extern void event_triggers_post_call(struct ftrace_event_file *file,
+extern void event_triggers_post_call(struct trace_event_file *file,
enum event_trigger_type tt);
/**
- * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
+ * trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
*
* If any triggers without filters are attached to this event, they
@@ -437,14 +435,14 @@ extern void event_triggers_post_call(struct ftrace_event_file *file,
* otherwise false.
*/
static inline bool
-ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;
- if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
event_triggers_call(file, NULL);
- if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
return true;
}
return false;
@@ -464,7 +462,7 @@ ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
* Returns true if the event is discarded, false otherwise.
*/
static inline bool
-__event_trigger_test_discard(struct ftrace_event_file *file,
+__event_trigger_test_discard(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry,
@@ -472,10 +470,10 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
{
unsigned long eflags = file->flags;
- if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ if (eflags & EVENT_FILE_FL_TRIGGER_COND)
*tt = event_triggers_call(file, entry);
- if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
+ if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
ring_buffer_discard_commit(buffer, event);
else if (!filter_check_discard(file, entry, buffer, event))
return false;
@@ -497,7 +495,7 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
* if the event is soft disabled and should be discarded.
*/
static inline void
-event_trigger_unlock_commit(struct ftrace_event_file *file,
+event_trigger_unlock_commit(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc)
@@ -528,7 +526,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file,
* trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
*/
static inline void
-event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc,
@@ -544,6 +542,15 @@ event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
event_triggers_post_call(file, tt);
}
+#ifdef CONFIG_BPF_SYSCALL
+unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
+#else
+static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
+{
+ return 1;
+}
+#endif
+
enum {
FILTER_OTHER = 0,
FILTER_STATIC_STRING,
@@ -552,12 +559,12 @@ enum {
FILTER_TRACE_FN,
};
-extern int trace_event_raw_init(struct ftrace_event_call *call);
-extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+extern int trace_event_raw_init(struct trace_event_call *call);
+extern int trace_define_field(struct trace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
-extern int trace_add_event_call(struct ftrace_event_call *call);
-extern int trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_add_event_call(struct trace_event_call *call);
+extern int trace_remove_event_call(struct trace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
@@ -595,7 +602,7 @@ extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
- struct pt_regs *regs, int *rctxp);
+ struct pt_regs **regs, int *rctxp);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
@@ -606,4 +613,4 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
}
#endif
-#endif /* _LINUX_FTRACE_EVENT_H */
+#endif /* _LINUX_TRACE_EVENT_H */
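Editorial note: a sketch of the renamed API as an event probe would use it, built only from the declarations above — test for soft-disable, reserve space through trace_event_buffer_reserve(), then commit. The payload layout is left out because it is event-specific.

static void emit_sample_event(struct trace_event_file *trace_file)
{
	struct trace_event_buffer fbuffer;
	void *entry;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(unsigned long));
	if (!entry)
		return;

	/* ... fill in the event payload here ... */

	trace_event_buffer_commit(&fbuffer);
}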
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
new file mode 100644
index 0000000..5b727a1
--- /dev/null
+++ b/include/linux/tracefs.h
@@ -0,0 +1,45 @@
+/*
+ * tracefs.h - a pseudo file system for activating tracing
+ *
+ * Based on debugfs by: 2004 Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt <srostedt@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * tracefs is the file system that is used by the tracing infrastructure.
+ *
+ */
+
+#ifndef _TRACEFS_H_
+#define _TRACEFS_H_
+
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <linux/types.h>
+
+struct file_operations;
+
+#ifdef CONFIG_TRACING
+
+struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+struct dentry *tracefs_create_dir(const char *name, struct dentry *parent);
+
+void tracefs_remove(struct dentry *dentry);
+void tracefs_remove_recursive(struct dentry *dentry);
+
+struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
+ int (*mkdir)(const char *name),
+ int (*rmdir)(const char *name));
+
+bool tracefs_initialized(void);
+
+#endif /* CONFIG_TRACING */
+
+#endif
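Editorial note: usage of the new tracefs API mirrors debugfs. A minimal sketch, assuming a caller-supplied my_fops; the directory and file names are illustrative.

#include <linux/tracefs.h>
#include <linux/errno.h>
#include <linux/init.h>

static const struct file_operations my_fops;	/* hypothetical */
static struct dentry *my_dir;

static int __init my_tracer_init(void)
{
	my_dir = tracefs_create_dir("my_tracer", NULL);
	if (!my_dir)
		return -ENOMEM;

	if (!tracefs_create_file("stats", 0444, my_dir, NULL, &my_fops)) {
		tracefs_remove(my_dir);
		return -ENOMEM;
	}
	return 0;
}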
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index e08e21e..a5f7f3e 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -36,6 +36,12 @@ struct tracepoint {
struct tracepoint_func __rcu *funcs;
};
+struct trace_enum_map {
+ const char *system;
+ const char *enum_string;
+ unsigned long enum_value;
+};
+
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
@@ -87,6 +93,8 @@ extern void syscall_unregfunc(void);
#define PARAMS(args...) args
+#define TRACE_DEFINE_ENUM(x)
+
#endif /* _LINUX_TRACEPOINT_H */
/*
@@ -173,7 +181,7 @@ extern void syscall_unregfunc(void);
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond),,); \
- if (IS_ENABLED(CONFIG_LOCKDEP)) { \
+ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
rcu_read_lock_sched_notrace(); \
rcu_dereference_sched(__tracepoint_##name.funcs);\
rcu_read_unlock_sched_notrace(); \
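Editorial note: TRACE_DEFINE_ENUM() is a no-op in the stub added above; in trace-event headers it is expected to export enum values (via struct trace_enum_map) so that names used in print format strings can be resolved to numbers. A hedged sketch of the usage pattern; MY_STATE_* and the surrounding event are hypothetical.

/* In a trace-event header, before the event that prints the enum: */
TRACE_DEFINE_ENUM(MY_STATE_RUNNING);
TRACE_DEFINE_ENUM(MY_STATE_BLOCKED);

/*
 * ...so that a print format such as
 *	__print_symbolic(__entry->state,
 *			 { MY_STATE_RUNNING, "running" },
 *			 { MY_STATE_BLOCKED, "blocked" })
 * can be resolved to numeric values in the exported format file.
 */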
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7d66ae5..ad6c891 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -14,6 +14,29 @@
#include <linux/llist.h>
+/*
+ * Lock subclasses for tty locks
+ *
+ * TTY_LOCK_NORMAL is for normal ttys and master ptys.
+ * TTY_LOCK_SLAVE is for slave ptys only.
+ *
+ * Lock subclasses are necessary for handling nested locking with pty pairs.
+ * tty locks which use nested locking:
+ *
+ * legacy_mutex - Nested tty locks are necessary for releasing pty pairs.
+ * The stable lock order is master pty first, then slave pty.
+ * termios_rwsem - The stable lock order is tty_buffer lock->termios_rwsem.
+ * Subclassing this lock enables the slave pty to hold its
+ * termios_rwsem when claiming the master tty_buffer lock.
+ * tty_buffer lock - slave ptys can claim nested buffer lock when handling
+ * signal chars. The stable lock order is slave pty, then
+ * master.
+ */
+
+enum {
+ TTY_LOCK_NORMAL = 0,
+ TTY_LOCK_SLAVE,
+};
/*
* (Note: the *_driver.minor_start values 1, 64, 128, 192 are
@@ -316,6 +339,7 @@ struct tty_file_private {
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DEBUG 4 /* Debugging */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
+#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
@@ -398,7 +422,7 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
const char *routine);
-extern char *tty_name(struct tty_struct *tty, char *buf);
+extern const char *tty_name(const struct tty_struct *tty);
extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
extern int tty_check_change(struct tty_struct *tty);
extern void __stop_tty(struct tty_struct *tty);
@@ -439,10 +463,10 @@ extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
extern void no_tty(void);
-extern void tty_flush_to_ldisc(struct tty_struct *tty);
extern void tty_buffer_free_all(struct tty_port *port);
extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
extern void tty_buffer_init(struct tty_port *port);
+extern void tty_buffer_set_lock_subclass(struct tty_port *port);
extern speed_t tty_termios_baud_rate(struct ktermios *termios);
extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
extern void tty_termios_encode_baud_rate(struct ktermios *termios,
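Editorial note: the tty_name() signature change removes the caller-supplied scratch buffer. A before/after sketch based only on the two prototypes above; the buffer size and the surrounding print are illustrative.

/* Old calling convention: the caller provided the buffer. */
char buf[64];
pr_debug("closing %s\n", tty_name(tty, buf));

/* New calling convention: the name is returned as a const string. */
pr_debug("closing %s\n", tty_name(tty));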
diff --git a/include/linux/types.h b/include/linux/types.h
index a0bb704..8715287 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -135,26 +135,25 @@ typedef unsigned long blkcnt_t;
#endif
/*
- * The type of an index into the pagecache. Use a #define so asm/types.h
- * can override it.
+ * The type of an index into the pagecache.
*/
-#ifndef pgoff_t
#define pgoff_t unsigned long
-#endif
-/* A dma_addr_t can hold any valid DMA or bus address for the platform */
+/*
+ * A dma_addr_t can hold any valid DMA address, i.e., any address returned
+ * by the DMA API.
+ *
+ * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
+ * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
+ * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
+ * so they don't care about the size of the actual bus addresses.
+ */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
-#endif /* dma_addr_t */
-
-#ifdef __CHECKER__
-#else
-#endif
-#ifdef __CHECK_ENDIAN__
-#else
#endif
+
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
@@ -213,5 +212,8 @@ struct callback_head {
};
#define rcu_head callback_head
+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 4b4439e..df89c9b 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -68,11 +68,12 @@ struct u64_stats_sync {
};
+static inline void u64_stats_init(struct u64_stats_sync *syncp)
+{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-# define u64_stats_init(syncp) seqcount_init(syncp.seq)
-#else
-# define u64_stats_init(syncp) do { } while (0)
+ seqcount_init(&syncp->seq);
#endif
+}
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
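Editorial note: turning u64_stats_init() into a real inline means the argument is type-checked on every configuration, not only on 32-bit SMP where the seqcount actually exists. A small sketch of the usual pattern; struct my_stats is illustrative.

#include <linux/u64_stats_sync.h>

struct my_stats {
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static void my_stats_setup(struct my_stats *s)
{
	u64_stats_init(&s->syncp);
}

static void my_stats_add(struct my_stats *s, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_bytes += bytes;
	u64_stats_update_end(&s->syncp);
}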
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ecd3319..ae572c1 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,21 +1,30 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
-#include <linux/preempt.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
+static __always_inline void pagefault_disabled_inc(void)
+{
+ current->pagefault_disabled++;
+}
+
+static __always_inline void pagefault_disabled_dec(void)
+{
+ current->pagefault_disabled--;
+ WARN_ON(current->pagefault_disabled < 0);
+}
+
/*
- * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
+ * These routines enable/disable the pagefault handler. If disabled, it will
+ * not take any locks and go straight to the fixup table.
*
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * User access methods will not sleep when called from a pagefault_disabled()
+ * environment.
*/
static inline void pagefault_disable(void)
{
- preempt_count_inc();
+ pagefault_disabled_inc();
/*
* make sure to have issued the store before a pagefault
* can hit.
@@ -25,18 +34,31 @@ static inline void pagefault_disable(void)
static inline void pagefault_enable(void)
{
-#ifndef CONFIG_PREEMPT
/*
* make sure to issue those last loads/stores before enabling
* the pagefault handler again.
*/
barrier();
- preempt_count_dec();
-#else
- preempt_enable();
-#endif
+ pagefault_disabled_dec();
}
+/*
+ * Is the pagefault handler disabled? If so, user access methods will not sleep.
+ */
+#define pagefault_disabled() (current->pagefault_disabled != 0)
+
+/*
+ * The pagefault handler is in general disabled by pagefault_disable() or
+ * when in irq context (via in_atomic()).
+ *
+ * This function should only be used by the fault handlers. Other users should
+ * stick to pagefault_disabled().
+ * Please NEVER use preempt_disable() to disable the fault handler. With
+ * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
+ * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
+ */
+#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+
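Editorial note: a minimal sketch of the pattern the new comments describe — disable the fault handler around an atomic user access so the copy fails fast instead of sleeping; try_peek_user() is a hypothetical helper.

static bool try_peek_user(const void __user *src, unsigned long *val)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(val, src, sizeof(*val));
	pagefault_enable();

	return left == 0;
}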
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ee32775..87c0949 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -34,7 +34,7 @@ static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
-static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask)
+static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
{
return (num + net_hash_mix(net)) & mask;
}
@@ -49,11 +49,7 @@ struct udp_sock {
unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
- convert_csum:1;/* On receive, convert checksum
- * unnecessary to checksum complete
- * if possible.
- */
+ no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -102,16 +98,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
-static inline void udp_set_convert_csum(struct sock *sk, bool val)
-{
- udp_sk(sk)->convert_csum = val;
-}
-
-static inline bool udp_get_convert_csum(struct sock *sk)
-{
- return udp_sk(sk)->convert_csum;
-}
-
#define udp_portaddr_for_each_entry(__sk, node, list) \
hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index 2d1f9b6..0383552 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -29,6 +29,7 @@ typedef struct {
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
+#ifdef CONFIG_MULTIUSER
static inline uid_t __kuid_val(kuid_t uid)
{
return uid.val;
@@ -38,6 +39,17 @@ static inline gid_t __kgid_val(kgid_t gid)
{
return gid.val;
}
+#else
+static inline uid_t __kuid_val(kuid_t uid)
+{
+ return 0;
+}
+
+static inline gid_t __kgid_val(kgid_t gid)
+{
+ return 0;
+}
+#endif
#define GLOBAL_ROOT_UID KUIDT_INIT(0)
#define GLOBAL_ROOT_GID KGIDT_INIT(0)
@@ -97,12 +109,12 @@ static inline bool gid_lte(kgid_t left, kgid_t right)
static inline bool uid_valid(kuid_t uid)
{
- return !uid_eq(uid, INVALID_UID);
+ return __kuid_val(uid) != (uid_t) -1;
}
static inline bool gid_valid(kgid_t gid)
{
- return !gid_eq(gid, INVALID_GID);
+ return __kgid_val(gid) != (gid_t) -1;
}
#ifdef CONFIG_USER_NS
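Editorial note: the rewritten checks compare the raw value against (uid_t)-1 instead of going through uid_eq()/INVALID_UID. A brief illustration of the helpers touched here; the caller is hypothetical.

#include <linux/uidgid.h>

static bool is_regular_user(kuid_t uid)
{
	/* valid and not root */
	return uid_valid(uid) && !uid_eq(uid, GLOBAL_ROOT_UID);
}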
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1c5e453..8b01e1c 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,6 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
@@ -88,7 +89,9 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
-void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov,
+void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
+ unsigned long nr_segs, size_t count);
+void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
@@ -96,6 +99,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
+
static inline size_t iov_iter_count(struct iov_iter *i)
{
return i->count;
@@ -107,6 +112,14 @@ static inline bool iter_is_iovec(struct iov_iter *i)
}
/*
+ * Get one of READ or WRITE out of iter->type without any other flags OR'd in
+ * with it.
+ *
+ * The ?: is just for type safety.
+ */
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+
+/*
* Cap the iov_iter by given limit; note that the second argument is
* *not* the new size - it's upper limit for such. Passing it a value
* greater than the amount of data in iov_iter is fine - it'll just do
@@ -135,10 +148,18 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
- int offset, int len);
-int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
- int offset, int len);
+int import_iovec(int type, const struct iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i);
+
+#ifdef CONFIG_COMPAT
+struct compat_iovec;
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i);
+#endif
+
+int import_single_range(int type, void __user *buf, size_t len,
+ struct iovec *iov, struct iov_iter *i);
#endif
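Editorial note: a sketch of how the new import_iovec() replaces the memcpy_fromiovec() family — validate the user iovec once, get back a kernel copy plus an initialized iov_iter, and free whatever the helper may have allocated. The return convention (0 on success, *iov pointing at any heap allocation or NULL) is assumed rather than stated in this header; my_readv() is illustrative.

#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/kernel.h>

static int my_readv(int type, const struct iovec __user *uvec,
		    unsigned int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(type, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... consume up to iov_iter_count(&iter) bytes via copy_to_iter() ... */

	kfree(iov);	/* assumed safe: NULL when the on-stack array was used */
	return 0;
}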
diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h
new file mode 100644
index 0000000..388f6e0
--- /dev/null
+++ b/include/linux/ulpi/driver.h
@@ -0,0 +1,60 @@
+#ifndef __LINUX_ULPI_DRIVER_H
+#define __LINUX_ULPI_DRIVER_H
+
+#include <linux/mod_devicetable.h>
+
+#include <linux/device.h>
+
+struct ulpi_ops;
+
+/**
+ * struct ulpi - describes ULPI PHY device
+ * @id: vendor and product ids for ULPI device
+ * @ops: I/O access
+ * @dev: device interface
+ */
+struct ulpi {
+ struct ulpi_device_id id;
+ struct ulpi_ops *ops;
+ struct device dev;
+};
+
+#define to_ulpi_dev(d) container_of(d, struct ulpi, dev)
+
+static inline void ulpi_set_drvdata(struct ulpi *ulpi, void *data)
+{
+ dev_set_drvdata(&ulpi->dev, data);
+}
+
+static inline void *ulpi_get_drvdata(struct ulpi *ulpi)
+{
+ return dev_get_drvdata(&ulpi->dev);
+}
+
+/**
+ * struct ulpi_driver - describes a ULPI PHY driver
+ * @id_table: array of device identifiers supported by this driver
+ * @probe: binds this driver to ULPI device
+ * @remove: unbinds this driver from ULPI device
+ * @driver: the name and owner members must be initialized by the drivers
+ */
+struct ulpi_driver {
+ const struct ulpi_device_id *id_table;
+ int (*probe)(struct ulpi *ulpi);
+ void (*remove)(struct ulpi *ulpi);
+ struct device_driver driver;
+};
+
+#define to_ulpi_driver(d) container_of(d, struct ulpi_driver, driver)
+
+int ulpi_register_driver(struct ulpi_driver *drv);
+void ulpi_unregister_driver(struct ulpi_driver *drv);
+
+#define module_ulpi_driver(__ulpi_driver) \
+ module_driver(__ulpi_driver, ulpi_register_driver, \
+ ulpi_unregister_driver)
+
+int ulpi_read(struct ulpi *ulpi, u8 addr);
+int ulpi_write(struct ulpi *ulpi, u8 addr, u8 val);
+
+#endif /* __LINUX_ULPI_DRIVER_H */
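Editorial note: a skeleton of what a client driver for this new bus might look like, using only the interfaces declared above. The ID values, names, and probe body are illustrative, and the ulpi_device_id field layout (vendor, product) is an assumption.

#include <linux/module.h>
#include <linux/ulpi/driver.h>
#include <linux/ulpi/regs.h>

static const struct ulpi_device_id my_phy_ids[] = {
	{ 0x0451, 0x1507 },	/* hypothetical vendor/product */
	{ },
};

static int my_phy_probe(struct ulpi *ulpi)
{
	int ret = ulpi_read(ulpi, ULPI_VENDOR_ID_LOW);

	return ret < 0 ? ret : 0;
}

static void my_phy_remove(struct ulpi *ulpi)
{
}

static struct ulpi_driver my_phy_driver = {
	.id_table = my_phy_ids,
	.probe	  = my_phy_probe,
	.remove	  = my_phy_remove,
	.driver	  = {
		.name  = "my-ulpi-phy",
		.owner = THIS_MODULE,
	},
};
module_ulpi_driver(my_phy_driver);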
diff --git a/include/linux/ulpi/interface.h b/include/linux/ulpi/interface.h
new file mode 100644
index 0000000..4de8ab4
--- /dev/null
+++ b/include/linux/ulpi/interface.h
@@ -0,0 +1,23 @@
+#ifndef __LINUX_ULPI_INTERFACE_H
+#define __LINUX_ULPI_INTERFACE_H
+
+#include <linux/types.h>
+
+struct ulpi;
+
+/**
+ * struct ulpi_ops - ULPI register access
+ * @dev: the interface provider
+ * @read: read operation for ULPI register access
+ * @write: write operation for ULPI register access
+ */
+struct ulpi_ops {
+ struct device *dev;
+ int (*read)(struct ulpi_ops *ops, u8 addr);
+ int (*write)(struct ulpi_ops *ops, u8 addr, u8 val);
+};
+
+struct ulpi *ulpi_register_interface(struct device *, struct ulpi_ops *);
+void ulpi_unregister_interface(struct ulpi *);
+
+#endif /* __LINUX_ULPI_INTERFACE_H */
diff --git a/include/linux/ulpi/regs.h b/include/linux/ulpi/regs.h
new file mode 100644
index 0000000..b5b8b88
--- /dev/null
+++ b/include/linux/ulpi/regs.h
@@ -0,0 +1,130 @@
+#ifndef __LINUX_ULPI_REGS_H
+#define __LINUX_ULPI_REGS_H
+
+/*
+ * Macros for Set and Clear
+ * See ULPI 1.1 specification to find the registers with Set and Clear offsets
+ */
+#define ULPI_SET(a) (a + 1)
+#define ULPI_CLR(a) (a + 2)
+
+/*
+ * Register Map
+ */
+#define ULPI_VENDOR_ID_LOW 0x00
+#define ULPI_VENDOR_ID_HIGH 0x01
+#define ULPI_PRODUCT_ID_LOW 0x02
+#define ULPI_PRODUCT_ID_HIGH 0x03
+#define ULPI_FUNC_CTRL 0x04
+#define ULPI_IFC_CTRL 0x07
+#define ULPI_OTG_CTRL 0x0a
+#define ULPI_USB_INT_EN_RISE 0x0d
+#define ULPI_USB_INT_EN_FALL 0x10
+#define ULPI_USB_INT_STS 0x13
+#define ULPI_USB_INT_LATCH 0x14
+#define ULPI_DEBUG 0x15
+#define ULPI_SCRATCH 0x16
+/* Optional Carkit Registers */
+#define ULPI_CARKIT_CTRL 0x19
+#define ULPI_CARKIT_INT_DELAY 0x1c
+#define ULPI_CARKIT_INT_EN 0x1d
+#define ULPI_CARKIT_INT_STS 0x20
+#define ULPI_CARKIT_INT_LATCH 0x21
+#define ULPI_CARKIT_PLS_CTRL 0x22
+/* Other Optional Registers */
+#define ULPI_TX_POS_WIDTH 0x25
+#define ULPI_TX_NEG_WIDTH 0x26
+#define ULPI_POLARITY_RECOVERY 0x27
+/* Access Extended Register Set */
+#define ULPI_ACCESS_EXTENDED 0x2f
+/* Vendor Specific */
+#define ULPI_VENDOR_SPECIFIC 0x30
+/* Extended Registers */
+#define ULPI_EXT_VENDOR_SPECIFIC 0x80
+
+/*
+ * Register Bits
+ */
+
+/* Function Control */
+#define ULPI_FUNC_CTRL_XCVRSEL BIT(0)
+#define ULPI_FUNC_CTRL_XCVRSEL_MASK 0x3
+#define ULPI_FUNC_CTRL_HIGH_SPEED 0x0
+#define ULPI_FUNC_CTRL_FULL_SPEED 0x1
+#define ULPI_FUNC_CTRL_LOW_SPEED 0x2
+#define ULPI_FUNC_CTRL_FS4LS 0x3
+#define ULPI_FUNC_CTRL_TERMSELECT BIT(2)
+#define ULPI_FUNC_CTRL_OPMODE BIT(3)
+#define ULPI_FUNC_CTRL_OPMODE_MASK (0x3 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NORMAL (0x0 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (0x1 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (0x2 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (0x3 << 3)
+#define ULPI_FUNC_CTRL_RESET BIT(5)
+#define ULPI_FUNC_CTRL_SUSPENDM BIT(6)
+
+/* Interface Control */
+#define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE BIT(0)
+#define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE BIT(1)
+#define ULPI_IFC_CTRL_CARKITMODE BIT(2)
+#define ULPI_IFC_CTRL_CLOCKSUSPENDM BIT(3)
+#define ULPI_IFC_CTRL_AUTORESUME BIT(4)
+#define ULPI_IFC_CTRL_EXTERNAL_VBUS BIT(5)
+#define ULPI_IFC_CTRL_PASSTHRU BIT(6)
+#define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE BIT(7)
+
+/* OTG Control */
+#define ULPI_OTG_CTRL_ID_PULLUP BIT(0)
+#define ULPI_OTG_CTRL_DP_PULLDOWN BIT(1)
+#define ULPI_OTG_CTRL_DM_PULLDOWN BIT(2)
+#define ULPI_OTG_CTRL_DISCHRGVBUS BIT(3)
+#define ULPI_OTG_CTRL_CHRGVBUS BIT(4)
+#define ULPI_OTG_CTRL_DRVVBUS BIT(5)
+#define ULPI_OTG_CTRL_DRVVBUS_EXT BIT(6)
+#define ULPI_OTG_CTRL_EXTVBUSIND BIT(7)
+
+/* USB Interrupt Enable Rising,
+ * USB Interrupt Enable Falling,
+ * USB Interrupt Status and
+ * USB Interrupt Latch
+ */
+#define ULPI_INT_HOST_DISCONNECT BIT(0)
+#define ULPI_INT_VBUS_VALID BIT(1)
+#define ULPI_INT_SESS_VALID BIT(2)
+#define ULPI_INT_SESS_END BIT(3)
+#define ULPI_INT_IDGRD BIT(4)
+
+/* Debug */
+#define ULPI_DEBUG_LINESTATE0 BIT(0)
+#define ULPI_DEBUG_LINESTATE1 BIT(1)
+
+/* Carkit Control */
+#define ULPI_CARKIT_CTRL_CARKITPWR BIT(0)
+#define ULPI_CARKIT_CTRL_IDGNDDRV BIT(1)
+#define ULPI_CARKIT_CTRL_TXDEN BIT(2)
+#define ULPI_CARKIT_CTRL_RXDEN BIT(3)
+#define ULPI_CARKIT_CTRL_SPKLEFTEN BIT(4)
+#define ULPI_CARKIT_CTRL_SPKRIGHTEN BIT(5)
+#define ULPI_CARKIT_CTRL_MICEN BIT(6)
+
+/* Carkit Interrupt Enable */
+#define ULPI_CARKIT_INT_EN_IDFLOAT_RISE BIT(0)
+#define ULPI_CARKIT_INT_EN_IDFLOAT_FALL BIT(1)
+#define ULPI_CARKIT_INT_EN_CARINTDET BIT(2)
+#define ULPI_CARKIT_INT_EN_DP_RISE BIT(3)
+#define ULPI_CARKIT_INT_EN_DP_FALL BIT(4)
+
+/* Carkit Interrupt Status and
+ * Carkit Interrupt Latch
+ */
+#define ULPI_CARKIT_INT_IDFLOAT BIT(0)
+#define ULPI_CARKIT_INT_CARINTDET BIT(1)
+#define ULPI_CARKIT_INT_DP BIT(2)
+
+/* Carkit Pulse Control */
+#define ULPI_CARKIT_PLS_CTRL_TXPLSEN BIT(0)
+#define ULPI_CARKIT_PLS_CTRL_RXPLSEN BIT(1)
+#define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN BIT(2)
+#define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN BIT(3)
+
+#endif /* __LINUX_ULPI_REGS_H */
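Editorial note: the Set/Clear shadow offsets let a PHY driver flip individual bits without a read-modify-write. A two-line sketch using ulpi_write() from the driver interface above; the choice of register and bit is illustrative.

/* assert, then release, the D+ pulldown via the OTG Control shadows */
ulpi_write(ulpi, ULPI_SET(ULPI_OTG_CTRL), ULPI_OTG_CTRL_DP_PULLDOWN);
ulpi_write(ulpi, ULPI_CLR(ULPI_OTG_CTRL), ULPI_OTG_CTRL_DP_PULLDOWN);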
diff --git a/include/linux/usb.h b/include/linux/usb.h
index f89c24a..447fe29 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -82,7 +82,7 @@ struct usb_host_interface {
int extralen;
unsigned char *extra; /* Extra descriptors */
- /* array of desc.bNumEndpoint endpoints associated with this
+ /* array of desc.bNumEndpoints endpoints associated with this
* interface setting. these will be in no particular order.
*/
struct usb_host_endpoint *endpoint;
@@ -127,10 +127,6 @@ enum usb_interface_condition {
* to the sysfs representation for that device.
* @pm_usage_cnt: PM usage counter for this interface
* @reset_ws: Used for scheduling resets from atomic context.
- * @reset_running: set to 1 if the interface is currently running a
- * queued reset so that usb_cancel_queued_reset() doesn't try to
- * remove from the workqueue when running inside the worker
- * thread. See __usb_queue_reset_device().
* @resetting_device: USB core reset the device, so use alt setting 0 as
* current; needs bandwidth alloc after reset.
*
@@ -181,7 +177,6 @@ struct usb_interface {
unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
unsigned needs_binding:1; /* needs delayed unbind/rebind */
- unsigned reset_running:1;
unsigned resetting_device:1; /* true: bandwidth alloc after reset */
struct device dev; /* interface specific device info */
@@ -210,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf);
#define USB_MAXINTERFACES 32
#define USB_MAXIADS (USB_MAXINTERFACES/2)
+/*
+ * USB Resume Timer: Every Host controller driver should drive the resume
+ * signalling on the bus for the amount of time defined by this macro.
+ *
+ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
+ *
+ * Note that the USB Specification states we should drive resume for *at least*
+ * 20 ms, but it doesn't give an upper bound. This creates two possible
+ * situations which we want to avoid:
+ *
+ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
+ * us to fail USB Electrical Tests, thus failing Certification
+ *
+ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
+ * and while we can argue that's against the USB Specification, we don't have
+ * control over which devices a certification laboratory will be using for
+ * certification. If CertLab uses a device which was tested against Windows and
+ * that happens to have relaxed resume signalling rules, we might fall into
+ * situations where we fail interoperability and electrical tests.
+ *
+ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
+ * should cope with both LPJ calibration errors and devices not following every
+ * detail of the USB Specification.
+ */
+#define USB_RESUME_TIMEOUT 40 /* ms */
+
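Editorial note: a sketch of the intended use in a host-controller resume path; the function and the surrounding port handling are hypothetical, only the constant and msleep() are real.

#include <linux/delay.h>

static void finish_port_resume_signalling(void)
{
	/* drive resume for the agreed, spec-plus-margin interval */
	msleep(USB_RESUME_TIMEOUT);

	/* ... then stop resume signalling and complete the transition ... */
}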
/**
* struct usb_interface_cache - long-term representation of a device interface
* @num_altsetting: number of altsettings defined.
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 7c9b484..1f6526c 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -80,6 +80,9 @@
#define CDC_NCM_TIMER_INTERVAL_MIN 5UL
#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
+/* Driver flags */
+#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
+
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
@@ -103,9 +106,11 @@ struct cdc_ncm_ctx {
spinlock_t mtx;
atomic_t stop;
+ int drvflags;
u32 timer_interval;
u32 max_ndp_size;
+ struct usb_cdc_ncm_ndp16 *delayed_ndp16;
u32 tx_timer_pending;
u32 tx_curr_frame_num;
@@ -133,7 +138,7 @@ struct cdc_ncm_ctx {
};
u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
-int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
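Editorial note: a sketch of how a device-specific usbnet bind() might pass the new flag; the altsetting value and function name are assumptions for illustration, not taken from this header.

static int my_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
	/* request the NDP-at-end layout this firmware expects */
	return cdc_ncm_bind_common(dev, intf, 1 /* data altsetting */,
				   CDC_NCM_FLAG_NDP_TO_END);
}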
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 535997a..ab94f78 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -19,6 +19,7 @@ struct ci_hdrc_platform_data {
enum usb_phy_interface phy_mode;
unsigned long flags;
#define CI_HDRC_REGS_SHARED BIT(0)
+#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
#define CI_HDRC_DISABLE_STREAMING BIT(3)
/*
* Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
@@ -27,6 +28,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_DUAL_ROLE_NOT_OTG BIT(4)
#define CI_HDRC_IMX28_WRITE_FIX BIT(5)
#define CI_HDRC_FORCE_FULLSPEED BIT(6)
+#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 3d87def..2511469 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -148,6 +148,7 @@ struct usb_os_desc_table {
* @disable: (REQUIRED) Indicates the function should be disabled. Reasons
* include host resetting or reconfiguring the gadget, and disconnection.
* @setup: Used for interface-specific control requests.
+ * @req_match: Tests if a given class request can be handled by this function.
* @suspend: Notifies functions when the host stops sending USB traffic.
* @resume: Notifies functions when the host restarts USB traffic.
* @get_status: Returns function status as a reply to
@@ -213,6 +214,8 @@ struct usb_function {
void (*disable)(struct usb_function *);
int (*setup)(struct usb_function *,
const struct usb_ctrlrequest *);
+ bool (*req_match)(struct usb_function *,
+ const struct usb_ctrlrequest *);
void (*suspend)(struct usb_function *);
void (*resume)(struct usb_function *);
diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h
index 7eb4dcd..db0431b 100644
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -34,6 +34,8 @@ struct usb_hcd;
* after initialization.
* @no_io_watchdog: set to 1 if the controller does not need the I/O
* watchdog to run.
+ * @reset_on_resume: set to 1 if the controller needs to be reset after
+ * a suspend / resume cycle (but can't detect that itself).
*
* These are general configuration options for the EHCI controller. All of
* these options are activating more or less workarounds for some hardware.
@@ -45,6 +47,8 @@ struct usb_ehci_pdata {
unsigned big_endian_desc:1;
unsigned big_endian_mmio:1;
unsigned no_io_watchdog:1;
+ unsigned reset_on_resume:1;
+ unsigned dma_mask_64:1;
/* Turn on all power and clocks */
int (*power_on)(struct platform_device *pdev);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 70ddb39..4f3dfb7 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -190,7 +190,7 @@ struct usb_ep {
* @ep:the endpoint being configured
* @maxpacket_limit:value of maximum packet size limit
*
- * This function shoud be used only in UDC drivers to initialize endpoint
+ * This function should be used only in UDC drivers to initialize endpoint
* (usually in probe function).
*/
static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
@@ -474,6 +474,7 @@ struct usb_dcd_config_params {
struct usb_gadget;
struct usb_gadget_driver;
+struct usb_udc;
/* the rest of the api to the controller hardware: device operations,
* which don't involve endpoints (or i/o).
@@ -496,6 +497,7 @@ struct usb_gadget_ops {
/**
* struct usb_gadget - represents a usb slave device
* @work: (internal use) Workqueue to be used for sysfs_notify()
+ * @udc: struct usb_udc pointer for this gadget
* @ops: Function pointers used to access hardware-specific operations.
* @ep0: Endpoint zero, used when reading or writing responses to
* driver setup() requests
@@ -523,6 +525,7 @@ struct usb_gadget_ops {
* enabled HNP support.
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
+ * @is_selfpowered: if the gadget is self-powered.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -544,6 +547,7 @@ struct usb_gadget_ops {
*/
struct usb_gadget {
struct work_struct work;
+ struct usb_udc *udc;
/* readonly to gadget driver */
const struct usb_gadget_ops *ops;
struct usb_ep *ep0;
@@ -563,6 +567,7 @@ struct usb_gadget {
unsigned a_hnp_support:1;
unsigned a_alt_hnp_support:1;
unsigned quirk_ep_out_aligned_size:1;
+ unsigned is_selfpowered:1;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
@@ -1027,6 +1032,10 @@ extern void usb_gadget_udc_reset(struct usb_gadget *gadget,
extern void usb_gadget_giveback_request(struct usb_ep *ep,
struct usb_request *req);
+/*-------------------------------------------------------------------------*/
+
+/* utility to update vbus status for udc core, it may be scheduled */
+extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status);
/*-------------------------------------------------------------------------*/
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 086bf13..c9aa779 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -146,6 +146,8 @@ struct usb_hcd {
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
unsigned tpl_support:1; /* OTG & EH TPL support */
+ unsigned cant_recv_wakeups:1;
+ /* wakeup requests from downstream aren't received */
unsigned int irq; /* irq allocated */
void __iomem *regs; /* device memory/io */
@@ -453,6 +455,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif /* CONFIG_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
+void usb_init_pool_max(void);
int hcd_buffer_create(struct usb_hcd *hcd);
void hcd_buffer_destroy(struct usb_hcd *hcd);
@@ -619,8 +622,6 @@ extern struct list_head usb_bus_list;
extern struct mutex usb_bus_list_lock;
extern wait_queue_head_t usb_kill_urb_queue;
-extern int usb_find_interface_driver(struct usb_device *dev,
- struct usb_interface *interface);
#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index b0a3924..e55a150 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -18,6 +18,7 @@
#ifndef __ASM_ARCH_MSM_HSUSB_H
#define __ASM_ARCH_MSM_HSUSB_H
+#include <linux/extcon.h>
#include <linux/types.h>
#include <linux/usb/otg.h>
#include <linux/clk.h>
@@ -117,8 +118,17 @@ struct msm_otg_platform_data {
enum otg_control_type otg_control;
enum msm_usb_phy_type phy_type;
void (*setup_gpio)(enum usb_otg_state state);
- int (*link_clk_reset)(struct clk *link_clk, bool assert);
- int (*phy_clk_reset)(struct clk *phy_clk);
+};
+
+/**
+ * struct msm_usb_cable - structure for external connector cable
+ * state tracking
+ * @nb: hold event notification callback
+ * @conn: used for notification registration
+ */
+struct msm_usb_cable {
+ struct notifier_block nb;
+ struct extcon_specific_cable_nb conn;
};
/**
@@ -128,7 +138,6 @@ struct msm_otg_platform_data {
* @irq: IRQ number assigned for HSUSB controller.
* @clk: clock struct of usb_hs_clk.
* @pclk: clock struct of usb_hs_pclk.
- * @phy_reset_clk: clock struct of usb_phy_clk.
* @core_clk: clock struct of usb_hs_core_clk.
* @regs: ioremapped register base address.
* @inputs: OTG state machine inputs(Id, SessValid etc).
@@ -141,6 +150,11 @@ struct msm_otg_platform_data {
* @chg_type: The type of charger attached.
* @dcd_retires: The retry count used to track Data contact
* detection process.
+ * @manual_pullup: true if VBUS is not routed to USB controller/phy
+ * and controller driver therefore enables pull-up explicitly before
+ * starting controller using usbcmd run/stop bit.
+ * @vbus: VBUS signal state tracking, using extcon framework
+ * @id: ID signal state tracking, using extcon framework
*/
struct msm_otg {
struct usb_phy phy;
@@ -148,7 +162,6 @@ struct msm_otg {
int irq;
struct clk *clk;
struct clk *pclk;
- struct clk *phy_reset_clk;
struct clk *core_clk;
void __iomem *regs;
#define ID 0
@@ -170,6 +183,11 @@ struct msm_otg {
struct reset_control *phy_rst;
struct reset_control *link_rst;
int vdd_levels[3];
+
+ bool manual_pullup;
+
+ struct msm_usb_cable vbus;
+ struct msm_usb_cable id;
};
#endif
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index a29f603..e159b39 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -21,6 +21,8 @@
#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
+#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0)
+
#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
#define USB_USBCMD (MSM_USB_BASE + 0x0140)
@@ -30,6 +32,9 @@
#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240)
#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278)
+#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7)
+#define USBCMD_SESS_VLD_CTRL BIT(25)
+
#define USBCMD_RESET 2
#define USB_USBINTR (MSM_USB_BASE + 0x0148)
@@ -50,6 +55,10 @@
#define ULPI_PWR_CLK_MNG_REG 0x88
#define OTG_COMP_DISABLE BIT(0)
+#define ULPI_MISC_A 0x96
+#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1)
+#define ULPI_MISC_A_VBUSVLDEXT BIT(0)
+
#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h
index 148b8fa..7251202 100644
--- a/include/linux/usb/net2280.h
+++ b/include/linux/usb/net2280.h
@@ -168,6 +168,9 @@ struct net2280_regs {
#define ENDPOINT_B_INTERRUPT 2
#define ENDPOINT_A_INTERRUPT 1
#define ENDPOINT_0_INTERRUPT 0
+#define USB3380_IRQSTAT0_EP_INTR_MASK_IN (0xF << 17)
+#define USB3380_IRQSTAT0_EP_INTR_MASK_OUT (0xF << 1)
+
u32 irqstat1;
#define POWER_STATE_CHANGE_INTERRUPT 27
#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index b6ba1bf..f728f18 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -53,6 +53,8 @@ enum otg_fsm_timer {
B_SE0_SRP,
B_SRP_FAIL,
A_WAIT_ENUM,
+ B_DATA_PLS,
+ B_SSEND_SRP,
NUM_OTG_FSM_TIMERS,
};
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index f499c23..e39f251 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -1,5 +1,5 @@
-/* USB OTG (On The Go) defines */
/*
+ * USB PHY defines
*
* These APIs may be used between USB controllers. USB device drivers
* (for either host or peripheral roles) don't use these calls; they
@@ -106,7 +106,7 @@ struct usb_phy {
int (*set_power)(struct usb_phy *x,
unsigned mA);
- /* for non-OTG B devices: set transceiver into suspend mode */
+ /* Set transceiver into suspend mode */
int (*set_suspend)(struct usb_phy *x,
int suspend);
@@ -205,6 +205,8 @@ extern struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index);
extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index);
extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
const char *phandle, u8 index);
+extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
+ struct device_node *node, struct notifier_block *nb);
extern void usb_put_phy(struct usb_phy *);
extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
extern int usb_bind_phy(const char *dev_name, u8 index,
@@ -238,6 +240,12 @@ static inline struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
return ERR_PTR(-ENXIO);
}
+static inline struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
+ struct device_node *node, struct notifier_block *nb)
+{
+ return ERR_PTR(-ENXIO);
+}
+
static inline void usb_put_phy(struct usb_phy *x)
{
}
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 9fd9e48..3dd5a78 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -165,10 +165,11 @@ struct renesas_usbhs_driver_param {
*/
u32 has_otg:1; /* for controlling PWEN/EXTLP */
u32 has_sudmac:1; /* for SUDMAC */
+ u32 has_usb_dmac:1; /* for USB-DMAC */
+#define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */
};
-#define USBHS_TYPE_R8A7790 1
-#define USBHS_TYPE_R8A7791 2
+#define USBHS_TYPE_RCAR_GEN2 1
/*
* option:
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 9bb547c..704a1ab 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -190,8 +190,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
* @num_ports: the number of different ports this device will have.
* @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
* (0 = end-point size)
- * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer
- * (0 = end-point size)
+ * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
* @calc_num_ports: pointer to a function to determine how many ports this
* device has dynamically. It will be called after the probe()
* callback is called, but before attach()
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 5c295c2..5f07407 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -12,6 +12,8 @@
#define __LINUX_USB_ULPI_H
#include <linux/usb/otg.h>
+#include <linux/ulpi/regs.h>
+
/*-------------------------------------------------------------------------*/
/*
@@ -49,138 +51,6 @@
/*-------------------------------------------------------------------------*/
-/*
- * Macros for Set and Clear
- * See ULPI 1.1 specification to find the registers with Set and Clear offsets
- */
-#define ULPI_SET(a) (a + 1)
-#define ULPI_CLR(a) (a + 2)
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Register Map
- */
-#define ULPI_VENDOR_ID_LOW 0x00
-#define ULPI_VENDOR_ID_HIGH 0x01
-#define ULPI_PRODUCT_ID_LOW 0x02
-#define ULPI_PRODUCT_ID_HIGH 0x03
-#define ULPI_FUNC_CTRL 0x04
-#define ULPI_IFC_CTRL 0x07
-#define ULPI_OTG_CTRL 0x0a
-#define ULPI_USB_INT_EN_RISE 0x0d
-#define ULPI_USB_INT_EN_FALL 0x10
-#define ULPI_USB_INT_STS 0x13
-#define ULPI_USB_INT_LATCH 0x14
-#define ULPI_DEBUG 0x15
-#define ULPI_SCRATCH 0x16
-/* Optional Carkit Registers */
-#define ULPI_CARCIT_CTRL 0x19
-#define ULPI_CARCIT_INT_DELAY 0x1c
-#define ULPI_CARCIT_INT_EN 0x1d
-#define ULPI_CARCIT_INT_STS 0x20
-#define ULPI_CARCIT_INT_LATCH 0x21
-#define ULPI_CARCIT_PLS_CTRL 0x22
-/* Other Optional Registers */
-#define ULPI_TX_POS_WIDTH 0x25
-#define ULPI_TX_NEG_WIDTH 0x26
-#define ULPI_POLARITY_RECOVERY 0x27
-/* Access Extended Register Set */
-#define ULPI_ACCESS_EXTENDED 0x2f
-/* Vendor Specific */
-#define ULPI_VENDOR_SPECIFIC 0x30
-/* Extended Registers */
-#define ULPI_EXT_VENDOR_SPECIFIC 0x80
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Register Bits
- */
-
-/* Function Control */
-#define ULPI_FUNC_CTRL_XCVRSEL (1 << 0)
-#define ULPI_FUNC_CTRL_XCVRSEL_MASK (3 << 0)
-#define ULPI_FUNC_CTRL_HIGH_SPEED (0 << 0)
-#define ULPI_FUNC_CTRL_FULL_SPEED (1 << 0)
-#define ULPI_FUNC_CTRL_LOW_SPEED (2 << 0)
-#define ULPI_FUNC_CTRL_FS4LS (3 << 0)
-#define ULPI_FUNC_CTRL_TERMSELECT (1 << 2)
-#define ULPI_FUNC_CTRL_OPMODE (1 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_MASK (3 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_NORMAL (0 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (1 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (2 << 3)
-#define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (3 << 3)
-#define ULPI_FUNC_CTRL_RESET (1 << 5)
-#define ULPI_FUNC_CTRL_SUSPENDM (1 << 6)
-
-/* Interface Control */
-#define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE (1 << 0)
-#define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE (1 << 1)
-#define ULPI_IFC_CTRL_CARKITMODE (1 << 2)
-#define ULPI_IFC_CTRL_CLOCKSUSPENDM (1 << 3)
-#define ULPI_IFC_CTRL_AUTORESUME (1 << 4)
-#define ULPI_IFC_CTRL_EXTERNAL_VBUS (1 << 5)
-#define ULPI_IFC_CTRL_PASSTHRU (1 << 6)
-#define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE (1 << 7)
-
-/* OTG Control */
-#define ULPI_OTG_CTRL_ID_PULLUP (1 << 0)
-#define ULPI_OTG_CTRL_DP_PULLDOWN (1 << 1)
-#define ULPI_OTG_CTRL_DM_PULLDOWN (1 << 2)
-#define ULPI_OTG_CTRL_DISCHRGVBUS (1 << 3)
-#define ULPI_OTG_CTRL_CHRGVBUS (1 << 4)
-#define ULPI_OTG_CTRL_DRVVBUS (1 << 5)
-#define ULPI_OTG_CTRL_DRVVBUS_EXT (1 << 6)
-#define ULPI_OTG_CTRL_EXTVBUSIND (1 << 7)
-
-/* USB Interrupt Enable Rising,
- * USB Interrupt Enable Falling,
- * USB Interrupt Status and
- * USB Interrupt Latch
- */
-#define ULPI_INT_HOST_DISCONNECT (1 << 0)
-#define ULPI_INT_VBUS_VALID (1 << 1)
-#define ULPI_INT_SESS_VALID (1 << 2)
-#define ULPI_INT_SESS_END (1 << 3)
-#define ULPI_INT_IDGRD (1 << 4)
-
-/* Debug */
-#define ULPI_DEBUG_LINESTATE0 (1 << 0)
-#define ULPI_DEBUG_LINESTATE1 (1 << 1)
-
-/* Carkit Control */
-#define ULPI_CARKIT_CTRL_CARKITPWR (1 << 0)
-#define ULPI_CARKIT_CTRL_IDGNDDRV (1 << 1)
-#define ULPI_CARKIT_CTRL_TXDEN (1 << 2)
-#define ULPI_CARKIT_CTRL_RXDEN (1 << 3)
-#define ULPI_CARKIT_CTRL_SPKLEFTEN (1 << 4)
-#define ULPI_CARKIT_CTRL_SPKRIGHTEN (1 << 5)
-#define ULPI_CARKIT_CTRL_MICEN (1 << 6)
-
-/* Carkit Interrupt Enable */
-#define ULPI_CARKIT_INT_EN_IDFLOAT_RISE (1 << 0)
-#define ULPI_CARKIT_INT_EN_IDFLOAT_FALL (1 << 1)
-#define ULPI_CARKIT_INT_EN_CARINTDET (1 << 2)
-#define ULPI_CARKIT_INT_EN_DP_RISE (1 << 3)
-#define ULPI_CARKIT_INT_EN_DP_FALL (1 << 4)
-
-/* Carkit Interrupt Status and
- * Carkit Interrupt Latch
- */
-#define ULPI_CARKIT_INT_IDFLOAT (1 << 0)
-#define ULPI_CARKIT_INT_CARINTDET (1 << 1)
-#define ULPI_CARKIT_INT_DP (1 << 2)
-
-/* Carkit Pulse Control*/
-#define ULPI_CARKIT_PLS_CTRL_TXPLSEN (1 << 0)
-#define ULPI_CARKIT_PLS_CTRL_RXPLSEN (1 << 1)
-#define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN (1 << 2)
-#define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN (1 << 3)
-
-/*-------------------------------------------------------------------------*/
-
#if IS_ENABLED(CONFIG_USB_ULPI)
struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags);
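
The register map and the Set/Clear helpers removed here now come from <linux/ulpi/regs.h>, which this header pulls in above, so existing users keep compiling. A minimal sketch of how a controller driver might use the Set offset to OR bits into OTG Control; my_ulpi_write() is a hypothetical write primitive, only the ULPI_* macros come from the relocated header:

#include <linux/types.h>
#include <linux/ulpi/regs.h>

int my_ulpi_write(u8 reg, u8 val);      /* hypothetical controller hook */

static int example_enable_dp_pulldown(void)
{
        /* Registers with Set/Clear variants update without a
         * read-modify-write: writing reg + 1 ORs bits in, reg + 2
         * clears them. */
        return my_ulpi_write(ULPI_SET(ULPI_OTG_CTRL),
                             ULPI_OTG_CTRL_DP_PULLDOWN);
}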
diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h
index f92eb63..11525d8 100644
--- a/include/linux/usb/usb338x.h
+++ b/include/linux/usb/usb338x.h
@@ -43,6 +43,10 @@
#define IN_ENDPOINT_TYPE 12
#define OUT_ENDPOINT_ENABLE 10
#define OUT_ENDPOINT_TYPE 8
+#define USB3380_EP_CFG_MASK_IN ((0x3 << IN_ENDPOINT_TYPE) | \
+ BIT(IN_ENDPOINT_ENABLE))
+#define USB3380_EP_CFG_MASK_OUT ((0x3 << OUT_ENDPOINT_TYPE) | \
+ BIT(OUT_ENDPOINT_ENABLE))
struct usb338x_usb_ext_regs {
u32 usbclass;
diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h
index 68adae8..c13632d5 100644
--- a/include/linux/usb/usb_phy_generic.h
+++ b/include/linux/usb/usb_phy_generic.h
@@ -2,6 +2,7 @@
#define __LINUX_USB_NOP_XCEIV_H
#include <linux/usb/otg.h>
+#include <linux/gpio/consumer.h>
struct usb_phy_generic_platform_data {
enum usb_phy_type type;
@@ -11,6 +12,7 @@ struct usb_phy_generic_platform_data {
unsigned int needs_vcc:1;
unsigned int needs_reset:1; /* deprecated */
int gpio_reset;
+ struct gpio_desc *gpiod_vbus;
};
#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index d9a4905..6e0ce8c 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -227,9 +227,23 @@ struct skb_data { /* skb->cb is one of these */
struct urb *urb;
struct usbnet *dev;
enum skb_state state;
- size_t length;
+ long length;
+ unsigned long packets;
};
+/* Drivers that set FLAG_MULTI_PACKET must call this in their
+ * tx_fixup method before returning an skb.
+ */
+static inline void
+usbnet_set_skb_tx_stats(struct sk_buff *skb,
+ unsigned long packets, long bytes_delta)
+{
+ struct skb_data *entry = (struct skb_data *) skb->cb;
+
+ entry->packets = packets;
+ entry->length = bytes_delta;
+}
+
extern int usbnet_open(struct net_device *net);
extern int usbnet_stop(struct net_device *net);
extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
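
Drivers that set FLAG_MULTI_PACKET now have to report per-URB statistics themselves, since one skb handed back from tx_fixup may carry several aggregated network packets. A minimal sketch of such a tx_fixup; my_aggregate() and its bookkeeping are hypothetical:

#include <linux/skbuff.h>
#include <linux/usb/usbnet.h>

/* Hypothetical aggregation helper: batches @skb and reports how many
 * packets the resulting skb carries plus a byte adjustment. */
struct sk_buff *my_aggregate(struct usbnet *dev, struct sk_buff *skb,
                             gfp_t flags, unsigned long *packets,
                             long *bytes_delta);

static struct sk_buff *example_tx_fixup(struct usbnet *dev,
                                        struct sk_buff *skb, gfp_t flags)
{
        unsigned long packets;
        long bytes_delta;
        struct sk_buff *agg;

        agg = my_aggregate(dev, skb, flags, &packets, &bytes_delta);
        if (!agg)
                return NULL;

        /* Stash the packet count and byte delta in skb->cb so the
         * completion path can account them. */
        usbnet_set_skb_tx_stats(agg, packets, bytes_delta);
        return agg;
}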
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index a7f2604..7f5f78b 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -77,6 +77,8 @@
/* Cannot handle ATA_12 or ATA_16 CDBs */ \
US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
+ US_FLAG(MAX_SECTORS_240, 0x08000000) \
+ /* Sets max_sectors to 240 */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
new file mode 100644
index 0000000..f9b2ce5
--- /dev/null
+++ b/include/linux/util_macros.h
@@ -0,0 +1,40 @@
+#ifndef _LINUX_HELPER_MACROS_H_
+#define _LINUX_HELPER_MACROS_H_
+
+#define __find_closest(x, a, as, op) \
+({ \
+ typeof(as) __fc_i, __fc_as = (as) - 1; \
+ typeof(x) __fc_x = (x); \
+ typeof(*a) const *__fc_a = (a); \
+ for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \
+ if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \
+ __fc_a[__fc_i + 1], 2)) \
+ break; \
+ } \
+ (__fc_i); \
+})
+
+/**
+ * find_closest - locate the closest element in a sorted array
+ * @x: The reference value.
+ * @a: The array in which to look for the closest element. Must be sorted
+ * in ascending order.
+ * @as: Size of 'a'.
+ *
+ * Returns the index of the element closest to 'x'.
+ */
+#define find_closest(x, a, as) __find_closest(x, a, as, <=)
+
+/**
+ * find_closest_descending - locate the closest element in a sorted array
+ * @x: The reference value.
+ * @a: The array in which to look for the closest element. Must be sorted
+ * in descending order.
+ * @as: Size of 'a'.
+ *
+ * Similar to find_closest() but 'a' is expected to be sorted in descending
+ * order.
+ */
+#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
+
+#endif
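
A short usage sketch for the new helpers; the millivolt table below is made up for illustration. find_closest() relies on DIV_ROUND_CLOSEST() and the caller supplies the array length, typically via ARRAY_SIZE():

#include <linux/kernel.h>
#include <linux/util_macros.h>

static const int ranges_mv[] = { 1250, 2500, 5000, 10000 };     /* ascending */

static int example_pick_range(int requested_mv)
{
        /* Index of the entry closest to requested_mv, e.g. 2600 -> 1
         * (2500 mV) and 4000 -> 2 (5000 mV). */
        return find_closest(requested_mv, ranges_mv, ARRAY_SIZE(ranges_mv));
}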
diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h
index ba82f03..0211229 100644
--- a/include/linux/uwb/umc.h
+++ b/include/linux/uwb/umc.h
@@ -87,8 +87,6 @@ struct umc_driver {
int (*probe)(struct umc_dev *);
void (*remove)(struct umc_dev *);
- int (*suspend)(struct umc_dev *, pm_message_t state);
- int (*resume)(struct umc_dev *);
int (*pre_reset)(struct umc_dev *);
int (*post_reset)(struct umc_dev *);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index d320411..ddb4409 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -14,6 +14,8 @@
#include <linux/iommu.h>
#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
#include <uapi/linux/vfio.h>
/**
@@ -26,6 +28,7 @@
* @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
* operations documented below
* @mmap: Perform mmap(2) on a region of the device file descriptor
+ * @request: Request for the bus driver to release the device
*/
struct vfio_device_ops {
char *name;
@@ -38,6 +41,7 @@ struct vfio_device_ops {
long (*ioctl)(void *device_data, unsigned int cmd,
unsigned long arg);
int (*mmap)(void *device_data, struct vm_area_struct *vma);
+ void (*request)(void *device_data, unsigned int count);
};
extern int vfio_add_group_dev(struct device *dev,
@@ -76,19 +80,6 @@ extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
extern void vfio_unregister_iommu_driver(
const struct vfio_iommu_driver_ops *ops);
-/**
- * offsetofend(TYPE, MEMBER)
- *
- * @TYPE: The type of the structure
- * @MEMBER: The member within the structure to get the end offset of
- *
- * Simple helper macro for dealing with variable sized structures passed
- * from user space. This allows us to easily determine if the provided
- * structure is sized to include various fields.
- */
-#define offsetofend(TYPE, MEMBER) \
- (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
-
/*
* External user API
*/
@@ -121,4 +112,27 @@ static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
return -ENOTTY;
}
#endif /* CONFIG_EEH */
+
+/*
+ * IRQfd - generic
+ */
+struct virqfd {
+ void *opaque;
+ struct eventfd_ctx *eventfd;
+ int (*handler)(void *, void *);
+ void (*thread)(void *, void *);
+ void *data;
+ struct work_struct inject;
+ wait_queue_t wait;
+ poll_table pt;
+ struct work_struct shutdown;
+ struct virqfd **pvirqfd;
+};
+
+extern int vfio_virqfd_enable(void *opaque,
+ int (*handler)(void *, void *),
+ void (*thread)(void *, void *),
+ void *data, struct virqfd **pvirqfd, int fd);
+extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
+
#endif /* VFIO_H */
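
The virqfd helpers exported above let a VFIO bus driver run a handler (and optionally a workqueue thread) whenever user space signals an eventfd. A minimal sketch of wiring one up; my_vfio_device and the handler body are hypothetical:

#include <linux/vfio.h>

struct my_vfio_device {
        struct virqfd *unmask_virqfd;
};

/* Runs from the eventfd wakeup path in atomic context; keep it short. */
static int my_unmask_handler(void *opaque, void *data)
{
        /* e.g. unmask the device interrupt for the device in @opaque */
        return 0;
}

static int example_setup_unmask_eventfd(struct my_vfio_device *vdev, int fd)
{
        return vfio_virqfd_enable(vdev, my_unmask_handler, NULL, NULL,
                                  &vdev->unmask_virqfd, fd);
}

static void example_teardown_unmask_eventfd(struct my_vfio_device *vdev)
{
        vfio_virqfd_disable(&vdev->unmask_virqfd);
}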
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index c37bd4d..8c3b412 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,8 +65,13 @@ struct pci_dev;
* out of the arbitration process (and can be safe to take
* interrupts at any time.
*/
+#if defined(CONFIG_VGA_ARB)
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes) { };
+#endif
/**
* vga_get - acquire & locks VGA resources
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 28f0e65..8f4d4bf 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -108,8 +108,6 @@ struct virtio_device {
void *priv;
};
-bool virtio_device_is_legacy_only(struct virtio_device_id id);
-
static inline struct virtio_device *dev_to_virtio(struct device *_dev)
{
return container_of(_dev, struct virtio_device, dev);
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h
index 51865d0..ce63a2c 100644
--- a/include/linux/virtio_byteorder.h
+++ b/include/linux/virtio_byteorder.h
@@ -3,17 +3,21 @@
#include <linux/types.h>
#include <uapi/linux/virtio_types.h>
-/*
- * Low-level memory accessors for handling virtio in modern little endian and in
- * compatibility native endian format.
- */
+static inline bool virtio_legacy_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+ return true;
+#else
+ return false;
+#endif
+}
static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
{
if (little_endian)
return le16_to_cpu((__force __le16)val);
else
- return (__force u16)val;
+ return be16_to_cpu((__force __be16)val);
}
static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
@@ -21,7 +25,7 @@ static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
if (little_endian)
return (__force __virtio16)cpu_to_le16(val);
else
- return (__force __virtio16)val;
+ return (__force __virtio16)cpu_to_be16(val);
}
static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
@@ -29,7 +33,7 @@ static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
if (little_endian)
return le32_to_cpu((__force __le32)val);
else
- return (__force u32)val;
+ return be32_to_cpu((__force __be32)val);
}
static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
@@ -37,7 +41,7 @@ static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
if (little_endian)
return (__force __virtio32)cpu_to_le32(val);
else
- return (__force __virtio32)val;
+ return (__force __virtio32)cpu_to_be32(val);
}
static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
@@ -45,7 +49,7 @@ static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
if (little_endian)
return le64_to_cpu((__force __le64)val);
else
- return (__force u64)val;
+ return be64_to_cpu((__force __be64)val);
}
static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
@@ -53,7 +57,7 @@ static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
if (little_endian)
return (__force __virtio64)cpu_to_le64(val);
else
- return (__force __virtio64)val;
+ return (__force __virtio64)cpu_to_be64(val);
}
#endif /* _LINUX_VIRTIO_BYTEORDER */
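
With this change legacy (pre-1.0) devices are treated as native-endian rather than "pass through": on a big-endian kernel the accessors now swap through be*_to_cpu()/cpu_to_be*() instead of returning the raw value. A small sketch of reading a 16-bit field from a legacy ring:

#include <linux/virtio_byteorder.h>

static u16 example_read_legacy_u16(__virtio16 raw)
{
        /* A legacy device uses guest-native byte order, so pass the
         * result of virtio_legacy_is_little_endian(): true on LE
         * builds, false on BE builds. */
        return __virtio16_to_cpu(virtio_legacy_is_little_endian(), raw);
}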
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index ca3ed78..e5ce8ab 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -205,35 +205,41 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
return 0;
}
+static inline bool virtio_is_little_endian(struct virtio_device *vdev)
+{
+ return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+ virtio_legacy_is_little_endian();
+}
+
/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
- return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
- return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}
static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
- return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
- return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}
static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
- return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}
static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
- return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+ return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
/* Config space accessors. */
@@ -298,13 +304,6 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
} \
} while(0)
-static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
-{
- u8 ret;
- vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return ret;
-}
-
/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
unsigned int offset,
@@ -326,7 +325,6 @@ static inline void __virtio_cread_many(struct virtio_device *vdev,
} while (gen != old);
}
-
static inline void virtio_cread_bytes(struct virtio_device *vdev,
unsigned int offset,
void *buf, size_t len)
@@ -334,6 +332,13 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev,
__virtio_cread_many(vdev, offset, buf, len, 1);
}
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+ u8 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
static inline void virtio_cwrite8(struct virtio_device *vdev,
unsigned int offset, u8 val)
{
@@ -374,7 +379,6 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
unsigned int offset)
{
u64 ret;
- vdev->config->get(vdev, offset, &ret, sizeof(ret));
__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
return virtio64_to_cpu(vdev, (__force __virtio64)ret);
}
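
virtio_is_little_endian() folds the legacy case into the VIRTIO_F_VERSION_1 check, so the virtioNN accessors now behave correctly for legacy big-endian guests too. A sketch of reading a 16-bit config field; the config struct is hypothetical:

#include <linux/stddef.h>
#include <linux/virtio_config.h>

/* Hypothetical device config layout, for illustration only. */
struct example_virtio_cfg {
        __virtio16 max_queues;
};

static u16 example_read_max_queues(struct virtio_device *vdev)
{
        __virtio16 raw;

        vdev->config->get(vdev,
                          offsetof(struct example_virtio_cfg, max_queues),
                          &raw, sizeof(raw));
        /* LE conversion for VIRTIO_F_VERSION_1 devices, native byte
         * order otherwise. */
        return virtio16_to_cpu(vdev, raw);
}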
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h
index 5c7b6f0..c4b0968 100644
--- a/include/linux/virtio_mmio.h
+++ b/include/linux/virtio_mmio.h
@@ -51,23 +51,29 @@
/* Virtio vendor ID - Read Only */
#define VIRTIO_MMIO_VENDOR_ID 0x00c
-/* Bitmask of the features supported by the host
+/* Bitmask of the features supported by the device (host)
* (32 bits per set) - Read Only */
-#define VIRTIO_MMIO_HOST_FEATURES 0x010
+#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
-/* Host features set selector - Write Only */
-#define VIRTIO_MMIO_HOST_FEATURES_SEL 0x014
+/* Device (host) features set selector - Write Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
-/* Bitmask of features activated by the guest
+/* Bitmask of features activated by the driver (guest)
* (32 bits per set) - Write Only */
-#define VIRTIO_MMIO_GUEST_FEATURES 0x020
+#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
/* Activated features set selector - Write Only */
-#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
+#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
+
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
/* Guest's memory page size in bytes - Write Only */
#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
+#endif
+
+
/* Queue selector - Write Only */
#define VIRTIO_MMIO_QUEUE_SEL 0x030
@@ -77,12 +83,21 @@
/* Queue size for the currently selected queue - Write Only */
#define VIRTIO_MMIO_QUEUE_NUM 0x038
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
/* Used Ring alignment for the currently selected queue - Write Only */
#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
/* Guest's PFN for the currently selected queue - Read Write */
#define VIRTIO_MMIO_QUEUE_PFN 0x040
+#endif
+
+
+/* Ready bit for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_READY 0x044
+
/* Queue notifier - Write Only */
#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
@@ -95,6 +110,21 @@
/* Device status register - Read Write */
#define VIRTIO_MMIO_STATUS 0x070
+/* Selected queue's Descriptor Table address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080
+#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084
+
+/* Selected queue's Available Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090
+#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094
+
+/* Selected queue's Used Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
+#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+
+/* Configuration atomicity value */
+#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
+
/* The config space is defined by each driver as
* the per-driver configuration space - Read Write */
#define VIRTIO_MMIO_CONFIG 0x100
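
The added registers form the virtio 1.0 ("modern") MMIO layout: ring addresses are written as 64-bit values split into LOW/HIGH halves and the queue is armed through QUEUE_READY instead of the legacy page-size/PFN scheme. A sketch of programming one queue, assuming base is the ioremapped register window and the ring addresses are already device-visible:

#include <linux/io.h>
#include <linux/virtio_mmio.h>

static void example_mmio_setup_queue(void __iomem *base, u32 index,
                                     u64 desc, u64 avail, u64 used)
{
        writel(index, base + VIRTIO_MMIO_QUEUE_SEL);

        writel((u32)desc, base + VIRTIO_MMIO_QUEUE_DESC_LOW);
        writel((u32)(desc >> 32), base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
        writel((u32)avail, base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
        writel((u32)(avail >> 32), base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
        writel((u32)used, base + VIRTIO_MMIO_QUEUE_USED_LOW);
        writel((u32)(used >> 32), base + VIRTIO_MMIO_QUEUE_USED_HIGH);

        /* Marking the queue ready replaces the legacy QUEUE_PFN write. */
        writel(1, base + VIRTIO_MMIO_QUEUE_READY);
}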
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 67e06fe..8e50888 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -21,19 +21,20 @@
* actually quite cheap.
*/
-#ifdef CONFIG_SMP
static inline void virtio_mb(bool weak_barriers)
{
+#ifdef CONFIG_SMP
if (weak_barriers)
smp_mb();
else
+#endif
mb();
}
static inline void virtio_rmb(bool weak_barriers)
{
if (weak_barriers)
- smp_rmb();
+ dma_rmb();
else
rmb();
}
@@ -41,26 +42,10 @@ static inline void virtio_rmb(bool weak_barriers)
static inline void virtio_wmb(bool weak_barriers)
{
if (weak_barriers)
- smp_wmb();
+ dma_wmb();
else
wmb();
}
-#else
-static inline void virtio_mb(bool weak_barriers)
-{
- mb();
-}
-
-static inline void virtio_rmb(bool weak_barriers)
-{
- rmb();
-}
-
-static inline void virtio_wmb(bool weak_barriers)
-{
- wmb();
-}
-#endif
struct virtio_device;
struct virtqueue;
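
The SMP/UP split is gone: the CONFIG_SMP distinction only matters for virtio_mb(), and weak barriers now map to dma_rmb()/dma_wmb(), which order accesses to coherent memory shared with the other side and are cheaper than full rmb()/wmb() on several architectures. A sketch of the usual publish pattern:

#include <linux/virtio_ring.h>

static void example_publish_avail_idx(struct vring_avail *avail,
                                      __virtio16 new_idx,
                                      bool weak_barriers)
{
        /* Order the descriptor and ring updates before the index
         * becomes visible; with weak_barriers this is now dma_wmb(). */
        virtio_wmb(weak_barriers);
        avail->idx = new_idx;
}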
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index b87696f..0ec5983 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -16,6 +16,8 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
+#define VM_NO_GUARD 0x00000040 /* don't add guard page */
+#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -75,7 +77,9 @@ extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, const void *caller);
+ pgprot_t prot, unsigned long vm_flags, int node,
+ const void *caller);
+
extern void vfree(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
@@ -96,8 +100,12 @@ void vmalloc_sync_all(void);
static inline size_t get_vm_area_size(const struct vm_struct *area)
{
- /* return actual size without guard page */
- return area->size - PAGE_SIZE;
+ if (!(area->flags & VM_NO_GUARD))
+ /* return actual size without guard page */
+ return area->size - PAGE_SIZE;
+ else
+ return area->size;
+
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
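
The extra vm_flags argument lets callers such as KASAN request a mapping without the trailing guard page (VM_NO_GUARD), and get_vm_area_size() above then reports the full area size. A sketch of an allocation that opts out of the guard page; the GFP and protection choices are only illustrative:

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/numa.h>

static void *example_vmalloc_no_guard(unsigned long size)
{
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                                    GFP_KERNEL, PAGE_KERNEL, VM_NO_GUARD,
                                    NUMA_NO_NODE,
                                    __builtin_return_address(0));
}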
diff --git a/include/linux/vme.h b/include/linux/vme.h
index 8cd6f19..c013135 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -120,6 +120,8 @@ void vme_free_consistent(struct vme_resource *, size_t, void *,
dma_addr_t);
size_t vme_get_size(struct vme_resource *);
+int vme_check_window(u32 aspace, unsigned long long vme_base,
+ unsigned long long size);
struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
@@ -137,6 +139,7 @@ ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
unsigned int, loff_t);
+int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma);
void vme_master_free(struct vme_resource *);
struct vme_resource *vme_dma_request(struct vme_dev *, u32);
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 5691f75..63df3a2 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
int mode);
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
- void *iov, size_t iov_size, int mode);
+ struct msghdr *msg, size_t iov_size, int mode);
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
struct msghdr *msg, size_t iov_size, int mode);
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index a3fa537..bc6c28d 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -226,33 +226,39 @@ static inline void vringh_notify(struct vringh *vrh)
vrh->notify(vrh);
}
+static inline bool vringh_is_little_endian(const struct vringh *vrh)
+{
+ return vrh->little_endian ||
+ virtio_legacy_is_little_endian();
+}
+
static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
- return __virtio16_to_cpu(vrh->little_endian, val);
+ return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}
static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
- return __cpu_to_virtio16(vrh->little_endian, val);
+ return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}
static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
- return __virtio32_to_cpu(vrh->little_endian, val);
+ return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}
static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
- return __cpu_to_virtio32(vrh->little_endian, val);
+ return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}
static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
- return __virtio64_to_cpu(vrh->little_endian, val);
+ return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}
static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
- return __cpu_to_virtio64(vrh->little_endian, val);
+ return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
#endif /* _LINUX_VRINGH_H */
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 057db7d..f38c10b 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -21,10 +21,6 @@
#ifndef VT_BUF_HAVE_RW
#define scr_writew(val, addr) (*(addr) = (val))
#define scr_readw(addr) (*(addr))
-#define scr_memcpyw(d, s, c) memcpy(d, s, c)
-#define scr_memmovew(d, s, c) memmove(d, s, c)
-#define VT_BUF_HAVE_MEMCPYW
-#define VT_BUF_HAVE_MEMMOVEW
#endif
#ifndef VT_BUF_HAVE_MEMSETW
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2232ed1..1e1bf9f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -267,6 +267,21 @@ do { \
__wait_event(wq, condition); \
} while (0)
+#define __io_wait_event(wq, condition) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ io_schedule())
+
+/*
+ * io_wait_event() -- like wait_event() but with io_schedule()
+ */
+#define io_wait_event(wq, condition) \
+do { \
+ might_sleep(); \
+ if (condition) \
+ break; \
+ __io_wait_event(wq, condition); \
+} while (0)
+
#define __wait_event_freezable(wq, condition) \
___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
schedule(); try_to_freeze())
@@ -343,6 +358,19 @@ do { \
__ret; \
})
+#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
+ cmd1; schedule(); cmd2)
+/*
+ * Just like wait_event_cmd(), except it sets exclusive flag
+ */
+#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \
+} while (0)
+
#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
cmd1; schedule(); cmd2)
@@ -363,7 +391,6 @@ do { \
*/
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
- might_sleep(); \
if (condition) \
break; \
__wait_event_cmd(wq, condition, cmd1, cmd2); \
@@ -955,7 +982,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *);
* on that signal.
*/
static inline int
-wait_on_bit(void *word, int bit, unsigned mode)
+wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -980,7 +1007,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
* on that signal.
*/
static inline int
-wait_on_bit_io(void *word, int bit, unsigned mode)
+wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -991,6 +1018,33 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
}
/**
+ * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ * @timeout: timeout, in jiffies
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared. This is similar to wait_on_bit(), except also takes a
+ * timeout parameter.
+ *
+ * Returned value will be zero if the bit was cleared before the
+ * @timeout elapsed, or non-zero if the @timeout elapsed or process
+ * received a signal and the mode permitted wakeup on that signal.
+ */
+static inline int
+wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
+ unsigned long timeout)
+{
+ might_sleep();
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_timeout(word, bit,
+ bit_wait_timeout,
+ mode, timeout);
+}
+
+/**
* wait_on_bit_action - wait for a bit to be cleared
* @word: the word being waited on, a kernel virtual address
* @bit: the bit of the word being waited on
@@ -1007,7 +1061,8 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
* on that signal.
*/
static inline int
-wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
+ unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
@@ -1035,7 +1090,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock(void *word, int bit, unsigned mode)
+wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
@@ -1059,7 +1114,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
@@ -1085,7 +1140,8 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
* the @mode allows that signal to wake the process.
*/
static inline int
-wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
+ unsigned mode)
{
might_sleep();
if (!test_and_set_bit(bit, word))
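
Besides tightening the bit-word type from void * to unsigned long *, this hunk adds wait_on_bit_timeout(). A small sketch of waiting for a driver-private busy bit with a bounded sleep; MY_BUSY_BIT and the flags word are hypothetical:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

#define MY_BUSY_BIT     0       /* hypothetical flag bit */

static int example_wait_not_busy(unsigned long *flags)
{
        /* 0 if the bit cleared in time, non-zero if the timeout
         * elapsed first. */
        return wait_on_bit_timeout(flags, MY_BUSY_BIT, TASK_UNINTERRUPTIBLE,
                                   msecs_to_jiffies(500));
}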
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 395b70e..f47fead 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -65,6 +65,8 @@ struct watchdog_ops {
* @driver-data:Pointer to the drivers private data.
* @lock: Lock for watchdog core internal use only.
* @status: Field that contains the devices internal status bits.
+ * @deferred: entry in wtd_deferred_reg_list which is used to
+ * register early initialized watchdogs.
*
* The watchdog_device structure contains all information about a
* watchdog timer device.
@@ -95,6 +97,7 @@ struct watchdog_device {
#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */
#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */
#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
+ struct list_head deferred;
};
#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
@@ -137,4 +140,12 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+void watchdog_nmi_disable_all(void);
+void watchdog_nmi_enable_all(void);
+#else
+static inline void watchdog_nmi_disable_all(void) {}
+static inline void watchdog_nmi_enable_all(void) {}
+#endif
+
#endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
index a9c723b..95704cd 100644
--- a/include/linux/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -26,28 +26,6 @@
#include <linux/err.h>
-/* Reference clock values */
-enum {
- WL12XX_REFCLOCK_19 = 0, /* 19.2 MHz */
- WL12XX_REFCLOCK_26 = 1, /* 26 MHz */
- WL12XX_REFCLOCK_38 = 2, /* 38.4 MHz */
- WL12XX_REFCLOCK_52 = 3, /* 52 MHz */
- WL12XX_REFCLOCK_38_XTAL = 4, /* 38.4 MHz, XTAL */
- WL12XX_REFCLOCK_26_XTAL = 5, /* 26 MHz, XTAL */
-};
-
-/* TCXO clock values */
-enum {
- WL12XX_TCXOCLOCK_19_2 = 0, /* 19.2MHz */
- WL12XX_TCXOCLOCK_26 = 1, /* 26 MHz */
- WL12XX_TCXOCLOCK_38_4 = 2, /* 38.4MHz */
- WL12XX_TCXOCLOCK_52 = 3, /* 52 MHz */
- WL12XX_TCXOCLOCK_16_368 = 4, /* 16.368 MHz */
- WL12XX_TCXOCLOCK_32_736 = 5, /* 32.736 MHz */
- WL12XX_TCXOCLOCK_16_8 = 6, /* 16.8 MHz */
- WL12XX_TCXOCLOCK_33_6 = 7, /* 33.6 MHz */
-};
-
struct wl1251_platform_data {
int power_gpio;
/* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
@@ -55,23 +33,8 @@ struct wl1251_platform_data {
bool use_eeprom;
};
-struct wl12xx_platform_data {
- int irq;
- int board_ref_clock;
- int board_tcxo_clock;
- unsigned long platform_quirks;
- bool pwr_in_suspend;
-};
-
-/* Platform does not support level trigger interrupts */
-#define WL12XX_PLATFORM_QUIRK_EDGE_IRQ BIT(0)
-
#ifdef CONFIG_WILINK_PLATFORM_DATA
-int wl12xx_set_platform_data(const struct wl12xx_platform_data *data);
-
-struct wl12xx_platform_data *wl12xx_get_platform_data(void);
-
int wl1251_set_platform_data(const struct wl1251_platform_data *data);
struct wl1251_platform_data *wl1251_get_platform_data(void);
@@ -79,18 +42,6 @@ struct wl1251_platform_data *wl1251_get_platform_data(void);
#else
static inline
-int wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
-{
- return -ENOSYS;
-}
-
-static inline
-struct wl12xx_platform_data *wl12xx_get_platform_data(void)
-{
- return ERR_PTR(-ENODATA);
-}
-
-static inline
int wl1251_set_platform_data(const struct wl1251_platform_data *data)
{
return -ENOSYS;
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b996e6cd..738b30b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -70,7 +70,8 @@ enum {
/* data contains off-queue information when !WORK_STRUCT_PWQ */
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
- WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
+ __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
+ WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
/*
* When a work item is off queue, its high bits point to the last
@@ -220,14 +221,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
#define INIT_WORK(_work, _func) \
- do { \
- __INIT_WORK((_work), (_func), 0); \
- } while (0)
+ __INIT_WORK((_work), (_func), 0)
#define INIT_WORK_ONSTACK(_work, _func) \
- do { \
- __INIT_WORK((_work), (_func), 1); \
- } while (0)
+ __INIT_WORK((_work), (_func), 1)
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
@@ -427,6 +424,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
+int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
@@ -437,7 +435,6 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
-extern void flush_scheduled_work(void);
extern int schedule_on_each_cpu(work_func_t func);
@@ -457,6 +454,7 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
+extern void show_workqueue_state(void);
/**
* queue_work - queue work on a workqueue
@@ -533,6 +531,35 @@ static inline bool schedule_work(struct work_struct *work)
}
/**
+ * flush_scheduled_work - ensure that any scheduled work has run to completion.
+ *
+ * Forces execution of the kernel-global workqueue and blocks until its
+ * completion.
+ *
+ * Think twice before calling this function! It's very easy to get into
+ * trouble if you don't take great care. Either of the following situations
+ * will lead to deadlock:
+ *
+ * One of the work items currently on the workqueue needs to acquire
+ * a lock held by your code or its caller.
+ *
+ * Your code is running in the context of a work routine.
+ *
+ * They will be detected by lockdep when they occur, but the first might not
+ * occur very often. It depends on what work items are on the workqueue and
+ * what locks they need, which you have no control over.
+ *
+ * In most situations flushing the entire workqueue is overkill; you merely
+ * need to know that a particular work item isn't queued and isn't running.
+ * In such cases you should use cancel_delayed_work_sync() or
+ * cancel_work_sync() instead.
+ */
+static inline void flush_scheduled_work(void)
+{
+ flush_workqueue(system_wq);
+}
+
+/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
* @cpu: cpu to use
* @dwork: job to be done
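
flush_scheduled_work() is now an inline wrapper around flush_workqueue(system_wq), with the comment above spelling out why draining the whole system workqueue is rarely what a driver wants. A sketch of the narrower teardown pattern it recommends:

#include <linux/workqueue.h>

struct my_ctx {
        struct work_struct update_work;
};

static void example_teardown(struct my_ctx *ctx)
{
        /* Wait only for our own item instead of flushing system_wq,
         * avoiding the deadlock scenarios described above. */
        cancel_work_sync(&ctx->update_work);
}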
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 0004833..b333c94 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,6 +7,8 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
+#include <linux/flex_proportions.h>
+#include <linux/backing-dev-defs.h>
DECLARE_PER_CPU(int, dirty_throttle_leaks);
@@ -84,18 +86,95 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct bdi_writeback *wb; /* wb this writeback is issued under */
+ struct inode *inode; /* inode being written out */
+
+ /* foreign inode detection, see wbc_detach_inode() */
+ int wb_id; /* current wb id */
+ int wb_lcand_id; /* last foreign candidate wb id */
+ int wb_tcand_id; /* this foreign candidate wb id */
+ size_t wb_bytes; /* bytes written by current wb */
+ size_t wb_lcand_bytes; /* bytes written by last candidate */
+ size_t wb_tcand_bytes; /* bytes written by this candidate */
+#endif
};
/*
+ * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
+ * and are measured against each other in. There always is one global
+ * domain, global_wb_domain, that every wb in the system is a member of.
+ * This allows measuring the relative bandwidth of each wb to distribute
+ * dirtyable memory accordingly.
+ */
+struct wb_domain {
+ spinlock_t lock;
+
+ /*
+ * Scale the writeback cache size proportional to the relative
+ * writeout speed.
+ *
+ * We do this by keeping a floating proportion between BDIs, based
+ * on page writeback completions [end_page_writeback()]. Those
+ * devices that write out pages fastest will get the larger share,
+ * while the slower will get a smaller share.
+ *
+ * We use page writeout completions because we are interested in
+ * getting rid of dirty pages. Having them written out is the
+ * primary goal.
+ *
+ * We introduce a concept of time, a period over which we measure
+ * these events, because demand can/will vary over time. The length
+ * of this period itself is measured in page writeback completions.
+ */
+ struct fprop_global completions;
+ struct timer_list period_timer; /* timer for aging of completions */
+ unsigned long period_time;
+
+ /*
+ * The dirtyable memory and dirty threshold could be suddenly
+ * knocked down by a large amount (eg. on the startup of KVM in a
+ * swapless system). This may throw the system into deep dirty
+ * exceeded state and throttle heavy/light dirtiers alike. To
+ * retain good responsiveness, maintain global_dirty_limit for
+ * tracking slowly down to the knocked down dirty threshold.
+ *
+ * Both fields are protected by ->lock.
+ */
+ unsigned long dirty_limit_tstamp;
+ unsigned long dirty_limit;
+};
+
+/**
+ * wb_domain_size_changed - memory available to a wb_domain has changed
+ * @dom: wb_domain of interest
+ *
+ * This function should be called when the amount of memory available to
+ * @dom has changed. It resets @dom's dirty limit parameters to prevent
+ * the past values which don't match the current configuration from skewing
+ * dirty throttling. Without this, when memory size of a wb_domain is
+ * greatly reduced, the dirty throttling logic may allow too many pages to
+ * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
+ * that situation.
+ */
+static inline void wb_domain_size_changed(struct wb_domain *dom)
+{
+ spin_lock(&dom->lock);
+ dom->dirty_limit_tstamp = jiffies;
+ dom->dirty_limit = 0;
+ spin_unlock(&dom->lock);
+}
+
+/*
* fs/fs-writeback.c
*/
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
-int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
-int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
- enum wb_reason reason);
+bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+ enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
@@ -107,6 +186,123 @@ static inline void wait_on_inode(struct inode *inode)
wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+#include <linux/cgroup.h>
+#include <linux/bio.h>
+
+void __inode_attach_wb(struct inode *inode, struct page *page);
+void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ struct inode *inode)
+ __releases(&inode->i_lock);
+void wbc_detach_inode(struct writeback_control *wbc);
+void wbc_account_io(struct writeback_control *wbc, struct page *page,
+ size_t bytes);
+
+/**
+ * inode_attach_wb - associate an inode with its wb
+ * @inode: inode of interest
+ * @page: page being dirtied (may be NULL)
+ *
+ * If @inode doesn't have its wb, associate it with the wb matching the
+ * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o
+ * @inode->i_lock.
+ */
+static inline void inode_attach_wb(struct inode *inode, struct page *page)
+{
+ if (!inode->i_wb)
+ __inode_attach_wb(inode, page);
+}
+
+/**
+ * inode_detach_wb - disassociate an inode from its wb
+ * @inode: inode of interest
+ *
+ * @inode is being freed. Detach from its wb.
+ */
+static inline void inode_detach_wb(struct inode *inode)
+{
+ if (inode->i_wb) {
+ wb_put(inode->i_wb);
+ inode->i_wb = NULL;
+ }
+}
+
+/**
+ * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * This function is to be used by __filemap_fdatawrite_range(), which is an
+ * alternative entry point into writeback code, and first ensures @inode is
+ * associated with a bdi_writeback and attaches it to @wbc.
+ */
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ inode_attach_wb(inode, NULL);
+ wbc_attach_and_unlock_inode(wbc, inode);
+}
+
+/**
+ * wbc_init_bio - writeback specific initialization of bio
+ * @wbc: writeback_control for the writeback in progress
+ * @bio: bio to be initialized
+ *
+ * @bio is a part of the writeback in progress controlled by @wbc. Perform
+ * writeback specific initialization. This is used to apply the cgroup
+ * writeback context.
+ */
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+ /*
+ * pageout() path doesn't attach @wbc to the inode being written
+ * out. This is intentional as we don't want the function to block
+ * behind a slow cgroup. Ultimately, we want pageout() to kick off
+ * regular writeback instead of writing things out itself.
+ */
+ if (wbc->wb)
+ bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+}
+
+#else /* CONFIG_CGROUP_WRITEBACK */
+
+static inline void inode_attach_wb(struct inode *inode, struct page *page)
+{
+}
+
+static inline void inode_detach_wb(struct inode *inode)
+{
+}
+
+static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+ struct inode *inode)
+ __releases(&inode->i_lock)
+{
+ spin_unlock(&inode->i_lock);
+}
+
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode)
+{
+}
+
+static inline void wbc_detach_inode(struct writeback_control *wbc)
+{
+}
+
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+}
+
+static inline void wbc_account_io(struct writeback_control *wbc,
+ struct page *page, size_t bytes)
+{
+}
+
+#endif /* CONFIG_CGROUP_WRITEBACK */
+
/*
* mm/page-writeback.c
*/
@@ -120,8 +316,12 @@ static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
bool zone_dirty_ok(struct zone *zone);
+int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
+#ifdef CONFIG_CGROUP_WRITEBACK
+void wb_domain_exit(struct wb_domain *dom);
+#endif
-extern unsigned long global_dirty_limit;
+extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
extern int dirty_background_ratio;
@@ -130,6 +330,7 @@ extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
+extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;
@@ -146,25 +347,20 @@ extern int dirty_ratio_handler(struct ctl_table *table, int write,
extern int dirty_bytes_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+int dirtytime_interval_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
struct ctl_table;
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
-unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
- unsigned long dirty);
-
-void __bdi_update_bandwidth(struct backing_dev_info *bdi,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
- unsigned long bdi_thresh,
- unsigned long bdi_dirty,
- unsigned long start_time);
+unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
+void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
+bool wb_over_bg_thresh(struct bdi_writeback *wb);
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data);
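
The cgroup-writeback hooks above let a writeback pass be attributed to the bdi_writeback (and hence the cgroup) that owns the inode. A sketch of how a simple write path might bracket the work; my_write_one_page() is hypothetical and error handling is elided:

#include <linux/writeback.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

int my_write_one_page(struct page *page, struct writeback_control *wbc);

static int example_write_inode_page(struct inode *inode,
                                    struct writeback_control *wbc,
                                    struct page *page)
{
        int ret;

        /* Associates the inode with a wb and attaches it to @wbc. */
        wbc_attach_fdatawrite_inode(wbc, inode);

        ret = my_write_one_page(page, wbc);
        /* Feed the foreign-inode detection with the bytes issued. */
        wbc_account_io(wbc, page, PAGE_SIZE);

        wbc_detach_inode(wbc);
        return ret;
}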
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index f14bd75..d30eff3 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -36,7 +36,8 @@ enum zpool_mapmode {
ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
};
-struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops);
+struct zpool *zpool_create_pool(char *type, char *name,
+ gfp_t gfp, struct zpool_ops *ops);
char *zpool_get_type(struct zpool *pool);
@@ -80,7 +81,8 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;
- void *(*create)(gfp_t gfp, struct zpool_ops *ops);
+ void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops,
+ struct zpool *zpool);
void (*destroy)(void *pool);
int (*malloc)(void *pool, size_t size, gfp_t gfp,
@@ -101,6 +103,4 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver);
-int zpool_evict(void *pool, unsigned long handle);
-
#endif
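
zpool_create_pool() now takes a name so the backing allocator can identify the pool (zsmalloc, for instance, uses it for its per-pool debugfs statistics). A small creation sketch; the evict callback here does nothing and is only illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/zpool.h>

static int example_evict(struct zpool *pool, unsigned long handle)
{
        return -EINVAL;         /* no writeback in this sketch */
}

static struct zpool_ops example_ops = {
        .evict = example_evict,
};

static struct zpool *example_create(void)
{
        /* "zsmalloc" selects the allocator; "example" names the pool. */
        return zpool_create_pool("zsmalloc", "example", GFP_KERNEL,
                                 &example_ops);
}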
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 05c2147..1338190 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -36,7 +36,7 @@ enum zs_mapmode {
struct zs_pool;
-struct zs_pool *zs_create_pool(gfp_t flags);
+struct zs_pool *zs_create_pool(char *name, gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);
unsigned long zs_malloc(struct zs_pool *pool, size_t size);
@@ -47,5 +47,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
unsigned long zs_get_total_pages(struct zs_pool *pool);
+unsigned long zs_compact(struct zs_pool *pool);
#endif
diff --git a/include/media/adp1653.h b/include/media/adp1653.h
index 1d9b48a..9779c85 100644
--- a/include/media/adp1653.h
+++ b/include/media/adp1653.h
@@ -100,9 +100,11 @@ struct adp1653_platform_data {
int (*power)(struct v4l2_subdev *sd, int on);
u32 max_flash_timeout; /* flash light timeout in us */
- u32 max_flash_intensity; /* led intensity, flash mode */
- u32 max_torch_intensity; /* led intensity, torch mode */
- u32 max_indicator_intensity; /* indicator led intensity */
+ u32 max_flash_intensity; /* led intensity, flash mode, mA */
+ u32 max_torch_intensity; /* led intensity, torch mode, mA */
+ u32 max_indicator_intensity; /* indicator led intensity, uA */
+
+ struct gpio_desc *enable_gpio; /* for device-tree based boot */
};
#define to_adp1653_flash(sd) container_of(sd, struct adp1653_flash, subdev)
diff --git a/include/media/adv7511.h b/include/media/adv7511.h
index bb78bed..d83b91d 100644
--- a/include/media/adv7511.h
+++ b/include/media/adv7511.h
@@ -40,9 +40,10 @@ struct adv7511_cec_arg {
};
struct adv7511_platform_data {
- uint8_t i2c_edid;
- uint8_t i2c_cec;
- uint32_t cec_clk;
+ u8 i2c_edid;
+ u8 i2c_cec;
+ u8 i2c_pktmem;
+ u32 cec_clk;
};
#endif
diff --git a/include/media/adv7604.h b/include/media/adv7604.h
index aa1c447..a913859 100644
--- a/include/media/adv7604.h
+++ b/include/media/adv7604.h
@@ -47,16 +47,16 @@ enum adv7604_bus_order {
};
/* Input Color Space (IO register 0x02, [7:4]) */
-enum adv7604_inp_color_space {
- ADV7604_INP_COLOR_SPACE_LIM_RGB = 0,
- ADV7604_INP_COLOR_SPACE_FULL_RGB = 1,
- ADV7604_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
- ADV7604_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
- ADV7604_INP_COLOR_SPACE_XVYCC_601 = 4,
- ADV7604_INP_COLOR_SPACE_XVYCC_709 = 5,
- ADV7604_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
- ADV7604_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
- ADV7604_INP_COLOR_SPACE_AUTO = 0xf,
+enum adv76xx_inp_color_space {
+ ADV76XX_INP_COLOR_SPACE_LIM_RGB = 0,
+ ADV76XX_INP_COLOR_SPACE_FULL_RGB = 1,
+ ADV76XX_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
+ ADV76XX_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
+ ADV76XX_INP_COLOR_SPACE_XVYCC_601 = 4,
+ ADV76XX_INP_COLOR_SPACE_XVYCC_709 = 5,
+ ADV76XX_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
+ ADV76XX_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
+ ADV76XX_INP_COLOR_SPACE_AUTO = 0xf,
};
/* Select output format (IO register 0x03, [4:2]) */
@@ -66,38 +66,39 @@ enum adv7604_op_format_mode_sel {
ADV7604_OP_FORMAT_MODE2 = 0x08,
};
-enum adv7604_drive_strength {
- ADV7604_DR_STR_MEDIUM_LOW = 1,
- ADV7604_DR_STR_MEDIUM_HIGH = 2,
- ADV7604_DR_STR_HIGH = 3,
+enum adv76xx_drive_strength {
+ ADV76XX_DR_STR_MEDIUM_LOW = 1,
+ ADV76XX_DR_STR_MEDIUM_HIGH = 2,
+ ADV76XX_DR_STR_HIGH = 3,
};
-enum adv7604_int1_config {
- ADV7604_INT1_CONFIG_OPEN_DRAIN,
- ADV7604_INT1_CONFIG_ACTIVE_LOW,
- ADV7604_INT1_CONFIG_ACTIVE_HIGH,
- ADV7604_INT1_CONFIG_DISABLED,
+/* INT1 Configuration (IO register 0x40, [1:0]) */
+enum adv76xx_int1_config {
+ ADV76XX_INT1_CONFIG_OPEN_DRAIN,
+ ADV76XX_INT1_CONFIG_ACTIVE_LOW,
+ ADV76XX_INT1_CONFIG_ACTIVE_HIGH,
+ ADV76XX_INT1_CONFIG_DISABLED,
};
-enum adv7604_page {
- ADV7604_PAGE_IO,
+enum adv76xx_page {
+ ADV76XX_PAGE_IO,
ADV7604_PAGE_AVLINK,
- ADV7604_PAGE_CEC,
- ADV7604_PAGE_INFOFRAME,
+ ADV76XX_PAGE_CEC,
+ ADV76XX_PAGE_INFOFRAME,
ADV7604_PAGE_ESDP,
ADV7604_PAGE_DPP,
- ADV7604_PAGE_AFE,
- ADV7604_PAGE_REP,
- ADV7604_PAGE_EDID,
- ADV7604_PAGE_HDMI,
- ADV7604_PAGE_TEST,
- ADV7604_PAGE_CP,
+ ADV76XX_PAGE_AFE,
+ ADV76XX_PAGE_REP,
+ ADV76XX_PAGE_EDID,
+ ADV76XX_PAGE_HDMI,
+ ADV76XX_PAGE_TEST,
+ ADV76XX_PAGE_CP,
ADV7604_PAGE_VDP,
- ADV7604_PAGE_MAX,
+ ADV76XX_PAGE_MAX,
};
/* Platform dependent definition */
-struct adv7604_platform_data {
+struct adv76xx_platform_data {
/* DIS_PWRDNB: 1 if the PWRDNB pin is unused and unconnected */
unsigned disable_pwrdnb:1;
@@ -116,7 +117,7 @@ struct adv7604_platform_data {
enum adv7604_op_format_mode_sel op_format_mode_sel;
/* Configuration of the INT1 pin */
- enum adv7604_int1_config int1_config;
+ enum adv76xx_int1_config int1_config;
/* IO register 0x02 */
unsigned alt_gamma:1;
@@ -134,9 +135,9 @@ struct adv7604_platform_data {
unsigned inv_llc_pol:1;
/* IO register 0x14 */
- enum adv7604_drive_strength dr_str_data;
- enum adv7604_drive_strength dr_str_clk;
- enum adv7604_drive_strength dr_str_sync;
+ enum adv76xx_drive_strength dr_str_data;
+ enum adv76xx_drive_strength dr_str_clk;
+ enum adv76xx_drive_strength dr_str_sync;
/* IO register 0x30 */
unsigned output_bus_lsb_to_msb:1;
@@ -145,11 +146,11 @@ struct adv7604_platform_data {
unsigned hdmi_free_run_mode;
/* i2c addresses: 0 == use default */
- u8 i2c_addresses[ADV7604_PAGE_MAX];
+ u8 i2c_addresses[ADV76XX_PAGE_MAX];
};
-enum adv7604_pad {
- ADV7604_PAD_HDMI_PORT_A = 0,
+enum adv76xx_pad {
+ ADV76XX_PAD_HDMI_PORT_A = 0,
ADV7604_PAD_HDMI_PORT_B = 1,
ADV7604_PAD_HDMI_PORT_C = 2,
ADV7604_PAD_HDMI_PORT_D = 3,
@@ -158,7 +159,7 @@ enum adv7604_pad {
/* The source pad is either 1 (ADV7611) or 6 (ADV7604) */
ADV7604_PAD_SOURCE = 6,
ADV7611_PAD_SOURCE = 1,
- ADV7604_PAD_MAX = 7,
+ ADV76XX_PAD_MAX = 7,
};
#define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE (V4L2_CID_DV_CLASS_BASE + 0x1000)
@@ -166,7 +167,6 @@ enum adv7604_pad {
#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002)
/* notify events */
-#define ADV7604_HOTPLUG 1
-#define ADV7604_FMT_CHANGE 2
+#define ADV76XX_HOTPLUG 1
#endif
diff --git a/include/media/adv7842.h b/include/media/adv7842.h
index 924cbb8..bc24970 100644
--- a/include/media/adv7842.h
+++ b/include/media/adv7842.h
@@ -30,14 +30,38 @@ enum adv7842_ain_sel {
ADV7842_AIN9_4_5_6_SYNC_2_1 = 4,
};
-/* Bus rotation and reordering (IO register 0x04, [7:5]) */
-enum adv7842_op_ch_sel {
- ADV7842_OP_CH_SEL_GBR = 0,
- ADV7842_OP_CH_SEL_GRB = 1,
- ADV7842_OP_CH_SEL_BGR = 2,
- ADV7842_OP_CH_SEL_RGB = 3,
- ADV7842_OP_CH_SEL_BRG = 4,
- ADV7842_OP_CH_SEL_RBG = 5,
+/*
+ * Bus rotation and reordering. This is used to specify component reordering on
+ * the board and describes the components order on the bus when the ADV7842
+ * outputs RGB.
+ */
+enum adv7842_bus_order {
+ ADV7842_BUS_ORDER_RGB, /* No operation */
+ ADV7842_BUS_ORDER_GRB, /* Swap 1-2 */
+ ADV7842_BUS_ORDER_RBG, /* Swap 2-3 */
+ ADV7842_BUS_ORDER_BGR, /* Swap 1-3 */
+ ADV7842_BUS_ORDER_BRG, /* Rotate right */
+ ADV7842_BUS_ORDER_GBR, /* Rotate left */
+};
+
+/* Input Color Space (IO register 0x02, [7:4]) */
+enum adv7842_inp_color_space {
+ ADV7842_INP_COLOR_SPACE_LIM_RGB = 0,
+ ADV7842_INP_COLOR_SPACE_FULL_RGB = 1,
+ ADV7842_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
+ ADV7842_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
+ ADV7842_INP_COLOR_SPACE_XVYCC_601 = 4,
+ ADV7842_INP_COLOR_SPACE_XVYCC_709 = 5,
+ ADV7842_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
+ ADV7842_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
+ ADV7842_INP_COLOR_SPACE_AUTO = 0xf,
+};
+
+/* Select output format (IO register 0x03, [4:2]) */
+enum adv7842_op_format_mode_sel {
+ ADV7842_OP_FORMAT_MODE0 = 0x00,
+ ADV7842_OP_FORMAT_MODE1 = 0x04,
+ ADV7842_OP_FORMAT_MODE2 = 0x08,
};
/* Mode of operation */
@@ -61,44 +85,6 @@ enum adv7842_vid_std_select {
ADV7842_HDMI_COMP_VID_STD_HD_1250P = 0x1e,
};
-/* Input Color Space (IO register 0x02, [7:4]) */
-enum adv7842_inp_color_space {
- ADV7842_INP_COLOR_SPACE_LIM_RGB = 0,
- ADV7842_INP_COLOR_SPACE_FULL_RGB = 1,
- ADV7842_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
- ADV7842_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
- ADV7842_INP_COLOR_SPACE_XVYCC_601 = 4,
- ADV7842_INP_COLOR_SPACE_XVYCC_709 = 5,
- ADV7842_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
- ADV7842_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
- ADV7842_INP_COLOR_SPACE_AUTO = 0xf,
-};
-
-/* Select output format (IO register 0x03, [7:0]) */
-enum adv7842_op_format_sel {
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_8 = 0x00,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_10 = 0x01,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE0 = 0x02,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE1 = 0x06,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE2 = 0x0a,
- ADV7842_OP_FORMAT_SEL_DDR_422_8 = 0x20,
- ADV7842_OP_FORMAT_SEL_DDR_422_10 = 0x21,
- ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE0 = 0x22,
- ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE1 = 0x23,
- ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE2 = 0x24,
- ADV7842_OP_FORMAT_SEL_SDR_444_24 = 0x40,
- ADV7842_OP_FORMAT_SEL_SDR_444_30 = 0x41,
- ADV7842_OP_FORMAT_SEL_SDR_444_36_MODE0 = 0x42,
- ADV7842_OP_FORMAT_SEL_DDR_444_24 = 0x60,
- ADV7842_OP_FORMAT_SEL_DDR_444_30 = 0x61,
- ADV7842_OP_FORMAT_SEL_DDR_444_36 = 0x62,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_16 = 0x80,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_20 = 0x81,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE0 = 0x82,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE1 = 0x86,
- ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE2 = 0x8a,
-};
-
enum adv7842_select_input {
ADV7842_SELECT_HDMI_PORT_A,
ADV7842_SELECT_HDMI_PORT_B,
@@ -117,35 +103,35 @@ enum adv7842_drive_strength {
struct adv7842_sdp_csc_coeff {
bool manual;
- uint16_t scaling;
- uint16_t A1;
- uint16_t A2;
- uint16_t A3;
- uint16_t A4;
- uint16_t B1;
- uint16_t B2;
- uint16_t B3;
- uint16_t B4;
- uint16_t C1;
- uint16_t C2;
- uint16_t C3;
- uint16_t C4;
+ u16 scaling;
+ u16 A1;
+ u16 A2;
+ u16 A3;
+ u16 A4;
+ u16 B1;
+ u16 B2;
+ u16 B3;
+ u16 B4;
+ u16 C1;
+ u16 C2;
+ u16 C3;
+ u16 C4;
};
struct adv7842_sdp_io_sync_adjustment {
bool adjust;
- uint16_t hs_beg;
- uint16_t hs_width;
- uint16_t de_beg;
- uint16_t de_end;
- uint8_t vs_beg_o;
- uint8_t vs_beg_e;
- uint8_t vs_end_o;
- uint8_t vs_end_e;
- uint8_t de_v_beg_o;
- uint8_t de_v_beg_e;
- uint8_t de_v_end_o;
- uint8_t de_v_end_e;
+ u16 hs_beg;
+ u16 hs_width;
+ u16 de_beg;
+ u16 de_end;
+ u8 vs_beg_o;
+ u8 vs_beg_e;
+ u8 vs_end_o;
+ u8 vs_end_e;
+ u8 de_v_beg_o;
+ u8 de_v_beg_e;
+ u8 de_v_end_o;
+ u8 de_v_end_e;
};
/* Platform dependent definition */
@@ -163,7 +149,10 @@ struct adv7842_platform_data {
enum adv7842_ain_sel ain_sel;
/* Bus rotation and reordering */
- enum adv7842_op_ch_sel op_ch_sel;
+ enum adv7842_bus_order bus_order;
+
+ /* Select output format mode */
+ enum adv7842_op_format_mode_sel op_format_mode_sel;
/* Default mode */
enum adv7842_mode mode;
@@ -174,20 +163,15 @@ struct adv7842_platform_data {
/* Video standard */
enum adv7842_vid_std_select vid_std_select;
- /* Select output format */
- enum adv7842_op_format_sel op_format_sel;
-
/* IO register 0x02 */
unsigned alt_gamma:1;
unsigned op_656_range:1;
- unsigned rgb_out:1;
unsigned alt_data_sat:1;
/* IO register 0x05 */
unsigned blank_data:1;
unsigned insert_av_codes:1;
unsigned replicate_av_codes:1;
- unsigned invert_cbcr:1;
/* IO register 0x30 */
unsigned output_bus_lsb_to_msb:1;
@@ -246,9 +230,6 @@ struct adv7842_platform_data {
#define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL (V4L2_CID_DV_CLASS_BASE + 0x1001)
#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002)
-/* notify events */
-#define ADV7842_FMT_CHANGE 1
-
/* custom ioctl, used to test the external RAM that's used by the
* deinterlacer. */
#define ADV7842_CMD_RAM_TEST _IO('V', BASE_VIDIOC_PRIVATE)
@@ -256,5 +237,6 @@ struct adv7842_platform_data {
#define ADV7842_EDID_PORT_A 0
#define ADV7842_EDID_PORT_B 1
#define ADV7842_EDID_PORT_VGA 2
+#define ADV7842_PAD_SOURCE 3
#endif
diff --git a/include/media/atmel-isi.h b/include/media/atmel-isi.h
index c2e5703..6008b09 100644
--- a/include/media/atmel-isi.h
+++ b/include/media/atmel-isi.h
@@ -59,6 +59,10 @@
#define ISI_CFG1_FRATE_DIV_MASK (7 << 8)
#define ISI_CFG1_DISCR (1 << 11)
#define ISI_CFG1_FULL_MODE (1 << 12)
+/* Definition for THMASK(ISI_V2) */
+#define ISI_CFG1_THMASK_BEATS_4 (0 << 13)
+#define ISI_CFG1_THMASK_BEATS_8 (1 << 13)
+#define ISI_CFG1_THMASK_BEATS_16 (2 << 13)
/* Bitfields in CFG2 */
#define ISI_CFG2_GRAYSCALE (1 << 13)
diff --git a/include/media/davinci/vpfe_capture.h b/include/media/davinci/vpfe_capture.h
index 288772e..28bcd71 100644
--- a/include/media/davinci/vpfe_capture.h
+++ b/include/media/davinci/vpfe_capture.h
@@ -102,7 +102,7 @@ struct vpfe_config {
struct vpfe_device {
/* V4l2 specific parameters */
/* Identifies video device for this channel */
- struct video_device *video_dev;
+ struct video_device video_dev;
/* sub devices */
struct v4l2_subdev **sd;
/* vpfe cfg */
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index e004591..0c003d8 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -44,6 +44,15 @@ struct media_pad {
unsigned long flags; /* Pad flags (MEDIA_PAD_FL_*) */
};
+/**
+ * struct media_entity_operations - Media entity operations
+ * @link_setup: Notify the entity of link changes. The operation can
+ * return an error, in which case link setup will be
+ * cancelled. Optional.
+ * @link_validate: Return whether a link is valid from the entity point of
+ * view. The media_entity_pipeline_start() function
+ * validates all links by calling this operation. Optional.
+ */
struct media_entity_operations {
int (*link_setup)(struct media_entity *entity,
const struct media_pad *local,
@@ -87,17 +96,7 @@ struct media_entity {
struct {
u32 major;
u32 minor;
- } v4l;
- struct {
- u32 major;
- u32 minor;
- } fb;
- struct {
- u32 card;
- u32 device;
- u32 subdevice;
- } alsa;
- int dvb;
+ } dev;
/* Sub-device specifications */
/* Nothing needed yet */
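
The newly documented media_entity_operations hooks are optional driver callbacks. As a rough sketch (not taken from this patch; the my_bridge names are invented), a bridge driver might implement link_setup like this, using the full signature from media-entity.h:

#include <linux/kernel.h>
#include <media/media-entity.h>

struct my_bridge {
	struct media_entity entity;
	bool sink_connected;		/* driver-side view of the link state */
};

static int my_bridge_link_setup(struct media_entity *entity,
				const struct media_pad *local,
				const struct media_pad *remote, u32 flags)
{
	struct my_bridge *bridge = container_of(entity, struct my_bridge, entity);

	/* Mirror the new state; returning an error cancels the link change. */
	if (local->flags & MEDIA_PAD_FL_SINK)
		bridge->sink_connected = !!(flags & MEDIA_LNK_FL_ENABLED);

	return 0;
}

static const struct media_entity_operations my_bridge_entity_ops = {
	.link_setup = my_bridge_link_setup,
	/* .link_validate is optional and left unset here */
};
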
diff --git a/include/media/mt9p031.h b/include/media/mt9p031.h
index b1e63f2..1ba3612 100644
--- a/include/media/mt9p031.h
+++ b/include/media/mt9p031.h
@@ -5,12 +5,10 @@ struct v4l2_subdev;
/*
* struct mt9p031_platform_data - MT9P031 platform data
- * @reset: Chip reset GPIO (set to -1 if not used)
* @ext_freq: Input clock frequency
* @target_freq: Pixel clock frequency
*/
struct mt9p031_platform_data {
- int reset;
int ext_freq;
int target_freq;
};
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
index 398279d..048f8f9 100644
--- a/include/media/omap3isp.h
+++ b/include/media/omap3isp.h
@@ -45,7 +45,7 @@ enum {
};
/**
- * struct isp_parallel_platform_data - Parallel interface platform data
+ * struct isp_parallel_cfg - Parallel interface configuration
* @data_lane_shift: Data lane shifter
* ISP_LANE_SHIFT_0 - CAMEXT[13:0] -> CAM[13:0]
* ISP_LANE_SHIFT_2 - CAMEXT[13:2] -> CAM[11:0]
@@ -62,7 +62,7 @@ enum {
* @data_pol: Data polarity
* 0 - Normal, 1 - One's complement
*/
-struct isp_parallel_platform_data {
+struct isp_parallel_cfg {
unsigned int data_lane_shift:2;
unsigned int clk_pol:1;
unsigned int hs_pol:1;
@@ -105,7 +105,7 @@ struct isp_csiphy_lanes_cfg {
};
/**
- * struct isp_ccp2_platform_data - CCP2 interface platform data
+ * struct isp_ccp2_cfg - CCP2 interface configuration
* @strobe_clk_pol: Strobe/clock polarity
* 0 - Non Inverted, 1 - Inverted
* @crc: Enable the cyclic redundancy check
@@ -117,7 +117,7 @@ struct isp_csiphy_lanes_cfg {
* ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
* @vpclk_div: Video port output clock control
*/
-struct isp_ccp2_platform_data {
+struct isp_ccp2_cfg {
unsigned int strobe_clk_pol:1;
unsigned int crc:1;
unsigned int ccp2_mode:1;
@@ -127,39 +127,31 @@ struct isp_ccp2_platform_data {
};
/**
- * struct isp_csi2_platform_data - CSI2 interface platform data
+ * struct isp_csi2_cfg - CSI2 interface configuration
* @crc: Enable the cyclic redundancy check
- * @vpclk_div: Video port output clock control
*/
-struct isp_csi2_platform_data {
+struct isp_csi2_cfg {
unsigned crc:1;
- unsigned vpclk_div:2;
struct isp_csiphy_lanes_cfg lanecfg;
};
-struct isp_subdev_i2c_board_info {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
-};
-
-struct isp_v4l2_subdevs_group {
- struct isp_subdev_i2c_board_info *subdevs;
+struct isp_bus_cfg {
enum isp_interface_type interface;
union {
- struct isp_parallel_platform_data parallel;
- struct isp_ccp2_platform_data ccp2;
- struct isp_csi2_platform_data csi2;
+ struct isp_parallel_cfg parallel;
+ struct isp_ccp2_cfg ccp2;
+ struct isp_csi2_cfg csi2;
} bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
};
-struct isp_platform_xclk {
- const char *dev_id;
- const char *con_id;
+struct isp_platform_subdev {
+ struct i2c_board_info *board_info;
+ int i2c_adapter_id;
+ struct isp_bus_cfg *bus;
};
struct isp_platform_data {
- struct isp_platform_xclk xclks[2];
- struct isp_v4l2_subdevs_group *subdevs;
+ struct isp_platform_subdev *subdevs;
void (*set_constraints)(struct isp_device *isp, bool enable);
};
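
With the isp_v4l2_subdevs_group/isp_subdev_i2c_board_info pair folded into isp_platform_subdev plus a per-subdev isp_bus_cfg, board code would now look roughly like the sketch below. This is only an illustration: the sensor, I2C address, adapter number and the empty-terminator convention are assumptions, not taken from this patch.

#include <linux/i2c.h>
#include <media/omap3isp.h>

static struct isp_bus_cfg my_sensor_bus_cfg = {
	.interface = ISP_INTERFACE_PARALLEL,
	.bus = {
		.parallel = {
			.data_lane_shift = 0,
			.clk_pol = 0,
		},
	},
};

static struct i2c_board_info my_sensor_i2c_info = {
	I2C_BOARD_INFO("mt9p031", 0x48),
};

static struct isp_platform_subdev my_isp_subdevs[] = {
	{
		.board_info	= &my_sensor_i2c_info,
		.i2c_adapter_id	= 2,
		.bus		= &my_sensor_bus_cfg,
	},
	{ /* assumed empty terminator */ },
};

static struct isp_platform_data my_isp_pdata = {
	.subdevs = my_isp_subdevs,
};
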
diff --git a/include/media/ov2659.h b/include/media/ov2659.h
new file mode 100644
index 0000000..4216adc
--- /dev/null
+++ b/include/media/ov2659.h
@@ -0,0 +1,34 @@
+/*
+ * Omnivision OV2659 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2015 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef OV2659_H
+#define OV2659_H
+
+/**
+ * struct ov2659_platform_data - ov2659 driver platform data
+ * @link_frequency: target pixel clock frequency
+ */
+struct ov2659_platform_data {
+ s64 link_frequency;
+};
+
+#endif /* OV2659_H */
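
For reference, a minimal (hypothetical) board file would only need to supply the link frequency:

#include <media/ov2659.h>

static struct ov2659_platform_data ov2659_pdata = {
	.link_frequency = 70000000,	/* 70 MHz, example value only */
};
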
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 2c7fbca..45534da5 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -74,6 +74,8 @@ enum rc_filter_type {
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
* @idle: used to keep track of RX state
+ * @encode_wakeup: wakeup filtering uses the IR encode API, therefore the
+ * allowed wakeup protocols are the set of all raw encoders
* @allowed_protocols: bitmask with the supported RC_BIT_* protocols
* @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
* @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -134,6 +136,7 @@ struct rc_dev {
struct input_dev *input_dev;
enum rc_driver_type driver_type;
bool idle;
+ bool encode_wakeup;
u64 allowed_protocols;
u64 enabled_protocols;
u64 allowed_wakeup_protocols;
@@ -239,10 +242,11 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
memset(ev, 0, sizeof(*ev));
}
-#define IR_MAX_DURATION 0xFFFFFFFF /* a bit more than 4 seconds */
+#define IR_MAX_DURATION 500000000 /* 500 ms */
#define US_TO_NS(usec) ((usec) * 1000)
#define MS_TO_US(msec) ((msec) * 1000)
#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
+#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -250,6 +254,9 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
int ir_raw_event_store_with_filter(struct rc_dev *dev,
struct ir_raw_event *ev);
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
+int ir_raw_encode_scancode(u64 protocols,
+ const struct rc_scancode_filter *scancode,
+ struct ir_raw_event *events, unsigned int max);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index e7a1514..27763d5 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -194,7 +194,10 @@ void rc_map_init(void);
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
#define RC_MAP_TBS_NEC "rc-tbs-nec"
+#define RC_MAP_TECHNISAT_TS35 "rc-technisat-ts35"
#define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2"
+#define RC_MAP_TERRATEC_CINERGY_C_PCI "rc-terratec-cinergy-c-pci"
+#define RC_MAP_TERRATEC_CINERGY_S2_HD "rc-terratec-cinergy-s2-hd"
#define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs"
#define RC_MAP_TERRATEC_SLIM "rc-terratec-slim"
#define RC_MAP_TERRATEC_SLIM_2 "rc-terratec-slim-2"
@@ -204,6 +207,7 @@ void rc_map_init(void);
#define RC_MAP_TOTAL_MEDIA_IN_HAND_02 "rc-total-media-in-hand-02"
#define RC_MAP_TREKSTOR "rc-trekstor"
#define RC_MAP_TT_1500 "rc-tt-1500"
+#define RC_MAP_TWINHAN_DTV_CAB_CI "rc-twinhan-dtv-cab-ci"
#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027"
#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100"
#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index 944ecdf..92766f7 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -178,8 +178,8 @@ struct saa7146_use_ops {
};
/* from saa7146_fops.c */
-int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev, char *name, int type);
-int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev);
+int saa7146_register_device(struct video_device *vid, struct saa7146_dev *dev, char *name, int type);
+int saa7146_unregister_device(struct video_device *vid, struct saa7146_dev *dev);
void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
diff --git a/include/media/smiapp.h b/include/media/smiapp.h
index 0b8f124..268a3cd 100644
--- a/include/media/smiapp.h
+++ b/include/media/smiapp.h
@@ -65,19 +65,19 @@ struct smiapp_platform_data {
unsigned short i2c_addr_dfl; /* Default i2c addr */
unsigned short i2c_addr_alt; /* Alternate i2c addr */
- unsigned int nvm_size; /* bytes */
- unsigned int ext_clk; /* sensor external clk */
+ uint32_t nvm_size; /* bytes */
+ uint32_t ext_clk; /* sensor external clk */
unsigned int lanes; /* Number of CSI-2 lanes */
- u8 csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */
- const s64 *op_sys_clock;
+ uint32_t csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */
+ uint64_t *op_sys_clock;
enum smiapp_module_board_orient module_board_orient;
struct smiapp_flash_strobe_parms *strobe_setup;
int (*set_xclk)(struct v4l2_subdev *sd, int hz);
- int xshutdown; /* gpio or SMIAPP_NO_XSHUTDOWN */
+ int32_t xshutdown; /* gpio or SMIAPP_NO_XSHUTDOWN */
};
#endif /* __SMIAPP_H_ */
diff --git a/include/media/tea575x.h b/include/media/tea575x.h
index 2d4fa59..5d09657 100644
--- a/include/media/tea575x.h
+++ b/include/media/tea575x.h
@@ -71,6 +71,11 @@ struct snd_tea575x {
int (*ext_init)(struct snd_tea575x *tea);
};
+int snd_tea575x_enum_freq_bands(struct snd_tea575x *tea,
+ struct v4l2_frequency_band *band);
+int snd_tea575x_g_tuner(struct snd_tea575x *tea, struct v4l2_tuner *v);
+int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
+ const struct v4l2_hw_freq_seek *a);
int snd_tea575x_hw_init(struct snd_tea575x *tea);
int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner);
void snd_tea575x_exit(struct snd_tea575x *tea);
diff --git a/include/media/v4l2-clk.h b/include/media/v4l2-clk.h
index 0b36cc1..3ef6e3d 100644
--- a/include/media/v4l2-clk.h
+++ b/include/media/v4l2-clk.h
@@ -22,14 +22,15 @@
struct module;
struct device;
+struct clk;
struct v4l2_clk {
struct list_head list;
const struct v4l2_clk_ops *ops;
const char *dev_id;
- const char *id;
int enable;
struct mutex lock; /* Protect the enable count */
atomic_t use_count;
+ struct clk *clk;
void *priv;
};
@@ -43,7 +44,7 @@ struct v4l2_clk_ops {
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
const char *dev_name,
- const char *name, void *priv);
+ void *priv);
void v4l2_clk_unregister(struct v4l2_clk *clk);
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id);
void v4l2_clk_put(struct v4l2_clk *clk);
@@ -55,14 +56,13 @@ int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate);
struct module;
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
- const char *id, unsigned long rate, struct module *owner);
+ unsigned long rate, struct module *owner);
void v4l2_clk_unregister_fixed(struct v4l2_clk *clk);
static inline struct v4l2_clk *v4l2_clk_register_fixed(const char *dev_id,
- const char *id,
unsigned long rate)
{
- return __v4l2_clk_register_fixed(dev_id, id, rate, THIS_MODULE);
+ return __v4l2_clk_register_fixed(dev_id, rate, THIS_MODULE);
}
#define v4l2_clk_name_i2c(name, size, adap, client) snprintf(name, size, \
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index eb76cfd..acbcd2f 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -65,7 +65,6 @@ struct v4l2_file_operations {
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
- long (*ioctl) (struct file *, unsigned int, unsigned long);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
@@ -124,7 +123,8 @@ struct video_device
spinlock_t fh_lock; /* Lock for all v4l2_fhs */
struct list_head fh_list; /* List of struct v4l2_fh */
- int debug; /* Activates debug level*/
+ /* Internal device debug flags, not for use by drivers */
+ int dev_debug;
/* Video standard vars */
v4l2_std_id tvnorms; /* Supported tv norms */
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index ffb69da..9c58157 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -58,8 +58,6 @@ struct v4l2_device {
struct v4l2_ctrl_handler *ctrl_handler;
/* Device's priority state */
struct v4l2_prio_state prio;
- /* BKL replacement mutex. Temporary solution only. */
- struct mutex ioctl_lock;
/* Keep track of the references to this struct. */
struct kref ref;
/* Release function that is called when the ref count goes to 0. */
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 4becc67..eecd310 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -117,6 +117,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
* @vsync - the height of the vertical sync in lines.
* @polarities - the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
+ * @interlaced - if this flag is true, it indicates interlaced format
* @fmt - the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
@@ -124,7 +125,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
* in with the found CVT timings.
*/
bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, struct v4l2_dv_timings *fmt);
+ u32 polarities, bool interlaced, struct v4l2_dv_timings *fmt);
/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
* @frame_height - the total height of the frame (including blanking) in lines.
@@ -132,6 +133,7 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
* @vsync - the height of the vertical sync in lines.
* @polarities - the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
+ * @interlaced - if this flag is true, it indicates interlaced format
* @aspect - preferred aspect ratio. GTF has no method of determining the
* aspect ratio in order to derive the image width from the
* image height, so it has to be passed explicitly. Usually
@@ -144,7 +146,7 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
* in with the found GTF timings.
*/
bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, struct v4l2_fract aspect,
+ u32 polarities, bool interlaced, struct v4l2_fract aspect,
struct v4l2_dv_timings *fmt);
/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
new file mode 100644
index 0000000..098236c
--- /dev/null
+++ b/include/media/v4l2-flash-led-class.h
@@ -0,0 +1,148 @@
+/*
+ * V4L2 flash LED sub-device registration helpers.
+ *
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _V4L2_FLASH_H
+#define _V4L2_FLASH_H
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+struct led_classdev_flash;
+struct led_classdev;
+struct v4l2_flash;
+enum led_brightness;
+
+/*
+ * struct v4l2_flash_ctrl_data - flash control initialization data, filled
+ * based on the features declared by the LED flash
+ * class driver in the v4l2_flash_config
+ * @config: initialization data for a control
+ * @cid: contains v4l2 flash control id if the config
+ * field was initialized, 0 otherwise
+ */
+struct v4l2_flash_ctrl_data {
+ struct v4l2_ctrl_config config;
+ u32 cid;
+};
+
+struct v4l2_flash_ops {
+ /* setup strobing the flash by hardware pin state assertion */
+ int (*external_strobe_set)(struct v4l2_flash *v4l2_flash,
+ bool enable);
+ /* convert intensity to brightness in a device specific manner */
+ enum led_brightness (*intensity_to_led_brightness)
+ (struct v4l2_flash *v4l2_flash, s32 intensity);
+ /* convert brightness to intensity in a device specific manner */
+ s32 (*led_brightness_to_intensity)
+ (struct v4l2_flash *v4l2_flash, enum led_brightness);
+};
+
+/**
+ * struct v4l2_flash_config - V4L2 Flash sub-device initialization data
+ * @dev_name: the name of the media entity,
+ unique in the system
+ * @torch_intensity: constraints for the LED in torch mode
+ * @indicator_intensity: constraints for the indicator LED
+ * @flash_faults: bitmask of flash faults that the LED flash class
+ device can report; corresponding LED_FAULT* bit
+ definitions are available in the header file
+ <linux/led-class-flash.h>
+ * @has_external_strobe: external strobe capability
+ */
+struct v4l2_flash_config {
+ char dev_name[32];
+ struct led_flash_setting torch_intensity;
+ struct led_flash_setting indicator_intensity;
+ u32 flash_faults;
+ unsigned int has_external_strobe:1;
+};
+
+/**
+ * struct v4l2_flash - Flash sub-device context
+ * @fled_cdev: LED flash class device controlled by this sub-device
+ * @iled_cdev: LED class device representing indicator LED associated
+ * with the LED flash class device
+ * @ops: V4L2 specific flash ops
+ * @sd: V4L2 sub-device
+ * @hdl: flash controls handler
+ * @ctrls: array of pointers to controls, whose values define
+ * the sub-device state
+ */
+struct v4l2_flash {
+ struct led_classdev_flash *fled_cdev;
+ struct led_classdev_flash *iled_cdev;
+ const struct v4l2_flash_ops *ops;
+
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl **ctrls;
+};
+
+static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash(
+ struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct v4l2_flash, sd);
+}
+
+static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
+{
+ return container_of(c->handler, struct v4l2_flash, hdl);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+/**
+ * v4l2_flash_init - initialize V4L2 flash led sub-device
+ * @dev: flash device, e.g. an I2C device
+ * @of_node: of_node of the LED, may be NULL if the same as device's
+ * @fled_cdev: LED flash class device to wrap
+ * @iled_cdev: LED flash class device representing indicator LED associated
+ * with fled_cdev, may be NULL
+ * @ops: V4L2 Flash device ops
+ * @config: initialization data for V4L2 Flash sub-device
+ *
+ * Create V4L2 Flash sub-device wrapping given LED subsystem device.
+ *
+ * Returns: A valid pointer, or, when an error occurs, the return
+ * value is encoded using ERR_PTR(). Use IS_ERR() to check and
+ * PTR_ERR() to obtain the numeric return value.
+ */
+struct v4l2_flash *v4l2_flash_init(
+ struct device *dev, struct device_node *of_node,
+ struct led_classdev_flash *fled_cdev,
+ struct led_classdev_flash *iled_cdev,
+ const struct v4l2_flash_ops *ops,
+ struct v4l2_flash_config *config);
+
+/**
+ * v4l2_flash_release - release V4L2 Flash sub-device
+ * @flash: the V4L2 Flash sub-device to release
+ *
+ * Release V4L2 Flash sub-device.
+ */
+void v4l2_flash_release(struct v4l2_flash *v4l2_flash);
+
+#else
+static inline struct v4l2_flash *v4l2_flash_init(
+ struct device *dev, struct device_node *of_node,
+ struct led_classdev_flash *fled_cdev,
+ struct led_classdev_flash *iled_cdev,
+ const struct v4l2_flash_ops *ops,
+ struct v4l2_flash_config *config)
+{
+ return NULL;
+}
+
+static inline void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
+{
+}
+#endif /* CONFIG_V4L2_FLASH_LED_CLASS */
+
+#endif /* _V4L2_FLASH_H */
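
To illustrate the intended use of this new header, a LED flash class driver might register the V4L2 wrapper roughly as below. This is a hedged sketch: the my_* names are invented, the ops table is left empty, and a real driver would also fill in the intensity constraints and fault mask from its own limits.

#include <linux/device.h>
#include <linux/string.h>
#include <media/v4l2-flash-led-class.h>

static const struct v4l2_flash_ops my_v4l2_flash_ops = {
	/* .external_strobe_set and the intensity conversion hooks would be
	 * filled in by a real driver */
};

static struct v4l2_flash *my_register_v4l2_flash(struct device *dev,
						 struct led_classdev_flash *fled_cdev)
{
	struct v4l2_flash_config cfg = {};

	strlcpy(cfg.dev_name, "my-flash", sizeof(cfg.dev_name));
	/* cfg.torch_intensity / cfg.indicator_intensity / cfg.flash_faults
	 * would be derived from the LED device's own settings */
	cfg.has_external_strobe = 1;

	return v4l2_flash_init(dev, dev->of_node, fled_cdev, NULL,
			       &my_v4l2_flash_ops, &cfg);
}

The caller checks the result with IS_ERR()/PTR_ERR() as the kerneldoc above describes, and calls v4l2_flash_release() on remove.
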
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 53605f0..8fbbd76 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -23,12 +23,6 @@ struct v4l2_ioctl_ops {
/* VIDIOC_QUERYCAP handler */
int (*vidioc_querycap)(struct file *file, void *fh, struct v4l2_capability *cap);
- /* Priority handling */
- int (*vidioc_g_priority) (struct file *file, void *fh,
- enum v4l2_priority *p);
- int (*vidioc_s_priority) (struct file *file, void *fh,
- enum v4l2_priority p);
-
/* VIDIOC_ENUM_FMT handlers */
int (*vidioc_enum_fmt_vid_cap) (struct file *file, void *fh,
struct v4l2_fmtdesc *f);
@@ -291,9 +285,18 @@ struct v4l2_ioctl_ops {
/* v4l debugging and diagnostics */
-/* Debug bitmask flags to be used on V4L2 */
-#define V4L2_DEBUG_IOCTL 0x01
-#define V4L2_DEBUG_IOCTL_ARG 0x02
+/* Device debug flags to be used with the video device debug attribute */
+
+/* Just log the ioctl name + error code */
+#define V4L2_DEV_DEBUG_IOCTL 0x01
+/* Log the ioctl name arguments + error code */
+#define V4L2_DEV_DEBUG_IOCTL_ARG 0x02
+/* Log the file operations open, release, mmap and get_unmapped_area */
+#define V4L2_DEV_DEBUG_FOP 0x04
+/* Log the read and write file operations and the VIDIOC_(D)QBUF ioctls */
+#define V4L2_DEV_DEBUG_STREAMING 0x08
+/* Log poll() */
+#define V4L2_DEV_DEBUG_POLL 0x10
/* Video standard functions */
extern const char *v4l2_norm_to_name(v4l2_std_id id);
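
These flags gate logging inside the V4L2 core (they pair with the new video_device dev_debug field and are explicitly not for driver use). Conceptually the core does something like the fragment below, where vdev is the struct video_device being accessed; this is an illustration, not code from this patch:

	if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
		pr_info("%s: open()\n", video_device_node_name(vdev));
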
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 38d960d..73069e4 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -96,6 +96,7 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
pix_fmt->colorspace = mbus_fmt->colorspace;
pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc;
pix_fmt->quantization = mbus_fmt->quantization;
+ pix_fmt->xfer_func = mbus_fmt->xfer_func;
}
static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
@@ -108,6 +109,7 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
mbus_fmt->colorspace = pix_fmt->colorspace;
mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc;
mbus_fmt->quantization = pix_fmt->quantization;
+ mbus_fmt->xfer_func = pix_fmt->xfer_func;
mbus_fmt->code = code;
}
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index c5f3914..3bbd96d 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -116,6 +116,8 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
+int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf);
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_create_buffers *create);
@@ -248,6 +250,8 @@ int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
+ struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
index 70fa7b7..4dc34b2 100644
--- a/include/media/v4l2-of.h
+++ b/include/media/v4l2-of.h
@@ -29,12 +29,15 @@ struct device_node;
* @data_lanes: an array of physical data lane indexes
* @clock_lane: physical lane index of the clock lane
* @num_data_lanes: number of data lanes
+ * @lane_polarities: polarity of the lanes. The order is the same as that of
+ * the physical lanes.
*/
struct v4l2_of_bus_mipi_csi2 {
unsigned int flags;
unsigned char data_lanes[4];
unsigned char clock_lane;
unsigned short num_data_lanes;
+ bool lane_polarities[5];
};
/**
@@ -54,21 +57,44 @@ struct v4l2_of_bus_parallel {
* @base: struct of_endpoint containing port, id, and local of_node
* @bus_type: bus type
* @bus: bus configuration data structure
- * @head: list head for this structure
+ * @link_frequencies: array of supported link frequencies
+ * @nr_of_link_frequencies: number of elements in link_frequencies array
*/
struct v4l2_of_endpoint {
struct of_endpoint base;
+ /* Fields below this line will be zeroed by v4l2_of_parse_endpoint() */
enum v4l2_mbus_type bus_type;
union {
struct v4l2_of_bus_parallel parallel;
struct v4l2_of_bus_mipi_csi2 mipi_csi2;
} bus;
- struct list_head head;
+ u64 *link_frequencies;
+ unsigned int nr_of_link_frequencies;
+};
+
+/**
+ * struct v4l2_of_link - a link between two endpoints
+ * @local_node: pointer to device_node of this endpoint
+ * @local_port: identifier of the port this endpoint belongs to
+ * @remote_node: pointer to device_node of the remote endpoint
+ * @remote_port: identifier of the port the remote endpoint belongs to
+ */
+struct v4l2_of_link {
+ struct device_node *local_node;
+ unsigned int local_port;
+ struct device_node *remote_node;
+ unsigned int remote_port;
};
#ifdef CONFIG_OF
int v4l2_of_parse_endpoint(const struct device_node *node,
struct v4l2_of_endpoint *endpoint);
+struct v4l2_of_endpoint *v4l2_of_alloc_parse_endpoint(
+ const struct device_node *node);
+void v4l2_of_free_endpoint(struct v4l2_of_endpoint *endpoint);
+int v4l2_of_parse_link(const struct device_node *node,
+ struct v4l2_of_link *link);
+void v4l2_of_put_link(struct v4l2_of_link *link);
#else /* CONFIG_OF */
static inline int v4l2_of_parse_endpoint(const struct device_node *node,
@@ -77,6 +103,26 @@ static inline int v4l2_of_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
+static inline struct v4l2_of_endpoint *v4l2_of_alloc_parse_endpoint(
+ const struct device_node *node)
+{
+ return NULL;
+}
+
+static inline void v4l2_of_free_endpoint(struct v4l2_of_endpoint *endpoint)
+{
+}
+
+static inline int v4l2_of_parse_link(const struct device_node *node,
+ struct v4l2_of_link *link)
+{
+ return -ENOSYS;
+}
+
+static inline void v4l2_of_put_link(struct v4l2_of_link *link)
+{
+}
+
#endif /* CONFIG_OF */
#endif /* _V4L2_OF_H */
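
A hedged usage sketch for the new alloc/free pair follows; the ERR_PTR-style error handling is an assumption, and my_parse_endpoint is an invented name:

#include <linux/device.h>
#include <linux/err.h>
#include <media/v4l2-of.h>

static int my_parse_endpoint(struct device *dev, struct device_node *node)
{
	struct v4l2_of_endpoint *ep;
	unsigned int i;

	ep = v4l2_of_alloc_parse_endpoint(node);
	if (IS_ERR(ep))		/* assumed ERR_PTR error reporting */
		return PTR_ERR(ep);

	for (i = 0; i < ep->nr_of_link_frequencies; i++)
		dev_dbg(dev, "link frequency %u: %llu Hz\n", i,
			(unsigned long long)ep->link_frequencies[i]);

	v4l2_of_free_endpoint(ep);
	return 0;
}
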
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 5860292..4e18318 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -40,6 +40,8 @@
#define V4L2_SUBDEV_IR_TX_NOTIFY _IOW('v', 1, u32)
#define V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ 0x00000001
+#define V4L2_DEVICE_NOTIFY_EVENT _IOW('v', 2, struct v4l2_event)
+
struct v4l2_device;
struct v4l2_ctrl_handler;
struct v4l2_event_subscription;
@@ -293,14 +295,6 @@ struct v4l2_mbus_frame_desc {
g_dv_timings(): Get custom dv timings in the sub device.
- enum_mbus_fmt: enumerate pixel formats, provided by a video data source
-
- g_mbus_fmt: get the current pixel format, provided by a video data source
-
- try_mbus_fmt: try to set a pixel format on a video data source
-
- s_mbus_fmt: set a pixel format on a video data source
-
g_mbus_config: get supported mediabus configurations
s_mbus_config: set a certain mediabus configuration. This operation is added
@@ -332,24 +326,12 @@ struct v4l2_subdev_video_ops {
struct v4l2_subdev_frame_interval *interval);
int (*s_frame_interval)(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_interval *interval);
- int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize);
- int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival);
int (*s_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*g_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*query_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
- int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index,
- u32 *code);
- int (*enum_mbus_fsizes)(struct v4l2_subdev *sd,
- struct v4l2_frmsizeenum *fsize);
- int (*g_mbus_fmt)(struct v4l2_subdev *sd,
- struct v4l2_mbus_framefmt *fmt);
- int (*try_mbus_fmt)(struct v4l2_subdev *sd,
- struct v4l2_mbus_framefmt *fmt);
- int (*s_mbus_fmt)(struct v4l2_subdev *sd,
- struct v4l2_mbus_framefmt *fmt);
int (*g_mbus_config)(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg);
int (*s_mbus_config)(struct v4l2_subdev *sd,
@@ -484,6 +466,18 @@ struct v4l2_subdev_ir_ops {
struct v4l2_subdev_ir_parameters *params);
};
+/*
+ * Used for storing subdev pad information. This structure only needs
+ * to be passed to the pad op if the 'which' field of the main argument
+ * is set to V4L2_SUBDEV_FORMAT_TRY. For V4L2_SUBDEV_FORMAT_ACTIVE it is
+ * safe to pass NULL.
+ */
+struct v4l2_subdev_pad_config {
+ struct v4l2_mbus_framefmt try_fmt;
+ struct v4l2_rect try_crop;
+ struct v4l2_rect try_compose;
+};
+
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
* @get_frame_desc: get the current low level media bus frame parameters.
@@ -491,25 +485,26 @@ struct v4l2_subdev_ir_ops {
* may be adjusted by the subdev driver to device capabilities.
*/
struct v4l2_subdev_pad_ops {
- int (*enum_mbus_code)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*enum_mbus_code)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code);
int (*enum_frame_size)(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse);
int (*enum_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_interval_enum *fie);
- int (*get_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*get_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format);
- int (*set_fmt)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*set_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format);
- int (*set_crop)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop);
- int (*get_crop)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop);
- int (*get_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*get_selection)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel);
- int (*set_selection)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ int (*set_selection)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel);
int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
@@ -610,6 +605,8 @@ struct v4l2_subdev {
struct video_device *devnode;
/* pointer to the physical device, if any */
struct device *dev;
+ /* The device_node of the subdev, usually the same as dev->of_node. */
+ struct device_node *of_node;
/* Links this subdev to a global subdev_list or @notifier->done list. */
struct list_head async_list;
/* Pointer to respective struct v4l2_async_subdev. */
@@ -631,11 +628,7 @@ struct v4l2_subdev {
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- struct {
- struct v4l2_mbus_framefmt try_fmt;
- struct v4l2_rect try_crop;
- struct v4l2_rect try_compose;
- } *pad;
+ struct v4l2_subdev_pad_config *pad;
#endif
};
@@ -645,17 +638,17 @@ struct v4l2_subdev_fh {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
#define __V4L2_SUBDEV_MK_GET_TRY(rtype, fun_name, field_name) \
static inline struct rtype * \
- v4l2_subdev_get_try_##fun_name(struct v4l2_subdev_fh *fh, \
- unsigned int pad) \
+ fun_name(struct v4l2_subdev *sd, \
+ struct v4l2_subdev_pad_config *cfg, \
+ unsigned int pad) \
{ \
- BUG_ON(pad >= vdev_to_v4l2_subdev( \
- fh->vfh.vdev)->entity.num_pads); \
- return &fh->pad[pad].field_name; \
+ BUG_ON(pad >= sd->entity.num_pads); \
+ return &cfg[pad].field_name; \
}
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, format, try_fmt)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, crop, try_crop)
-__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, compose, try_compose)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_mbus_framefmt, v4l2_subdev_get_try_format, try_fmt)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, v4l2_subdev_get_try_crop, try_crop)
+__V4L2_SUBDEV_MK_GET_TRY(v4l2_rect, v4l2_subdev_get_try_compose, try_compose)
#endif
extern const struct v4l2_file_operations v4l2_subdev_fops;
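
After this conversion a pad-level get_fmt handler receives the pad config directly instead of a file handle. A rough sketch of the new shape (my_sensor and its fields are invented for the example; the try accessor is only available with CONFIG_VIDEO_V4L2_SUBDEV_API):

#include <media/v4l2-subdev.h>

struct my_sensor {
	struct v4l2_subdev sd;
	struct v4l2_mbus_framefmt active_fmt;
};

static int my_sensor_get_fmt(struct v4l2_subdev *sd,
			     struct v4l2_subdev_pad_config *cfg,
			     struct v4l2_subdev_format *format)
{
	struct my_sensor *sensor = container_of(sd, struct my_sensor, sd);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		format->format = *v4l2_subdev_get_try_format(sd, cfg,
							     format->pad);
	else
		format->format = sensor->active_fmt;

	return 0;
}
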
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index fb6fd4d8..d8b27854 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -84,16 +84,8 @@ struct videobuf_dma_sg_memory {
* Despite the name, this is totally unrelated to videobuf, except that
* videobuf-dma-sg uses the same API internally.
*/
-void videobuf_dma_init(struct videobuf_dmabuf *dma);
-int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
- unsigned long data, unsigned long size);
-int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
- int nr_pages);
-int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
- dma_addr_t addr, int nr_pages);
int videobuf_dma_free(struct videobuf_dmabuf *dma);
-int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma);
int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma);
struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index d63965a..c3bfa47 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -56,9 +56,3 @@ struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_fro
int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
#endif /* _VIDEOBUF_DVB_H_ */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bd2cec2..22a44c2 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -134,17 +134,6 @@ enum vb2_io_modes {
};
/**
- * enum vb2_fileio_flags - flags for selecting a mode of the file io emulator,
- * by default the 'streaming' style is used by the file io emulator
- * @VB2_FILEIO_READ_ONCE: report EOF after reading the first buffer
- * @VB2_FILEIO_WRITE_IMMEDIATELY: queue buffer after each write() call
- */
-enum vb2_fileio_flags {
- VB2_FILEIO_READ_ONCE = (1 << 0),
- VB2_FILEIO_WRITE_IMMEDIATELY = (1 << 1),
-};
-
-/**
* enum vb2_buffer_state - current video buffer state
* @VB2_BUF_STATE_DEQUEUED: buffer under userspace control
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
@@ -346,7 +335,9 @@ struct v4l2_fh;
*
* @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h
* @io_modes: supported io methods (see vb2_io_modes enum)
- * @io_flags: additional io flags (see vb2_fileio_flags enum)
+ * @fileio_read_once: report EOF after reading the first buffer
+ * @fileio_write_immediately: queue buffer after each write() call
+ * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
* @lock: pointer to a mutex that protects the vb2_queue struct. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -390,13 +381,19 @@ struct v4l2_fh;
* @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
* buffers. Only set for capture queues if qbuf has not yet been
* called since poll() needs to return POLLERR in that situation.
+ * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the
+ * last decoded buffer was already dequeued. Set for capture queues
+ * when a buffer with the V4L2_BUF_FLAG_LAST is dequeued.
* @fileio: file io emulator internal data, used only if emulator is active
* @threadio: thread io internal data, used only if thread is active
*/
struct vb2_queue {
enum v4l2_buf_type type;
unsigned int io_modes;
- unsigned int io_flags;
+ unsigned fileio_read_once:1;
+ unsigned fileio_write_immediately:1;
+ unsigned allow_zero_bytesused:1;
+
struct mutex *lock;
struct v4l2_fh *owner;
@@ -429,6 +426,7 @@ struct vb2_queue {
unsigned int start_streaming_called:1;
unsigned int error:1;
unsigned int waiting_for_buffers:1;
+ unsigned int last_buffer_dequeued:1;
struct vb2_fileio_data *fileio;
struct vb2_threadio_data *threadio;
@@ -609,6 +607,15 @@ static inline bool vb2_start_streaming_called(struct vb2_queue *q)
return q->start_streaming_called;
}
+/**
+ * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue
+ * @q: videobuf queue
+ */
+static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
+{
+ q->last_buffer_dequeued = false;
+}
+
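
Driver conversion for the io_flags change is mechanical; before the usual vb2_queue_init() call, the queue setup would now read roughly:

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ;
	/* was: q->io_flags = VB2_FILEIO_READ_ONCE; */
	q->fileio_read_once = 1;
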
/*
* The following functions are not part of the vb2 core API, but are simple
* helper functions that you can use in your struct v4l2_file_operations,
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
new file mode 100644
index 0000000..5ae9625
--- /dev/null
+++ b/include/misc/cxl-base.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _MISC_CXL_BASE_H
+#define _MISC_CXL_BASE_H
+
+#ifdef CONFIG_CXL_BASE
+
+#define CXL_IRQ_RANGES 4
+
+struct cxl_irq_ranges {
+ irq_hw_number_t offset[CXL_IRQ_RANGES];
+ irq_hw_number_t range[CXL_IRQ_RANGES];
+};
+
+extern atomic_t cxl_use_count;
+
+static inline bool cxl_ctx_in_use(void)
+{
+ return (atomic_read(&cxl_use_count) != 0);
+}
+
+static inline void cxl_ctx_get(void)
+{
+ atomic_inc(&cxl_use_count);
+}
+
+static inline void cxl_ctx_put(void)
+{
+ atomic_dec(&cxl_use_count);
+}
+
+void cxl_slbia(struct mm_struct *mm);
+
+#else /* CONFIG_CXL_BASE */
+
+static inline bool cxl_ctx_in_use(void) { return false; }
+static inline void cxl_slbia(struct mm_struct *mm) {}
+
+#endif /* CONFIG_CXL_BASE */
+
+#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 975cc78..7a6c1d6 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 IBM Corp.
+ * Copyright 2015 IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -10,39 +10,194 @@
#ifndef _MISC_CXL_H
#define _MISC_CXL_H
-#ifdef CONFIG_CXL_BASE
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <uapi/misc/cxl.h>
-#define CXL_IRQ_RANGES 4
+/*
+ * This documents the in-kernel API for drivers to use CXL. It allows kernel
+ * drivers to bind to AFUs using an AFU configuration record exposed as a PCI
+ * configuration record.
+ *
+ * This API enables control over AFU and contexts which can't be part of the
+ * generic PCI API. This API is agnostic to the actual AFU.
+ */
+
+/* Get the AFU associated with a pci_dev */
+struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
+
+/* Get the AFU conf record number associated with a pci_dev */
+unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
+
+/* Get the physical device (i.e. the PCIe card) to which the AFU is attached */
+struct device *cxl_get_phys_dev(struct pci_dev *dev);
+
+
+/*
+ * Context lifetime overview:
+ *
+ * An AFU context may be inited and then started and stopped multiple times
+ * before it's released. ie.
+ * - cxl_dev_context_init()
+ * - cxl_start_context()
+ * - cxl_stop_context()
+ * - cxl_start_context()
+ * - cxl_stop_context()
+ * ...repeat...
+ * - cxl_release_context()
+ * Once released, a context can't be started again.
+ *
+ * One context is inited by the cxl driver for every pci_dev. This is to be
+ * used as a default kernel context. cxl_get_context() will get this
+ * context. This context will be released by PCI hot unplug, so it doesn't need to
+ * be released explicitly by drivers.
+ *
+ * Additional kernel contexts may be inited using cxl_dev_context_init().
+ * These must be released using cxl_context_detach().
+ *
+ * Once a context has been inited, IRQs may be configured. Firstly these IRQs
+ * must be allocated (cxl_allocate_afu_irqs()), then individually mapped to
+ * specific handlers (cxl_map_afu_irq()).
+ *
+ * These IRQs can be unmapped (cxl_unmap_afu_irq()) and finally released
+ * (cxl_free_afu_irqs()).
+ *
+ * The AFU can be reset (cxl_afu_reset()). This will cause the PSL/AFU
+ * hardware to lose track of all contexts. It's up to the caller of
+ * cxl_afu_reset() to restart these contexts.
+ */
+
+/*
+ * On pci_enable_device(), the cxl driver will init a single cxl context for
+ * use by the driver. It doesn't start this context (as that will likely
+ * generate DMA traffic for most AFUs).
+ *
+ * This gets the default context associated with this pci_dev. This context
+ * doesn't need to be released as this will be done by the PCI subsystem on hot
+ * unplug.
+ */
+struct cxl_context *cxl_get_context(struct pci_dev *dev);
+/*
+ * Allocate and initialise a context associated with an AFU PCI device. This
+ * doesn't start the context in the AFU.
+ */
+struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
+/*
+ * Release and free a context. Context should be stopped before calling.
+ */
+int cxl_release_context(struct cxl_context *ctx);
-struct cxl_irq_ranges {
- irq_hw_number_t offset[CXL_IRQ_RANGES];
- irq_hw_number_t range[CXL_IRQ_RANGES];
-};
+/*
+ * Allocate AFU interrupts for this context. num=0 will allocate the default
+ * for this AFU as given in the AFU descriptor. This number doesn't include the
+ * interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt to be
+ * used must map a handler with cxl_map_afu_irq.
+ */
+int cxl_allocate_afu_irqs(struct cxl_context *cxl, int num);
+/* Free allocated interrupts */
+void cxl_free_afu_irqs(struct cxl_context *cxl);
+
+/*
+ * Map a handler for an AFU interrupt associated with a particular context. AFU
+ * IRQ numbers start from 1 (CAIA defines AFU IRQ 0 for page faults). cookie
+ * is private data that will be provided to the interrupt handler.
+ */
+int cxl_map_afu_irq(struct cxl_context *cxl, int num,
+ irq_handler_t handler, void *cookie, char *name);
+/* unmap mapped IRQ handlers */
+void cxl_unmap_afu_irq(struct cxl_context *cxl, int num, void *cookie);
-extern atomic_t cxl_use_count;
+/*
+ * Start work on the AFU. This starts a cxl context and associates it with a
+ * task. task == NULL will make it a kernel context.
+ */
+int cxl_start_context(struct cxl_context *ctx, u64 wed,
+ struct task_struct *task);
+/*
+ * Stop a context and remove it from the PSL
+ */
+int cxl_stop_context(struct cxl_context *ctx);
-static inline bool cxl_ctx_in_use(void)
-{
- return (atomic_read(&cxl_use_count) != 0);
-}
+/* Reset the AFU */
+int cxl_afu_reset(struct cxl_context *ctx);
-static inline void cxl_ctx_get(void)
-{
- atomic_inc(&cxl_use_count);
-}
+/*
+ * Set a context as a master context.
+ * This sets the default problem space area mapped as the full space, rather
+ * than just the per context area (for slaves).
+ */
+void cxl_set_master(struct cxl_context *ctx);
-static inline void cxl_ctx_put(void)
-{
- atomic_dec(&cxl_use_count);
-}
+/*
+ * Map and unmap the AFU Problem Space area. The amount and location mapped
+ * depend on whether this context is a master or a slave.
+ */
+void __iomem *cxl_psa_map(struct cxl_context *ctx);
+void cxl_psa_unmap(void __iomem *addr);
-void cxl_slbia(struct mm_struct *mm);
+/* Get the process element for this context */
+int cxl_process_element(struct cxl_context *ctx);
-#else /* CONFIG_CXL_BASE */
-static inline bool cxl_ctx_in_use(void) { return false; }
-static inline void cxl_slbia(struct mm_struct *mm) {}
+/*
+ * These calls allow drivers to create their own file descriptors and make them
+ * identical to the cxl file descriptor user API. An example use case:
+ *
+ * struct file_operations cxl_my_fops = {};
+ * ......
+ * // Init the context
+ * ctx = cxl_dev_context_init(dev);
+ * if (IS_ERR(ctx))
+ * return PTR_ERR(ctx);
+ * // Create and attach a new file descriptor to my file ops
+ * file = cxl_get_fd(ctx, &cxl_my_fops, &fd);
+ * // Start context
+ * rc = cxl_start_work(ctx, &work.work);
+ * if (rc) {
+ * fput(file);
+ * put_unused_fd(fd);
+ * return -ENODEV;
+ * }
+ * // No error paths after installing the fd
+ * fd_install(fd, file);
+ * return fd;
+ *
+ * This inits a context, and gets a file descriptor and associates some file
+ * ops to that file descriptor. If the file ops are blank, the cxl driver will
+ * fill them in with the default ones that mimic the standard user API. Once
+ * completed, the file descriptor can be installed. Once the file descriptor is
+ * installed, it's visible to the user, so errors must not occur past this point.
+ *
+ * If cxl_fd_release() file op call is installed, the context will be stopped
+ * and released when the fd is released. Hence the driver won't need to manage
+ * this itself.
+ */
-#endif /* CONFIG_CXL_BASE */
+/*
+ * Take a context and associate it with my file ops. Returns the associated
+ * file and file descriptor. Any file ops which are blank are filled in by the
+ * cxl driver with the default ops to mimic the standard API.
+ */
+struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
+ int *fd);
+/* Get the context associated with this file */
+struct cxl_context *cxl_fops_get_context(struct file *file);
+/*
+ * Start a context associated with a struct cxl_ioctl_start_work used by the
+ * standard cxl user API.
+ */
+int cxl_start_work(struct cxl_context *ctx,
+ struct cxl_ioctl_start_work *work);
+/*
+ * Export all the existing fops so drivers can use them
+ */
+int cxl_fd_open(struct inode *inode, struct file *file);
+int cxl_fd_release(struct inode *inode, struct file *file);
+long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
+unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
+ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
+ loff_t *off);
-#endif
+#endif /* _MISC_CXL_H */
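
Pulling the documented calls together, a kernel AFU driver would typically go through init, IRQ setup and start roughly as in the sketch below. my_attach_afu, my_afu_irq and the wed value are invented for the illustration; error handling for cxl_dev_context_init() assumes the ERR_PTR convention shown in the fd example above.

#include <linux/err.h>
#include <misc/cxl.h>

static irqreturn_t my_afu_irq(int irq, void *cookie)
{
	/* cookie is the private data passed to cxl_map_afu_irq() */
	return IRQ_HANDLED;
}

static int my_attach_afu(struct pci_dev *pdev, u64 wed)
{
	struct cxl_context *ctx;
	int rc;

	ctx = cxl_dev_context_init(pdev);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	rc = cxl_allocate_afu_irqs(ctx, 0);	/* 0 = AFU descriptor default */
	if (rc)
		goto err_release;

	rc = cxl_map_afu_irq(ctx, 1, my_afu_irq, NULL, "my-afu");
	if (rc)
		goto err_free_irqs;

	rc = cxl_start_context(ctx, wed, NULL);	/* NULL task = kernel context */
	if (rc)
		goto err_unmap;

	return 0;

err_unmap:
	cxl_unmap_afu_irq(ctx, 1, NULL);
err_free_irqs:
	cxl_free_afu_irqs(ctx);
err_release:
	cxl_release_context(ctx);
	return rc;
}
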
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 6fab66c..c6b97e5 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -211,6 +211,8 @@ struct p9_dirent {
char d_name[256];
};
+struct iov_iter;
+
int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb);
int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
const char *name);
@@ -236,10 +238,8 @@ int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_remove(struct p9_fid *fid);
int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags);
-int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
- u64 offset, u32 count);
-int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
- u64 offset, u32 count);
+int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err);
+int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
struct p9_dirent *dirent);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 2a25dec..5122b5e 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -61,7 +61,7 @@ struct p9_trans_module {
int (*cancel) (struct p9_client *, struct p9_req_t *req);
int (*cancelled)(struct p9_client *, struct p9_req_t *req);
int (*zc_request)(struct p9_client *, struct p9_req_t *,
- char *, char *, int , int, int, int);
+ struct iov_iter *, struct iov_iter *, int , int, int);
};
void v9fs_register_trans(struct p9_trans_module *m);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index d13573b..def59d3 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -62,6 +62,9 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg);
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
+int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict,
+ u32 banned_flags);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
@@ -139,6 +142,7 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
void ipv6_mc_remap(struct inet6_dev *idev);
void ipv6_mc_init_dev(struct inet6_dev *idev);
void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);
void addrconf_dad_failure(struct inet6_ifaddr *ifp);
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index a175ba4..4a167b3 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -39,7 +39,6 @@ struct unix_skb_parms {
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define UNIXSID(skb) (&UNIXCB((skb)).secid)
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 0d87674..db639a4 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -74,7 +74,7 @@ void vsock_pending_work(struct work_struct *work);
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
- gfp_t priority, unsigned short type);
+ gfp_t priority, unsigned short type, int kern);
/**** TRANSPORT ****/
@@ -100,8 +100,8 @@ struct vsock_transport {
/* DGRAM. */
int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
- int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
- struct msghdr *msg, size_t len, int flags);
+ int (*dgram_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
+ size_t len, int flags);
int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
struct msghdr *, size_t len);
bool (*dgram_allow)(u32 cid, u32 port);
diff --git a/include/net/arp.h b/include/net/arp.h
index 73c4986..5e0f891 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -9,28 +9,17 @@
extern struct neigh_table arp_tbl;
-static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
+static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 *hash_rnd)
{
+ u32 key = *(const u32 *)pkey;
u32 val = key ^ hash32_ptr(dev);
- return val * hash_rnd;
+ return val * hash_rnd[0];
}
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
- struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht);
- struct neighbour *n;
- u32 hash_val;
-
- hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference_bh(n->next)) {
- if (n->dev == dev && *(u32 *)n->primary_key == key)
- return n;
- }
-
- return NULL;
+ return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
@@ -47,7 +36,6 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
}
void arp_init(void);
-int arp_find(unsigned char *haddr, struct sk_buff *skb);
int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
void arp_send(int type, int ptype, __be32 dest_ip,
struct net_device *dev, __be32 src_ip,
diff --git a/include/net/ax25.h b/include/net/ax25.h
index bf0396e..e602f81 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -12,6 +12,8 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <net/neighbour.h>
+#include <net/sock.h>
#define AX25_T1CLAMPLO 1
#define AX25_T1CLAMPHI (30 * HZ)
@@ -245,7 +247,20 @@ typedef struct ax25_cb {
atomic_t refcount;
} ax25_cb;
-#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
+struct ax25_sock {
+ struct sock sk;
+ struct ax25_cb *cb;
+};
+
+static inline struct ax25_sock *ax25_sk(const struct sock *sk)
+{
+ return (struct ax25_sock *) sk;
+}
+
+static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
+{
+ return ax25_sk(sk)->cb;
+}
#define ax25_for_each(__ax25, list) \
hlist_for_each_entry(__ax25, list, ax25_node)
@@ -366,9 +381,7 @@ int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
struct net_device *);
/* ax25_ip.c */
-int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short,
- const void *, const void *, unsigned int);
-int ax25_rebuild_header(struct sk_buff *);
+netdev_tx_t ax25_ip_xmit(struct sk_buff *skb);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 58695ff..38d8a34 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -245,10 +245,10 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
-int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags);
-int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags);
+int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
+int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
@@ -269,29 +269,34 @@ struct l2cap_ctrl {
__u16 reqseq;
__u16 txseq;
__u8 retries;
+ __le16 psm;
+ bdaddr_t bdaddr;
+ struct l2cap_chan *chan;
};
struct hci_dev;
-typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status);
+typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
+typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb);
-struct hci_req_ctrl {
- bool start;
- u8 event;
- hci_req_complete_t complete;
+struct req_ctrl {
+ bool start;
+ u8 event;
+ hci_req_complete_t complete;
+ hci_req_complete_skb_t complete_skb;
};
struct bt_skb_cb {
__u8 pkt_type;
- __u8 incoming;
+ __u8 force_active;
__u16 opcode;
__u16 expect;
- __u8 force_active;
- struct l2cap_chan *chan;
- struct l2cap_ctrl control;
- struct hci_req_ctrl req;
- bdaddr_t bdaddr;
- __le16 psm;
+ __u8 incoming:1;
+ union {
+ struct l2cap_ctrl l2cap;
+ struct req_ctrl req;
+ };
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
@@ -339,6 +344,11 @@ out:
int bt_to_errno(__u16 code);
+void hci_sock_set_flag(struct sock *sk, int nr);
+void hci_sock_clear_flag(struct sock *sk, int nr);
+int hci_sock_test_flag(struct sock *sk, int nr);
+unsigned short hci_sock_get_channel(struct sock *sk);
+
int hci_sock_init(void);
void hci_sock_cleanup(void);
@@ -355,8 +365,22 @@ extern struct dentry *bt_debugfs;
int l2cap_init(void);
void l2cap_exit(void);
+#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_init(void);
void sco_exit(void);
+#else
+static inline int sco_init(void)
+{
+ return 0;
+}
+
+static inline void sco_exit(void)
+{
+}
+#endif
+
+int mgmt_init(void);
+void mgmt_exit(void);
void bt_sock_reclassify_lock(struct sock *sk, int proto);
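The slimmed-down bt_skb_cb keeps L2CAP and HCI-request metadata in a union, so only one of the two views is valid for a given skb. A hedged sketch of accessing the L2CAP view through the new member names; the helper is illustrative, not from the tree:

#include <net/bluetooth/bluetooth.h>

/* Sketch: valid only on a path where the l2cap member of the union in
 * bt_skb_cb is the one that was populated for this skb.
 */
static __le16 example_skb_psm(struct sk_buff *skb)
{
	return bt_cb(skb)->l2cap.psm;	/* was: bt_cb(skb)->psm */
}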
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 40129b3..7ca6690 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -102,6 +102,28 @@ enum {
*/
HCI_QUIRK_FIXUP_BUFFER_SIZE,
+ /* When this quirk is set, then a controller that does not
+ * indicate support for Inquiry Result with RSSI is assumed to
+ * support it anyway. Some early Bluetooth 1.2 controllers had
+ * wrongly configured local features that require forcing
+ * them to enable this mode. Getting RSSI information with the
+ * inquiry responses is preferred since it allows for a better
+ * user experience.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_FIXUP_INQUIRY_MODE,
+
+ /* When this quirk is set, then the HCI Read Local Supported
+ * Commands command is not supported. In general Bluetooth 1.2
+ * and later controllers should support this command. However
+ * some controllers indicate Bluetooth 1.2 support, but do
+ * not support this command.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_LOCAL_COMMANDS,
+
/* When this quirk is set, then no stored link key handling
* is performed. This is mainly due to the fact that the
* HCI Delete Stored Link Key command is advertised, but
@@ -138,6 +160,14 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_STRICT_DUPLICATE_FILTER,
+
+ /* When this quirk is set, LE scan and BR/EDR inquiry is done
+ * simultaneously, otherwise it's interleaved.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
};
/* HCI device flags */
@@ -157,14 +187,14 @@ enum {
HCI_RESET,
};
-/* BR/EDR and/or LE controller flags: the flags defined here should represent
- * states configured via debugfs for debugging and testing purposes only.
- */
+/* HCI socket flags */
enum {
- HCI_DUT_MODE,
- HCI_FORCE_SC,
- HCI_FORCE_LESC,
- HCI_FORCE_STATIC_ADDR,
+ HCI_SOCK_TRUSTED,
+ HCI_MGMT_INDEX_EVENTS,
+ HCI_MGMT_UNCONF_INDEX_EVENTS,
+ HCI_MGMT_EXT_INDEX_EVENTS,
+ HCI_MGMT_GENERIC_EVENTS,
+ HCI_MGMT_OOB_DATA_EVENTS,
};
/*
@@ -196,6 +226,8 @@ enum {
HCI_HS_ENABLED,
HCI_LE_ENABLED,
HCI_ADVERTISING,
+ HCI_ADVERTISING_CONNECTABLE,
+ HCI_ADVERTISING_INSTANCE,
HCI_CONNECTABLE,
HCI_DISCOVERABLE,
HCI_LIMITED_DISCOVERABLE,
@@ -204,13 +236,13 @@ enum {
HCI_FAST_CONNECTABLE,
HCI_BREDR_ENABLED,
HCI_LE_SCAN_INTERRUPTED,
-};
-/* A mask for the flags that are supposed to remain when a reset happens
- * or the HCI device is closed.
- */
-#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
- BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV))
+ HCI_DUT_MODE,
+ HCI_FORCE_BREDR_SMP,
+ HCI_FORCE_STATIC_ADDR,
+
+ __HCI_NUM_FLAGS,
+};
/* HCI timeouts */
#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
@@ -342,7 +374,9 @@ enum {
/* LE features */
#define HCI_LE_ENCRYPTION 0x01
#define HCI_LE_CONN_PARAM_REQ_PROC 0x02
+#define HCI_LE_SLAVE_FEATURES 0x08
#define HCI_LE_PING 0x10
+#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_EXT_SCAN_POLICY 0x80
/* Connection modes */
@@ -430,9 +464,16 @@ enum {
#define EIR_NAME_COMPLETE 0x09 /* complete local name */
#define EIR_TX_POWER 0x0A /* transmit power level */
#define EIR_CLASS_OF_DEV 0x0D /* Class of Device */
-#define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */
-#define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */
+#define EIR_SSP_HASH_C192 0x0E /* Simple Pairing Hash C-192 */
+#define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */
#define EIR_DEVICE_ID 0x10 /* device ID */
+#define EIR_APPEARANCE 0x19 /* Device appearance */
+#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */
+#define EIR_LE_ROLE 0x1C /* LE role */
+#define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */
+#define EIR_SSP_RAND_R256 0x1E /* Simple Pairing Rand R-256 */
+#define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */
+#define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */
/* Low Energy Advertising Flags */
#define LE_AD_LIMITED 0x01 /* Limited Discoverable */
@@ -833,11 +874,26 @@ struct hci_cp_set_event_flt {
#define HCI_CONN_SETUP_AUTO_OFF 0x01
#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_OP_READ_STORED_LINK_KEY 0x0c0d
+struct hci_cp_read_stored_link_key {
+ bdaddr_t bdaddr;
+ __u8 read_all;
+} __packed;
+struct hci_rp_read_stored_link_key {
+ __u8 status;
+ __u8 max_keys;
+ __u8 num_keys;
+} __packed;
+
#define HCI_OP_DELETE_STORED_LINK_KEY 0x0c12
struct hci_cp_delete_stored_link_key {
bdaddr_t bdaddr;
__u8 delete_all;
} __packed;
+struct hci_rp_delete_stored_link_key {
+ __u8 status;
+ __u8 num_keys;
+} __packed;
#define HCI_MAX_NAME_LENGTH 248
@@ -1146,6 +1202,16 @@ struct hci_rp_read_clock {
__le16 accuracy;
} __packed;
+#define HCI_OP_READ_ENC_KEY_SIZE 0x1408
+struct hci_cp_read_enc_key_size {
+ __le16 handle;
+} __packed;
+struct hci_rp_read_enc_key_size {
+ __u8 status;
+ __le16 handle;
+ __u8 key_size;
+} __packed;
+
#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
struct hci_rp_read_local_amp_info {
__u8 status;
@@ -1321,6 +1387,11 @@ struct hci_cp_le_conn_update {
__le16 max_ce_len;
} __packed;
+#define HCI_OP_LE_READ_REMOTE_FEATURES 0x2016
+struct hci_cp_le_read_remote_features {
+ __le16 handle;
+} __packed;
+
#define HCI_OP_LE_START_ENC 0x2019
struct hci_cp_le_start_enc {
__le16 handle;
@@ -1371,6 +1442,39 @@ struct hci_cp_le_conn_param_req_neg_reply {
__u8 reason;
} __packed;
+#define HCI_OP_LE_SET_DATA_LEN 0x2022
+struct hci_cp_le_set_data_len {
+ __le16 handle;
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+struct hci_rp_le_set_data_len {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_READ_DEF_DATA_LEN 0x2023
+struct hci_rp_le_read_def_data_len {
+ __u8 status;
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+
+#define HCI_OP_LE_WRITE_DEF_DATA_LEN 0x2024
+struct hci_cp_le_write_def_data_len {
+ __le16 tx_len;
+ __le16 tx_time;
+} __packed;
+
+#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
+struct hci_rp_le_read_max_data_len {
+ __u8 status;
+ __le16 tx_len;
+ __le16 tx_time;
+ __le16 rx_len;
+ __le16 rx_time;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -1780,6 +1884,13 @@ struct hci_ev_le_conn_update_complete {
__le16 supervision_timeout;
} __packed;
+#define HCI_EV_LE_REMOTE_FEAT_COMPLETE 0x04
+struct hci_ev_le_remote_feat_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 features[8];
+} __packed;
+
#define HCI_EV_LE_LTK_REQ 0x05
struct hci_ev_le_ltk_req {
__le16 handle;
@@ -1796,6 +1907,15 @@ struct hci_ev_le_remote_conn_param_req {
__le16 timeout;
} __packed;
+#define HCI_EV_LE_DATA_LEN_CHANGE 0x07
+struct hci_ev_le_data_len_change {
+ __le16 handle;
+ __le16 tx_len;
+ __le16 tx_time;
+ __le16 rx_len;
+ __le16 rx_time;
+} __packed;
+
#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B
struct hci_ev_le_direct_adv_info {
__u8 evt_type;
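The new quirks added to hci.h above are meant to be raised by transport drivers. A short sketch of how a driver's probe path might request them, assuming the usual unsigned long quirks bitmap in struct hci_dev; the function name is hypothetical:

#include <net/bluetooth/hci_core.h>

static void example_apply_quirks(struct hci_dev *hdev)
{
	/* as the quirk comments note, these must be set before
	 * hci_register_dev()
	 */
	set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
	set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);

	/* this one may also be raised later, from the hdev->setup callback */
	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
}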
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3c78270..3bd618d 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -76,9 +76,12 @@ struct discovery_state {
u8 last_adv_data[HCI_MAX_AD_LENGTH];
u8 last_adv_data_len;
bool report_invalid_rssi;
+ bool result_filtering;
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
+ unsigned long scan_start;
+ unsigned long scan_duration;
};
struct hci_conn_hash {
@@ -106,7 +109,7 @@ struct bt_uuid {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 master;
+ u8 type;
u8 val[16];
};
@@ -145,12 +148,30 @@ struct oob_data {
struct list_head list;
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 present;
u8 hash192[16];
u8 rand192[16];
u8 hash256[16];
u8 rand256[16];
};
+struct adv_info {
+ struct list_head list;
+ bool pending;
+ __u8 instance;
+ __u32 flags;
+ __u16 timeout;
+ __u16 remaining_time;
+ __u16 duration;
+ __u16 adv_data_len;
+ __u8 adv_data[HCI_MAX_AD_LENGTH];
+ __u16 scan_rsp_len;
+ __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+};
+
+#define HCI_MAX_ADV_INSTANCES 5
+#define HCI_DEFAULT_ADV_DURATION 2
+
#define HCI_MAX_SHORT_NAME_LENGTH 10
/* Default LE RPA expiry time, 15 minutes */
@@ -170,7 +191,6 @@ struct amp_assoc {
#define HCI_MAX_PAGES 3
-#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
struct mutex lock;
@@ -205,6 +225,8 @@ struct hci_dev {
__u16 lmp_subver;
__u16 voice_setting;
__u8 num_iac;
+ __u8 stored_max_keys;
+ __u8 stored_num_keys;
__u8 io_capability;
__s8 inq_tx_power;
__u16 page_scan_interval;
@@ -220,10 +242,17 @@ struct hci_dev {
__u16 le_conn_max_interval;
__u16 le_conn_latency;
__u16 le_supv_timeout;
+ __u16 le_def_tx_len;
+ __u16 le_def_tx_time;
+ __u16 le_max_tx_len;
+ __u16 le_max_tx_time;
+ __u16 le_max_rx_len;
+ __u16 le_max_rx_time;
__u16 discov_interleaved_timeout;
__u16 conn_info_min_age;
__u16 conn_info_max_age;
__u8 ssp_debug_mode;
+ __u8 hw_error_code;
__u32 clock;
__u16 devid_source;
@@ -285,6 +314,7 @@ struct hci_dev {
struct work_struct power_on;
struct delayed_work power_off;
+ struct work_struct error_reset;
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -301,14 +331,13 @@ struct hci_dev {
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
- struct sk_buff *recv_evt;
struct sk_buff *sent_cmd;
- struct sk_buff *reassembly[NUM_REASSEMBLY];
struct mutex req_lock;
wait_queue_head_t req_wait_q;
__u32 req_status;
__u32 req_result;
+ struct sk_buff *req_skb;
void *smp_data;
void *smp_bredr_data;
@@ -339,10 +368,10 @@ struct hci_dev {
struct rfkill *rfkill;
- unsigned long dbg_flags;
- unsigned long dev_flags;
+ DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
struct delayed_work le_scan_disable;
+ struct delayed_work le_scan_restart;
__s8 adv_tx_power;
__u8 adv_data[HCI_MAX_AD_LENGTH];
@@ -350,6 +379,12 @@ struct hci_dev {
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
__u8 scan_rsp_data_len;
+ struct list_head adv_instances;
+ unsigned int adv_instance_cnt;
+ __u8 cur_adv_instance;
+ __u16 adv_instance_timeout;
+ struct delayed_work adv_instance_expire;
+
__u8 irk[16];
__u32 rpa_timeout;
struct delayed_work rpa_expired;
@@ -359,8 +394,10 @@ struct hci_dev {
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*setup)(struct hci_dev *hdev);
+ int (*shutdown)(struct hci_dev *hdev);
int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
void (*notify)(struct hci_dev *hdev, unsigned int evt);
+ void (*hw_error)(struct hci_dev *hdev, u8 code);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
};
@@ -434,6 +471,7 @@ struct hci_conn {
struct delayed_work le_conn_timeout;
struct device dev;
+ struct dentry *debugfs;
struct hci_dev *hdev;
void *l2cap_data;
@@ -482,20 +520,42 @@ struct hci_conn_params {
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
-extern rwlock_t hci_cb_list_lock;
+extern struct mutex hci_cb_list_lock;
+
+#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
+
+#define hci_dev_clear_volatile_flags(hdev) \
+ do { \
+ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
+ hci_dev_clear_flag(hdev, HCI_LE_ADV); \
+ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
+ } while (0)
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
int l2cap_disconn_ind(struct hci_conn *hcon);
-void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
-int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
-int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
-int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+#else
+static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __u8 *flags)
+{
+ return 0;
+}
+
+static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+{
+}
+#endif
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
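With dev_flags turned into a DECLARE_BITMAP sized by __HCI_NUM_FLAGS, callers go through the hci_dev_*_flag() helpers added in the hunk above instead of open-coded bit operations on an unsigned long. A brief sketch, with illustrative function names:

#include <net/bluetooth/hci_core.h>

static bool example_le_enabled(struct hci_dev *hdev)
{
	/* was: test_bit(HCI_LE_ENABLED, &hdev->dev_flags) */
	return hci_dev_test_flag(hdev, HCI_LE_ENABLED);
}

static void example_mark_advertising(struct hci_dev *hdev)
{
	hci_dev_set_flag(hdev, HCI_ADVERTISING);
}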
@@ -513,11 +573,14 @@ static inline void discovery_init(struct hci_dev *hdev)
static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
{
+ hdev->discovery.result_filtering = false;
hdev->discovery.report_invalid_rssi = true;
hdev->discovery.rssi = HCI_RSSI_INVALID;
hdev->discovery.uuid_count = 0;
kfree(hdev->discovery.uuids);
hdev->discovery.uuids = NULL;
+ hdev->discovery.scan_start = 0;
+ hdev->discovery.scan_duration = 0;
}
bool hci_discovery_active(struct hci_dev *hdev);
@@ -566,7 +629,6 @@ enum {
HCI_CONN_SC_ENABLED,
HCI_CONN_AES_CCM,
HCI_CONN_POWER_SAVE,
- HCI_CONN_REMOTE_OOB,
HCI_CONN_FLUSH_KEY,
HCI_CONN_ENCRYPT,
HCI_CONN_AUTH,
@@ -582,14 +644,14 @@ enum {
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+ return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}
@@ -772,7 +834,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
bool initiator);
-int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
@@ -920,8 +981,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type);
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type);
-int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
- u8 auto_connect);
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
void hci_conn_params_clear_all(struct hci_dev *hdev);
void hci_conn_params_clear_disabled(struct hci_dev *hdev);
@@ -930,8 +989,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr,
u8 addr_type);
-void hci_update_background_scan(struct hci_dev *hdev);
-
void hci_uuids_clear(struct hci_dev *hdev);
void hci_link_keys_clear(struct hci_dev *hdev);
@@ -956,6 +1013,8 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
void hci_smp_irks_clear(struct hci_dev *hdev);
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 bdaddr_type);
@@ -965,10 +1024,18 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 bdaddr_type);
+void hci_adv_instances_clear(struct hci_dev *hdev);
+struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
+struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
+int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
+ u16 adv_data_len, u8 *adv_data,
+ u16 scan_rsp_len, u8 *scan_rsp_data,
+ u16 timeout, u16 duration);
+int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
-int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
void hci_init_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
@@ -1012,11 +1079,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
-#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
- !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-#define bredr_sc_enabled(dev) ((lmp_sc_capable(dev) || \
- test_bit(HCI_FORCE_SC, &(dev)->dbg_flags)) && \
- test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
+#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \
+ !hci_dev_test_flag(dev, HCI_AUTO_OFF))
+#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_SC_ENABLED))
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1038,28 +1104,6 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
}
-static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
-{
- switch (conn->type) {
- case ACL_LINK:
- case LE_LINK:
- l2cap_connect_cfm(conn, status);
- break;
-
- case SCO_LINK:
- case ESCO_LINK:
- sco_connect_cfm(conn, status);
- break;
-
- default:
- BT_ERR("unknown link type %d", conn->type);
- break;
- }
-
- if (conn->connect_cfm_cb)
- conn->connect_cfm_cb(conn, status);
-}
-
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
if (conn->type != ACL_LINK && conn->type != LE_LINK)
@@ -1068,91 +1112,69 @@ static inline int hci_proto_disconn_ind(struct hci_conn *conn)
return l2cap_disconn_ind(conn);
}
-static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
-{
- switch (conn->type) {
- case ACL_LINK:
- case LE_LINK:
- l2cap_disconn_cfm(conn, reason);
- break;
-
- case SCO_LINK:
- case ESCO_LINK:
- sco_disconn_cfm(conn, reason);
- break;
-
- /* L2CAP would be handled for BREDR chan */
- case AMP_LINK:
- break;
+/* ----- HCI callbacks ----- */
+struct hci_cb {
+ struct list_head list;
- default:
- BT_ERR("unknown link type %d", conn->type);
- break;
- }
+ char *name;
- if (conn->disconn_cfm_cb)
- conn->disconn_cfm_cb(conn, reason);
-}
+ void (*connect_cfm) (struct hci_conn *conn, __u8 status);
+ void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+ __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+};
-static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
+static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
- __u8 encrypt;
-
- if (conn->type != ACL_LINK && conn->type != LE_LINK)
- return;
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
- return;
+ struct hci_cb *cb;
- encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
- l2cap_security_cfm(conn, status, encrypt);
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->connect_cfm)
+ cb->connect_cfm(conn, status);
+ }
+ mutex_unlock(&hci_cb_list_lock);
- if (conn->security_cfm_cb)
- conn->security_cfm_cb(conn, status);
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
}
-static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
- __u8 encrypt)
+static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
- if (conn->type != ACL_LINK && conn->type != LE_LINK)
- return;
+ struct hci_cb *cb;
- l2cap_security_cfm(conn, status, encrypt);
+ mutex_lock(&hci_cb_list_lock);
+ list_for_each_entry(cb, &hci_cb_list, list) {
+ if (cb->disconn_cfm)
+ cb->disconn_cfm(conn, reason);
+ }
+ mutex_unlock(&hci_cb_list_lock);
- if (conn->security_cfm_cb)
- conn->security_cfm_cb(conn, status);
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
}
-/* ----- HCI callbacks ----- */
-struct hci_cb {
- struct list_head list;
-
- char *name;
-
- void (*security_cfm) (struct hci_conn *conn, __u8 status,
- __u8 encrypt);
- void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
- void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
-};
-
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
__u8 encrypt;
- hci_proto_auth_cfm(conn, status);
-
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
return;
encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
@@ -1166,26 +1188,27 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
if (conn->pending_sec_level > conn->sec_level)
conn->sec_level = conn->pending_sec_level;
- hci_proto_encrypt_cfm(conn, status, encrypt);
-
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
@@ -1193,12 +1216,12 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
{
struct hci_cb *cb;
- read_lock(&hci_cb_list_lock);
+ mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
}
- read_unlock(&hci_cb_list_lock);
+ mutex_unlock(&hci_cb_list_lock);
}
static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
@@ -1284,30 +1307,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-struct hci_request {
- struct hci_dev *hdev;
- struct sk_buff_head cmd_q;
-
- /* If something goes wrong when building the HCI request, the error
- * value is stored in this field.
- */
- int err;
-};
-
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
- const void *param);
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
- const void *param, u8 event);
-void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
-bool hci_req_pending(struct hci_dev *hdev);
-
-void hci_req_add_le_scan_disable(struct hci_request *req);
-void hci_req_add_le_passive_scan(struct hci_request *req);
-
-void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req);
-
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1322,11 +1321,35 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk);
+void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+ int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
+#define HCI_MGMT_VAR_LEN BIT(0)
+#define HCI_MGMT_NO_HDEV BIT(1)
+#define HCI_MGMT_UNTRUSTED BIT(2)
+#define HCI_MGMT_UNCONFIGURED BIT(3)
+
+struct hci_mgmt_handler {
+ int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len);
+ size_t data_len;
+ unsigned long flags;
+};
+
+struct hci_mgmt_chan {
+ struct list_head list;
+ unsigned short channel;
+ size_t handler_count;
+ const struct hci_mgmt_handler *handlers;
+ void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
+};
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
+
/* Management interface */
#define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
@@ -1344,8 +1367,8 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
#define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */
#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
#define DISCOV_BREDR_INQUIRY_LEN 0x08
+#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
@@ -1353,6 +1376,7 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
int mgmt_powered(struct hci_dev *hdev, u8 powered);
int mgmt_update_adv_data(struct hci_dev *hdev);
void mgmt_discoverable_timeout(struct hci_dev *hdev);
+void mgmt_adv_timeout_expired(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
@@ -1388,13 +1412,9 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
- u8 *rand192, u8 *hash256, u8 *rand256,
- u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
@@ -1415,10 +1435,8 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
- __u8 ltk[16]);
+ __u8 ltk[16], __u8 key_size);
-int hci_update_random_address(struct hci_request *req, bool require_privacy,
- u8 *own_addr_type);
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *bdaddr_type);
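Connect and disconnect confirmations are no longer dispatched through hci_proto_connect_cfm()/hci_proto_disconn_cfm(); upper protocols now register a struct hci_cb and are walked under the new hci_cb_list_lock mutex. A sketch of such a registration, with hypothetical names:

#include <linux/init.h>
#include <linux/module.h>
#include <net/bluetooth/hci_core.h>

static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
	/* called from hci_connect_cfm() while hci_cb_list_lock (now a
	 * mutex, so sleeping is allowed) is held
	 */
}

static void example_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
}

static struct hci_cb example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
	.disconn_cfm	= example_disconn_cfm,
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");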
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index d1bb342..2239a37 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -248,6 +248,7 @@ struct l2cap_conn_rsp {
#define L2CAP_PSM_SDP 0x0001
#define L2CAP_PSM_RFCOMM 0x0003
#define L2CAP_PSM_3DSP 0x0021
+#define L2CAP_PSM_IPSP 0x0023 /* 6LoWPAN */
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 95c34d5..b831242 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -43,6 +43,8 @@
#define MGMT_STATUS_CANCELLED 0x10
#define MGMT_STATUS_INVALID_INDEX 0x11
#define MGMT_STATUS_RFKILLED 0x12
+#define MGMT_STATUS_ALREADY_PAIRED 0x13
+#define MGMT_STATUS_PERMISSION_DENIED 0x14
struct mgmt_hdr {
__le16 opcode;
@@ -98,6 +100,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_DEBUG_KEYS 0x00001000
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
+#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -301,10 +304,6 @@ struct mgmt_cp_user_passkey_neg_reply {
#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0020
#define MGMT_READ_LOCAL_OOB_DATA_SIZE 0
struct mgmt_rp_read_local_oob_data {
- __u8 hash[16];
- __u8 rand[16];
-} __packed;
-struct mgmt_rp_read_local_oob_ext_data {
__u8 hash192[16];
__u8 rand192[16];
__u8 hash256[16];
@@ -507,6 +506,71 @@ struct mgmt_cp_start_service_discovery {
} __packed;
#define MGMT_START_SERVICE_DISCOVERY_SIZE 4
+#define MGMT_OP_READ_LOCAL_OOB_EXT_DATA 0x003B
+struct mgmt_cp_read_local_oob_ext_data {
+ __u8 type;
+} __packed;
+#define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1
+struct mgmt_rp_read_local_oob_ext_data {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_READ_EXT_INDEX_LIST 0x003C
+#define MGMT_READ_EXT_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_ext_index_list {
+ __le16 num_controllers;
+ struct {
+ __le16 index;
+ __u8 type;
+ __u8 bus;
+ } entry[0];
+} __packed;
+
+#define MGMT_OP_READ_ADV_FEATURES 0x0003D
+#define MGMT_READ_ADV_FEATURES_SIZE 0
+struct mgmt_rp_read_adv_features {
+ __le32 supported_flags;
+ __u8 max_adv_data_len;
+ __u8 max_scan_rsp_len;
+ __u8 max_instances;
+ __u8 num_instances;
+ __u8 instance[0];
+} __packed;
+
+#define MGMT_OP_ADD_ADVERTISING 0x003E
+struct mgmt_cp_add_advertising {
+ __u8 instance;
+ __le32 flags;
+ __le16 duration;
+ __le16 timeout;
+ __u8 adv_data_len;
+ __u8 scan_rsp_len;
+ __u8 data[0];
+} __packed;
+#define MGMT_ADD_ADVERTISING_SIZE 11
+struct mgmt_rp_add_advertising {
+ __u8 instance;
+} __packed;
+
+#define MGMT_ADV_FLAG_CONNECTABLE BIT(0)
+#define MGMT_ADV_FLAG_DISCOV BIT(1)
+#define MGMT_ADV_FLAG_LIMITED_DISCOV BIT(2)
+#define MGMT_ADV_FLAG_MANAGED_FLAGS BIT(3)
+#define MGMT_ADV_FLAG_TX_POWER BIT(4)
+#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
+#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
+
+#define MGMT_OP_REMOVE_ADVERTISING 0x003F
+struct mgmt_cp_remove_advertising {
+ __u8 instance;
+} __packed;
+#define MGMT_REMOVE_ADVERTISING_SIZE 1
+struct mgmt_rp_remove_advertising {
+ __u8 instance;
+} __packed;
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -651,9 +715,14 @@ struct mgmt_ev_new_irk {
struct mgmt_irk_info irk;
} __packed;
+#define MGMT_CSRK_LOCAL_UNAUTHENTICATED 0x00
+#define MGMT_CSRK_REMOTE_UNAUTHENTICATED 0x01
+#define MGMT_CSRK_LOCAL_AUTHENTICATED 0x02
+#define MGMT_CSRK_REMOTE_AUTHENTICATED 0x03
+
struct mgmt_csrk_info {
struct mgmt_addr_info addr;
- __u8 master;
+ __u8 type;
__u8 val[16];
} __packed;
@@ -689,3 +758,29 @@ struct mgmt_ev_new_conn_param {
#define MGMT_EV_UNCONF_INDEX_REMOVED 0x001e
#define MGMT_EV_NEW_CONFIG_OPTIONS 0x001f
+
+struct mgmt_ev_ext_index {
+ __u8 type;
+ __u8 bus;
+} __packed;
+
+#define MGMT_EV_EXT_INDEX_ADDED 0x0020
+
+#define MGMT_EV_EXT_INDEX_REMOVED 0x0021
+
+#define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022
+struct mgmt_ev_local_oob_data_updated {
+ __u8 type;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_EV_ADVERTISING_ADDED 0x0023
+struct mgmt_ev_advertising_added {
+ __u8 instance;
+} __packed;
+
+#define MGMT_EV_ADVERTISING_REMOVED 0x0024
+struct mgmt_ev_advertising_removed {
+ __u8 instance;
+} __packed;
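For the new Add Advertising command the variable-length data[] blob is expected to carry adv_data_len bytes of advertising data immediately followed by scan_rsp_len bytes of scan response data; treat that layout as an assumption of this sketch, along with the helper name:

#include <linux/types.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/mgmt.h>

static bool example_split_adv(const struct mgmt_cp_add_advertising *cp,
			      u16 payload_len, const u8 **adv_data,
			      const u8 **scan_rsp)
{
	/* payload_len is the number of bytes following the fixed header */
	if (payload_len != cp->adv_data_len + cp->scan_rsp_len)
		return false;

	*adv_data = cp->data;
	*scan_rsp = cp->data + cp->adv_data_len;
	return true;
}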
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 578b831..4190af5 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -24,8 +24,6 @@
#ifndef __RFCOMM_H
#define __RFCOMM_H
-#define RFCOMM_PSM 3
-
#define RFCOMM_CONN_TIMEOUT (HZ * 30)
#define RFCOMM_DISC_TIMEOUT (HZ * 20)
#define RFCOMM_AUTH_TIMEOUT (HZ * 25)
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index e01d903..c2a40a17 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -82,6 +82,13 @@ typedef enum {
AD_TRANSMIT /* tx Machine */
} tx_states_t;
+/* churn machine states(43.4.17 in the 802.3ad standard) */
+typedef enum {
+ AD_CHURN_MONITOR, /* monitoring for churn */
+ AD_CHURN, /* churn detected (error) */
+ AD_NO_CHURN /* no churn (no error) */
+} churn_state_t;
+
/* rx indication types */
typedef enum {
AD_TYPE_LACPDU = 1, /* type lacpdu */
@@ -229,6 +236,12 @@ typedef struct port {
u16 sm_mux_timer_counter; /* state machine mux timer counter */
tx_states_t sm_tx_state; /* state machine tx state */
u16 sm_tx_timer_counter; /* state machine tx timer counter (always on - enters transmit state 3 times per second) */
+ u16 sm_churn_actor_timer_counter;
+ u16 sm_churn_partner_timer_counter;
+ u32 churn_actor_count;
+ u32 churn_partner_count;
+ churn_state_t sm_churn_actor_state;
+ churn_state_t sm_churn_partner_state;
struct slave *slave; /* pointer to the bond slave that this port belongs to */
struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
@@ -262,6 +275,22 @@ struct ad_slave_info {
u16 id;
};
+static inline const char *bond_3ad_churn_desc(churn_state_t state)
+{
+ static const char *const churn_description[] = {
+ "monitoring",
+ "churned",
+ "none",
+ "unknown"
+ };
+ int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
+
+ if (state >= max_size)
+ state = max_size - 1;
+
+ return churn_description[state];
+}
+
/* ========== AD Exported functions to the main bonding code ========== */
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
void bond_3ad_bind_slave(struct slave *slave);
@@ -274,7 +303,6 @@ void bond_3ad_handle_link_change(struct slave *slave, char link);
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
int __bond_3ad_get_active_agg_info(struct bonding *bond,
struct ad_info *ad_info);
-int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
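The churn counters and bond_3ad_churn_desc() lend themselves to the kind of per-port dump bonding already exposes via /proc; a hedged sketch of such a printer, where the surrounding seq_file helper is hypothetical:

#include <linux/seq_file.h>
#include <net/bond_3ad.h>

static void example_show_churn(struct seq_file *seq, const struct port *port)
{
	seq_printf(seq, "actor churn state: %s, count: %u\n",
		   bond_3ad_churn_desc(port->sm_churn_actor_state),
		   port->churn_actor_count);
	seq_printf(seq, "partner churn state: %s, count: %u\n",
		   bond_3ad_churn_desc(port->sm_churn_partner_state),
		   port->churn_partner_count);
}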
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index ea6546d..c28aca2 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -63,6 +63,9 @@ enum {
BOND_OPT_LP_INTERVAL,
BOND_OPT_SLAVES,
BOND_OPT_TLB_DYNAMIC_LB,
+ BOND_OPT_AD_ACTOR_SYS_PRIO,
+ BOND_OPT_AD_ACTOR_SYSTEM,
+ BOND_OPT_AD_USER_PORT_KEY,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 983a94b..20defc0 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -30,13 +30,6 @@
#include <net/bond_alb.h>
#include <net/bond_options.h>
-#define DRV_VERSION "3.7.1"
-#define DRV_RELDATE "April 27, 2011"
-#define DRV_NAME "bonding"
-#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
-
-#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
-
#define BOND_MAX_ARP_TARGETS 16
#define BOND_DEFAULT_MIIMON 100
@@ -143,6 +136,9 @@ struct bond_params {
int packets_per_slave;
int tlb_dynamic_lb;
struct reciprocal_value reciprocal_packets_per_slave;
+ u16 ad_actor_sys_prio;
+ u16 ad_user_port_key;
+ u8 ad_actor_system[ETH_ALEN];
};
struct bond_parm_tbl {
@@ -150,6 +146,12 @@ struct bond_parm_tbl {
int mode;
};
+struct netdev_notify_work {
+ struct delayed_work work;
+ struct net_device *dev;
+ struct netdev_bonding_info bonding_info;
+};
+
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -243,6 +245,8 @@ struct bonding {
#define bond_slave_get_rtnl(dev) \
((struct slave *) rtnl_dereference(dev->rx_handler_data))
+void bond_queue_slave_event(struct slave *slave);
+
struct bond_vlan_tag {
__be16 vlan_proto;
unsigned short vlan_id;
@@ -315,6 +319,7 @@ static inline void bond_set_active_slave(struct slave *slave)
{
if (slave->backup) {
slave->backup = 0;
+ bond_queue_slave_event(slave);
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -323,6 +328,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
{
if (!slave->backup) {
slave->backup = 1;
+ bond_queue_slave_event(slave);
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -336,6 +342,7 @@ static inline void bond_set_slave_state(struct slave *slave,
slave->backup = slave_state;
if (notify) {
rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
+ bond_queue_slave_event(slave);
slave->should_notify = 0;
} else {
if (slave->should_notify)
@@ -490,6 +497,12 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
+static inline void bond_set_slave_link_state(struct slave *slave, int state)
+{
+ slave->link = state;
+ bond_queue_slave_event(slave);
+}
+
static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
{
struct in_device *in_dev;
@@ -525,6 +538,7 @@ void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
+int bond_set_carrier(struct bonding *bond);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 1c1ad46..fe328c5 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
* @return Checksum of buffer.
*/
-u16 cfpkt_iterate(struct cfpkt *pkt,
+int cfpkt_iterate(struct cfpkt *pkt,
u16 (*iter_func)(u16 chks, void *buf, u16 len),
u16 data);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 4ebb816..a741678 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -111,7 +111,7 @@ enum ieee80211_band {
* This may be due to the driver or due to regulatory bandwidth
* restrictions.
* @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
- * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
* @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
@@ -129,7 +129,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_80MHZ = 1<<7,
IEEE80211_CHAN_NO_160MHZ = 1<<8,
IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
- IEEE80211_CHAN_GO_CONCURRENT = 1<<10,
+ IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
};
@@ -215,6 +215,39 @@ enum ieee80211_rate_flags {
};
/**
+ * enum ieee80211_bss_type - BSS type filter
+ *
+ * @IEEE80211_BSS_TYPE_ESS: Infrastructure BSS
+ * @IEEE80211_BSS_TYPE_PBSS: Personal BSS
+ * @IEEE80211_BSS_TYPE_IBSS: Independent BSS
+ * @IEEE80211_BSS_TYPE_MBSS: Mesh BSS
+ * @IEEE80211_BSS_TYPE_ANY: Wildcard value for matching any BSS type
+ */
+enum ieee80211_bss_type {
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_BSS_TYPE_PBSS,
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_BSS_TYPE_MBSS,
+ IEEE80211_BSS_TYPE_ANY
+};
+
+/**
+ * enum ieee80211_privacy - BSS privacy filter
+ *
+ * @IEEE80211_PRIVACY_ON: privacy bit set
+ * @IEEE80211_PRIVACY_OFF: privacy bit clear
+ * @IEEE80211_PRIVACY_ANY: Wildcard value for matching any privacy setting
+ */
+enum ieee80211_privacy {
+ IEEE80211_PRIVACY_ON,
+ IEEE80211_PRIVACY_OFF,
+ IEEE80211_PRIVACY_ANY
+};
+
+#define IEEE80211_PRIVACY(x) \
+ ((x) ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF)
+
+/**
* struct ieee80211_rate - bitrate definition
*
* This structure describes a bitrate that an 802.11 PHY can
@@ -520,37 +553,41 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
* @SURVEY_INFO_IN_USE: channel is currently being used
- * @SURVEY_INFO_CHANNEL_TIME: channel active time (in ms) was filled in
- * @SURVEY_INFO_CHANNEL_TIME_BUSY: channel busy time was filled in
- * @SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: extension channel busy time was filled in
- * @SURVEY_INFO_CHANNEL_TIME_RX: channel receive time was filled in
- * @SURVEY_INFO_CHANNEL_TIME_TX: channel transmit time was filled in
+ * @SURVEY_INFO_TIME: active time (in ms) was filled in
+ * @SURVEY_INFO_TIME_BUSY: busy time was filled in
+ * @SURVEY_INFO_TIME_EXT_BUSY: extension channel busy time was filled in
+ * @SURVEY_INFO_TIME_RX: receive time was filled in
+ * @SURVEY_INFO_TIME_TX: transmit time was filled in
+ * @SURVEY_INFO_TIME_SCAN: scan time was filled in
*
* Used by the driver to indicate which info in &struct survey_info
* it has filled in during the get_survey().
*/
enum survey_info_flags {
- SURVEY_INFO_NOISE_DBM = 1<<0,
- SURVEY_INFO_IN_USE = 1<<1,
- SURVEY_INFO_CHANNEL_TIME = 1<<2,
- SURVEY_INFO_CHANNEL_TIME_BUSY = 1<<3,
- SURVEY_INFO_CHANNEL_TIME_EXT_BUSY = 1<<4,
- SURVEY_INFO_CHANNEL_TIME_RX = 1<<5,
- SURVEY_INFO_CHANNEL_TIME_TX = 1<<6,
+ SURVEY_INFO_NOISE_DBM = BIT(0),
+ SURVEY_INFO_IN_USE = BIT(1),
+ SURVEY_INFO_TIME = BIT(2),
+ SURVEY_INFO_TIME_BUSY = BIT(3),
+ SURVEY_INFO_TIME_EXT_BUSY = BIT(4),
+ SURVEY_INFO_TIME_RX = BIT(5),
+ SURVEY_INFO_TIME_TX = BIT(6),
+ SURVEY_INFO_TIME_SCAN = BIT(7),
};
/**
* struct survey_info - channel survey response
*
- * @channel: the channel this survey record reports, mandatory
+ * @channel: the channel this survey record reports, may be %NULL for a single
+ * record to report global statistics
* @filled: bitflag of flags from &enum survey_info_flags
* @noise: channel noise in dBm. This and all following fields are
* optional
- * @channel_time: amount of time in ms the radio spent on the channel
- * @channel_time_busy: amount of time the primary channel was sensed busy
- * @channel_time_ext_busy: amount of time the extension channel was sensed busy
- * @channel_time_rx: amount of time the radio spent receiving data
- * @channel_time_tx: amount of time the radio spent transmitting data
+ * @time: amount of time in ms the radio was turned on (on the channel)
+ * @time_busy: amount of time the primary channel was sensed busy
+ * @time_ext_busy: amount of time the extension channel was sensed busy
+ * @time_rx: amount of time the radio spent receiving data
+ * @time_tx: amount of time the radio spent transmitting data
+ * @time_scan: amount of time the radio spent scanning
*
* Used by dump_survey() to report back per-channel survey information.
*
@@ -559,11 +596,12 @@ enum survey_info_flags {
*/
struct survey_info {
struct ieee80211_channel *channel;
- u64 channel_time;
- u64 channel_time_busy;
- u64 channel_time_ext_busy;
- u64 channel_time_rx;
- u64 channel_time_tx;
+ u64 time;
+ u64 time_busy;
+ u64 time_ext_busy;
+ u64 time_rx;
+ u64 time_tx;
+ u64 time_scan;
u32 filled;
s8 noise;
};
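Drivers filling struct survey_info switch to the shortened field names and the matching SURVEY_INFO_TIME* bits, and may now pass a NULL channel for a single global record. A minimal sketch with illustrative values and helper name:

#include <net/cfg80211.h>

static void example_fill_survey(struct survey_info *survey,
				struct ieee80211_channel *chan,
				u64 active_ms, u64 busy_ms)
{
	survey->channel = chan;		/* may be NULL for a global record */
	survey->time = active_ms;	/* was channel_time */
	survey->time_busy = busy_ms;	/* was channel_time_busy */
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
}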
@@ -861,75 +899,6 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
enum cfg80211_station_type statype);
/**
- * enum station_info_flags - station information flags
- *
- * Used by the driver to indicate which info in &struct station_info
- * it has filled in during get_station() or dump_station().
- *
- * @STATION_INFO_INACTIVE_TIME: @inactive_time filled
- * @STATION_INFO_RX_BYTES: @rx_bytes filled
- * @STATION_INFO_TX_BYTES: @tx_bytes filled
- * @STATION_INFO_RX_BYTES64: @rx_bytes filled with 64-bit value
- * @STATION_INFO_TX_BYTES64: @tx_bytes filled with 64-bit value
- * @STATION_INFO_LLID: @llid filled
- * @STATION_INFO_PLID: @plid filled
- * @STATION_INFO_PLINK_STATE: @plink_state filled
- * @STATION_INFO_SIGNAL: @signal filled
- * @STATION_INFO_TX_BITRATE: @txrate fields are filled
- * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
- * @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value
- * @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value
- * @STATION_INFO_TX_RETRIES: @tx_retries filled
- * @STATION_INFO_TX_FAILED: @tx_failed filled
- * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled
- * @STATION_INFO_SIGNAL_AVG: @signal_avg filled
- * @STATION_INFO_RX_BITRATE: @rxrate fields are filled
- * @STATION_INFO_BSS_PARAM: @bss_param filled
- * @STATION_INFO_CONNECTED_TIME: @connected_time filled
- * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
- * @STATION_INFO_STA_FLAGS: @sta_flags filled
- * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled
- * @STATION_INFO_T_OFFSET: @t_offset filled
- * @STATION_INFO_LOCAL_PM: @local_pm filled
- * @STATION_INFO_PEER_PM: @peer_pm filled
- * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled
- * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled
- * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled
- * @STATION_INFO_EXPECTED_THROUGHPUT: @expected_throughput filled
- */
-enum station_info_flags {
- STATION_INFO_INACTIVE_TIME = BIT(0),
- STATION_INFO_RX_BYTES = BIT(1),
- STATION_INFO_TX_BYTES = BIT(2),
- STATION_INFO_LLID = BIT(3),
- STATION_INFO_PLID = BIT(4),
- STATION_INFO_PLINK_STATE = BIT(5),
- STATION_INFO_SIGNAL = BIT(6),
- STATION_INFO_TX_BITRATE = BIT(7),
- STATION_INFO_RX_PACKETS = BIT(8),
- STATION_INFO_TX_PACKETS = BIT(9),
- STATION_INFO_TX_RETRIES = BIT(10),
- STATION_INFO_TX_FAILED = BIT(11),
- STATION_INFO_RX_DROP_MISC = BIT(12),
- STATION_INFO_SIGNAL_AVG = BIT(13),
- STATION_INFO_RX_BITRATE = BIT(14),
- STATION_INFO_BSS_PARAM = BIT(15),
- STATION_INFO_CONNECTED_TIME = BIT(16),
- STATION_INFO_ASSOC_REQ_IES = BIT(17),
- STATION_INFO_STA_FLAGS = BIT(18),
- STATION_INFO_BEACON_LOSS_COUNT = BIT(19),
- STATION_INFO_T_OFFSET = BIT(20),
- STATION_INFO_LOCAL_PM = BIT(21),
- STATION_INFO_PEER_PM = BIT(22),
- STATION_INFO_NONPEER_PM = BIT(23),
- STATION_INFO_RX_BYTES64 = BIT(24),
- STATION_INFO_TX_BYTES64 = BIT(25),
- STATION_INFO_CHAIN_SIGNAL = BIT(26),
- STATION_INFO_CHAIN_SIGNAL_AVG = BIT(27),
- STATION_INFO_EXPECTED_THROUGHPUT = BIT(28),
-};
-
-/**
* enum station_info_rate_flags - bitrate info flags
*
* Used by the driver to indicate the specific rate transmission
@@ -937,22 +906,35 @@ enum station_info_flags {
*
* @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
* @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
- * @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 MHz width transmission
- * @RATE_INFO_FLAGS_80_MHZ_WIDTH: 80 MHz width transmission
- * @RATE_INFO_FLAGS_80P80_MHZ_WIDTH: 80+80 MHz width transmission
- * @RATE_INFO_FLAGS_160_MHZ_WIDTH: 160 MHz width transmission
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
* @RATE_INFO_FLAGS_60G: 60GHz MCS
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
RATE_INFO_FLAGS_VHT_MCS = BIT(1),
- RATE_INFO_FLAGS_40_MHZ_WIDTH = BIT(2),
- RATE_INFO_FLAGS_80_MHZ_WIDTH = BIT(3),
- RATE_INFO_FLAGS_80P80_MHZ_WIDTH = BIT(4),
- RATE_INFO_FLAGS_160_MHZ_WIDTH = BIT(5),
- RATE_INFO_FLAGS_SHORT_GI = BIT(6),
- RATE_INFO_FLAGS_60G = BIT(7),
+ RATE_INFO_FLAGS_SHORT_GI = BIT(2),
+ RATE_INFO_FLAGS_60G = BIT(3),
+};
+
+/**
+ * enum rate_info_bw - rate bandwidth information
+ *
+ * Used by the driver to indicate the rate bandwidth.
+ *
+ * @RATE_INFO_BW_5: 5 MHz bandwidth
+ * @RATE_INFO_BW_10: 10 MHz bandwidth
+ * @RATE_INFO_BW_20: 20 MHz bandwidth
+ * @RATE_INFO_BW_40: 40 MHz bandwidth
+ * @RATE_INFO_BW_80: 80 MHz bandwidth
+ * @RATE_INFO_BW_160: 160 MHz bandwidth
+ */
+enum rate_info_bw {
+ RATE_INFO_BW_5,
+ RATE_INFO_BW_10,
+ RATE_INFO_BW_20,
+ RATE_INFO_BW_40,
+ RATE_INFO_BW_80,
+ RATE_INFO_BW_160,
};
/**
@@ -964,12 +946,14 @@ enum rate_info_flags {
* @mcs: mcs index if struct describes a 802.11n bitrate
* @legacy: bitrate in 100kbit/s for 802.11abg
* @nss: number of streams (VHT only)
+ * @bw: bandwidth (from &enum rate_info_bw)
*/
struct rate_info {
u8 flags;
u8 mcs;
u16 legacy;
u8 nss;
+ u8 bw;
};
/**
@@ -1003,6 +987,24 @@ struct sta_bss_parameters {
u16 beacon_interval;
};
+/**
+ * struct cfg80211_tid_stats - per-TID statistics
+ * @filled: bitmap of flags using the bits of &enum nl80211_tid_stats to
+ * indicate the relevant values in this struct are filled
+ * @rx_msdu: number of received MSDUs
+ * @tx_msdu: number of (attempted) transmitted MSDUs
+ * @tx_msdu_retries: number of retries (not counting the first) for
+ * transmitted MSDUs
+ * @tx_msdu_failed: number of failed transmitted MSDUs
+ */
+struct cfg80211_tid_stats {
+ u32 filled;
+ u64 rx_msdu;
+ u64 tx_msdu;
+ u64 tx_msdu_retries;
+ u64 tx_msdu_failed;
+};
+
#define IEEE80211_MAX_CHAINS 4
/**
@@ -1010,11 +1012,12 @@ struct sta_bss_parameters {
*
* Station information filled by driver for get_station() and dump_station.
*
- * @filled: bitflag of flags from &enum station_info_flags
+ * @filled: bitflag of flags using the bits of &enum nl80211_sta_info to
+ * indicate the relevant values in this struct for them
* @connected_time: time(in secs) since a station is last connected
* @inactive_time: time since last station activity (tx/rx) in milliseconds
- * @rx_bytes: bytes received from this station
- * @tx_bytes: bytes transmitted to this station
+ * @rx_bytes: bytes (size of MPDUs) received from this station
+ * @tx_bytes: bytes (size of MPDUs) transmitted to this station
* @llid: mesh local link id
* @plid: mesh peer link id
* @plink_state: mesh peer link state
@@ -1027,10 +1030,10 @@ struct sta_bss_parameters {
* @chain_signal_avg: per-chain signal strength average in dBm
* @txrate: current unicast bitrate from this station
* @rxrate: current unicast bitrate to this station
- * @rx_packets: packets received from this station
- * @tx_packets: packets transmitted to this station
- * @tx_retries: cumulative retry counts
- * @tx_failed: number of failed transmissions (retries exceeded, no ACK)
+ * @rx_packets: packets (MSDUs & MMPDUs) received from this station
+ * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this station
+ * @tx_retries: cumulative retry counts (MPDUs)
+ * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK)
* @rx_dropped_misc: Dropped for un-specified reason.
* @bss_param: current BSS parameters
* @generation: generation number for nl80211 dumps.
@@ -1050,6 +1053,11 @@ struct sta_bss_parameters {
* @nonpeer_pm: non-peer mesh STA power save mode
* @expected_throughput: expected throughput in kbps (including 802.11 headers)
* towards this station.
+ * @rx_beacon: number of beacons received from this peer
+ * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
+ * from this peer
+ * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
+ * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
*/
struct station_info {
u32 filled;
@@ -1090,10 +1098,9 @@ struct station_info {
u32 expected_throughput;
- /*
- * Note: Add a new enum station_info_flags value for each new field and
- * use it to check which fields are initialized.
- */
+ u64 rx_beacon;
+ u8 rx_beacon_signal_avg;
+ struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
};
/**
@@ -1516,6 +1523,13 @@ struct cfg80211_match_set {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @rcu_head: RCU callback used to free the struct
+ * @owner_nlportid: netlink portid of owner (if this is a request
+ * owned by a particular socket)
+ * @delay: delay in seconds to use before starting the first scan
+ * cycle. The driver may ignore this parameter and start
+ * immediately (or at any other time), if this feature is not
+ * supported.
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1529,6 +1543,7 @@ struct cfg80211_sched_scan_request {
struct cfg80211_match_set *match_sets;
int n_match_sets;
s32 min_rssi_thold;
+ u32 delay;
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
@@ -1537,6 +1552,8 @@ struct cfg80211_sched_scan_request {
struct wiphy *wiphy;
struct net_device *dev;
unsigned long scan_start;
+ struct rcu_head rcu_head;
+ u32 owner_nlportid;
/* keep last */
struct ieee80211_channel *channels[0];
@@ -2439,6 +2456,7 @@ struct cfg80211_ops {
struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
const char *name,
+ unsigned char name_assign_type,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params);
@@ -3011,6 +3029,8 @@ struct wiphy_vendor_command {
* @regulatory_flags: wiphy regulatory flags, see
* &enum ieee80211_regulatory_flags
* @features: features advertised to nl80211, see &enum nl80211_feature_flags.
+ * @ext_features: extended features advertised to nl80211, see
+ * &enum nl80211_ext_feature_index.
* @bss_priv_size: each BSS struct has private data allocated with it,
* this variable determines its size
* @max_scan_ssids: maximum number of SSIDs the device can scan for in
@@ -3120,6 +3140,7 @@ struct wiphy {
u16 max_acl_mac_addrs;
u32 flags, regulatory_flags, features;
+ u8 ext_features[DIV_ROUND_UP(NUM_NL80211_EXT_FEATURES, 8)];
u32 ap_sme_capa;
@@ -3196,10 +3217,8 @@ struct wiphy {
const struct ieee80211_ht_cap *ht_capa_mod_mask;
const struct ieee80211_vht_cap *vht_capa_mod_mask;
-#ifdef CONFIG_NET_NS
/* the network namespace this phy lives in currently */
- struct net *_net;
-#endif
+ possible_net_t _net;
#ifdef CONFIG_CFG80211_WEXT
const struct iw_handler_def *wext;
@@ -3808,6 +3827,34 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
/**
+ * regulatory_set_wiphy_regd - set regdom info for self managed drivers
+ * @wiphy: the wireless device we want to process the regulatory domain on
+ * @rd: the regulatory domain information to use for this wiphy
+ *
+ * Set the regulatory domain information for self-managed wiphys, only they
+ * may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more
+ * information.
+ *
+ * Return: 0 on success. -EINVAL, -EPERM
+ */
+int regulatory_set_wiphy_regd(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd);
+
+/**
+ * regulatory_set_wiphy_regd_sync_rtnl - set regdom for self-managed drivers
+ * @wiphy: the wireless device we want to process the regulatory domain on
+ * @rd: the regulatory domain information to use for this wiphy
+ *
+ * This function requires the RTNL to be held and applies the new regdomain
+ * synchronously to this wiphy. For more details see
+ * regulatory_set_wiphy_regd().
+ *
+ * Return: 0 on success. -EINVAL, -EPERM
+ */
+int regulatory_set_wiphy_regd_sync_rtnl(struct wiphy *wiphy,
+ struct ieee80211_regdomain *rd);
+
+/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
* @wiphy: the wireless device we want to process the regulatory domain on
* @regd: the custom regulatory domain to use for this wiphy
@@ -3997,14 +4044,16 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
const u8 *ssid, size_t ssid_len,
- u16 capa_mask, u16 capa_val);
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy);
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *ssid, size_t ssid_len)
{
return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len,
- WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
+ IEEE80211_BSS_TYPE_IBSS,
+ IEEE80211_PRIVACY_ANY);
}
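Callers of cfg80211_get_bss() now pass the BSS-type and privacy filters instead of raw capability mask/value pairs, as the updated cfg80211_get_ibss() above shows. A sketch of the equivalent ESS lookup; the wrapper function is hypothetical:

#include <net/cfg80211.h>

static struct cfg80211_bss *
example_find_ess(struct wiphy *wiphy, struct ieee80211_channel *chan,
		 const u8 *bssid, const u8 *ssid, size_t ssid_len,
		 bool privacy)
{
	/* was: capa_mask/capa_val built from WLAN_CAPABILITY_ESS (and
	 * WLAN_CAPABILITY_PRIVACY)
	 */
	return cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
				IEEE80211_BSS_TYPE_ESS,
				IEEE80211_PRIVACY(privacy));
}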
/**
@@ -4245,6 +4294,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
int approxlen);
struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
int vendor_event_idx,
@@ -4299,6 +4349,7 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
/**
* cfg80211_vendor_event_alloc - allocate vendor-specific event skb
* @wiphy: the wiphy
+ * @wdev: the wireless device
* @event_idx: index of the vendor event in the wiphy's vendor_events
* @approxlen: an upper bound of the length of the data that will
* be put into the skb
@@ -4307,16 +4358,20 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
* This function allocates and pre-fills an skb for an event on the
* vendor-specific multicast group.
*
+ * If wdev != NULL, both the ifindex and identifier of the specified
+ * wireless device are added to the event message before the vendor data
+ * attribute.
+ *
* When done filling the skb, call cfg80211_vendor_event() with the
* skb to send the event.
*
* Return: An allocated and pre-filled skb. %NULL if any errors happen.
*/
static inline struct sk_buff *
-cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen,
- int event_idx, gfp_t gfp)
+cfg80211_vendor_event_alloc(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int approxlen, int event_idx, gfp_t gfp)
{
- return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR,
+ return __cfg80211_alloc_event_skb(wiphy, wdev, NL80211_CMD_VENDOR,
NL80211_ATTR_VENDOR_DATA,
event_idx, approxlen, gfp);
}
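A hypothetical driver-side sketch of the wdev-aware allocator; the event index and the vendor attribute number are placeholders for whatever the driver registered in its vendor_events array.

static void example_send_vendor_event(struct wiphy *wiphy,
				      struct wireless_dev *wdev, u32 value)
{
	struct sk_buff *skb;

	/* index 0 into wiphy->vendor_events is a placeholder */
	skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL);
	if (!skb)
		return;

	/* attribute type 1 is a placeholder for a driver-defined vendor attr */
	if (nla_put_u32(skb, 1, value)) {
		kfree_skb(skb);
		return;
	}

	cfg80211_vendor_event(skb, GFP_KERNEL);
}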
@@ -4417,7 +4472,7 @@ static inline int cfg80211_testmode_reply(struct sk_buff *skb)
static inline struct sk_buff *
cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
{
- return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE,
+ return __cfg80211_alloc_event_skb(wiphy, NULL, NL80211_CMD_TESTMODE,
NL80211_ATTR_TESTDATA, -1,
approxlen, gfp);
}
@@ -4520,13 +4575,15 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
* @ie: information elements of the deauth/disassoc frame (may be %NULL)
* @ie_len: length of IEs
* @reason: reason code for the disconnection, set it to 0 if unknown
+ * @locally_generated: disconnection was requested locally
* @gfp: allocation flags
*
* After it calls this function, the driver should enter an idle state
* and not try to connect to any AP any more.
*/
void cfg80211_disconnected(struct net_device *dev, u16 reason,
- const u8 *ie, size_t ie_len, gfp_t gfp);
+ const u8 *ie, size_t ie_len,
+ bool locally_generated, gfp_t gfp);
/**
* cfg80211_ready_on_channel - notification of remain_on_channel start
@@ -4565,13 +4622,27 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo, gfp_t gfp);
/**
+ * cfg80211_del_sta_sinfo - notify userspace about deletion of a station
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @sinfo: the station information/statistics
+ * @gfp: allocation flags
+ */
+void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp);
+
+/**
* cfg80211_del_sta - notify userspace about deletion of a station
*
* @dev: the netdev
* @mac_addr: the station's address
* @gfp: allocation flags
*/
-void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp);
+static inline void cfg80211_del_sta(struct net_device *dev,
+ const u8 *mac_addr, gfp_t gfp)
+{
+ cfg80211_del_sta_sinfo(dev, mac_addr, NULL, gfp);
+}
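A short sketch of the sinfo-carrying variant, assuming the caller has final per-station counters to report; the filled bit shown is one plausible choice, not a requirement.

static void example_report_sta_gone(struct net_device *dev,
				    const u8 *mac_addr, u32 tx_packets)
{
	struct station_info sinfo = {};

	/* attach final statistics to the deletion event */
	sinfo.filled = BIT(NL80211_STA_INFO_TX_PACKETS);
	sinfo.tx_packets = tx_packets;

	cfg80211_del_sta_sinfo(dev, mac_addr, &sinfo, GFP_KERNEL);
}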
/**
* cfg80211_conn_failed - connection request failed notification
@@ -4833,6 +4904,17 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
bool ieee80211_operating_class_to_band(u8 operating_class,
enum ieee80211_band *band);
+/**
+ * ieee80211_chandef_to_operating_class - convert chandef to operating class
+ *
+ * @chandef: the chandef to convert
+ * @op_class: a pointer to the resulting operating class
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
+ u8 *op_class);
+
/*
* cfg80211_tdls_oper_request - request userspace to perform TDLS operation
* @dev: the device on which the operation is requested
@@ -4921,6 +5003,64 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
u8 *buf, unsigned int bufsize);
/**
+ * ieee80211_ie_split_ric - split an IE buffer according to ordering (with RIC)
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @after_ric: array IE types that come after the RIC element
+ * @n_after_ric: size of the @after_ric array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller.
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if they are not, the result of using this function
+ * will not be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remaining)
+ * buffer should be used.
+ */
+size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids,
+ const u8 *after_ric, int n_after_ric,
+ size_t offset);
+
+/**
+ * ieee80211_ie_split - split an IE buffer according to ordering
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller.
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if they are not, the result of using this function
+ * will not be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remaining)
+ * buffer should be used.
+ */
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset);
+
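A minimal sketch of the non-RIC variant; example_split_assoc_ies() is hypothetical, the element ID list is illustrative, and the caller is assumed to have validated the buffer.

static size_t example_split_assoc_ies(const u8 *ies, size_t ielen)
{
	/* element IDs allowed before the split point (illustrative set) */
	static const u8 before_split[] = {
		WLAN_EID_SSID,
		WLAN_EID_SUPP_RATES,
		WLAN_EID_EXT_SUPP_RATES,
	};
	size_t offset;

	offset = ieee80211_ie_split(ies, ielen, before_split,
				    ARRAY_SIZE(before_split), 0);

	/* ies[0..offset) holds the listed elements, ies[offset..ielen) the rest */
	return offset;
}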
+/**
* cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
* @wdev: the wireless device reporting the wakeup
* @wakeup: the wakeup report
@@ -5033,6 +5173,42 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
*/
void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy);
+/**
+ * wiphy_ext_feature_set - set the extended feature flag
+ *
+ * @wiphy: the wiphy to modify.
+ * @ftidx: extended feature bit index.
+ *
+ * The extended features are flagged in multiple bytes (see
+ * &struct wiphy.@ext_features)
+ */
+static inline void wiphy_ext_feature_set(struct wiphy *wiphy,
+ enum nl80211_ext_feature_index ftidx)
+{
+ u8 *ft_byte;
+
+ ft_byte = &wiphy->ext_features[ftidx / 8];
+ *ft_byte |= BIT(ftidx % 8);
+}
+
+/**
+ * wiphy_ext_feature_isset - check the extended feature flag
+ *
+ * @wiphy: the wiphy to check.
+ * @ftidx: extended feature bit index.
+ *
+ * The extended features are flagged in multiple bytes (see
+ * &struct wiphy.@ext_features)
+ */
+static inline bool
+wiphy_ext_feature_isset(struct wiphy *wiphy,
+ enum nl80211_ext_feature_index ftidx)
+{
+ u8 ft_byte;
+
+ ft_byte = wiphy->ext_features[ftidx / 8];
+ return (ft_byte & BIT(ftidx % 8)) != 0;
+}
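A generic sketch of how a driver would use the pair; the feature index is passed in rather than hard-coded because no concrete nl80211_ext_feature_index values are assumed here.

static void example_advertise_ext_feature(struct wiphy *wiphy,
					  enum nl80211_ext_feature_index idx)
{
	/* set before wiphy_register() so userspace sees the capability */
	wiphy_ext_feature_set(wiphy, idx);

	if (wiphy_ext_feature_isset(wiphy, idx))
		pr_debug("extended feature %d advertised\n", idx);
}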
/* ethtool helper */
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 7f713ac..290a9a6 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -25,20 +25,27 @@
#include <net/nl802154.h>
struct wpan_phy;
+struct wpan_phy_cca;
struct cfg802154_ops {
struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
const char *name,
+ unsigned char name_assign_type,
int type);
void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
struct net_device *dev);
int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
const char *name,
+ unsigned char name_assign_type,
enum nl802154_iftype type,
__le64 extended_addr);
int (*del_virtual_intf)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev);
int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
+ int (*set_cca_mode)(struct wpan_phy *wpan_phy,
+ const struct wpan_phy_cca *cca);
+ int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level);
+ int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power);
int (*set_pan_id)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, __le16 pan_id);
int (*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -56,9 +63,66 @@ struct cfg802154_ops {
struct wpan_dev *wpan_dev, bool mode);
};
-struct wpan_phy {
- struct mutex pib_lock;
+static inline bool
+wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st)
+{
+ switch (st) {
+ case NL802154_SUPPORTED_BOOL_TRUE:
+ return b;
+ case NL802154_SUPPORTED_BOOL_FALSE:
+ return !b;
+ case NL802154_SUPPORTED_BOOL_BOTH:
+ return true;
+ default:
+ WARN_ON(1);
+ }
+
+ return false;
+}
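A hypothetical use of the helper in a driver callback: the requested listen-before-talk setting is checked against what the PHY reports as supported before any hardware is touched.

static int example_set_lbt(struct wpan_phy *wpan_phy, bool on)
{
	/* refuse settings the transceiver cannot provide */
	if (!wpan_phy_supported_bool(on, wpan_phy->supported.lbt))
		return -EOPNOTSUPP;

	/* program the transceiver's LBT mode here */
	return 0;
}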
+
+struct wpan_phy_supported {
+ u32 channels[IEEE802154_MAX_PAGE + 1],
+ cca_modes, cca_opts, iftypes;
+ enum nl802154_supported_bool_states lbt;
+ u8 min_minbe, max_minbe, min_maxbe, max_maxbe,
+ min_csma_backoffs, max_csma_backoffs;
+ s8 min_frame_retries, max_frame_retries;
+ size_t tx_powers_size, cca_ed_levels_size;
+ const s32 *tx_powers, *cca_ed_levels;
+};
+
+struct wpan_phy_cca {
+ enum nl802154_cca_modes mode;
+ enum nl802154_cca_opts opt;
+};
+
+static inline bool
+wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
+{
+ if (a->mode != b->mode)
+ return false;
+ if (a->mode == NL802154_CCA_ENERGY_CARRIER)
+ return a->opt == b->opt;
+
+ return true;
+}
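A sketch of a hypothetical set_cca_mode() implementation that uses the comparison helper to skip redundant hardware writes.

static int example_set_cca_mode(struct wpan_phy *wpan_phy,
				const struct wpan_phy_cca *cca)
{
	/* nothing to do if the requested CCA settings are already active */
	if (wpan_phy_cca_cmp(&wpan_phy->cca, cca))
		return 0;

	/* program the transceiver, then cache the new settings */
	wpan_phy->cca = *cca;
	return 0;
}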
+
+/**
+ * enum wpan_phy_flags - WPAN PHY capability flags
+ *
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that the transceiver supports
+ * transmit power setting.
+ * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that the transceiver supports
+ * CCA ED level setting.
+ * @WPAN_PHY_FLAG_CCA_MODE: Indicates that the transceiver supports
+ * CCA mode setting.
+ */
+enum wpan_phy_flags {
+ WPAN_PHY_FLAG_TXPOWER = BIT(1),
+ WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
+ WPAN_PHY_FLAG_CCA_MODE = BIT(3),
+};
+
+struct wpan_phy {
/* If multiple wpan_phys are registered and you're handed e.g.
* a regular netdev with assigned ieee802154_ptr, you won't
* know whether it points to a wpan_phy your driver has registered
@@ -67,6 +131,8 @@ struct wpan_phy {
*/
const void *privid;
+ u32 flags;
+
/*
* This is a PIB according to 802.15.4-2011.
* We do not provide timing-related variables, as they
@@ -74,12 +140,14 @@ struct wpan_phy {
*/
u8 current_channel;
u8 current_page;
- u32 channels_supported[IEEE802154_MAX_PAGE + 1];
- s8 transmit_power;
- u8 cca_mode;
+ struct wpan_phy_supported supported;
+ /* current transmit_power in mBm */
+ s32 transmit_power;
+ struct wpan_phy_cca cca;
__le64 perm_extended_addr;
+ /* current cca ed threshold in mBm */
s32 cca_ed_level;
/* PHY depended MAC PIB values */
@@ -111,9 +179,9 @@ struct wpan_dev {
__le64 extended_addr;
/* MAC BSN field */
- u8 bsn;
+ atomic_t bsn;
/* MAC DSN field */
- u8 dsn;
+ atomic_t dsn;
u8 min_be;
u8 max_be;
diff --git a/include/net/checksum.h b/include/net/checksum.h
index e339a95..2d1d73c 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,7 +122,9 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
- *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
+ __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
+
+ *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}
/* Implements RFC 1624 (Incremental Internet Checksum)
@@ -167,4 +169,9 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
return delta;
}
+static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+{
+ *psum = csum_fold(csum_sub(delta, *psum));
+}
+
#endif
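A minimal sketch of the incremental-update helpers when rewriting an IPv4 destination address; for TCP/UDP payloads the pseudo-header checksum would also need inet_proto_csum_replace4(), which is omitted here, and example_rewrite_daddr() is hypothetical.

#include <linux/ip.h>
#include <net/checksum.h>

static void example_rewrite_daddr(struct iphdr *iph, __be32 new_daddr)
{
	/* RFC 1624 style incremental update of the IP header checksum */
	csum_replace4(&iph->check, iph->daddr, new_daddr);
	iph->daddr = new_daddr;
}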
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index a6fd939..3ebb168 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -121,13 +121,6 @@ extern int cipso_v4_rbm_strictvalid;
#endif
/*
- * Helper Functions
- */
-
-#define CIPSO_V4_OPTEXIST(x) (IPCB(x)->opt.cipso != 0)
-#define CIPSO_V4_OPTPTR(x) (skb_network_header(x) + IPCB(x)->opt.cipso)
-
-/*
* DOI List Functions
*/
@@ -190,7 +183,7 @@ static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
#ifdef CONFIG_NETLABEL
void cipso_v4_cache_invalidate(void);
-int cipso_v4_cache_add(const struct sk_buff *skb,
+int cipso_v4_cache_add(const unsigned char *cipso_ptr,
const struct netlbl_lsm_secattr *secattr);
#else
static inline void cipso_v4_cache_invalidate(void)
@@ -198,7 +191,7 @@ static inline void cipso_v4_cache_invalidate(void)
return;
}
-static inline int cipso_v4_cache_add(const struct sk_buff *skb,
+static inline int cipso_v4_cache_add(const unsigned char *cipso_ptr,
const struct netlbl_lsm_secattr *secattr)
{
return 0;
@@ -211,6 +204,8 @@ static inline int cipso_v4_cache_add(const struct sk_buff *skb,
#ifdef CONFIG_NETLABEL
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway);
+int cipso_v4_getattr(const unsigned char *cipso,
+ struct netlbl_lsm_secattr *secattr);
int cipso_v4_sock_setattr(struct sock *sk,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr);
@@ -226,6 +221,7 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
int cipso_v4_skbuff_delattr(struct sk_buff *skb);
int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
struct netlbl_lsm_secattr *secattr);
+unsigned char *cipso_v4_optptr(const struct sk_buff *skb);
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option);
#else
static inline void cipso_v4_error(struct sk_buff *skb,
@@ -235,6 +231,12 @@ static inline void cipso_v4_error(struct sk_buff *skb,
return;
}
+static inline int cipso_v4_getattr(const unsigned char *cipso,
+ struct netlbl_lsm_secattr *secattr)
+{
+ return -ENOSYS;
+}
+
static inline int cipso_v4_sock_setattr(struct sock *sk,
const struct cipso_v4_doi *doi_def,
const struct netlbl_lsm_secattr *secattr)
@@ -282,6 +284,11 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
return -ENOSYS;
}
+static inline unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
+{
+ return NULL;
+}
+
static inline int cipso_v4_validate(const struct sk_buff *skb,
unsigned char **option)
{
diff --git a/include/net/codel.h b/include/net/codel.h
index aeee280..267e702 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -7,7 +7,7 @@
* Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
* Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
* Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
- * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -119,12 +119,16 @@ static inline u32 codel_time_to_us(codel_time_t val)
/**
* struct codel_params - contains codel parameters
* @target: target queue size (in time units)
+ * @ce_threshold: threshold for marking packets with ECN CE
* @interval: width of moving time window
+ * @mtu: device MTU, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
*/
struct codel_params {
codel_time_t target;
+ codel_time_t ce_threshold;
codel_time_t interval;
+ u32 mtu;
bool ecn;
};
@@ -159,17 +163,24 @@ struct codel_vars {
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
* ecn_mark: number of packets we ECN marked instead of dropping
+ * @ce_mark: number of packets CE-marked because their sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
u32 drop_count;
u32 ecn_mark;
+ u32 ce_mark;
};
-static void codel_params_init(struct codel_params *params)
+#define CODEL_DISABLED_THRESHOLD INT_MAX
+
+static void codel_params_init(struct codel_params *params,
+ const struct Qdisc *sch)
{
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
+ params->mtu = psched_mtu(qdisc_dev(sch));
+ params->ce_threshold = CODEL_DISABLED_THRESHOLD;
params->ecn = false;
}
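A sketch of how a qdisc would wire up the new initializer; the example_* private data layout is hypothetical, and enabling ce_threshold is optional since it starts out at CODEL_DISABLED_THRESHOLD.

struct example_codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
};

static int example_codel_init(struct Qdisc *sch)
{
	struct example_codel_sched_data *q = qdisc_priv(sch);

	codel_params_init(&q->params, sch);	/* mtu set, ce_threshold disabled */
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);

	/* optionally mark CE above a 1 ms sojourn time */
	q->params.ce_threshold = MS2TIME(1);
	return 0;
}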
@@ -180,7 +191,7 @@ static void codel_vars_init(struct codel_vars *vars)
static void codel_stats_init(struct codel_stats *stats)
{
- stats->maxpacket = 256;
+ stats->maxpacket = 0;
}
/*
@@ -234,7 +245,7 @@ static bool codel_should_drop(const struct sk_buff *skb,
stats->maxpacket = qdisc_pkt_len(skb);
if (codel_time_before(vars->ldelay, params->target) ||
- sch->qstats.backlog <= stats->maxpacket) {
+ sch->qstats.backlog <= params->mtu) {
/* went below - stay below for at least interval */
vars->first_above_time = 0;
return false;
@@ -350,6 +361,9 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt);
}
end:
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+ INET_ECN_set_ce(skb))
+ stats->ce_mark++;
return skb;
}
#endif
diff --git a/include/net/compat.h b/include/net/compat.h
index 42a9c84..48103cf 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,7 +40,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
#define compat_mmsghdr mmsghdr
#endif /* defined(CONFIG_COMPAT) */
-ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
unsigned int);
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 597b88a..207d9ba 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -49,6 +49,9 @@ struct dcbnl_rtnl_ops {
int (*ieee_setets) (struct net_device *, struct ieee_ets *);
int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
+ int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *);
+ int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *);
+ int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *);
int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
int (*ieee_getapp) (struct net_device *, struct dcb_app *);
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index fac4e3f..d042426 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -18,10 +18,11 @@ struct dn_neigh {
void dn_neigh_init(void);
void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct sk_buff *skb);
+int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb);
void dn_neigh_pointopoint_hello(struct sk_buff *skb);
int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb);
extern struct neigh_table dn_neigh_table;
diff --git a/include/net/dsa.h b/include/net/dsa.h
index ed3c34b..fbca63b 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -72,6 +72,7 @@ struct dsa_platform_data {
* to the root switch chip of the tree.
*/
struct device *netdev;
+ struct net_device *of_netdev;
/*
* Info structs describing each of the switch chips
@@ -128,6 +129,11 @@ struct dsa_switch {
int index;
/*
+ * Tagging protocol understood by this switch
+ */
+ enum dsa_tag_protocol tag_protocol;
+
+ /*
* Configuration data for this switch.
*/
struct dsa_chip_data *pd;
@@ -165,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
}
+static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
+{
+ return ds->phys_port_mask & (1 << p) && ds->ports[p];
+}
+
static inline u8 dsa_upstream_port(struct dsa_switch *ds)
{
struct dsa_switch_tree *dst = ds->dst;
@@ -275,6 +286,22 @@ struct dsa_switch_driver {
int (*get_regs_len)(struct dsa_switch *ds, int port);
void (*get_regs)(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *p);
+
+ /*
+ * Bridge integration
+ */
+ int (*port_join_bridge)(struct dsa_switch *ds, int port,
+ u32 br_port_mask);
+ int (*port_leave_bridge)(struct dsa_switch *ds, int port,
+ u32 br_port_mask);
+ int (*port_stp_update)(struct dsa_switch *ds, int port,
+ u8 state);
+ int (*fdb_add)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_del)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_getnext)(struct dsa_switch *ds, int port,
+ unsigned char *addr, bool *is_static);
};
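A skeleton of a hypothetical switch driver filling in the new FDB hooks; all other mandatory operations are omitted for brevity, and the bodies only mark where hardware programming would go.

static int example_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	/* install a static FDB entry for (addr, vid) on this port */
	return 0;
}

static int example_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	/* remove the matching FDB entry */
	return 0;
}

static struct dsa_switch_driver example_switch_driver = {
	.fdb_add	= example_fdb_add,
	.fdb_del	= example_fdb_del,
};

static int __init example_dsa_init(void)
{
	register_switch_driver(&example_switch_driver);
	return 0;
}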
void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst.h b/include/net/dst.h
index a8ae4e7..2bc73f8a 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -109,7 +109,6 @@ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
-#define DST_METRICS_FORCE_OVERWRITE 0x2UL
#define DST_METRICS_FLAGS 0x3UL
#define __DST_METRICS_PTR(Y) \
((u32 *)((Y) & ~DST_METRICS_FLAGS))
@@ -120,11 +119,6 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
return dst->_metrics & DST_METRICS_READ_ONLY;
}
-static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
-{
- dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
-}
-
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
@@ -355,18 +349,6 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
__skb_tunnel_rx(skb, dev, net);
}
-/* Children define the path of the packet through the
- * Linux networking. Thus, destinations are stackable.
- */
-
-static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
-{
- struct dst_entry *child = dst_clone(skb_dst(skb)->child);
-
- skb_dst_drop(skb);
- return child;
-}
-
int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
@@ -481,6 +463,7 @@ void dst_init(void);
enum {
XFRM_LOOKUP_ICMP = 1 << 0,
XFRM_LOOKUP_QUEUE = 1 << 1,
+ XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};
struct flowi;
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 1f99a1d..d642539 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -12,7 +12,6 @@ struct sock;
struct dst_ops {
unsigned short family;
- __be16 protocol;
unsigned int gc_thresh;
int (*gc)(struct dst_ops *ops);
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index e584de1..903a55e 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -36,7 +36,8 @@ struct fib_lookup_arg {
void *result;
struct fib_rule *rule;
int flags;
-#define FIB_LOOKUP_NOREF 1
+#define FIB_LOOKUP_NOREF 1
+#define FIB_LOOKUP_IGNORE_LINKSTATE 2
};
struct fib_rules_ops {
@@ -58,7 +59,7 @@ struct fib_rules_ops {
struct sk_buff *,
struct fib_rule_hdr *,
struct nlattr **);
- void (*delete)(struct fib_rule *);
+ int (*delete)(struct fib_rule *);
int (*compare)(struct fib_rule *,
struct fib_rule_hdr *,
struct nlattr **);
@@ -95,17 +96,10 @@ static inline void fib_rule_get(struct fib_rule *rule)
atomic_inc(&rule->refcnt);
}
-static inline void fib_rule_put_rcu(struct rcu_head *head)
-{
- struct fib_rule *rule = container_of(head, struct fib_rule, rcu);
- release_net(rule->fr_net);
- kfree(rule);
-}
-
static inline void fib_rule_put(struct fib_rule *rule)
{
if (atomic_dec_and_test(&rule->refcnt))
- call_rcu(&rule->rcu, fib_rule_put_rcu);
+ kfree_rcu(rule, rcu);
}
static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
new file mode 100644
index 0000000..1a8c224
--- /dev/null
+++ b/include/net/flow_dissector.h
@@ -0,0 +1,220 @@
+#ifndef _NET_FLOW_DISSECTOR_H
+#define _NET_FLOW_DISSECTOR_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/in6.h>
+#include <uapi/linux/if_ether.h>
+
+/**
+ * struct flow_dissector_key_control:
+ * @thoff: Transport header offset
+ * @addr_type: Type of the addresses stored in the addrs key (IPv4/IPv6/TIPC)
+ */
+struct flow_dissector_key_control {
+ u16 thoff;
+ u16 addr_type;
+};
+
+/**
+ * struct flow_dissector_key_basic:
+ * @n_proto: Network header protocol (e.g. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (e.g. TCP/UDP)
+ */
+struct flow_dissector_key_basic {
+ __be16 n_proto;
+ u8 ip_proto;
+ u8 padding;
+};
+
+struct flow_dissector_key_tags {
+ u32 vlan_id:12,
+ flow_label:20;
+};
+
+struct flow_dissector_key_keyid {
+ __be32 keyid;
+};
+
+/**
+ * struct flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv4_addrs {
+ /* (src,dst) must be grouped, in the same way as in the IP header */
+ __be32 src;
+ __be32 dst;
+};
+
+/**
+ * struct flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv6_addrs {
+ /* (src,dst) must be grouped, in the same way as in the IP header */
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
+/**
+ * struct flow_dissector_key_tipc_addrs:
+ * @srcnode: source node address
+ */
+struct flow_dissector_key_tipc_addrs {
+ __be32 srcnode;
+};
+
+/**
+ * struct flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ */
+struct flow_dissector_key_addrs {
+ union {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_tipc_addrs tipcaddrs;
+ };
+};
+
+/**
+ * struct flow_dissector_key_ports:
+ * @ports: port numbers of Transport header
+ * src: source port number
+ * dst: destination port number
+ */
+struct flow_dissector_key_ports {
+ union {
+ __be32 ports;
+ struct {
+ __be16 src;
+ __be16 dst;
+ };
+ };
+};
+
+
+/**
+ * struct flow_dissector_key_eth_addrs:
+ * @src: source Ethernet address
+ * @dst: destination Ethernet address
+ */
+struct flow_dissector_key_eth_addrs {
+ /* (dst,src) must be grouped, in the same way as in the Ethernet header */
+ unsigned char dst[ETH_ALEN];
+ unsigned char src[ETH_ALEN];
+};
+
+enum flow_dissector_key_id {
+ FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
+ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
+ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
+ FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_tags */
+ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
+ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
+
+ FLOW_DISSECTOR_KEY_MAX,
+};
+
+struct flow_dissector_key {
+ enum flow_dissector_key_id key_id;
+ size_t offset; /* offset of struct flow_dissector_key_*
+ in the target struct */
+};
+
+struct flow_dissector {
+ unsigned int used_keys; /* each bit represents presence of one key id */
+ unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
+};
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+ const struct flow_dissector_key *key,
+ unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
+ void *data, __be16 proto, int nhoff, int hlen);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container)
+{
+ return __skb_flow_dissect(skb, flow_dissector, target_container,
+ NULL, 0, 0, 0);
+}
+
+struct flow_keys {
+ struct flow_dissector_key_control control;
+#define FLOW_KEYS_HASH_START_FIELD basic
+ struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_keyid keyid;
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+#define FLOW_KEYS_HASH_OFFSET \
+ offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD)
+
+__be32 flow_get_u32_src(const struct flow_keys *flow);
+__be32 flow_get_u32_dst(const struct flow_keys *flow);
+
+extern struct flow_dissector flow_keys_dissector;
+extern struct flow_dissector flow_keys_buf_dissector;
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+ struct flow_keys *flow)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+ NULL, 0, 0, 0);
+}
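A minimal sketch combining the new dissector entry point with flow_hash_from_keys(); example_flow_hash() is hypothetical.

static u32 example_flow_hash(const struct sk_buff *skb)
{
	struct flow_keys keys;

	/* dissect the L3/L4 headers into the generic flow_keys layout */
	if (!skb_flow_dissect_flow_keys(skb, &keys))
		return 0;

	return flow_hash_from_keys(&keys);
}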
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+ void *data, __be16 proto,
+ int nhoff, int hlen)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+ data, proto, nhoff, hlen);
+}
+
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+ int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
+
+/* struct flow_keys_digest:
+ *
+ * This structure is used to hold a digest of the full flow keys. This is a
+ * larger "hash" of a flow to allow definitively matching specific flows where
+ * the 32-bit skb->hash is not large enough. The size is limited to 16 bytes so
+ * that it can be used in the CB of an skb (see sch_choke for an example).
+ */
+#define FLOW_KEYS_DIGEST_LEN 16
+struct flow_keys_digest {
+ u8 data[FLOW_KEYS_DIGEST_LEN];
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+ const struct flow_keys *flow);
+
+#endif
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
deleted file mode 100644
index 7ee2df0..0000000
--- a/include/net/flow_keys.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef _NET_FLOW_KEYS_H
-#define _NET_FLOW_KEYS_H
-
-/* struct flow_keys:
- * @src: source ip address in case of IPv4
- * For IPv6 it contains 32bit hash of src address
- * @dst: destination ip address in case of IPv4
- * For IPv6 it contains 32bit hash of dst address
- * @ports: port numbers of Transport header
- * port16[0]: src port number
- * port16[1]: dst port number
- * @thoff: Transport header offset
- * @n_proto: Network header protocol (eg. IPv4/IPv6)
- * @ip_proto: Transport header protocol (eg. TCP/UDP)
- * All the members, except thoff, are in network byte order.
- */
-struct flow_keys {
- /* (src,dst) must be grouped, in the same way than in IP header */
- __be32 src;
- __be32 dst;
- union {
- __be32 ports;
- __be16 port16[2];
- };
- u16 thoff;
- u16 n_proto;
- u8 ip_proto;
-};
-
-bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
- void *data, __be16 proto, int nhoff, int hlen);
-static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
-{
- return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
-}
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- void *data, int hlen_proto);
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
-{
- return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-u32 flow_hash_from_keys(struct flow_keys *keys);
-unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
- __be16 protocol);
-#endif
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 6c92415..a9af1cc 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -92,9 +92,7 @@ struct genl_info {
struct genlmsghdr * genlhdr;
void * userhdr;
struct nlattr ** attrs;
-#ifdef CONFIG_NET_NS
- struct net * _net;
-#endif
+ possible_net_t _net;
void * user_ptr[2];
struct sock * dst_sk;
};
@@ -211,6 +209,23 @@ static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr,
}
/**
+ * genlmsg_parse - parse attributes of a genetlink message
+ * @nlh: netlink message header
+ * @family: genetlink message family
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ */
+static inline int genlmsg_parse(const struct nlmsghdr *nlh,
+ const struct genl_family *family,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy)
+{
+ return nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
+ policy);
+}
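A sketch of the typical call site, a dump handler that filters on a request attribute; example_genl_family, example_policy and the EXAMPLE_ATTR_* constants are hypothetical stand-ins for a real family definition.

static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];
	int err;

	/* validate and index the attributes that follow the genetlink header */
	err = genlmsg_parse(cb->nlh, &example_genl_family, tb,
			    EXAMPLE_ATTR_MAX, example_policy);
	if (err < 0)
		return err;

	if (tb[EXAMPLE_ATTR_IFINDEX])
		pr_debug("dump filtered on ifindex %u\n",
			 nla_get_u32(tb[EXAMPLE_ATTR_IFINDEX]));

	return 0;
}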
+
+/**
* genl_dump_check_consistent - check if sequence is consistent and advertise if not
* @cb: netlink callback structure that stores the sequence number
* @user_hdr: user header as returned from genlmsg_put()
@@ -250,9 +265,9 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb,
* @skb: socket buffer the message is stored in
* @hdr: user specific header
*/
-static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
+static inline void genlmsg_end(struct sk_buff *skb, void *hdr)
{
- return nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+ nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
}
/**
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 112132c..2a0543a 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,19 +62,23 @@ struct genevehdr {
struct geneve_opt options[];
};
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+ return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
#ifdef CONFIG_INET
struct geneve_sock;
typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
struct geneve_sock {
- struct hlist_node hlist;
+ struct list_head list;
geneve_rcv_t *rcv;
void *rcv_data;
- struct work_struct del_work;
struct socket *sock;
struct rcu_head rcu;
- atomic_t refcnt;
+ int refcnt;
struct udp_offload udp_offloads;
};
@@ -91,7 +95,7 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
__u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
__be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool xnet);
+ bool csum, bool xnet);
#endif /*ifdef CONFIG_INET */
#endif /*ifdef__NET_GENEVE_H */
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 734d9b5..0f712c0 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -8,25 +8,23 @@
struct gro_cell {
struct sk_buff_head napi_skbs;
struct napi_struct napi;
-} ____cacheline_aligned_in_smp;
+};
struct gro_cells {
- unsigned int gro_cells_mask;
- struct gro_cell *cells;
+ struct gro_cell __percpu *cells;
};
static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
- struct gro_cell *cell = gcells->cells;
+ struct gro_cell *cell;
struct net_device *dev = skb->dev;
- if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
+ if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
netif_rx(skb);
return;
}
- if (skb_rx_queue_recorded(skb))
- cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
+ cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
atomic_long_inc(&dev->rx_dropped);
@@ -72,15 +70,12 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
{
int i;
- gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
- gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
- sizeof(struct gro_cell),
- GFP_KERNEL);
+ gcells->cells = alloc_percpu(struct gro_cell);
if (!gcells->cells)
return -ENOMEM;
- for (i = 0; i <= gcells->gro_cells_mask; i++) {
- struct gro_cell *cell = gcells->cells + i;
+ for_each_possible_cpu(i) {
+ struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
skb_queue_head_init(&cell->napi_skbs);
netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
@@ -91,16 +86,16 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
static inline void gro_cells_destroy(struct gro_cells *gcells)
{
- struct gro_cell *cell = gcells->cells;
int i;
- if (!cell)
+ if (!gcells->cells)
return;
- for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
+ for_each_possible_cpu(i) {
+ struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
netif_napi_del(&cell->napi);
skb_queue_purge(&cell->napi_skbs);
}
- kfree(gcells->cells);
+ free_percpu(gcells->cells);
gcells->cells = NULL;
}
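A sketch of a tunnel driver using the now per-CPU cells; struct example_tunnel and the callbacks are hypothetical, but the init/receive/destroy pairing follows the helpers above.

struct example_tunnel {
	struct gro_cells	gro_cells;
	struct net_device	*dev;
};

static int example_tunnel_init(struct net_device *dev)
{
	struct example_tunnel *t = netdev_priv(dev);

	t->dev = dev;
	/* allocates one gro_cell per possible CPU */
	return gro_cells_init(&t->gro_cells, dev);
}

static void example_tunnel_rx(struct example_tunnel *t, struct sk_buff *skb)
{
	skb->dev = t->dev;
	/* queues to the local CPU's NAPI cell, or falls back to netif_rx() */
	gro_cells_receive(&t->gro_cells, skb);
}

static void example_tunnel_uninit(struct net_device *dev)
{
	struct example_tunnel *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
}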
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 83bb8a73..2c10a9f 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -28,6 +28,8 @@
#include <linux/skbuff.h>
#include <linux/ieee802154.h>
+#include <net/cfg802154.h>
+
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 level:3,
@@ -337,22 +339,22 @@ struct ieee802154_mac_params {
s8 frame_retries;
bool lbt;
- u8 cca_mode;
+ struct wpan_phy_cca cca;
s32 cca_ed_level;
};
struct wpan_phy;
enum {
- IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0,
- IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1,
- IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2,
- IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3,
- IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4,
- IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5,
- IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6,
- IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7,
- IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8,
+ IEEE802154_LLSEC_PARAM_ENABLED = BIT(0),
+ IEEE802154_LLSEC_PARAM_FRAME_COUNTER = BIT(1),
+ IEEE802154_LLSEC_PARAM_OUT_LEVEL = BIT(2),
+ IEEE802154_LLSEC_PARAM_OUT_KEY = BIT(3),
+ IEEE802154_LLSEC_PARAM_KEY_SOURCE = BIT(4),
+ IEEE802154_LLSEC_PARAM_PAN_ID = BIT(5),
+ IEEE802154_LLSEC_PARAM_HWADDR = BIT(6),
+ IEEE802154_LLSEC_PARAM_COORD_HWADDR = BIT(7),
+ IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = BIT(8),
};
struct ieee802154_llsec_ops {
@@ -420,16 +422,6 @@ struct ieee802154_mlme_ops {
struct ieee802154_mac_params *params);
struct ieee802154_llsec_ops *llsec;
-
- /* The fields below are required. */
-
- /*
- * FIXME: these should become the part of PIB/MIB interface.
- * However we still don't have IB interface of any kind
- */
- __le16 (*get_pan_id)(const struct net_device *dev);
- __le16 (*get_short_addr)(const struct net_device *dev);
- u8 (*get_dsn)(const struct net_device *dev);
};
static inline struct ieee802154_mlme_ops *
@@ -438,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev)
return dev->ml_priv;
}
-static inline struct ieee802154_reduced_mlme_ops *
-ieee802154_reduced_mlme_ops(const struct net_device *dev)
-{
- return dev->ml_priv;
-}
-
#endif
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 98e5f95..1c8b682 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -41,18 +41,18 @@ enum {
struct inet6_ifaddr {
struct in6_addr addr;
__u32 prefix_len;
-
+
/* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
__u32 valid_lft;
__u32 prefered_lft;
atomic_t refcnt;
spinlock_t lock;
- spinlock_t state_lock;
int state;
__u32 flags;
__u8 dad_probes;
+ __u8 stable_privacy_retry;
__u16 scope;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 74af137..6d539e4 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -28,8 +28,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req);
-struct request_sock *inet6_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
+struct request_sock *inet6_csk_search_req(struct sock *sk,
const __be16 rport,
const struct in6_addr *raddr,
const struct in6_addr *laddr,
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 9201afe0..7ff588c 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -38,8 +38,6 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
return jhash_3words(lhash, fhash, ports, initval);
}
-int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
-
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
* we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b2828a0..279f835 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -21,12 +21,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size);
+int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
-int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
- size_t size, int flags);
+int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
void inet_sock_destruct(struct sock *sk);
@@ -42,7 +41,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
- sk_release_kernel(sk);
+ sock_release(sk->sk_socket);
}
#endif
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 848e85c..0320bbb 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -98,7 +98,9 @@ struct inet_connection_sock {
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state;
+ __u8 icsk_ca_state:6,
+ icsk_ca_setsockopt:1,
+ icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
__u8 icsk_pending;
__u8 icsk_backoff;
@@ -125,10 +127,13 @@ struct inet_connection_sock {
/* Information on the current probe. */
int probe_size;
+
+ u32 probe_timestamp;
} icsk_mtup;
- u32 icsk_ca_priv[16];
u32 icsk_user_timeout;
-#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32))
+
+ u64 icsk_ca_priv[64 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
@@ -253,8 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
-struct request_sock *inet_csk_search_req(const struct sock *sk,
- struct request_sock ***prevp,
+struct request_sock *inet_csk_search_req(struct sock *sk,
const __be16 rport,
const __be32 raddr,
const __be32 laddr);
@@ -277,18 +281,10 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
-static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
- struct request_sock *req)
-{
- if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
- inet_csk_delete_keepalive_timer(sk);
-}
-
static inline void inet_csk_reqsk_queue_added(struct sock *sk,
const unsigned long timeout)
{
- if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
- inet_csk_reset_keepalive_timer(sk, timeout);
+ reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}
static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
@@ -306,26 +302,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
}
-static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
- struct request_sock *req,
- struct request_sock **prev)
-{
- reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
-}
-
-static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
- struct request_sock *req,
- struct request_sock **prev)
-{
- inet_csk_reqsk_queue_unlink(sk, req, prev);
- inet_csk_reqsk_queue_removed(sk, req);
- reqsk_free(req);
-}
-
-void inet_csk_reqsk_queue_prune(struct sock *parent,
- const unsigned long interval,
- const unsigned long timeout,
- const unsigned long max_rto);
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 8d17655..e1300b3 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -43,7 +43,7 @@ enum {
* @len: total length of the original datagram
* @meat: length of received fragments so far
* @flags: fragment queue flags
- * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
+ * @max_size: maximum received fragment size
* @net: namespace that this frag belongs to
*/
struct inet_frag_queue {
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index dd1950a..b73c88a 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
-#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
@@ -76,9 +75,7 @@ struct inet_ehash_bucket {
* ports are created in O(1) time? I thought so. ;-) -DaveM
*/
struct inet_bind_bucket {
-#ifdef CONFIG_NET_NS
- struct net *ib_net;
-#endif
+ possible_net_t ib_net;
unsigned short port;
signed char fastreuse;
signed char fastreuseport;
@@ -150,8 +147,6 @@ struct inet_hashinfo {
*/
struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
____cacheline_aligned_in_smp;
-
- atomic_t bsockets;
};
static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -168,52 +163,12 @@ static inline spinlock_t *inet_ehash_lockp(
return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
-static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
-{
- unsigned int i, size = 256;
-#if defined(CONFIG_PROVE_LOCKING)
- unsigned int nr_pcpus = 2;
-#else
- unsigned int nr_pcpus = num_possible_cpus();
-#endif
- if (nr_pcpus >= 4)
- size = 512;
- if (nr_pcpus >= 8)
- size = 1024;
- if (nr_pcpus >= 16)
- size = 2048;
- if (nr_pcpus >= 32)
- size = 4096;
- if (sizeof(spinlock_t) != 0) {
-#ifdef CONFIG_NUMA
- if (size * sizeof(spinlock_t) > PAGE_SIZE)
- hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
- else
-#endif
- hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
- GFP_KERNEL);
- if (!hashinfo->ehash_locks)
- return ENOMEM;
- for (i = 0; i < size; i++)
- spin_lock_init(&hashinfo->ehash_locks[i]);
- }
- hashinfo->ehash_locks_mask = size - 1;
- return 0;
-}
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
- if (hashinfo->ehash_locks) {
-#ifdef CONFIG_NUMA
- unsigned int size = (hashinfo->ehash_locks_mask + 1) *
- sizeof(spinlock_t);
- if (size > PAGE_SIZE)
- vfree(hashinfo->ehash_locks);
- else
-#endif
- kfree(hashinfo->ehash_locks);
- hashinfo->ehash_locks = NULL;
- }
+ kvfree(hashinfo->ehash_locks);
+ hashinfo->ehash_locks = NULL;
}
struct inet_bind_bucket *
@@ -223,8 +178,8 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
-static inline int inet_bhashfn(struct net *net, const __u16 lport,
- const int bhash_size)
+static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
+ const u32 bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
@@ -233,7 +188,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
const unsigned short snum);
/* These can have wildcards, don't try too hard. */
-static inline int inet_lhashfn(struct net *net, const unsigned short num)
+static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}
@@ -251,6 +206,7 @@ void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
void inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
@@ -385,13 +341,32 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
iph->daddr, dport, inet_iif(skb));
}
+u32 sk_ehashfn(const struct sock *sk);
+u32 inet6_ehashfn(const struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport);
+
+static inline void sk_daddr_set(struct sock *sk, __be32 addr)
+{
+ sk->sk_daddr = addr; /* alias of inet_daddr */
+#if IS_ENABLED(CONFIG_IPV6)
+ ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
+#endif
+}
+
+static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
+{
+ sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
+#if IS_ENABLED(CONFIG_IPV6)
+ ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
+#endif
+}
+
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16,
- struct inet_timewait_sock **),
- int (*hash)(struct sock *sk,
- struct inet_timewait_sock *twp));
+ struct inet_timewait_sock **));
int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a829b77..47eb67b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -16,7 +16,7 @@
#ifndef _INET_SOCK_H
#define _INET_SOCK_H
-
+#include <linux/bitops.h>
#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -27,6 +27,7 @@
#include <net/sock.h>
#include <net/request_sock.h>
#include <net/netns/hash.h>
+#include <net/tcp_states.h>
/** struct ip_options - IP Options
*
@@ -77,6 +78,10 @@ struct inet_request_sock {
#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
#define ir_iif req.__req_common.skc_bound_dev_if
+#define ir_cookie req.__req_common.skc_cookie
+#define ireq_net req.__req_common.skc_net
+#define ireq_state req.__req_common.skc_state
+#define ireq_family req.__req_common.skc_family
kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
@@ -88,11 +93,11 @@ struct inet_request_sock {
acked : 1,
no_srccheck: 1;
kmemcheck_bitfield_end(flags);
+ u32 ir_mark;
union {
struct ip_options_rcu *opt;
struct sk_buff *pktopts;
};
- u32 ir_mark;
};
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -100,13 +105,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
return (struct inet_request_sock *)sk;
}
-static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
- if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) {
+ if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
return skb->mark;
- } else {
- return sk->sk_mark;
- }
+
+ return sk->sk_mark;
}
struct inet_cork {
@@ -183,7 +187,9 @@ struct inet_sock {
transparent:1,
mc_all:1,
nodefrag:1;
+ __u8 bind_address_no_port:1;
__u8 rcv_tos;
+ __u8 convert_csum;
int uc_index;
int mc_index;
__be32 mc_addr;
@@ -194,6 +200,16 @@ struct inet_sock {
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
+/* cmsg flags for inet */
+#define IP_CMSG_PKTINFO BIT(0)
+#define IP_CMSG_TTL BIT(1)
+#define IP_CMSG_TOS BIT(2)
+#define IP_CMSG_RECVOPTS BIT(3)
+#define IP_CMSG_RETOPTS BIT(4)
+#define IP_CMSG_PASSSEC BIT(5)
+#define IP_CMSG_ORIGDSTADDR BIT(6)
+#define IP_CMSG_CHECKSUM BIT(7)
+
static inline struct inet_sock *inet_sk(const struct sock *sk)
{
return (struct inet_sock *)sk;
@@ -228,18 +244,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr,
initval);
}
-static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
-{
- struct request_sock *req = reqsk_alloc(ops);
- struct inet_request_sock *ireq = inet_rsk(req);
-
- if (req != NULL) {
- kmemcheck_annotate_bitfield(ireq, flags);
- ireq->opt = NULL;
- }
-
- return req;
-}
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk_listener);
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
@@ -250,4 +256,20 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
return flags;
}
+static inline void inet_inc_convert_csum(struct sock *sk)
+{
+ inet_sk(sk)->convert_csum++;
+}
+
+static inline void inet_dec_convert_csum(struct sock *sk)
+{
+ if (inet_sk(sk)->convert_csum > 0)
+ inet_sk(sk)->convert_csum--;
+}
+
+static inline bool inet_get_convert_csum(struct sock *sk)
+{
+ return !!inet_sk(sk)->convert_csum;
+}
+
#endif /* _INET_SOCK_H */
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 6c56603..360c480 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -31,67 +31,14 @@
struct inet_hashinfo;
-#define INET_TWDR_RECYCLE_SLOTS_LOG 5
-#define INET_TWDR_RECYCLE_SLOTS (1 << INET_TWDR_RECYCLE_SLOTS_LOG)
-
-/*
- * If time > 4sec, it is "slow" path, no recycling is required,
- * so that we select tick to get range about 4 seconds.
- */
-#if HZ <= 16 || HZ > 4096
-# error Unsupported: HZ <= 16 or HZ > 4096
-#elif HZ <= 32
-# define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 64
-# define INET_TWDR_RECYCLE_TICK (6 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 128
-# define INET_TWDR_RECYCLE_TICK (7 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 256
-# define INET_TWDR_RECYCLE_TICK (8 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 512
-# define INET_TWDR_RECYCLE_TICK (9 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 1024
-# define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 2048
-# define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#else
-# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#endif
-
-static inline u32 inet_tw_time_stamp(void)
-{
- return jiffies;
-}
-
-/* TIME_WAIT reaping mechanism. */
-#define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
-
-#define INET_TWDR_TWKILL_QUOTA 100
-
struct inet_timewait_death_row {
- /* Short-time timewait calendar */
- int twcal_hand;
- unsigned long twcal_jiffie;
- struct timer_list twcal_timer;
- struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS];
-
- spinlock_t death_lock;
- int tw_count;
- int period;
- u32 thread_slots;
- struct work_struct twkill_work;
- struct timer_list tw_timer;
- int slot;
- struct hlist_head cells[INET_TWDR_TWKILL_SLOTS];
- struct inet_hashinfo *hashinfo;
+ atomic_t tw_count;
+
+ struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
int sysctl_tw_recycle;
int sysctl_max_tw_buckets;
};
-void inet_twdr_hangman(unsigned long data);
-void inet_twdr_twkill_work(struct work_struct *work);
-void inet_twdr_twcal_tick(unsigned long data);
-
struct inet_bind_bucket;
/*
@@ -122,6 +69,7 @@ struct inet_timewait_sock {
#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
#define tw_dport __tw_common.skc_dport
#define tw_num __tw_common.skc_num
+#define tw_cookie __tw_common.skc_cookie
int tw_timeout;
volatile unsigned char tw_substate;
@@ -132,52 +80,18 @@ struct inet_timewait_sock {
__be16 tw_sport;
kmemcheck_bitfield_begin(flags);
/* And these are ours. */
- unsigned int tw_pad0 : 1, /* 1 bit hole */
+ unsigned int tw_kill : 1,
tw_transparent : 1,
tw_flowlabel : 20,
tw_pad : 2, /* 2 bits hole */
tw_tos : 8;
kmemcheck_bitfield_end(flags);
- u32 tw_ttd;
+ struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
- struct hlist_node tw_death_node;
+ struct inet_timewait_death_row *tw_dr;
};
#define tw_tclass tw_tos
-static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
-{
- return !hlist_unhashed(&tw->tw_death_node);
-}
-
-static inline void inet_twsk_dead_node_init(struct inet_timewait_sock *tw)
-{
- tw->tw_death_node.pprev = NULL;
-}
-
-static inline void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
-{
- __hlist_del(&tw->tw_death_node);
- inet_twsk_dead_node_init(tw);
-}
-
-static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
-{
- if (inet_twsk_dead_hashed(tw)) {
- __inet_twsk_del_dead_node(tw);
- return 1;
- }
- return 0;
-}
-
-#define inet_twsk_for_each(tw, node, head) \
- hlist_nulls_for_each_entry(tw, node, head, tw_node)
-
-#define inet_twsk_for_each_inmate(tw, jail) \
- hlist_for_each_entry(tw, jail, tw_death_node)
-
-#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
- hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
-
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
return (struct inet_timewait_sock *)sk;
@@ -192,16 +106,14 @@ int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo);
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ struct inet_timewait_death_row *dr,
const int state);
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo);
-void inet_twsk_schedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr,
- const int timeo, const int timewait_len);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw,
- struct inet_timewait_death_row *twdr);
+void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family);
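With the death row reduced to a counter and a per-socket tw_timer, callers hand the row to inet_twsk_alloc() once and the schedule/deschedule helpers lose their extra arguments. A minimal sketch of the intended calling pattern, not part of this patch; tcp_death_row and tcp_hashinfo are only assumed here as an example row and hash table:

static void example_enter_timewait(struct sock *sk, int state, int timeo)
{
        struct inet_timewait_sock *tw;

        tw = inet_twsk_alloc(sk, &tcp_death_row, state);
        if (!tw)
                return;                 /* allocation failed, caller falls back */

        /* ... copy addresses, ports and protocol state from sk into tw ... */

        __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
        inet_twsk_schedule(tw, timeo);  /* arms tw->tw_timer */
}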
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 80479ab..d5332dd 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -19,6 +19,7 @@ struct inetpeer_addr_base {
union {
__be32 a4;
__be32 a6[4];
+ struct in6_addr in6;
};
};
@@ -151,7 +152,7 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
{
struct inetpeer_addr daddr;
- *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
+ daddr.addr.in6 = *v6daddr;
daddr.family = AF_INET6;
return inet_getpeer(base, &daddr, create);
}
diff --git a/include/net/ip.h b/include/net/ip.h
index f7cbd70..0750a18 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -31,7 +31,7 @@
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
struct sock;
@@ -45,6 +45,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_COMPLETE BIT(3)
#define IPSKB_REROUTED BIT(4)
#define IPSKB_DOREDIRECT BIT(5)
+#define IPSKB_FRAG_PMTU BIT(6)
u16 frag_max_size;
};
@@ -108,8 +109,8 @@ int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-int ip_do_nat(struct sk_buff *skb);
+int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct sk_buff *skb);
int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
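Because ip_do_fragment() now threads the socket through to its output callback, callers supply a two-argument function. A rough sketch under that assumption; example_xmit_one is a hypothetical placeholder for the real output step:

static int example_xmit_one(struct sock *sk, struct sk_buff *skb)
{
        /* hand one fragment to the real output path; placeholder here */
        consume_skb(skb);
        return 0;
}

static int example_fragment_and_send(struct sock *sk, struct sk_buff *skb)
{
        return ip_do_fragment(sk, skb, example_xmit_one);
}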
@@ -181,7 +182,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
@@ -318,9 +319,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
}
u32 ip_idents_reserve(u32 hash, int segs);
-void __ip_select_ident(struct iphdr *iph, int segs);
+void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
-static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
+static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+ struct sock *sk, int segs)
{
struct iphdr *iph = ip_hdr(skb);
@@ -337,13 +339,14 @@ static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, in
iph->id = 0;
}
} else {
- __ip_select_ident(iph, segs);
+ __ip_select_ident(net, iph, segs);
}
}
-static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
+static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
+ struct sock *sk)
{
- ip_select_ident_segs(skb, sk, 1);
+ ip_select_ident_segs(net, skb, sk, 1);
}
static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
@@ -352,15 +355,32 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0);
}
+/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->v4addrs.src = iph->saddr;
+ * flow->v4addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
+ const struct iphdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
+ offsetof(typeof(flow->addrs), v4addrs.src) +
+ sizeof(flow->addrs.v4addrs.src));
+ memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+}
+
static inline void inet_set_txhash(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct flow_keys keys;
- keys.src = inet->inet_saddr;
- keys.dst = inet->inet_daddr;
- keys.port16[0] = inet->inet_sport;
- keys.port16[1] = inet->inet_dport;
+ memset(&keys, 0, sizeof(keys));
+
+ keys.addrs.v4addrs.src = inet->inet_saddr;
+ keys.addrs.v4addrs.dst = inet->inet_daddr;
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ keys.ports.src = inet->inet_sport;
+ keys.ports.dst = inet->inet_dport;
sk->sk_txhash = flow_hash_from_keys(&keys);
}
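The new address-copy helper is meant for code that fills struct flow_keys straight from a header. A small illustrative sketch, assuming skb points at a linear IPv4 packet:

static void example_fill_v4_keys(struct flow_keys *keys,
                                 const struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);

        memset(keys, 0, sizeof(*keys));
        iph_to_flow_copy_v4addrs(keys, iph);    /* sets v4addrs + addr_type */
        keys->basic.ip_proto = iph->protocol;
}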
@@ -453,22 +473,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
-static inline int sk_mc_loop(struct sock *sk)
-{
- if (!sk)
- return 1;
- switch (sk->sk_family) {
- case AF_INET:
- return inet_sk(sk)->mc_loop;
-#if IS_ENABLED(CONFIG_IPV6)
- case AF_INET6:
- return inet6_sk(sk)->mc_loop;
-#endif
- }
- WARN_ON(1);
- return 1;
-}
-
bool ip_call_ra_chain(struct sk_buff *skb);
/*
@@ -491,6 +495,16 @@ enum ip_defrag_users {
IP_DEFRAG_MACVLAN,
};
+/* Return true if the value of 'user' is between 'lower_bond'
+ * and 'upper_bond' inclusively.
+ */
+static inline bool ip_defrag_user_in_between(u32 user,
+ enum ip_defrag_users lower_bond,
+ enum ip_defrag_users upper_bond)
+{
+ return user >= lower_bond && user <= upper_bond;
+}
+
int ip_defrag(struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
@@ -538,7 +552,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
int ip_cmsg_send(struct net *net, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -558,6 +572,11 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);
+static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+{
+ ip_cmsg_recv_offset(msg, skb, 0);
+}
+
bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;
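The ip_cmsg_recv() wrapper above keeps existing callers unchanged, while a protocol that has already pulled its own header can pass the offset explicitly. A hedged example; the UDP header size is used purely for illustration:

static void example_recv_cmsgs(struct msghdr *msg, struct sk_buff *skb,
                               bool skip_transport_header)
{
        if (skip_transport_header)
                ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
        else
                ip_cmsg_recv(msg, skb); /* identical to an offset of 0 */
}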
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 8eea35d..3b76849 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -74,6 +74,11 @@ struct fib6_node {
#define FIB6_SUBTREE(fn) ((fn)->subtree)
#endif
+struct mx6_config {
+ const u32 *mx;
+ DECLARE_BITMAP(mx_valid, RTAX_MAX);
+};
+
/*
* routing information
*
@@ -115,45 +120,19 @@ struct rt6_info {
struct rt6key rt6i_src;
struct rt6key rt6i_prefsrc;
+ struct list_head rt6i_uncached;
+ struct uncached_list *rt6i_uncached_list;
+
struct inet6_dev *rt6i_idev;
- unsigned long _rt6i_peer;
+ struct rt6_info * __percpu *rt6i_pcpu;
u32 rt6i_metric;
+ u32 rt6i_pmtu;
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
};
-static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
-{
- return inetpeer_ptr(rt->_rt6i_peer);
-}
-
-static inline bool rt6_has_peer(struct rt6_info *rt)
-{
- return inetpeer_ptr_is_peer(rt->_rt6i_peer);
-}
-
-static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
- __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
- return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
-{
- inetpeer_init_ptr(&rt->_rt6i_peer, base);
-}
-
-static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
-{
- inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
-}
-
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
return ((struct rt6_info *)dst)->rt6i_idev;
@@ -184,13 +163,12 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
-static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
+static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
- struct dst_entry *new = (struct dst_entry *) from;
+ if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
+ rt = (struct rt6_info *)(rt->dst.from);
- rt->rt6i_flags &= ~RTF_EXPIRES;
- dst_hold(new);
- rt->dst.from = new;
+ return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
}
static inline void ip6_rt_put(struct rt6_info *rt)
@@ -291,9 +269,8 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
void *arg);
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
- struct nlattr *mx, int mx_len);
-
+int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+ struct nl_info *info, struct mx6_config *mxc);
int fib6_del(struct rt6_info *rt, struct nl_info *info);
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
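fib6_add() now takes its metrics pre-parsed in a struct mx6_config instead of a raw netlink attribute. A minimal sketch of building one; marking every metric valid is just an assumption for illustration:

static int example_fib6_add(struct fib6_node *root, struct rt6_info *rt,
                            struct nl_info *info, const u32 *metrics)
{
        struct mx6_config mxc = { .mx = metrics };

        bitmap_fill(mxc.mx_valid, RTAX_MAX);    /* treat every metric as set */
        return fib6_add(root, rt, info, &mxc);
}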
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 1d09b46..297629a 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -145,7 +145,7 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
#ifdef CONFIG_IPV6_SUBTREES
np->saddr_cache = saddr;
#endif
- np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ np->dst_cookie = rt6_get_cookie(rt);
}
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
@@ -163,18 +163,23 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
return rt->rt6i_flags & RTF_LOCAL;
}
-static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
+static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+ const struct in6_addr *daddr)
{
- struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+ struct rt6_info *rt = (struct rt6_info *)dst;
- return rt->rt6i_flags & RTF_ANYCAST;
+ return rt->rt6i_flags & RTF_ANYCAST ||
+ (rt->rt6i_dst.plen != 128 &&
+ ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}
-int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip6_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ inet6_sk(skb->sk) : NULL;
return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
@@ -192,9 +197,15 @@ static inline bool ip6_sk_ignore_df(const struct sock *sk)
inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
}
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
+ struct in6_addr *daddr)
{
- return &rt->rt6i_gateway;
+ if (rt->rt6i_flags & RTF_GATEWAY)
+ return &rt->rt6i_gateway;
+ else if (unlikely(rt->rt6i_flags & RTF_CACHE))
+ return &rt->rt6i_dst.addr;
+ else
+ return daddr;
}
#endif
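Callers of rt6_nexthop() now pass the packet's destination so that cached host routes and on-link destinations resolve to the right address. A short sketch of a caller:

static struct in6_addr *example_pick_nexthop(struct rt6_info *rt,
                                             struct flowi6 *fl6)
{
        /* gateway routes yield the gateway, RTF_CACHE clones yield the
         * route's own destination, everything else falls back to fl6->daddr */
        return rt6_nexthop(rt, &fl6->daddr);
}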
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 9326c41..b8529aa 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -70,14 +70,17 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
+struct net *ip6_tnl_get_link_net(const struct net_device *dev);
+int ip6_tnl_get_iflink(const struct net_device *dev);
-static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+ struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
int pkt_len, err;
pkt_len = skb->len;
- err = ip6_local_out(skb);
+ err = ip6_local_out_sk(sk, skb);
if (net_xmit_eval(err) == 0) {
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 09a819e..49c142b 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -136,7 +136,7 @@ struct fib_result {
u32 tclassid;
struct fib_info *fi;
struct fib_table *table;
- struct list_head *fa_head;
+ struct hlist_head *fa_head;
};
struct fib_result_nl {
@@ -185,7 +185,9 @@ struct fib_table {
u32 tb_id;
int tb_default;
int tb_num_default;
- unsigned long tb_data[0];
+ struct rcu_head rcu;
+ unsigned long *tb_data;
+ unsigned long __data[0];
};
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
@@ -195,10 +197,10 @@ int fib_table_delete(struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
int fib_table_flush(struct fib_table *table);
+struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
-
-
#ifndef CONFIG_IP_MULTIPLE_TABLES
#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
@@ -206,12 +208,16 @@ void fib_free_table(struct fib_table *tb);
static inline struct fib_table *fib_get_table(struct net *net, u32 id)
{
+ struct hlist_node *tb_hlist;
struct hlist_head *ptr;
ptr = id == RT_TABLE_LOCAL ?
&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX] :
&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX];
- return hlist_entry(ptr->first, struct fib_table, tb_hlist);
+
+ tb_hlist = rcu_dereference_rtnl(hlist_first_rcu(ptr));
+
+ return hlist_entry(tb_hlist, struct fib_table, tb_hlist);
}
static inline struct fib_table *fib_new_table(struct net *net, u32 id)
@@ -220,18 +226,20 @@ static inline struct fib_table *fib_new_table(struct net *net, u32 id)
}
static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
- struct fib_result *res)
+ struct fib_result *res, unsigned int flags)
{
- struct fib_table *table;
+ struct fib_table *tb;
+ int err = -ENETUNREACH;
+
+ rcu_read_lock();
+
+ tb = fib_get_table(net, RT_TABLE_MAIN);
+ if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF))
+ err = 0;
- table = fib_get_table(net, RT_TABLE_LOCAL);
- if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF))
- return 0;
+ rcu_read_unlock();
- table = fib_get_table(net, RT_TABLE_MAIN);
- if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF))
- return 0;
- return -ENETUNREACH;
+ return err;
}
#else /* CONFIG_IP_MULTIPLE_TABLES */
@@ -241,28 +249,36 @@ void __net_exit fib4_rules_exit(struct net *net);
struct fib_table *fib_new_table(struct net *net, u32 id);
struct fib_table *fib_get_table(struct net *net, u32 id);
-int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
+int __fib_lookup(struct net *net, struct flowi4 *flp,
+ struct fib_result *res, unsigned int flags);
static inline int fib_lookup(struct net *net, struct flowi4 *flp,
- struct fib_result *res)
+ struct fib_result *res, unsigned int flags)
{
- if (!net->ipv4.fib_has_custom_rules) {
- res->tclassid = 0;
- if (net->ipv4.fib_local &&
- !fib_table_lookup(net->ipv4.fib_local, flp, res,
- FIB_LOOKUP_NOREF))
- return 0;
- if (net->ipv4.fib_main &&
- !fib_table_lookup(net->ipv4.fib_main, flp, res,
- FIB_LOOKUP_NOREF))
- return 0;
- if (net->ipv4.fib_default &&
- !fib_table_lookup(net->ipv4.fib_default, flp, res,
- FIB_LOOKUP_NOREF))
- return 0;
- return -ENETUNREACH;
+ struct fib_table *tb;
+ int err;
+
+ flags |= FIB_LOOKUP_NOREF;
+ if (net->ipv4.fib_has_custom_rules)
+ return __fib_lookup(net, flp, res, flags);
+
+ rcu_read_lock();
+
+ res->tclassid = 0;
+
+ for (err = 0; !err; err = -ENETUNREACH) {
+ tb = rcu_dereference_rtnl(net->ipv4.fib_main);
+ if (tb && !fib_table_lookup(tb, flp, res, flags))
+ break;
+
+ tb = rcu_dereference_rtnl(net->ipv4.fib_default);
+ if (tb && !fib_table_lookup(tb, flp, res, flags))
+ break;
}
- return __fib_lookup(net, flp, res);
+
+ rcu_read_unlock();
+
+ return err;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
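For both table layouts fib_lookup() now ORs FIB_LOOKUP_NOREF in internally, so a plain caller only passes whatever extra flags it cares about (none in this sketch):

static int example_lookup_route(struct net *net, struct flowi4 *fl4,
                                struct fib_result *res)
{
        /* NOREF is implied; additional FIB_LOOKUP_* flags could be ORed in */
        return fib_lookup(net, fl4, res, 0);
}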
@@ -286,17 +302,19 @@ static inline int fib_num_tclassid_users(struct net *net)
return 0;
}
#endif
+int fib_unmerge(struct net *net);
+void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
-int fib_sync_down_dev(struct net_device *dev, int force);
+int fib_sync_down_dev(struct net_device *dev, unsigned long event);
int fib_sync_down_addr(struct net *net, __be32 local);
-int fib_sync_up(struct net_device *dev);
+int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
void fib_select_multipath(struct fib_result *res);
/* Exported by fib_trie.c */
void fib_trie_init(void);
-struct fib_table *fib_trie_table(u32 id);
+struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 25a59eb..d8214cb 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -97,7 +97,10 @@ struct ip_tunnel {
#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
#define TUNNEL_OAM __cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
-#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800)
+#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
+#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
+
+#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
struct tnl_ptk_info {
__be16 flags;
@@ -138,6 +141,8 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
+struct net *ip_tunnel_get_link_net(const struct net_device *dev);
+int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 615b20b..4e3731e 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -47,13 +47,13 @@ static inline struct net *skb_net(const struct sk_buff *skb)
* Start with the most likely hit
* End with BUG
*/
- if (likely(skb->dev && skb->dev->nd_net))
+ if (likely(skb->dev && dev_net(skb->dev)))
return dev_net(skb->dev);
if (skb_dst(skb) && skb_dst(skb)->dev)
return dev_net(skb_dst(skb)->dev);
WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
__func__, __LINE__);
- if (likely(skb->sk && skb->sk->sk_net))
+ if (likely(skb->sk && sock_net(skb->sk)))
return sock_net(skb->sk);
pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
__func__, __LINE__);
@@ -71,11 +71,11 @@ static inline struct net *skb_sknet(const struct sk_buff *skb)
#ifdef CONFIG_NET_NS
#ifdef CONFIG_IP_VS_DEBUG
/* Start with the most likely hit */
- if (likely(skb->sk && skb->sk->sk_net))
+ if (likely(skb->sk && sock_net(skb->sk)))
return sock_net(skb->sk);
WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
__func__, __LINE__);
- if (likely(skb->dev && skb->dev->nd_net))
+ if (likely(skb->dev && dev_net(skb->dev)))
return dev_net(skb->dev);
pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
__func__, __LINE__);
@@ -365,15 +365,15 @@ struct ip_vs_seq {
/* counters per cpu */
struct ip_vs_counters {
- __u32 conns; /* connections scheduled */
- __u32 inpkts; /* incoming packets */
- __u32 outpkts; /* outgoing packets */
+ __u64 conns; /* connections scheduled */
+ __u64 inpkts; /* incoming packets */
+ __u64 outpkts; /* outgoing packets */
__u64 inbytes; /* incoming bytes */
__u64 outbytes; /* outgoing bytes */
};
/* Stats per cpu */
struct ip_vs_cpu_stats {
- struct ip_vs_counters ustats;
+ struct ip_vs_counters cnt;
struct u64_stats_sync syncp;
};
@@ -383,23 +383,40 @@ struct ip_vs_estimator {
u64 last_inbytes;
u64 last_outbytes;
- u32 last_conns;
- u32 last_inpkts;
- u32 last_outpkts;
-
- u32 cps;
- u32 inpps;
- u32 outpps;
- u32 inbps;
- u32 outbps;
+ u64 last_conns;
+ u64 last_inpkts;
+ u64 last_outpkts;
+
+ u64 cps;
+ u64 inpps;
+ u64 outpps;
+ u64 inbps;
+ u64 outbps;
+};
+
+/*
+ * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user
+ */
+struct ip_vs_kstats {
+ u64 conns; /* connections scheduled */
+ u64 inpkts; /* incoming packets */
+ u64 outpkts; /* outgoing packets */
+ u64 inbytes; /* incoming bytes */
+ u64 outbytes; /* outgoing bytes */
+
+ u64 cps; /* current connection rate */
+ u64 inpps; /* current in packet rate */
+ u64 outpps; /* current out packet rate */
+ u64 inbps; /* current in byte rate */
+ u64 outbps; /* current out byte rate */
};
struct ip_vs_stats {
- struct ip_vs_stats_user ustats; /* statistics */
+ struct ip_vs_kstats kstats; /* kernel statistics */
struct ip_vs_estimator est; /* estimator */
struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */
spinlock_t lock; /* spin lock */
- struct ip_vs_stats_user ustats0; /* reset values */
+ struct ip_vs_kstats kstats0; /* reset values */
};
struct dst_entry;
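With the counters widened to 64 bit, readers are expected to sum the per-cpu values under the u64_stats seqcount. A sketch of such a reader, not part of this header change:

static void example_sum_cpu_stats(struct ip_vs_stats *stats,
                                  struct ip_vs_kstats *sum)
{
        int cpu;

        memset(sum, 0, sizeof(*sum));
        for_each_possible_cpu(cpu) {
                struct ip_vs_cpu_stats *s = per_cpu_ptr(stats->cpustats, cpu);
                unsigned int start;
                u64 conns, inpkts, outpkts, inbytes, outbytes;

                do {
                        start    = u64_stats_fetch_begin(&s->syncp);
                        conns    = s->cnt.conns;
                        inpkts   = s->cnt.inpkts;
                        outpkts  = s->cnt.outpkts;
                        inbytes  = s->cnt.inbytes;
                        outbytes = s->cnt.outbytes;
                } while (u64_stats_fetch_retry(&s->syncp, start));

                sum->conns    += conns;
                sum->inpkts   += inpkts;
                sum->outpkts  += outpkts;
                sum->inbytes  += inbytes;
                sum->outbytes += outbytes;
        }
}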
@@ -924,6 +941,7 @@ struct netns_ipvs {
int sysctl_nat_icmp_send;
int sysctl_pmtu_disc;
int sysctl_backup_only;
+ int sysctl_conn_reuse_mode;
/* ip_vs_lblc */
int sysctl_lblc_expiration;
@@ -1042,6 +1060,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
ipvs->sysctl_backup_only;
}
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_conn_reuse_mode;
+}
+
#else
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1109,6 +1132,11 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
return 0;
}
+static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
+{
+ return 1;
+}
+
#endif
/* IPVS core functions
@@ -1388,8 +1416,7 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
-void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
- struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4292929..82dbdb0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -19,7 +19,7 @@
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#include <net/snmp.h>
#define SIN6_LEN_RFC2133 24
@@ -47,8 +47,6 @@
#define NEXTHDR_MAX 255
-
-
#define IPV6_DEFAULT_HOPLIMIT 64
#define IPV6_DEFAULT_MCASTHOPS 1
@@ -241,8 +239,10 @@ struct ip6_flowlabel {
struct net *fl_net;
};
-#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
-#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
+#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000)
+
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
@@ -671,7 +671,10 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
-void ipv6_proxy_select_ident(struct sk_buff *skb);
+__be32 ipv6_select_ident(struct net *net,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -689,6 +692,20 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
return hlimit;
}
+/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->v6addrs.src = iph->saddr;
+ * flow->v6addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
+ const struct ipv6hdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
+ offsetof(typeof(flow->addrs), v6addrs.src) +
+ sizeof(flow->addrs.v6addrs.src));
+ memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static inline void ip6_set_txhash(struct sock *sk)
{
@@ -696,10 +713,15 @@ static inline void ip6_set_txhash(struct sock *sk)
struct ipv6_pinfo *np = inet6_sk(sk);
struct flow_keys keys;
- keys.src = (__force __be32)ipv6_addr_hash(&np->saddr);
- keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
- keys.port16[0] = inet->inet_sport;
- keys.port16[1] = inet->inet_dport;
+ memset(&keys, 0, sizeof(keys));
+
+ memcpy(&keys.addrs.v6addrs.src, &np->saddr,
+ sizeof(keys.addrs.v6addrs.src));
+ memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
+ sizeof(keys.addrs.v6addrs.dst));
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ keys.ports.src = inet->inet_sport;
+ keys.ports.dst = inet->inet_dport;
sk->sk_txhash = flow_hash_from_keys(&keys);
}
@@ -708,7 +730,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
__be32 flowlabel, bool autolabel)
{
if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
- __be32 hash;
+ u32 hash;
hash = skb_get_hash(skb);
@@ -718,7 +740,10 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
*/
hash ^= hash >> 12;
- flowlabel = hash & IPV6_FLOWLABEL_MASK;
+ flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
+ if (net->ipv6.sysctl.flowlabel_state_ranges)
+ flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
}
return flowlabel;
@@ -767,7 +792,7 @@ static inline u8 ip6_tclass(__be32 flowinfo)
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
-int ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb);
/*
* upper-layer output functions
@@ -788,6 +813,25 @@ int ip6_push_pending_frames(struct sock *sk);
void ip6_flush_pending_frames(struct sock *sk);
+int ip6_send_skb(struct sk_buff *skb);
+
+struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
+ struct inet_cork_full *cork,
+ struct inet6_cork *v6_cork);
+struct sk_buff *ip6_make_skb(struct sock *sk,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+ int hlimit, int tclass, struct ipv6_txoptions *opt,
+ struct flowi6 *fl6, struct rt6_info *rt,
+ unsigned int flags, int dontfrag);
+
+static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
+{
+ return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
+ &inet6_sk(sk)->cork);
+}
+
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
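The new ip6_make_skb()/ip6_send_skb() pair lets a datagram protocol build and push a packet in one go without corking the socket. A hedged sketch of a caller, with the parameter plumbing heavily simplified:

static int example_send_dgram(struct sock *sk, struct msghdr *msg, int len,
                              struct flowi6 *fl6, struct rt6_info *rt,
                              int hlimit, int tclass)
{
        struct sk_buff *skb;

        skb = ip6_make_skb(sk, ip_generic_getfrag, msg, len,
                           0 /* transhdrlen */, hlimit, tclass,
                           NULL /* opt */, fl6, rt, MSG_DONTWAIT,
                           0 /* dontfrag */);
        if (IS_ERR_OR_NULL(skb))
                return PTR_ERR(skb);    /* PTR_ERR(NULL) is simply 0 */

        return ip6_send_skb(skb);
}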
@@ -806,6 +850,7 @@ int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);
int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct sk_buff *skb);
/*
@@ -920,4 +965,8 @@ int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
#endif /* _NET_IPV6_H */
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index a830b01..8f81bbb 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -519,6 +519,17 @@ iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends,
return stream;
}
+static inline char *
+iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, int event_len)
+{
+ char *res = iwe_stream_add_event(info, stream, ends, iwe, event_len);
+
+ if (res == stream)
+ return ERR_PTR(-E2BIG);
+ return res;
+}
+
/*------------------------------------------------------------------*/
/*
* Wrapper to add an short Wireless Event containing a pointer to a
@@ -545,6 +556,17 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
return stream;
}
+static inline char *
+iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, char *extra)
+{
+ char *res = iwe_stream_add_point(info, stream, ends, iwe, extra);
+
+ if (res == stream)
+ return ERR_PTR(-E2BIG);
+ return res;
+}
+
/*------------------------------------------------------------------*/
/*
* Wrapper to add a value to a Wireless Event in a stream of events.
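The new *_check variants return ERR_PTR(-E2BIG) instead of silently handing back the unchanged stream pointer, so scan-translation code can abort cleanly. A small sketch of a caller; the function name and IW_EV_ADDR_LEN usage are illustrative only:

static int example_add_scan_event(struct iw_request_info *info,
                                  char **current_ev, char *end_buf,
                                  struct iw_event *iwe)
{
        char *ev = iwe_stream_add_event_check(info, *current_ev, end_buf,
                                              iwe, IW_EV_ADDR_LEN);

        if (IS_ERR(ev))
                return PTR_ERR(ev);     /* -E2BIG: user buffer exhausted */
        *current_ev = ev;
        return 0;
}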
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 0134681..fe994d2 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -96,7 +96,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
}
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
+ struct proto *prot, int kern);
void llc_sk_free(struct sock *sk);
void llc_sk_reset(struct sock *sk);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 29c7be8..6b1077c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -84,6 +84,39 @@
*
*/
+/**
+ * DOC: mac80211 software tx queueing
+ *
+ * mac80211 provides an optional intermediate queueing implementation designed
+ * to allow the driver to keep hardware queues short and provide some fairness
+ * between different stations/interfaces.
+ * In this model, the driver pulls data frames from the mac80211 queue instead
+ * of letting mac80211 push them via drv_tx().
+ * Other frames (e.g. control or management) are still pushed using drv_tx().
+ *
+ * Drivers indicate that they use this model by implementing the .wake_tx_queue
+ * driver operation.
+ *
+ * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a
+ * single per-vif queue for multicast data frames.
+ *
+ * The driver is expected to initialize its private per-queue data for stations
+ * and interfaces in the .add_interface and .sta_add ops.
+ *
+ * The driver can't access the queue directly. To dequeue a frame, it calls
+ * ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it
+ * calls the .wake_tx_queue driver op.
+ *
+ * For AP powersave TIM handling, the driver only needs to indicate if it has
+ * buffered packets in the driver specific data structures by calling
+ * ieee80211_sta_set_buffered(). For frames buffered in the ieee80211_txq
+ * struct, mac80211 sets the appropriate TIM PVB bits and calls
+ * .release_buffered_frames().
+ * In that callback the driver is therefore expected to release its own
+ * buffered frames and afterwards also frames from the ieee80211_txq (obtained
+ * via the usual ieee80211_tx_dequeue).
+ */
+
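A rough driver-side sketch of the model described above; example_hw_submit stands in for a hypothetical hardware-queue helper, while ieee80211_tx_dequeue() is the dequeue entry point the text refers to:

/* hypothetical hardware submit helper, assumed to exist in the driver */
static void example_hw_submit(struct ieee80211_hw *hw, u8 ac,
                              struct sk_buff *skb)
{
        /* push skb onto the hardware queue backing access category @ac */
        dev_kfree_skb_any(skb);         /* placeholder */
}

static void example_wake_tx_queue(struct ieee80211_hw *hw,
                                  struct ieee80211_txq *txq)
{
        struct sk_buff *skb;

        /* pull frames mac80211 queued on this sta/tid (or per-vif) queue */
        while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
                example_hw_submit(hw, txq->ac, skb);
}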
struct device;
/**
@@ -301,17 +334,107 @@ enum ieee80211_bss_change {
#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
/**
- * enum ieee80211_rssi_event - RSSI threshold event
- * An indicator for when RSSI goes below/above a certain threshold.
- * @RSSI_EVENT_HIGH: AP's rssi crossed the high threshold set by the driver.
- * @RSSI_EVENT_LOW: AP's rssi crossed the low threshold set by the driver.
+ * enum ieee80211_event_type - event to be notified to the low level driver
+ * @RSSI_EVENT: AP's rssi crossed a threshold set by the driver.
+ * @MLME_EVENT: event related to MLME
+ * @BAR_RX_EVENT: a BAR was received
+ * @BA_FRAME_TIMEOUT: Frames were released from the reordering buffer because
+ * they timed out. This won't be called for each frame released, but only
+ * once each time the timeout triggers.
*/
-enum ieee80211_rssi_event {
+enum ieee80211_event_type {
+ RSSI_EVENT,
+ MLME_EVENT,
+ BAR_RX_EVENT,
+ BA_FRAME_TIMEOUT,
+};
+
+/**
+ * enum ieee80211_rssi_event_data - relevant when event type is %RSSI_EVENT
+ * @RSSI_EVENT_HIGH: AP's rssi went below the threshold set by the driver.
+ * @RSSI_EVENT_LOW: AP's rssi went above the threshold set by the driver.
+ */
+enum ieee80211_rssi_event_data {
RSSI_EVENT_HIGH,
RSSI_EVENT_LOW,
};
/**
+ * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
+ * @data: See &enum ieee80211_rssi_event_data
+ */
+struct ieee80211_rssi_event {
+ enum ieee80211_rssi_event_data data;
+};
+
+/**
+ * enum ieee80211_mlme_event_data - relevant when event type is %MLME_EVENT
+ * @AUTH_EVENT: the MLME operation is authentication
+ * @ASSOC_EVENT: the MLME operation is association
+ * @DEAUTH_RX_EVENT: deauth received.
+ * @DEAUTH_TX_EVENT: deauth sent.
+ */
+enum ieee80211_mlme_event_data {
+ AUTH_EVENT,
+ ASSOC_EVENT,
+ DEAUTH_RX_EVENT,
+ DEAUTH_TX_EVENT,
+};
+
+/**
+ * enum ieee80211_mlme_event_status - relevant when event type is %MLME_EVENT
+ * @MLME_SUCCESS: the MLME operation completed successfully.
+ * @MLME_DENIED: the MLME operation was denied by the peer.
+ * @MLME_TIMEOUT: the MLME operation timed out.
+ */
+enum ieee80211_mlme_event_status {
+ MLME_SUCCESS,
+ MLME_DENIED,
+ MLME_TIMEOUT,
+};
+
+/**
+ * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
+ * @data: See &enum ieee80211_mlme_event_data
+ * @status: See &enum ieee80211_mlme_event_status
+ * @reason: the reason code if applicable
+ */
+struct ieee80211_mlme_event {
+ enum ieee80211_mlme_event_data data;
+ enum ieee80211_mlme_event_status status;
+ u16 reason;
+};
+
+/**
+ * struct ieee80211_ba_event - data attached for BlockAck related events
+ * @sta: pointer to the &ieee80211_sta to which this event relates
+ * @tid: the tid
+ * @ssn: the starting sequence number (for %BAR_RX_EVENT)
+ */
+struct ieee80211_ba_event {
+ struct ieee80211_sta *sta;
+ u16 tid;
+ u16 ssn;
+};
+
+/**
+ * struct ieee80211_event - event to be sent to the driver
+ * @type: The event itself. See &enum ieee80211_event_type.
+ * @rssi: relevant if &type is %RSSI_EVENT
+ * @mlme: relevant if &type is %AUTH_EVENT
+ * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u: union holding the fields above
+ */
+struct ieee80211_event {
+ enum ieee80211_event_type type;
+ union {
+ struct ieee80211_rssi_event rssi;
+ struct ieee80211_mlme_event mlme;
+ struct ieee80211_ba_event ba;
+ } u;
+};
+
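A hedged sketch of how a driver's event callback might branch on these types; the op signature follows the event_callback documentation further below:

static void example_event_callback(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   const struct ieee80211_event *event)
{
        switch (event->type) {
        case RSSI_EVENT:
                /* event->u.rssi.data: RSSI_EVENT_HIGH or RSSI_EVENT_LOW */
                break;
        case MLME_EVENT:
                /* event->u.mlme.{data,status,reason} describe the operation */
                break;
        case BAR_RX_EVENT:
        case BA_FRAME_TIMEOUT:
                /* event->u.ba.{sta,tid,ssn} identify the affected session */
                break;
        }
}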
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -323,12 +446,8 @@ enum ieee80211_rssi_event {
* @ibss_creator: indicates if a new IBSS network is being created
* @aid: association ID number, valid only when @assoc is true
* @use_cts_prot: use CTS protection
- * @use_short_preamble: use 802.11b short preamble;
- * if the hardware cannot handle this it must set the
- * IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE hardware flag
- * @use_short_slot: use short slot time (only relevant for ERP);
- * if the hardware cannot handle this it must set the
- * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag
+ * @use_short_preamble: use 802.11b short preamble
+ * @use_short_slot: use short slot time (only relevant for ERP)
* @dtim_period: num of beacons before the next DTIM, for beaconing,
* valid in station mode only if after the driver was notified
* with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then.
@@ -337,12 +456,15 @@ enum ieee80211_rssi_event {
* HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can
* only come from a beacon, but might not become valid until after
* association when a beacon is received (which is notified with the
- * %BSS_CHANGED_DTIM flag.)
+ * %BSS_CHANGED_DTIM flag.). See also sync_dtim_count important notice.
* @sync_device_ts: the device timestamp corresponding to the sync_tsf,
* the driver/device can use this to calculate synchronisation
- * (see @sync_tsf)
+ * (see @sync_tsf). See also sync_dtim_count important notice.
* @sync_dtim_count: Only valid when %IEEE80211_HW_TIMING_BEACON_ONLY
* is requested, see @sync_tsf/@sync_device_ts.
+ * IMPORTANT: These three sync_* parameters may be out of sync by the
+ * time the driver uses them. The synchronized view is currently
+ * guaranteed only in certain callbacks.
* @beacon_int: beacon interval
* @assoc_capability: capabilities taken from assoc resp
* @basic_rates: bitmap of basic rates, each bit stands for an
@@ -376,6 +498,12 @@ enum ieee80211_rssi_event {
* @ssid_len: Length of SSID given in @ssid.
* @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
* @txpower: TX power in dBm
+ * @txpower_type: TX power adjustment used to control per packet Transmit
+ * Power Control (TPC) in lower driver for the current vif. In particular
+ * TPC is enabled if value passed in %txpower_type is
+ * NL80211_TX_POWER_LIMITED (allow using less than specified from
+ * userspace), whereas TPC is disabled if %txpower_type is set to
+ * NL80211_TX_POWER_FIXED (use value configured from userspace)
* @p2p_noa_attr: P2P NoA attribute for P2P powersave
*/
struct ieee80211_bss_conf {
@@ -411,6 +539,7 @@ struct ieee80211_bss_conf {
size_t ssid_len;
bool hidden_ssid;
int txpower;
+ enum nl80211_tx_power_setting txpower_type;
struct ieee80211_p2p_noa_attr p2p_noa_attr;
};
@@ -505,8 +634,11 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it
* would be fragmented by size (this is optional, only used for
* monitor injection).
- * @IEEE80211_TX_CTL_PS_RESPONSE: This frame is a response to a poll
- * frame (PS-Poll or uAPSD).
+ * @IEEE80211_TX_STAT_NOACK_TRANSMITTED: A frame that was marked with
+ * IEEE80211_TX_CTL_NO_ACK has been successfully transmitted without
+ * any errors (like issues specific to the driver/HW).
+ * This flag must not be set for frames that don't request no-ack
+ * behaviour with IEEE80211_TX_CTL_NO_ACK.
*
* Note: If you have to add new flags to the enumeration, then don't
* forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -542,7 +674,7 @@ enum mac80211_tx_info_flags {
IEEE80211_TX_STATUS_EOSP = BIT(28),
IEEE80211_TX_CTL_USE_MINRATE = BIT(29),
IEEE80211_TX_CTL_DONTFRAG = BIT(30),
- IEEE80211_TX_CTL_PS_RESPONSE = BIT(31),
+ IEEE80211_TX_STAT_NOACK_TRANSMITTED = BIT(31),
};
#define IEEE80211_TX_CTL_STBC_SHIFT 23
@@ -552,11 +684,14 @@ enum mac80211_tx_info_flags {
*
* @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control
* protocol frame (e.g. EAP)
+ * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
+ * frame (PS-Poll or uAPSD).
*
* These flags are used in tx_info->control.flags.
*/
enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
+ IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
};
/*
@@ -736,6 +871,9 @@ struct ieee80211_tx_info {
/* 4 bytes free */
} control;
struct {
+ u64 cookie;
+ } ack;
+ struct {
struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
s32 ack_signal;
u8 ampdu_ack_len;
@@ -925,15 +1063,13 @@ enum mac80211_rx_flags {
* These flags are used with the @vht_flag member of
* &struct ieee80211_rx_status.
* @RX_VHT_FLAG_80MHZ: 80 MHz was used
- * @RX_VHT_FLAG_80P80MHZ: 80+80 MHz was used
* @RX_VHT_FLAG_160MHZ: 160 MHz was used
* @RX_VHT_FLAG_BF: packet was beamformed
*/
enum mac80211_rx_vht_flags {
RX_VHT_FLAG_80MHZ = BIT(0),
- RX_VHT_FLAG_80P80MHZ = BIT(1),
- RX_VHT_FLAG_160MHZ = BIT(2),
- RX_VHT_FLAG_BF = BIT(3),
+ RX_VHT_FLAG_160MHZ = BIT(1),
+ RX_VHT_FLAG_BF = BIT(2),
};
/**
@@ -1181,10 +1317,15 @@ struct ieee80211_channel_switch {
* monitoring on this virtual interface -- i.e. it can monitor
* connection quality related parameters, such as the RSSI level and
* provide notifications if configured trigger levels are reached.
+ * @IEEE80211_VIF_SUPPORTS_UAPSD: The device can do U-APSD for this
+ * interface. This flag should be set during interface addition,
+ * but may be set/cleared as late as authentication to an AP. It is
+ * only valid for managed/station mode interfaces.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1),
+ IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
};
/**
@@ -1218,6 +1359,7 @@ enum ieee80211_vif_flags {
* monitor interface (if that is requested.)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *).
+ * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1229,6 +1371,8 @@ struct ieee80211_vif {
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
+ struct ieee80211_txq *txq;
+
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
u32 driver_flags;
@@ -1263,6 +1407,19 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
/**
+ * ieee80211_vif_to_wdev - return a wdev struct from a vif
+ * @vif: the vif to get the wdev for
+ *
+ * This can be used by mac80211 drivers with direct cfg80211 APIs
+ * (like the vendor commands) that needs to get the wdev for a vif.
+ *
+ * Note that this function may return %NULL if the given wdev isn't
+ * associated with a vif that the driver knows about (e.g. monitor
+ * or AP_VLAN interfaces.)
+ */
+struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
+
+/**
* enum ieee80211_key_flags - key flags
*
* These flags are used for communication about keys between the driver
@@ -1270,19 +1427,22 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
- * particular key.
+ * particular key. Setting this flag does not necessarily mean that SKBs
+ * will have sufficient tailroom for ICV or MIC.
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
* @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
* that the key is pairwise rather then a shared key.
* @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
- * CCMP key if it requires CCMP encryption of management frames (MFP) to
- * be done in software.
+ * CCMP/GCMP key if it requires CCMP/GCMP encryption of management frames
+ * (MFP) to be done in software.
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
* if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
- * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
+ * not necessarily mean that SKBs will have sufficient tailroom for ICV or
+ * MIC.
* @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
* management frames. The flag can help drivers that have a hardware
* crypto implementation that doesn't deal with management frames
@@ -1291,8 +1451,11 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* RX, if your crypto engine can't deal with TX you can also set the
* %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
* @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
- * driver for a CCMP key to indicate that is requires IV generation
+ * driver for a CCMP/GCMP key to indicate that it requires IV generation
 * only for management frames (MFP).
+ * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
+ * driver for a key to indicate that sufficient tailroom must always
+ * be reserved for ICV or MIC, even when HW encryption is enabled.
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -1302,6 +1465,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(4),
IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5),
IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7),
};
/**
@@ -1314,6 +1478,9 @@ enum ieee80211_key_flags {
* wants to be given when a frame is transmitted and needs to be
* encrypted in hardware.
* @cipher: The key's cipher suite selector.
+ * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver
+ * as well if it needs to do software PN assignment by itself
+ * (e.g. due to TSO)
* @flags: key flags, see &enum ieee80211_key_flags.
* @keyidx: the key index (0-3)
* @keylen: key material length
@@ -1326,6 +1493,7 @@ enum ieee80211_key_flags {
* @iv_len: The IV length for this key type
*/
struct ieee80211_key_conf {
+ atomic64_t tx_pn;
u32 cipher;
u8 icv_len;
u8 iv_len;
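For drivers that assign PNs in software (the TSO case mentioned above), the shared counter can simply be advanced atomically; a one-line sketch:

static u64 example_next_pn(struct ieee80211_key_conf *keyconf)
{
        /* monotonically increasing PN shared with mac80211's own TX path */
        return atomic64_inc_return(&keyconf->tx_pn);
}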
@@ -1336,6 +1504,47 @@ struct ieee80211_key_conf {
u8 key[0];
};
+#define IEEE80211_MAX_PN_LEN 16
+
+/**
+ * struct ieee80211_key_seq - key sequence counter
+ *
+ * @tkip: TKIP data, containing IV32 and IV16 in host byte order
+ * @ccmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_cmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @hw: data for HW-only (e.g. cipher scheme) keys
+ */
+struct ieee80211_key_seq {
+ union {
+ struct {
+ u32 iv32;
+ u16 iv16;
+ } tkip;
+ struct {
+ u8 pn[6];
+ } ccmp;
+ struct {
+ u8 pn[6];
+ } aes_cmac;
+ struct {
+ u8 pn[6];
+ } aes_gmac;
+ struct {
+ u8 pn[6];
+ } gcmp;
+ struct {
+ u8 seq[IEEE80211_MAX_PN_LEN];
+ u8 seq_len;
+ } hw;
+ };
+};
+
/**
* struct ieee80211_cipher_scheme - cipher scheme
*
@@ -1449,7 +1658,8 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
- * @wme: indicates whether the STA supports QoS/WME.
+ * @wme: indicates whether the STA supports QoS/WME (if the local device does,
+ * otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
@@ -1465,6 +1675,8 @@ struct ieee80211_sta_rates {
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
* valid if the STA is a TDLS peer in the first place.
+ * @mfp: indicates whether the STA uses management frame protection or not.
+ * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
u32 supp_rates[IEEE80211_NUM_BANDS];
@@ -1481,6 +1693,9 @@ struct ieee80211_sta {
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
+ bool mfp;
+
+ struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1510,6 +1725,28 @@ struct ieee80211_tx_control {
};
/**
+ * struct ieee80211_txq - Software intermediate tx queue
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @sta: station table entry, %NULL for per-vif queue
+ * @tid: the TID for this queue (unused for per-vif queue)
+ * @ac: the AC for this queue
+ * @drv_priv: driver private area, sized by hw->txq_data_size
+ *
+ * The driver can obtain packets from this queue by calling
+ * ieee80211_tx_dequeue().
+ */
+struct ieee80211_txq {
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ u8 tid;
+ u8 ac;
+
+ /* must be last */
+ u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
+/**
* enum ieee80211_hw_flags - hardware flags
*
* These flags are used to indicate hardware capabilities to
@@ -1543,13 +1780,6 @@ struct ieee80211_tx_control {
* multicast frames when there are power saving stations so that
* the driver can fetch them with ieee80211_get_buffered_bc().
*
- * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE:
- * Hardware is not capable of short slot operation on the 2.4 GHz band.
- *
- * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE:
- * Hardware is not capable of receiving frames with short preamble on
- * the 2.4 GHz band.
- *
* @IEEE80211_HW_SIGNAL_UNSPEC:
* Hardware can provide signal values but we don't know its units. We
* expect values between 0 and @max_signal.
@@ -1580,11 +1810,6 @@ struct ieee80211_tx_control {
* @IEEE80211_HW_MFP_CAPABLE:
* Hardware supports management frame protection (MFP, IEEE 802.11w).
*
- * @IEEE80211_HW_SUPPORTS_UAPSD:
- * Hardware supports Unscheduled Automatic Power Save Delivery
- * (U-APSD) in managed mode. The mode is configured with
- * conf_tx() operation.
- *
* @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
* Hardware can provide ack status reports of Tx frames to
* the stack.
@@ -1623,6 +1848,16 @@ struct ieee80211_tx_control {
* be created. It is expected user-space will create vifs as
* desired (and thus have them named as desired).
*
+ * @IEEE80211_HW_SW_CRYPTO_CONTROL: The driver wants to control which of the
+ * crypto algorithms can be done in software - so don't automatically
+ * try to fall back to it if hardware crypto fails, but do so only if
+ * the driver returns 1. This also forces the driver to advertise its
+ * supported cipher suites.
+ *
+ * @IEEE80211_HW_SUPPORT_FAST_XMIT: The driver/hardware supports fast-xmit,
+ * this currently requires only the ability to calculate the duration
+ * for frames.
+ *
* @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
* queue mapping in order to use different queues (not just one per AC)
* for different virtual interfaces. See the doc section on HW queue
@@ -1650,41 +1885,44 @@ struct ieee80211_tx_control {
* @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
* or tailroom of TX skbs without copying them first.
*
- * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
+ * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
* in one command, mac80211 doesn't have to run separate scans per band.
+ *
+ * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
- IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
- IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2,
- IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3,
- IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
- IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
- IEEE80211_HW_SIGNAL_DBM = 1<<6,
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC = 1<<7,
- IEEE80211_HW_SPECTRUM_MGMT = 1<<8,
- IEEE80211_HW_AMPDU_AGGREGATION = 1<<9,
- IEEE80211_HW_SUPPORTS_PS = 1<<10,
- IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11,
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
- IEEE80211_HW_MFP_CAPABLE = 1<<13,
- IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
- IEEE80211_HW_NO_AUTO_VIF = 1<<15,
- /* free slot */
- IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
- IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
- IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
- IEEE80211_HW_QUEUE_CONTROL = 1<<20,
- IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
- IEEE80211_HW_AP_LINK_PS = 1<<22,
- IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
- IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24,
- IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
- IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
- IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
- IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29,
- IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30,
+ IEEE80211_HW_HAS_RATE_CONTROL,
+ IEEE80211_HW_RX_INCLUDES_FCS,
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING,
+ IEEE80211_HW_SIGNAL_UNSPEC,
+ IEEE80211_HW_SIGNAL_DBM,
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC,
+ IEEE80211_HW_SPECTRUM_MGMT,
+ IEEE80211_HW_AMPDU_AGGREGATION,
+ IEEE80211_HW_SUPPORTS_PS,
+ IEEE80211_HW_PS_NULLFUNC_STACK,
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
+ IEEE80211_HW_MFP_CAPABLE,
+ IEEE80211_HW_WANT_MONITOR_VIF,
+ IEEE80211_HW_NO_AUTO_VIF,
+ IEEE80211_HW_SW_CRYPTO_CONTROL,
+ IEEE80211_HW_SUPPORT_FAST_XMIT,
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS,
+ IEEE80211_HW_CONNECTION_MONITOR,
+ IEEE80211_HW_QUEUE_CONTROL,
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK,
+ IEEE80211_HW_AP_LINK_PS,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW,
+ IEEE80211_HW_SUPPORTS_RC_TABLE,
+ IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF,
+ IEEE80211_HW_TIMING_BEACON_ONLY,
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES,
+ IEEE80211_HW_CHANCTX_STA_CSA,
+ IEEE80211_HW_SUPPORTS_CLONED_SKBS,
+ IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+
+ /* keep last, obviously */
+ NUM_IEEE80211_HW_FLAGS
};
/**
@@ -1732,6 +1970,8 @@ enum ieee80211_hw_flags {
* within &struct ieee80211_sta.
* @chanctx_data_size: size (in bytes) of the drv_priv data area
* within &struct ieee80211_chanctx_conf.
+ * @txq_data_size: size (in bytes) of the drv_priv data area
+ * within &struct ieee80211_txq.
*
* @max_rates: maximum number of alternate rate retry stages the hw
* can handle.
@@ -1763,8 +2003,8 @@ enum ieee80211_hw_flags {
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
*
* @netdev_features: netdev features to be set in each netdev created
- * from this HW. Note only HW checksum features are currently
- * compatible with mac80211. Other feature bits will be rejected.
+ * from this HW. Note that not all features are usable with mac80211,
+ * other features will be rejected during HW registration.
*
* @uapsd_queues: This bitmap is included in (re)association frame to indicate
* for each access category if it is uAPSD trigger-enabled and delivery-
@@ -1780,18 +2020,22 @@ enum ieee80211_hw_flags {
* @n_cipher_schemes: a size of an array of cipher schemes definitions.
* @cipher_schemes: a pointer to an array of cipher scheme definitions
* supported by HW.
+ *
+ * @txq_ac_max_pending: maximum number of frames per AC pending in all txq
+ * entries for a vif.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
struct wiphy *wiphy;
const char *rate_control_algorithm;
void *priv;
- u32 flags;
+ unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
unsigned int extra_tx_headroom;
unsigned int extra_beacon_tailroom;
int vif_data_size;
int sta_data_size;
int chanctx_data_size;
+ int txq_data_size;
u16 queues;
u16 max_listen_interval;
s8 max_signal;
@@ -1808,8 +2052,23 @@ struct ieee80211_hw {
u8 uapsd_max_sp_len;
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
+ int txq_ac_max_pending;
};
+static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
+ enum ieee80211_hw_flags flg)
+{
+ return test_bit(flg, hw->flags);
+}
+#define ieee80211_hw_check(hw, flg) _ieee80211_hw_check(hw, IEEE80211_HW_##flg)
+
+static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
+ enum ieee80211_hw_flags flg)
+{
+ return __set_bit(flg, hw->flags);
+}
+#define ieee80211_hw_set(hw, flg) _ieee80211_hw_set(hw, IEEE80211_HW_##flg)
+
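With the flags moved into a bitmap, drivers no longer OR constants into hw->flags; they use the two helpers above. A brief sketch of the new pattern:

static void example_advertise_capabilities(struct ieee80211_hw *hw)
{
        ieee80211_hw_set(hw, SIGNAL_DBM);
        ieee80211_hw_set(hw, AMPDU_AGGREGATION);

        /* any later code can test a capability the same way */
        if (ieee80211_hw_check(hw, MFP_CAPABLE))
                wiphy_dbg(hw->wiphy, "802.11w supported\n");
}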
/**
* struct ieee80211_scan_request - hw scan request
*
@@ -1945,6 +2204,11 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* added; if you return 0 then hw_key_idx must be assigned to the
* hardware key index, you are free to use the full u8 range.
*
+ * Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is
+ * set, mac80211 will not automatically fall back to software crypto if
+ * enabling hardware crypto failed. The set_key() call may also return the
+ * value 1 to permit this specific key/algorithm to be done in software.
+ *
* When the cmd is %DISABLE_KEY then it must succeed.
*
* Note that it is permissible to not decrypt a frame even if a key
@@ -2023,7 +2287,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* enabled whenever user has enabled powersave.
*
* Driver informs U-APSD client support by enabling
- * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
+ * %IEEE80211_VIF_SUPPORTS_UAPSD flag. The mode is configured through the
* uapsd parameter in conf_tx() operation. Hardware needs to send the QoS
* Nullfunc frames and stay awake until the service period has ended. To
* utilize U-APSD, dynamic powersave is disabled for voip AC and all frames
@@ -2318,10 +2582,6 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* stack. It is always safe to pass more frames than requested,
* but this has negative impact on power consumption.
*
- * @FIF_PROMISC_IN_BSS: promiscuous mode within your BSS,
- * think of the BSS as your network segment and then this corresponds
- * to the regular ethernet device promiscuous mode.
- *
* @FIF_ALLMULTI: pass all multicast frames, this is used if requested
* by the user or if the hardware is not capable of filtering by
* multicast address.
@@ -2338,18 +2598,16 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* mac80211 needs to do and the amount of CPU wakeups, so you should
* honour this flag if possible.
*
- * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS
- * is not set then only those addressed to this station.
+ * @FIF_CONTROL: pass control frames (except for PS Poll) addressed to this
+ * station
*
* @FIF_OTHER_BSS: pass frames destined to other BSSes
*
- * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only
- * those addressed to this station.
+ * @FIF_PSPOLL: pass PS Poll frames
*
* @FIF_PROBE_REQ: pass probe request frames
*/
enum ieee80211_filter_flags {
- FIF_PROMISC_IN_BSS = 1<<0,
FIF_ALLMULTI = 1<<1,
FIF_FCSFAIL = 1<<2,
FIF_PLCPFAIL = 1<<3,
@@ -2632,9 +2890,9 @@ enum ieee80211_reconfig_type {
* Returns zero if statistics are available.
* The callback can sleep.
*
- * @get_tkip_seq: If your device implements TKIP encryption in hardware this
- * callback should be provided to read the TKIP transmit IVs (both IV32
- * and IV16) for the given key from hardware.
+ * @get_key_seq: If your device implements encryption in hardware and does
+ * IV/PN assignment, then this callback should be provided to read the
+ * IV/PN for the given key from hardware.
* The callback must be atomic.
*
* @set_frag_threshold: Configuration of fragmentation threshold. Assign this
@@ -2696,6 +2954,14 @@ enum ieee80211_reconfig_type {
* is only used if the configured rate control algorithm actually uses
* the new rate table API, and is therefore optional. Must be atomic.
*
+ * @sta_statistics: Get statistics for this station. For example with beacon
+ * filtering, the statistics kept by mac80211 might not be accurate, so
+ * let the driver pre-fill the statistics. The driver can fill most of
+ * the values (indicating which by setting the filled bitmap), but not
+ * all of them make sense - see the source for which ones are possible.
+ * Statistics that the driver doesn't fill will be filled by mac80211.
+ * The callback can sleep.
+ *
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
* Returns a negative error code on failure.
@@ -2807,8 +3073,9 @@ enum ieee80211_reconfig_type {
* @set_bitrate_mask: Set a mask of rates to be used for rate control selection
* when transmitting a frame. Currently only legacy rates are handled.
* The callback can sleep.
- * @rssi_callback: Notify driver when the average RSSI goes above/below
- * thresholds that were registered previously. The callback can sleep.
+ * @event_callback: Notify driver about any event in mac80211. See
+ * &enum ieee80211_event_type for the different types.
+ * The callback must be atomic.
*
* @release_buffered_frames: Release buffered frames according to the given
* parameters. In the case where the driver buffers some frames for
@@ -2856,9 +3123,6 @@ enum ieee80211_reconfig_type {
* @get_et_strings: Ethtool API to get a set of strings to describe stats
* and perhaps other supported types of ethtool data-sets.
*
- * @get_rssi: Get current signal strength in dBm, the function is optional
- * and can sleep.
- *
* @mgd_prepare_tx: Prepare for transmitting a management frame for association
* before associated. In multi-channel scenarios, a virtual interface is
* bound to a channel before it is associated, but as it isn't associated
@@ -2959,6 +3223,8 @@ enum ieee80211_reconfig_type {
* response template is provided, together with the location of the
* switch-timing IE within the template. The skb can only be used within
* the function call.
+ *
+ * @wake_tx_queue: Called when new packets have been added to the queue.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3025,8 +3291,9 @@ struct ieee80211_ops {
struct ieee80211_vif *vif);
int (*get_stats)(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
- void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
- u32 *iv32, u16 *iv16);
+ void (*get_key_seq)(struct ieee80211_hw *hw,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq);
int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -3059,6 +3326,10 @@ struct ieee80211_ops {
void (*sta_rate_tbl_update)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+ void (*sta_statistics)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
int (*conf_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params);
@@ -3103,9 +3374,9 @@ struct ieee80211_ops {
bool (*tx_frames_pending)(struct ieee80211_hw *hw);
int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
- void (*rssi_callback)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_rssi_event rssi_event);
+ void (*event_callback)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event);
void (*allow_buffered_frames)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -3126,8 +3397,6 @@ struct ieee80211_ops {
void (*get_et_strings)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u32 sset, u8 *data);
- int (*get_rssi)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, s8 *rssi_dbm);
void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
@@ -3188,6 +3457,9 @@ struct ieee80211_ops {
void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_tdls_ch_sw_params *params);
+
+ void (*wake_tx_queue)(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
};
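For orientation, a sketch of an ops table wiring up the renamed and new callbacks above; all foo_* handlers are hypothetical driver functions and only a subset of the callbacks is shown:

static const struct ieee80211_ops foo_ops = {
        .tx               = foo_tx,
        .start            = foo_start,
        .stop             = foo_stop,
        .add_interface    = foo_add_interface,
        .remove_interface = foo_remove_interface,
        .config           = foo_config,
        .configure_filter = foo_configure_filter,
        .get_key_seq      = foo_get_key_seq,    /* replaces get_tkip_seq */
        .event_callback   = foo_event_callback, /* replaces rssi_callback */
        .sta_statistics   = foo_sta_statistics,
        .wake_tx_queue    = foo_wake_tx_queue,
};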
/**
@@ -3269,14 +3541,15 @@ enum ieee80211_tpt_led_trigger_flags {
};
#ifdef CONFIG_MAC80211_LEDS
-char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
- unsigned int flags,
- const struct ieee80211_tpt_blink *blink_table,
- unsigned int blink_table_len);
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len);
#endif
/**
* ieee80211_get_tx_led_name - get name of TX LED
@@ -3290,7 +3563,7 @@ char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_tx_led_name(hw);
@@ -3311,7 +3584,7 @@ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_rx_led_name(hw);
@@ -3332,7 +3605,7 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_assoc_led_name(hw);
@@ -3353,7 +3626,7 @@ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_radio_led_name(hw);
@@ -3374,7 +3647,7 @@ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
*
* Note: This function must be called before ieee80211_register_hw().
*/
-static inline char *
+static inline const char *
ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
const struct ieee80211_tpt_blink *blink_table,
unsigned int blink_table_len)
@@ -4055,30 +4328,6 @@ void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
u8 *k1, u8 *k2);
/**
- * struct ieee80211_key_seq - key sequence counter
- *
- * @tkip: TKIP data, containing IV32 and IV16 in host byte order
- * @ccmp: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- * @aes_cmac: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- */
-struct ieee80211_key_seq {
- union {
- struct {
- u32 iv32;
- u16 iv16;
- } tkip;
- struct {
- u8 pn[6];
- } ccmp;
- struct {
- u8 pn[6];
- } aes_cmac;
- };
-};
-
-/**
* ieee80211_get_key_tx_seq - get key TX sequence counter
*
* @keyconf: the parameter passed with the set key
@@ -4099,7 +4348,7 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
* ieee80211_get_key_rx_seq - get key RX sequence counter
*
* @keyconf: the parameter passed with the set key
- * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
* the value on TID 0 is also used for non-QoS frames. For
* CMAC, only TID 0 is valid.
* @seq: buffer to receive the sequence data
@@ -4135,7 +4384,7 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
* ieee80211_set_key_rx_seq - set key RX sequence counter
*
* @keyconf: the parameter passed with the set key
- * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * @tid: The TID, or -1 for the management frame value (CCMP/GCMP only);
* the value on TID 0 is also used for non-QoS frames. For
* CMAC, only TID 0 is valid.
* @seq: new sequence data
@@ -4297,13 +4546,33 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
* haven't been re-added to the driver yet.
* @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
* interfaces, even if they haven't been re-added to the driver yet.
+ * @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
*/
enum ieee80211_interface_iteration_flags {
IEEE80211_IFACE_ITER_NORMAL = 0,
IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
+ IEEE80211_IFACE_ITER_ACTIVE = BIT(1),
};
/**
+ * ieee80211_iterate_interfaces - iterate interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware and calls the callback for them. This includes active as well as
+ * inactive interfaces. This function allows the iterator function to sleep.
+ * Will iterate over a new interface during add_interface().
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
+/**
* ieee80211_iterate_active_interfaces - iterate active interfaces
*
* This function iterates over the interfaces associated with a given
@@ -4318,11 +4587,16 @@ enum ieee80211_interface_iteration_flags {
* @iterator: the iterator function to call
* @data: first argument of the iterator function
*/
-void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
- u32 iter_flags,
- void (*iterator)(void *data, u8 *mac,
- struct ieee80211_vif *vif),
- void *data);
+static inline void
+ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, u32 iter_flags,
+ void (*iterator)(void *data, u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data)
+{
+ ieee80211_iterate_interfaces(hw,
+ iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
+ iterator, data);
+}
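A small sketch of the new iterator in use; the foo_* names are invented and the interface count is only illustrative:

static void foo_count_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
        unsigned int *count = data;

        (*count)++;
}

static unsigned int foo_count_interfaces(struct ieee80211_hw *hw)
{
        unsigned int count = 0;

        /* includes interfaces that are down, unlike the _active_ variant */
        ieee80211_iterate_interfaces(hw, IEEE80211_IFACE_ITER_NORMAL,
                                     foo_count_iter, &count);
        return count;
}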
/**
* ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
@@ -5148,30 +5422,13 @@ int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid);
void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
/**
- * ieee80211_ie_split - split an IE buffer according to ordering
- *
- * @ies: the IE buffer
- * @ielen: the length of the IE buffer
- * @ids: an array with element IDs that are allowed before
- * the split
- * @n_ids: the size of the element ID array
- * @offset: offset where to start splitting in the buffer
- *
- * This function splits an IE buffer by updating the @offset
- * variable to point to the location where the buffer should be
- * split.
+ * ieee80211_tx_dequeue - dequeue a packet from a software tx queue
*
- * It assumes that the given IE buffer is well-formed, this
- * has to be guaranteed by the caller!
- *
- * It also assumes that the IEs in the buffer are ordered
- * correctly, if not the result of using this function will not
- * be ordered correctly either, i.e. it does no reordering.
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
*
- * The function returns the offset where the next part of the
- * buffer starts, which may be @ielen if the entire (remainder)
- * of the buffer should be used.
+ * Returns the skb if successful, %NULL if no frame was available.
*/
-size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
- const u8 *ids, int n_ids, size_t offset);
+struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
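A hedged sketch of how wake_tx_queue() is expected to pair with ieee80211_tx_dequeue(); foo_queue_for_hw() stands in for whatever the driver does to hand an skb to its hardware queue:

static void foo_wake_tx_queue(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq)
{
        struct sk_buff *skb;

        /* drain the software queue until mac80211 has nothing left */
        while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
                foo_queue_for_hw(hw, txq, skb);
}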
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index c823d91..f534a46 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -19,6 +19,9 @@
#include <net/af_ieee802154.h>
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
+#include <linux/unaligned/memmove.h>
+
+#include <net/cfg802154.h>
/* General MAC frame format:
* 2 bytes: Frame Control
@@ -28,99 +31,122 @@
*/
#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14)
-/* The following flags are used to indicate changed address settings from
+/**
+ * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
+ *
+ * The following flags are used to indicate changed address settings from
* the stack to the hardware.
+ *
+ * @IEEE802154_AFILT_SADDR_CHANGED: Indicates that the short address will be
+ * changed.
+ *
+ * @IEEE802154_AFILT_IEEEADDR_CHANGED: Indicates that the extended address
+ * will be changed.
+ *
+ * @IEEE802154_AFILT_PANID_CHANGED: Indicates that the pan id will be changed.
+ *
+ * @IEEE802154_AFILT_PANC_CHANGED: Indicates that the address filter will
+ * do frame address filtering as a pan coordinator.
*/
+enum ieee802154_hw_addr_filt_flags {
+ IEEE802154_AFILT_SADDR_CHANGED = BIT(0),
+ IEEE802154_AFILT_IEEEADDR_CHANGED = BIT(1),
+ IEEE802154_AFILT_PANID_CHANGED = BIT(2),
+ IEEE802154_AFILT_PANC_CHANGED = BIT(3),
+};
-/* indicates that the Short Address changed */
-#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001
-/* indicates that the IEEE Address changed */
-#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002
-/* indicates that the PAN ID changed */
-#define IEEE802154_AFILT_PANID_CHANGED 0x00000004
-/* indicates that PAN Coordinator status changed */
-#define IEEE802154_AFILT_PANC_CHANGED 0x00000008
-
+/**
+ * struct ieee802154_hw_addr_filt - hardware address filtering settings
+ *
+ * @pan_id: pan_id which should be set to the hardware address filter.
+ *
+ * @short_addr: short_addr which should be set to the hardware address filter.
+ *
+ * @ieee_addr: extended address which should be set to the hardware address
+ * filter.
+ *
+ * @pan_coord: boolean whether hardware filtering should operate as a coordinator.
+ */
struct ieee802154_hw_addr_filt {
- __le16 pan_id; /* Each independent PAN selects a unique
- * identifier. This PAN id allows communication
- * between devices within a network using short
- * addresses and enables transmissions between
- * devices across independent networks.
- */
+ __le16 pan_id;
__le16 short_addr;
__le64 ieee_addr;
- u8 pan_coord;
-};
-
-struct ieee802154_vif {
- int type;
-
- /* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ bool pan_coord;
};
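As an illustration of how the changed-flags enum is meant to be consumed, a sketch of a set_hw_addr_filt() implementation; struct foo_priv and the foo_write_*() register accessors are placeholders:

static int foo_set_hw_addr_filt(struct ieee802154_hw *hw,
                                struct ieee802154_hw_addr_filt *filt,
                                unsigned long changed)
{
        struct foo_priv *priv = hw->priv;

        if (changed & IEEE802154_AFILT_SADDR_CHANGED)
                foo_write_short_addr(priv, le16_to_cpu(filt->short_addr));
        if (changed & IEEE802154_AFILT_PANID_CHANGED)
                foo_write_pan_id(priv, le16_to_cpu(filt->pan_id));
        if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED)
                foo_write_ieee_addr(priv, filt->ieee_addr);
        if (changed & IEEE802154_AFILT_PANC_CHANGED)
                foo_write_pan_coord(priv, filt->pan_coord);

        return 0;
}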
+/**
+ * struct ieee802154_hw - ieee802154 hardware
+ *
+ * @extra_tx_headroom: headroom to reserve in each transmit skb for use by the
+ * driver (e.g. for transmit headers).
+ *
+ * @flags: hardware flags, see &enum ieee802154_hw_flags
+ *
+ * @parent: parent device of the hardware.
+ *
+ * @priv: pointer to private area that was allocated for driver use along with
+ * this structure.
+ *
+ * @phy: This points to the &struct wpan_phy allocated for this 802.15.4 PHY.
+ */
struct ieee802154_hw {
/* filled by the driver */
int extra_tx_headroom;
u32 flags;
struct device *parent;
+ void *priv;
/* filled by mac802154 core */
- struct ieee802154_hw_addr_filt hw_filt;
- void *priv;
struct wpan_phy *phy;
- size_t vif_data_size;
};
-/* Checksum is in hardware and is omitted from a packet
+/**
+ * enum ieee802154_hw_flags - hardware flags
*
- * These following flags are used to indicate hardware capabilities to
+ * These flags are used to indicate hardware capabilities to
* the stack. Generally, flags here should have their meaning
* done in a way that the simplest hardware doesn't need setting
* any particular flags. There are some exceptions to this rule,
* however, so you are advised to review these flags carefully.
+ *
+ * @IEEE802154_HW_TX_OMIT_CKSUM: Indicates that xmitter will add FCS on its
+ * own.
+ *
+ * @IEEE802154_HW_LBT: Indicates that transceiver will support listen before
+ * transmit.
+ *
+ * @IEEE802154_HW_CSMA_PARAMS: Indicates that transceiver will support csma
+ * parameters (max_be, min_be, backoff exponents).
+ *
+ * @IEEE802154_HW_FRAME_RETRIES: Indicates that transceiver will support ARET
+ * frame retries setting.
+ *
+ * @IEEE802154_HW_AFILT: Indicates that transceiver will support hardware
+ * address filter setting.
+ *
+ * @IEEE802154_HW_PROMISCUOUS: Indicates that transceiver will support
+ * promiscuous mode setting.
+ *
+ * @IEEE802154_HW_RX_OMIT_CKSUM: Indicates that receiver omits FCS.
+ *
+ * @IEEE802154_HW_RX_DROP_BAD_CKSUM: Indicates that receiver will not filter
+ * frames with bad checksum.
*/
-
-/* Indicates that xmitter will add FCS on it's own. */
-#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001
-/* Indicates that receiver will autorespond with ACK frames. */
-#define IEEE802154_HW_AACK 0x00000002
-/* Indicates that transceiver will support transmit power setting. */
-#define IEEE802154_HW_TXPOWER 0x00000004
-/* Indicates that transceiver will support listen before transmit. */
-#define IEEE802154_HW_LBT 0x00000008
-/* Indicates that transceiver will support cca mode setting. */
-#define IEEE802154_HW_CCA_MODE 0x00000010
-/* Indicates that transceiver will support cca ed level setting. */
-#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020
-/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
- * settings. */
-#define IEEE802154_HW_CSMA_PARAMS 0x00000040
-/* Indicates that transceiver will support ARET frame retries setting. */
-#define IEEE802154_HW_FRAME_RETRIES 0x00000080
-/* Indicates that transceiver will support hardware address filter setting. */
-#define IEEE802154_HW_AFILT 0x00000100
-/* Indicates that transceiver will support promiscuous mode setting. */
-#define IEEE802154_HW_PROMISCUOUS 0x00000200
-/* Indicates that receiver omits FCS. */
-#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400
-/* Indicates that receiver will not filter frames with bad checksum. */
-#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800
+enum ieee802154_hw_flags {
+ IEEE802154_HW_TX_OMIT_CKSUM = BIT(0),
+ IEEE802154_HW_LBT = BIT(1),
+ IEEE802154_HW_CSMA_PARAMS = BIT(2),
+ IEEE802154_HW_FRAME_RETRIES = BIT(3),
+ IEEE802154_HW_AFILT = BIT(4),
+ IEEE802154_HW_PROMISCUOUS = BIT(5),
+ IEEE802154_HW_RX_OMIT_CKSUM = BIT(6),
+ IEEE802154_HW_RX_DROP_BAD_CKSUM = BIT(7),
+};
/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */
#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \
IEEE802154_HW_RX_OMIT_CKSUM)
-/* This groups the most common CSMA support fields into one. */
-#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \
- IEEE802154_HW_CCA_ED_LEVEL | \
- IEEE802154_HW_CSMA_PARAMS)
-
-/* This groups the most common ARET support fields into one. */
-#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \
- IEEE802154_HW_FRAME_RETRIES)
-
/* struct ieee802154_ops - callbacks from mac802154 to the driver
*
* This structure contains various callbacks that the driver may
@@ -168,7 +194,7 @@ struct ieee802154_hw {
* Returns either zero, or negative errno.
*
* set_txpower:
- * Set radio transmit power in dB. Called with pib_lock held.
+ * Set radio transmit power in mBm. Called with pib_lock held.
* Returns either zero, or negative errno.
*
* set_lbt
@@ -181,7 +207,7 @@ struct ieee802154_hw {
* Returns either zero, or negative errno.
*
* set_cca_ed_level
- * Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ * Sets the CCA energy detection threshold in mBm. Called with pib_lock
* held.
* Returns either zero, or negative errno.
*
@@ -210,11 +236,11 @@ struct ieee802154_ops {
int (*set_hw_addr_filt)(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed);
- int (*set_txpower)(struct ieee802154_hw *hw, int db);
+ int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm);
int (*set_lbt)(struct ieee802154_hw *hw, bool on);
- int (*set_cca_mode)(struct ieee802154_hw *hw, u8 mode);
- int (*set_cca_ed_level)(struct ieee802154_hw *hw,
- s32 level);
+ int (*set_cca_mode)(struct ieee802154_hw *hw,
+ const struct wpan_phy_cca *cca);
+ int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm);
int (*set_csma_params)(struct ieee802154_hw *hw,
u8 min_be, u8 max_be, u8 retries);
int (*set_frame_retries)(struct ieee802154_hw *hw,
@@ -230,9 +256,7 @@ struct ieee802154_ops {
*/
static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
{
- __le64 tmp = (__force __le64)swab64p(be64_src);
-
- memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+ __put_unaligned_memmove64(swab64p(be64_src), le64_dst);
}
/**
@@ -242,24 +266,112 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
*/
static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
{
- __be64 tmp = (__force __be64)swab64p(le64_src);
-
- memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN);
+ __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
}
-/* Basic interface to register ieee802154 hwice */
+/**
+ * ieee802154_alloc_hw - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac802154 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee802154_hw; the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
struct ieee802154_hw *
ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
+
+/**
+ * ieee802154_free_hw - free hardware descriptor
+ *
+ * This function frees everything that was allocated, including the
+ * private data for the driver. You must call ieee802154_unregister_hw()
+ * before calling this function.
+ *
+ * @hw: the hardware to free
+ */
void ieee802154_free_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_register_hw - Register hardware device
+ *
+ * You must call this function before any other functions in
+ * mac802154. Note that before a hardware device can be registered, you
+ * need to fill the contained wpan_phy's information.
+ *
+ * @hw: the device to register as returned by ieee802154_alloc_hw()
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
int ieee802154_register_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_unregister_hw - Unregister a hardware device
+ *
+ * This function instructs mac802154 to free allocated resources
+ * and unregister netdevices from the networking subsystem.
+ *
+ * @hw: the hardware to unregister
+ */
void ieee802154_unregister_hw(struct ieee802154_hw *hw);
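A rough probe-path sketch tying alloc/register together; struct foo_priv, foo_ops and the SPI context are assumptions, not part of the API:

static int foo_probe(struct spi_device *spi)
{
        struct ieee802154_hw *hw;
        struct foo_priv *priv;
        int ret;

        hw = ieee802154_alloc_hw(sizeof(*priv), &foo_ops);
        if (!hw)
                return -ENOMEM;

        priv = hw->priv;
        priv->hw = hw;
        hw->parent = &spi->dev;
        hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT;

        ret = ieee802154_register_hw(hw);
        if (ret)
                ieee802154_free_hw(hw);
        return ret;
}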
+/**
+ * ieee802154_rx - receive frame
+ *
+ * Use this function to hand received frames to mac802154. The receive
+ * buffer in @skb must start with an IEEE 802.15.4 header. If a paged @skb
+ * is used, the driver is recommended to put the ieee802154 header of the
+ * frame on the linear part of the @skb to avoid memory allocation and/or
+ * memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other.
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ */
void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
+
+/**
+ * ieee802154_rx_irqsafe - receive frame
+ *
+ * Like ieee802154_rx() but can be called in IRQ context
+ * (internally defers to a tasklet.)
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ * @lqi: link quality indicator
+ */
void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
u8 lqi);
-
+/**
+ * ieee802154_wake_queue - wake ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_wake_queue.
+ */
void ieee802154_wake_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_stop_queue - stop ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_stop_queue.
+ */
void ieee802154_stop_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_xmit_complete - frame transmission complete
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @ifs_handling: indicate interframe space handling
+ */
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
bool ifs_handling);
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 6bbda34..b3a7751 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -156,24 +156,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev, const void *pkey)
{
- struct neigh_hash_table *nht;
- const u32 *p32 = pkey;
- struct neighbour *n;
- u32 hash_val;
-
- nht = rcu_dereference_bh(nd_tbl.nht);
- hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
- for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference_bh(n->next)) {
- u32 *n32 = (u32 *) n->primary_key;
- if (n->dev == dev &&
- ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
- (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0)
- return n;
- }
-
- return NULL;
+ return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
}
static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 76f7084..bd33e66 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -42,6 +42,7 @@ enum {
NEIGH_VAR_MCAST_PROBES,
NEIGH_VAR_UCAST_PROBES,
NEIGH_VAR_APP_PROBES,
+ NEIGH_VAR_MCAST_REPROBES,
NEIGH_VAR_RETRANS_TIME,
NEIGH_VAR_BASE_REACHABLE_TIME,
NEIGH_VAR_DELAY_PROBE_TIME,
@@ -65,9 +66,7 @@ enum {
};
struct neigh_parms {
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
struct net_device *dev;
struct list_head list;
int (*neigh_setup)(struct neighbour *);
@@ -167,9 +166,7 @@ struct neigh_ops {
struct pneigh_entry {
struct pneigh_entry *next;
-#ifdef CONFIG_NET_NS
- struct net *net;
-#endif
+ possible_net_t net;
struct net_device *dev;
u8 flags;
u8 key[0];
@@ -193,9 +190,11 @@ struct neigh_table {
int family;
int entry_size;
int key_len;
+ __be16 protocol;
__u32 (*hash)(const void *pkey,
const struct net_device *dev,
__u32 *hash_rnd);
+ bool (*key_eq)(const struct neighbour *, const void *pkey);
int (*constructor)(struct neighbour *);
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
@@ -224,6 +223,7 @@ enum {
NEIGH_ND_TABLE = 1,
NEIGH_DN_TABLE = 2,
NEIGH_NR_TABLES,
+ NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
};
static inline int neigh_parms_family(struct neigh_parms *p)
@@ -246,6 +246,57 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
+
+static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
+{
+ return *(const u16 *)n->primary_key == *(const u16 *)pkey;
+}
+
+static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
+{
+ return *(const u32 *)n->primary_key == *(const u32 *)pkey;
+}
+
+static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
+{
+ const u32 *n32 = (const u32 *)n->primary_key;
+ const u32 *p32 = pkey;
+
+ return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+ (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
+}
+
+static inline struct neighbour *___neigh_lookup_noref(
+ struct neigh_table *tbl,
+ bool (*key_eq)(const struct neighbour *n, const void *pkey),
+ __u32 (*hash)(const void *pkey,
+ const struct net_device *dev,
+ __u32 *hash_rnd),
+ const void *pkey,
+ struct net_device *dev)
+{
+ struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
+ struct neighbour *n;
+ u32 hash_val;
+
+ hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
+ if (n->dev == dev && key_eq(n, pkey))
+ return n;
+ }
+
+ return NULL;
+}
+
+static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev)
+{
+ return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
+}
+
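A brief sketch of the lookup helper in use, assuming the caller already holds rcu_read_lock_bh() and that arp_tbl (from net/arp.h) is the table of interest:

static struct neighbour *foo_lookup_v4(struct net_device *dev, __be32 nexthop)
{
        /* no reference is taken; only valid within the RCU-BH section */
        return __neigh_lookup_noref(&arp_tbl, &nexthop, dev);
}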
void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -268,7 +319,6 @@ void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
-int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
@@ -306,6 +356,7 @@ void neigh_for_each(struct neigh_table *tbl,
void (*cb)(struct neighbour *, void *), void *cookie);
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *));
+int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
void pneigh_for_each(struct neigh_table *tbl,
void (*cb)(struct pneigh_entry *));
@@ -459,4 +510,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
memcpy(dst, n->ha, dev->addr_len);
} while (read_seqretry(&n->ha_lock, seq));
}
+
+
#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2e8756b8..e951453 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -26,7 +26,10 @@
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
+#include <net/netns/mpls.h>
#include <linux/ns_common.h>
+#include <linux/idr.h>
+#include <linux/skbuff.h>
struct user_namespace;
struct proc_dir_entry;
@@ -48,18 +51,17 @@ struct net {
atomic_t count; /* To decided when the network
* namespace should be shut down.
*/
-#ifdef NETNS_REFCNT_DEBUG
- atomic_t use_count; /* To track references we
- * destroy on demand
- */
-#endif
spinlock_t rules_mod_lock;
+ atomic64_t cookie_gen;
+
struct list_head list; /* list of network namespaces */
struct list_head cleanup_list; /* namespaces on death row */
struct list_head exit_list; /* Use only net_mutex */
struct user_namespace *user_ns; /* Owning user namespace */
+ spinlock_t nsid_lock;
+ struct idr netns_ids;
struct ns_common ns;
@@ -129,6 +131,9 @@ struct net {
#if IS_ENABLED(CONFIG_IP_VS)
struct netns_ipvs *ipvs;
#endif
+#if IS_ENABLED(CONFIG_MPLS)
+ struct netns_mpls mpls;
+#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
};
@@ -229,48 +234,27 @@ int net_eq(const struct net *net1, const struct net *net2)
#endif
-#ifdef NETNS_REFCNT_DEBUG
-static inline struct net *hold_net(struct net *net)
-{
- if (net)
- atomic_inc(&net->use_count);
- return net;
-}
-
-static inline void release_net(struct net *net)
-{
- if (net)
- atomic_dec(&net->use_count);
-}
-#else
-static inline struct net *hold_net(struct net *net)
-{
- return net;
-}
-
-static inline void release_net(struct net *net)
-{
-}
-#endif
-
+typedef struct {
#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
+} possible_net_t;
-static inline void write_pnet(struct net **pnet, struct net *net)
+static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
- *pnet = net;
+#ifdef CONFIG_NET_NS
+ pnet->net = net;
+#endif
}
-static inline struct net *read_pnet(struct net * const *pnet)
+static inline struct net *read_pnet(const possible_net_t *pnet)
{
- return *pnet;
-}
-
+#ifdef CONFIG_NET_NS
+ return pnet->net;
#else
-
-#define write_pnet(pnet, net) do { (void)(net);} while (0)
-#define read_pnet(pnet) (&init_net)
-
+ return &init_net;
#endif
+}
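The intended usage pattern, sketched with an invented foo_entry structure: embed possible_net_t instead of an #ifdef'd struct net pointer and always go through the helpers:

struct foo_entry {
        possible_net_t   net;
        struct list_head list;
};

static void foo_entry_init(struct foo_entry *e, struct net *net)
{
        write_pnet(&e->net, net);
}

static bool foo_entry_in_net(const struct foo_entry *e, const struct net *net)
{
        return net_eq(read_pnet(&e->net), net);
}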
#define for_each_net(VAR) \
list_for_each_entry(VAR, &net_namespace_list, list)
@@ -290,6 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
#define __net_initconst __initconst
#endif
+int peernet2id_alloc(struct net *net, struct net *peer);
+int peernet2id(struct net *net, struct net *peer);
+bool peernet_has_id(struct net *net, struct net *peer);
+struct net *get_net_ns_by_id(struct net *net, int id);
+
struct pernet_operations {
struct list_head list;
int (*init)(struct net *net);
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 2aa6048..bab824b 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -1,6 +1,66 @@
#ifndef _BR_NETFILTER_H_
#define _BR_NETFILTER_H_
+#include "../../../net/bridge/br_private.h"
+
+static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
+{
+ skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
+
+ if (likely(skb->nf_bridge))
+ atomic_set(&(skb->nf_bridge->use), 1);
+
+ return skb->nf_bridge;
+}
+
+void nf_bridge_update_protocol(struct sk_buff *skb);
+
+static inline struct nf_bridge_info *
+nf_bridge_info_get(const struct sk_buff *skb)
+{
+ return skb->nf_bridge;
+}
+
+unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
+
+static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
+{
+ unsigned int len = nf_bridge_encap_header_len(skb);
+
+ skb_push(skb, len);
+ skb->network_header -= len;
+}
+
+int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb);
+
+static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
+{
+ struct net_bridge_port *port;
+
+ port = br_port_get_rcu(dev);
+ return port ? &port->br->fake_rtable : NULL;
+}
+
+struct net_device *setup_pre_routing(struct sk_buff *skb);
void br_netfilter_enable(void);
+#if IS_ENABLED(CONFIG_IPV6)
+int br_validate_ipv6(struct sk_buff *skb);
+unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
+#else
+static inline int br_validate_ipv6(struct sk_buff *skb)
+{
+ return -1;
+}
+
+static inline unsigned int
+br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ return NF_DROP;
+}
+#endif
+
#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 03e928a..77862c3 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -5,18 +5,14 @@
#include <net/ip.h>
#include <net/icmp.h>
-static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
-{
- icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
-}
-
+void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
void nf_send_reset(struct sk_buff *oldskb, int hook);
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook);
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
- __be16 protocol, int ttl);
+ __u8 protocol, int ttl);
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth);
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 23216d4..0ea4fa3 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -3,15 +3,8 @@
#include <linux/icmpv6.h>
-static inline void
-nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
- unsigned int hooknum)
-{
- if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
- skb_in->dev = net->loopback_dev;
-
- icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
-}
+void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
+ unsigned int hooknum);
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
@@ -20,7 +13,7 @@ const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
unsigned int *otcplen, int hook);
struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
- __be16 protocol, int hoplimit);
+ __u8 protocol, int hoplimit);
void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index f0daed2..095433b 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -95,9 +95,8 @@ struct nf_conn {
/* Timer function; drops refcnt when it goes off. */
struct timer_list timeout;
-#ifdef CONFIG_NET_NS
- struct net *ct_net;
-#endif
+ possible_net_t ct_net;
+
/* all members below initialized via memset */
u8 __nfct_init_offset[0];
@@ -191,8 +190,6 @@ __nf_conntrack_find(struct net *net, u16 zone,
int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
-void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
-
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num, struct nf_conntrack_tuple *tuple);
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 534e1f2..57639fc 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -79,6 +79,16 @@ void nf_log_packet(struct net *net,
const struct nf_loginfo *li,
const char *fmt, ...);
+__printf(8, 9)
+void nf_log_trace(struct net *net,
+ u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *li,
+ const char *fmt, ...);
+
struct nf_log_buf;
struct nf_log_buf *nf_log_buf_open(void);
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index 340c013..a312732 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -44,40 +44,32 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
unsigned int hooknum);
unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
@@ -85,40 +77,32 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
unsigned int hooknum, unsigned int hdrlen);
unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
unsigned int (*do_chain)(const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
+ const struct nf_hook_state *state,
struct nf_conn *ct));
#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 84a53d7..e863585 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -12,12 +12,8 @@ struct nf_queue_entry {
unsigned int id;
struct nf_hook_ops *elem;
- u_int8_t pf;
+ struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
- unsigned int hook;
- struct net_device *indev;
- struct net_device *outdev;
- int (*okfn)(struct sk_buff *);
/* extra space to store route keys */
};
@@ -28,6 +24,8 @@ struct nf_queue_entry {
struct nf_queue_handler {
int (*outfn)(struct nf_queue_entry *entry,
unsigned int queuenum);
+ void (*nf_hook_drop)(struct net *net,
+ struct nf_hook_ops *ops);
};
void nf_register_queue_handler(const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3ae969e..2a24668 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1,6 +1,7 @@
#ifndef _NET_NF_TABLES_H
#define _NET_NF_TABLES_H
+#include <linux/module.h>
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
@@ -26,40 +27,53 @@ struct nft_pktinfo {
static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
pkt->skb = skb;
- pkt->in = pkt->xt.in = in;
- pkt->out = pkt->xt.out = out;
+ pkt->in = pkt->xt.in = state->in;
+ pkt->out = pkt->xt.out = state->out;
pkt->ops = ops;
pkt->xt.hooknum = ops->hooknum;
pkt->xt.family = ops->pf;
}
+/**
+ * struct nft_verdict - nf_tables verdict
+ *
+ * @code: nf_tables/netfilter verdict code
+ * @chain: destination chain for NFT_JUMP/NFT_GOTO
+ */
+struct nft_verdict {
+ u32 code;
+ struct nft_chain *chain;
+};
+
struct nft_data {
union {
- u32 data[4];
- struct {
- u32 verdict;
- struct nft_chain *chain;
- };
+ u32 data[4];
+ struct nft_verdict verdict;
};
} __attribute__((aligned(__alignof__(u64))));
-static inline int nft_data_cmp(const struct nft_data *d1,
- const struct nft_data *d2,
- unsigned int len)
-{
- return memcmp(d1->data, d2->data, len);
-}
+/**
+ * struct nft_regs - nf_tables register set
+ *
+ * @data: data registers
+ * @verdict: verdict register
+ *
+ * The first four data registers alias to the verdict register.
+ */
+struct nft_regs {
+ union {
+ u32 data[20];
+ struct nft_verdict verdict;
+ };
+};
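For context, a sketch of an expression eval() writing into the new register set; struct foo_expr_priv and its dreg field are illustrative only:

struct foo_expr_priv {
        enum nft_registers      dreg:8;
};

static void foo_expr_eval(const struct nft_expr *expr,
                          struct nft_regs *regs,
                          const struct nft_pktinfo *pkt)
{
        const struct foo_expr_priv *priv = nft_expr_priv(expr);

        /* store the packet length in the destination data register */
        regs->data[priv->dreg] = pkt->skb->len;
}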
-static inline void nft_data_copy(struct nft_data *dst,
- const struct nft_data *src)
+static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
+ unsigned int len)
{
- BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
- *(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
- *(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+ memcpy(dst, src, len);
}
static inline void nft_data_debug(const struct nft_data *data)
@@ -97,7 +111,8 @@ struct nft_data_desc {
unsigned int len;
};
-int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+int nft_data_init(const struct nft_ctx *ctx,
+ struct nft_data *data, unsigned int size,
struct nft_data_desc *desc, const struct nlattr *nla);
void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
@@ -113,28 +128,42 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
}
-int nft_validate_input_register(enum nft_registers reg);
-int nft_validate_output_register(enum nft_registers reg);
-int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
- const struct nft_data *data,
- enum nft_data_types type);
+unsigned int nft_parse_register(const struct nlattr *attr);
+int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
+
+int nft_validate_register_load(enum nft_registers reg, unsigned int len);
+int nft_validate_register_store(const struct nft_ctx *ctx,
+ enum nft_registers reg,
+ const struct nft_data *data,
+ enum nft_data_types type, unsigned int len);
+
+/**
+ * struct nft_userdata - user defined data associated with an object
+ *
+ * @len: length of the data
+ * @data: content
+ *
+ * The presence of user data is indicated in an object specific fashion,
+ * so a length of zero can't occur and the value "len" indicates data
+ * of length len + 1.
+ */
+struct nft_userdata {
+ u8 len;
+ unsigned char data[0];
+};
/**
* struct nft_set_elem - generic representation of set elements
*
- * @cookie: implementation specific element cookie
* @key: element key
- * @data: element data (maps only)
- * @flags: element flags (end of interval)
- *
- * The cookie can be used to store a handle to the element for subsequent
- * removal.
+ * @priv: element private data and extensions
*/
struct nft_set_elem {
- void *cookie;
- struct nft_data key;
- struct nft_data data;
- u32 flags;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } key;
+ void *priv;
};
struct nft_set;
@@ -186,11 +215,16 @@ struct nft_set_estimate {
enum nft_set_class class;
};
+struct nft_set_ext;
+struct nft_expr;
+
/**
* struct nft_set_ops - nf_tables set operations
*
* @lookup: look up an element within the set
* @insert: insert new element into set
+ * @activate: activate new element in the next generation
+ * @deactivate: deactivate element in the next generation
* @remove: remove element from set
* @walk: iterate over all set elements
* @privsize: function to return size of set private data
@@ -198,16 +232,28 @@ struct nft_set_estimate {
* @destroy: destroy private data of set instance
* @list: nf_tables_set_ops list node
* @owner: module reference
+ * @elemsize: element private size
* @features: features supported by the implementation
*/
struct nft_set_ops {
bool (*lookup)(const struct nft_set *set,
- const struct nft_data *key,
- struct nft_data *data);
- int (*get)(const struct nft_set *set,
- struct nft_set_elem *elem);
+ const u32 *key,
+ const struct nft_set_ext **ext);
+ bool (*update)(struct nft_set *set,
+ const u32 *key,
+ void *(*new)(struct nft_set *,
+ const struct nft_expr *,
+ struct nft_regs *),
+ const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_set_ext **ext);
+
int (*insert)(const struct nft_set *set,
const struct nft_set_elem *elem);
+ void (*activate)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
+ void * (*deactivate)(const struct nft_set *set,
+ const struct nft_set_elem *elem);
void (*remove)(const struct nft_set *set,
const struct nft_set_elem *elem);
void (*walk)(const struct nft_ctx *ctx,
@@ -225,6 +271,7 @@ struct nft_set_ops {
struct list_head list;
struct module *owner;
+ unsigned int elemsize;
u32 features;
};
@@ -241,8 +288,12 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @dtype: data type (verdict or numeric type defined by userspace)
* @size: maximum set size
* @nelems: number of elements
+ * @ndeact: number of deactivated elements queued for removal
+ * @timeout: default timeout value in msecs
+ * @gc_int: garbage collection interval in msecs
* @policy: set parameterization (see enum nft_set_policies)
* @ops: set ops
+ * @pnet: network namespace
* @flags: set flags
* @klen: key length
* @dlen: data length
@@ -255,10 +306,14 @@ struct nft_set {
u32 ktype;
u32 dtype;
u32 size;
- u32 nelems;
+ atomic_t nelems;
+ u32 ndeact;
+ u64 timeout;
+ u32 gc_int;
u16 policy;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
+ possible_net_t pnet;
u16 flags;
u8 klen;
u8 dlen;
@@ -271,16 +326,27 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline struct nft_set *nft_set_container_of(const void *priv)
+{
+ return (void *)priv - offsetof(struct nft_set, data);
+}
+
struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
const struct nlattr *nla);
struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
const struct nlattr *nla);
+static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
+{
+ return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ;
+}
+
/**
* struct nft_set_binding - nf_tables set binding
*
* @list: set bindings list node
* @chain: chain containing the rule bound to the set
+ * @flags: set action flags
*
* A set binding contains all information necessary for validation
* of new elements added to a bound set.
@@ -288,6 +354,7 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
struct nft_set_binding {
struct list_head list;
const struct nft_chain *chain;
+ u32 flags;
};
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
@@ -295,6 +362,215 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
+/**
+ * enum nft_set_extensions - set extension type IDs
+ *
+ * @NFT_SET_EXT_KEY: element key
+ * @NFT_SET_EXT_DATA: mapping data
+ * @NFT_SET_EXT_FLAGS: element flags
+ * @NFT_SET_EXT_TIMEOUT: element timeout
+ * @NFT_SET_EXT_EXPIRATION: element expiration time
+ * @NFT_SET_EXT_USERDATA: user data associated with the element
+ * @NFT_SET_EXT_EXPR: expression associated with the element
+ * @NFT_SET_EXT_NUM: number of extension types
+ */
+enum nft_set_extensions {
+ NFT_SET_EXT_KEY,
+ NFT_SET_EXT_DATA,
+ NFT_SET_EXT_FLAGS,
+ NFT_SET_EXT_TIMEOUT,
+ NFT_SET_EXT_EXPIRATION,
+ NFT_SET_EXT_USERDATA,
+ NFT_SET_EXT_EXPR,
+ NFT_SET_EXT_NUM
+};
+
+/**
+ * struct nft_set_ext_type - set extension type
+ *
+ * @len: fixed part length of the extension
+ * @align: alignment requirements of the extension
+ */
+struct nft_set_ext_type {
+ u8 len;
+ u8 align;
+};
+
+extern const struct nft_set_ext_type nft_set_ext_types[];
+
+/**
+ * struct nft_set_ext_tmpl - set extension template
+ *
+ * @len: length of extension area
+ * @offset: offsets of individual extension types
+ */
+struct nft_set_ext_tmpl {
+ u16 len;
+ u8 offset[NFT_SET_EXT_NUM];
+};
+
+/**
+ * struct nft_set_ext - set extensions
+ *
+ * @genmask: generation mask
+ * @offset: offsets of individual extension types
+ * @data: beginning of extension data
+ */
+struct nft_set_ext {
+ u8 genmask;
+ u8 offset[NFT_SET_EXT_NUM];
+ char data[0];
+};
+
+static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
+{
+ memset(tmpl, 0, sizeof(*tmpl));
+ tmpl->len = sizeof(struct nft_set_ext);
+}
+
+static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+ unsigned int len)
+{
+ tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align);
+ BUG_ON(tmpl->len > U8_MAX);
+ tmpl->offset[id] = tmpl->len;
+ tmpl->len += nft_set_ext_types[id].len + len;
+}
+
+static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+{
+ nft_set_ext_add_length(tmpl, id, 0);
+}
+
+static inline void nft_set_ext_init(struct nft_set_ext *ext,
+ const struct nft_set_ext_tmpl *tmpl)
+{
+ memcpy(ext->offset, tmpl->offset, sizeof(ext->offset));
+}
+
+static inline bool __nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+ return !!ext->offset[id];
+}
+
+static inline bool nft_set_ext_exists(const struct nft_set_ext *ext, u8 id)
+{
+ return ext && __nft_set_ext_exists(ext, id);
+}
+
+static inline void *nft_set_ext(const struct nft_set_ext *ext, u8 id)
+{
+ return (void *)ext + ext->offset[id];
+}
+
+static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_KEY);
+}
+
+static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_DATA);
+}
+
+static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
+}
+
+static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
+}
+
+static inline unsigned long *nft_set_ext_expiration(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
+}
+
+static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_USERDATA);
+}
+
+static inline struct nft_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_EXPR);
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+ return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
+ time_is_before_eq_jiffies(*nft_set_ext_expiration(ext));
+}
+
+static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
+ void *elem)
+{
+ return elem + set->ops->elemsize;
+}
+
+void *nft_set_elem_init(const struct nft_set *set,
+ const struct nft_set_ext_tmpl *tmpl,
+ const u32 *key, const u32 *data,
+ u64 timeout, gfp_t gfp);
+void nft_set_elem_destroy(const struct nft_set *set, void *elem);
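A hedged sketch of the two-pass template flow above, not part of the patch: callers first describe which extensions an element carries, then allocate one blob of tmpl.len bytes and record the computed offsets in it. In-tree callers go through nft_set_elem_init() declared above; the function name, the klen parameter and the kzalloc() call below are illustrative stand-ins only.

	static struct nft_set_ext *example_build_ext(unsigned int klen, gfp_t gfp)
	{
		struct nft_set_ext_tmpl tmpl;
		struct nft_set_ext *ext;

		nft_set_ext_prepare(&tmpl);			/* len = sizeof(struct nft_set_ext) */
		nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, klen);	/* variable-length key */
		nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);	/* fixed-size flags slot */

		ext = kzalloc(tmpl.len, gfp);			/* one allocation covers all extensions */
		if (ext == NULL)
			return NULL;
		nft_set_ext_init(ext, &tmpl);			/* copy the per-type offsets */

		if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS))
			*nft_set_ext_flags(ext) = 0;
		return ext;
	}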
+
+/**
+ * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
+ *
+ * @rcu: rcu head
+ * @set: set the elements belong to
+ * @cnt: count of elements
+ */
+struct nft_set_gc_batch_head {
+ struct rcu_head rcu;
+ const struct nft_set *set;
+ unsigned int cnt;
+};
+
+#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \
+ sizeof(struct nft_set_gc_batch_head)) / \
+ sizeof(void *))
+
+/**
+ * struct nft_set_gc_batch - nf_tables set garbage collection batch
+ *
+ * @head: GC batch head
+ * @elems: garbage collection elements
+ */
+struct nft_set_gc_batch {
+ struct nft_set_gc_batch_head head;
+ void *elems[NFT_SET_GC_BATCH_SIZE];
+};
+
+struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
+ gfp_t gfp);
+void nft_set_gc_batch_release(struct rcu_head *rcu);
+
+static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
+{
+ if (gcb != NULL)
+ call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
+}
+
+static inline struct nft_set_gc_batch *
+nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
+ gfp_t gfp)
+{
+ if (gcb != NULL) {
+ if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
+ return gcb;
+ nft_set_gc_batch_complete(gcb);
+ }
+ return nft_set_gc_batch_alloc(set, gfp);
+}
+
+static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
+ void *elem)
+{
+ gcb->elems[gcb->head.cnt++] = elem;
+}
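A GC run is expected to batch doomed elements until a page-sized batch fills up and then hand the whole batch to RCU. A minimal sketch of that loop using only the helpers above (the function name and the dead[] array are hypothetical):

	static void example_gc_round(const struct nft_set *set,
				     void **dead, unsigned int n)
	{
		struct nft_set_gc_batch *gcb = NULL;
		unsigned int i;

		for (i = 0; i < n; i++) {
			/* reuses the current batch, or allocates a fresh one once it is full */
			gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
			if (gcb == NULL)
				break;		/* allocation failure: retry on the next run */
			nft_set_gc_batch_add(gcb, dead[i]);
		}
		/* queue the (possibly NULL) batch; elements are released after a grace period */
		nft_set_gc_batch_complete(gcb);
	}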
/**
* struct nft_expr_type - nf_tables expression type
@@ -307,6 +583,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
* @policy: netlink attribute policy
* @maxattr: highest netlink attribute number
* @family: address family for AF-specific types
+ * @flags: expression type flags
*/
struct nft_expr_type {
const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
@@ -318,8 +595,11 @@ struct nft_expr_type {
const struct nla_policy *policy;
unsigned int maxattr;
u8 family;
+ u8 flags;
};
+#define NFT_EXPR_STATEFUL 0x1
+
/**
* struct nft_expr_ops - nf_tables expression operations
*
@@ -335,7 +615,7 @@ struct nft_expr_type {
struct nft_expr;
struct nft_expr_ops {
void (*eval)(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
unsigned int size;
@@ -373,6 +653,18 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
return (void *)expr->data;
}
+struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
+ const struct nlattr *nla);
+void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
+int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
+ const struct nft_expr *expr);
+
+static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+{
+ __module_get(src->ops->type->owner);
+ memcpy(dst, src, src->ops->size);
+}
+
/**
* struct nft_rule - nf_tables rule
*
@@ -380,7 +672,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
* @handle: rule handle
* @genmask: generation mask
* @dlen: length of expression data
- * @ulen: length of user data (used for comments)
+ * @udata: user data is appended to the rule
* @data: expression data
*/
struct nft_rule {
@@ -388,79 +680,11 @@ struct nft_rule {
u64 handle:42,
genmask:2,
dlen:12,
- ulen:8;
+ udata:1;
unsigned char data[]
__attribute__((aligned(__alignof__(struct nft_expr))));
};
-/**
- * struct nft_trans - nf_tables object update in transaction
- *
- * @list: used internally
- * @msg_type: message type
- * @ctx: transaction context
- * @data: internal information related to the transaction
- */
-struct nft_trans {
- struct list_head list;
- int msg_type;
- struct nft_ctx ctx;
- char data[0];
-};
-
-struct nft_trans_rule {
- struct nft_rule *rule;
-};
-
-#define nft_trans_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->rule)
-
-struct nft_trans_set {
- struct nft_set *set;
- u32 set_id;
-};
-
-#define nft_trans_set(trans) \
- (((struct nft_trans_set *)trans->data)->set)
-#define nft_trans_set_id(trans) \
- (((struct nft_trans_set *)trans->data)->set_id)
-
-struct nft_trans_chain {
- bool update;
- char name[NFT_CHAIN_MAXNAMELEN];
- struct nft_stats __percpu *stats;
- u8 policy;
-};
-
-#define nft_trans_chain_update(trans) \
- (((struct nft_trans_chain *)trans->data)->update)
-#define nft_trans_chain_name(trans) \
- (((struct nft_trans_chain *)trans->data)->name)
-#define nft_trans_chain_stats(trans) \
- (((struct nft_trans_chain *)trans->data)->stats)
-#define nft_trans_chain_policy(trans) \
- (((struct nft_trans_chain *)trans->data)->policy)
-
-struct nft_trans_table {
- bool update;
- bool enable;
-};
-
-#define nft_trans_table_update(trans) \
- (((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_enable(trans) \
- (((struct nft_trans_table *)trans->data)->enable)
-
-struct nft_trans_elem {
- struct nft_set *set;
- struct nft_set_elem elem;
-};
-
-#define nft_trans_elem_set(trans) \
- (((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem(trans) \
- (((struct nft_trans_elem *)trans->data)->elem)
-
static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
{
return (struct nft_expr *)&rule->data[0];
@@ -476,7 +700,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
return (struct nft_expr *)&rule->data[rule->dlen];
}
-static inline void *nft_userdata(const struct nft_rule *rule)
+static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
{
return (void *)&rule->data[rule->dlen];
}
@@ -501,7 +725,6 @@ enum nft_chain_flags {
*
* @rules: list of rules in the chain
* @list: used internally
- * @net: net namespace that this chain belongs to
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
@@ -512,7 +735,6 @@ enum nft_chain_flags {
struct nft_chain {
struct list_head rules;
struct list_head list;
- struct net *net;
struct nft_table *table;
u64 handle;
u32 use;
@@ -528,8 +750,29 @@ enum nft_chain_type {
NFT_CHAIN_T_MAX
};
+/**
+ * struct nf_chain_type - nf_tables chain type info
+ *
+ * @name: name of the type
+ * @type: numeric identifier
+ * @family: address family
+ * @owner: module owner
+ * @hook_mask: mask of valid hooks
+ * @hooks: hookfn overrides
+ */
+struct nf_chain_type {
+ const char *name;
+ enum nft_chain_type type;
+ int family;
+ struct module *owner;
+ unsigned int hook_mask;
+ nf_hookfn *hooks[NF_MAX_HOOKS];
+};
+
int nft_chain_validate_dependency(const struct nft_chain *chain,
enum nft_chain_type type);
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+ unsigned int hook_flags);
struct nft_stats {
u64 bytes;
@@ -538,22 +781,28 @@ struct nft_stats {
};
#define NFT_HOOK_OPS_MAX 2
+#define NFT_BASECHAIN_DISABLED (1 << 0)
/**
* struct nft_base_chain - nf_tables base chain
*
* @ops: netfilter hook ops
+ * @pnet: net namespace that this chain belongs to
* @type: chain type
* @policy: default policy
* @stats: per-cpu chain stats
* @chain: the chain
+ * @dev_name: device name that this base chain is attached to (if any)
*/
struct nft_base_chain {
struct nf_hook_ops ops[NFT_HOOK_OPS_MAX];
+ possible_net_t pnet;
const struct nf_chain_type *type;
u8 policy;
+ u8 flags;
struct nft_stats __percpu *stats;
struct nft_chain chain;
+ char dev_name[IFNAMSIZ];
};
static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
@@ -561,6 +810,11 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai
return container_of(chain, struct nft_base_chain, chain);
}
+int nft_register_basechain(struct nft_base_chain *basechain,
+ unsigned int hook_nops);
+void nft_unregister_basechain(struct nft_base_chain *basechain,
+ unsigned int hook_nops);
+
unsigned int nft_do_chain(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops);
@@ -582,7 +836,11 @@ struct nft_table {
u64 hgenerator;
u32 use;
u16 flags;
- char name[];
+ char name[NFT_TABLE_MAXNAMELEN];
+};
+
+enum nft_af_flags {
+ NFT_AF_NEEDS_DEV = (1 << 0),
};
/**
@@ -593,6 +851,7 @@ struct nft_table {
* @nhooks: number of hooks in this family
* @owner: module owner
* @tables: used internally
+ * @flags: family flags
* @nops: number of hook ops in this family
* @hook_ops_init: initialization function for chain hook ops
* @hooks: hookfn overrides for packet validation
@@ -603,6 +862,7 @@ struct nft_af_info {
unsigned int nhooks;
struct module *owner;
struct list_head tables;
+ u32 flags;
unsigned int nops;
void (*hook_ops_init)(struct nf_hook_ops *,
unsigned int);
@@ -612,25 +872,6 @@ struct nft_af_info {
int nft_register_afinfo(struct net *, struct nft_af_info *);
void nft_unregister_afinfo(struct nft_af_info *);
-/**
- * struct nf_chain_type - nf_tables chain type info
- *
- * @name: name of the type
- * @type: numeric identifier
- * @family: address family
- * @owner: module owner
- * @hook_mask: mask of valid hooks
- * @hooks: hookfn overrides
- */
-struct nf_chain_type {
- const char *name;
- enum nft_chain_type type;
- int family;
- struct module *owner;
- unsigned int hook_mask;
- nf_hookfn *hooks[NF_MAX_HOOKS];
-};
-
int nft_register_chain_type(const struct nf_chain_type *);
void nft_unregister_chain_type(const struct nf_chain_type *);
@@ -655,4 +896,153 @@ void nft_unregister_expr(struct nft_expr_type *);
#define MODULE_ALIAS_NFT_SET() \
MODULE_ALIAS("nft-set")
+/*
+ * The gencursor defines two generations, the currently active and the
+ * next one. Objects contain a bitmask of 2 bits specifying the generations
+ * they're active in. A set bit means they're inactive in the generation
+ * represented by that bit.
+ *
+ * New objects start out as inactive in the current and active in the
+ * next generation. When committing the ruleset the bitmask is cleared,
+ * meaning they're active in all generations. When removing an object,
+ * it is set inactive in the next generation. After committing the ruleset,
+ * the objects are removed.
+ */
+static inline unsigned int nft_gencursor_next(const struct net *net)
+{
+ return net->nft.gencursor + 1 == 1 ? 1 : 0;
+}
+
+static inline u8 nft_genmask_next(const struct net *net)
+{
+ return 1 << nft_gencursor_next(net);
+}
+
+static inline u8 nft_genmask_cur(const struct net *net)
+{
+ /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */
+ return 1 << ACCESS_ONCE(net->nft.gencursor);
+}
+
+#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1))
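As a concrete reading of the rules described above, a hedged sketch of the three state transitions in terms of the helpers; genmask stands for the 2-bit field embedded in each object, the function names are illustrative, and on commit the core simply clears the mask:

	static u8 example_obj_birth(const struct net *net)
	{
		/* inactive in the current generation, active in the next one */
		return nft_genmask_cur(net);
	}

	static void example_obj_delete(const struct net *net, u8 *genmask)
	{
		/* stays visible now, disappears once the next generation is committed */
		*genmask |= nft_genmask_next(net);
	}

	static bool example_obj_visible(const struct net *net, u8 genmask)
	{
		/* a clear bit means "active" in that generation */
		return !(genmask & nft_genmask_cur(net));
	}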
+
+/*
+ * Set element transaction helpers
+ */
+
+static inline bool nft_set_elem_active(const struct nft_set_ext *ext,
+ u8 genmask)
+{
+ return !(ext->genmask & genmask);
+}
+
+static inline void nft_set_elem_change_active(const struct nft_set *set,
+ struct nft_set_ext *ext)
+{
+ ext->genmask ^= nft_genmask_next(read_pnet(&set->pnet));
+}
+
+/*
+ * We use a free bit in the genmask field to indicate the element
+ * is busy, meaning it is currently being processed either by
+ * the netlink API or GC.
+ *
+ * Even though the genmask is only a single byte wide, this works
+ * because the extension structure is fully constant once initialized,
+ * so there are no non-atomic write accesses unless it is already
+ * marked busy.
+ */
+#define NFT_SET_ELEM_BUSY_MASK (1 << 2)
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_BUSY_BIT 2
+#elif defined(__BIG_ENDIAN_BITFIELD)
+#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
+#else
+#error
+#endif
+
+static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
+{
+ unsigned long *word = (unsigned long *)ext;
+
+ BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+ return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
+}
+
+static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+{
+ unsigned long *word = (unsigned long *)ext;
+
+ clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
+}
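A hedged sketch of how a walker (for instance a GC pass) would use the busy bit to claim an element before touching it; the function name is illustrative:

	static bool example_try_claim(struct nft_set_ext *ext)
	{
		if (nft_set_elem_mark_busy(ext))
			return false;	/* already claimed by netlink or another GC pass */

		/* ... safe to inspect or queue the element for removal here ... */

		nft_set_elem_clear_busy(ext);
		return true;
	}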
+
+/**
+ * struct nft_trans - nf_tables object update in transaction
+ *
+ * @list: used internally
+ * @msg_type: message type
+ * @ctx: transaction context
+ * @data: internal information related to the transaction
+ */
+struct nft_trans {
+ struct list_head list;
+ int msg_type;
+ struct nft_ctx ctx;
+ char data[0];
+};
+
+struct nft_trans_rule {
+ struct nft_rule *rule;
+};
+
+#define nft_trans_rule(trans) \
+ (((struct nft_trans_rule *)trans->data)->rule)
+
+struct nft_trans_set {
+ struct nft_set *set;
+ u32 set_id;
+};
+
+#define nft_trans_set(trans) \
+ (((struct nft_trans_set *)trans->data)->set)
+#define nft_trans_set_id(trans) \
+ (((struct nft_trans_set *)trans->data)->set_id)
+
+struct nft_trans_chain {
+ bool update;
+ char name[NFT_CHAIN_MAXNAMELEN];
+ struct nft_stats __percpu *stats;
+ u8 policy;
+};
+
+#define nft_trans_chain_update(trans) \
+ (((struct nft_trans_chain *)trans->data)->update)
+#define nft_trans_chain_name(trans) \
+ (((struct nft_trans_chain *)trans->data)->name)
+#define nft_trans_chain_stats(trans) \
+ (((struct nft_trans_chain *)trans->data)->stats)
+#define nft_trans_chain_policy(trans) \
+ (((struct nft_trans_chain *)trans->data)->policy)
+
+struct nft_trans_table {
+ bool update;
+ bool enable;
+};
+
+#define nft_trans_table_update(trans) \
+ (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_table_enable(trans) \
+ (((struct nft_trans_table *)trans->data)->enable)
+
+struct nft_trans_elem {
+ struct nft_set *set;
+ struct nft_set_elem elem;
+};
+
+#define nft_trans_elem_set(trans) \
+ (((struct nft_trans_elem *)trans->data)->set)
+#define nft_trans_elem(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem)
+
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a75fc8e..c6f400c 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -31,6 +31,9 @@ void nft_cmp_module_exit(void);
int nft_lookup_module_init(void);
void nft_lookup_module_exit(void);
+int nft_dynset_module_init(void);
+void nft_dynset_module_exit(void);
+
int nft_bitwise_module_init(void);
void nft_bitwise_module_exit(void);
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index cba143f..2df7f96 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -8,12 +8,11 @@ static inline void
nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
struct iphdr *ip;
- nft_set_pktinfo(pkt, ops, skb, in, out);
+ nft_set_pktinfo(pkt, ops, skb, state);
ip = ip_hdr(pkt->skb);
pkt->tprot = ip->protocol;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 74d9761..97db2e3 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -8,13 +8,12 @@ static inline int
nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
const struct nf_hook_ops *ops,
struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out)
+ const struct nf_hook_state *state)
{
int protohdr, thoff = 0;
unsigned short frag_off;
- nft_set_pktinfo(pkt, ops, skb, in, out);
+ nft_set_pktinfo(pkt, ops, skb, state);
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
/* If malformed, drop it */
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index 0ee47c3..711887a 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -26,11 +26,11 @@ int nft_meta_set_dump(struct sk_buff *skb,
const struct nft_expr *expr);
void nft_meta_get_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
void nft_meta_set_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
+ struct nft_regs *regs,
const struct nft_pktinfo *pkt);
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 6415835..2a5dbcc 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
+#include <linux/in6.h>
/* ========================================================================
* Netlink Messages and Attributes Interface (As Seen On TV)
@@ -105,6 +106,8 @@
* nla_put_string(skb, type, str) add string attribute to skb
* nla_put_flag(skb, type) add flag attribute to skb
* nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
+ * nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb
+ * nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
*
* Nested Attributes Construction:
* nla_nest_start(skb, type) start a nested attribute
@@ -490,14 +493,10 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
 * Corrects the netlink message header to include the appended
* attributes. Only necessary if attributes have been added to
* the message.
- *
- * Returns the total data length of the skb.
*/
-static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
+static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
{
nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
-
- return skb->len;
}
/**
@@ -520,8 +519,10 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
*/
static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
{
- if (mark)
+ if (mark) {
+ WARN_ON((unsigned char *) mark < skb->data);
skb_trim(skb, (unsigned char *) mark - skb->data);
+ }
}
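With nlmsg_end() now returning void, the usual construction pattern only changes by ignoring a return value. A hedged sketch (the attribute type 1 and the function name are made up; nlmsg_cancel() is the existing wrapper around nlmsg_trim()):

	static int example_fill_msg(struct sk_buff *skb, u32 portid, u32 seq)
	{
		struct nlmsghdr *nlh;

		nlh = nlmsg_put(skb, portid, seq, NLMSG_MIN_TYPE, 0, 0);
		if (nlh == NULL)
			return -EMSGSIZE;

		if (nla_put_u32(skb, 1, 42))	/* hypothetical attribute */
			goto cancel;

		nlmsg_end(skb, nlh);		/* only fixes up nlmsg_len now */
		return 0;

	cancel:
		nlmsg_cancel(skb, nlh);		/* wraps nlmsg_trim(): drop the partial message */
		return -EMSGSIZE;
	}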
/**
@@ -959,6 +960,32 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
}
/**
+ * nla_put_in_addr - Add an IPv4 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv4 address
+ */
+static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
+ __be32 addr)
+{
+ return nla_put_be32(skb, attrtype, addr);
+}
+
+/**
+ * nla_put_in6_addr - Add an IPv6 address netlink attribute to a socket
+ * buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @addr: IPv6 address
+ */
+static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
+ const struct in6_addr *addr)
+{
+ return nla_put(skb, attrtype, sizeof(*addr), addr);
+}
+
+/**
* nla_get_u32 - return payload of u32 attribute
* @nla: u32 netlink attribute
*/
@@ -1101,6 +1128,27 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla)
}
/**
+ * nla_get_in_addr - return payload of IPv4 address attribute
+ * @nla: IPv4 address netlink attribute
+ */
+static inline __be32 nla_get_in_addr(const struct nlattr *nla)
+{
+ return *(__be32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_in6_addr - return payload of IPv6 address attribute
+ * @nla: IPv6 address netlink attribute
+ */
+static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
+{
+ struct in6_addr tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+ return tmp;
+}
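A hedged usage sketch for the new address helpers (the attribute numbers are made up):

	static int example_put_addrs(struct sk_buff *skb, __be32 v4,
				     const struct in6_addr *v6)
	{
		/* readers use nla_get_in_addr()/nla_get_in6_addr() on the same attributes */
		if (nla_put_in_addr(skb, 1, v4) ||	/* 4-byte big-endian payload */
		    nla_put_in6_addr(skb, 2, v6))	/* 16 bytes, struct in6_addr */
			return -EMSGSIZE;
		return 0;
	}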
+
+/**
* nla_nest_start - Start a new level of nested attributes
* @skb: socket buffer to add attributes to
* @attrtype: attribute type of container
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 0931618..70e1585 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -38,11 +38,9 @@ static inline void *net_generic(const struct net *net, int id)
rcu_read_lock();
ng = rcu_dereference(net->gen);
- BUG_ON(id == 0 || id > ng->len);
ptr = ng->ptr[id - 1];
rcu_read_unlock();
- BUG_ON(!ptr);
return ptr;
}
#endif
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index c06ac58..69a6715 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -5,7 +5,7 @@
struct net;
-static inline unsigned int net_hash_mix(struct net *net)
+static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
/*
@@ -13,7 +13,7 @@ static inline unsigned int net_hash_mix(struct net *net)
* always zeroed
*/
- return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT);
+ return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
#else
return 0;
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 24945ce..c68926b 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -7,6 +7,7 @@
#include <linux/uidgid.h>
#include <net/inet_frag.h>
+#include <linux/rcupdate.h>
struct tcpm_hash_bucket;
struct ctl_table_header;
@@ -18,6 +19,7 @@ struct sock;
struct local_ports {
seqlock_t lock;
int range[2];
+ bool warned;
};
struct ping_group_range {
@@ -38,20 +40,22 @@ struct netns_ipv4 {
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
bool fib_has_custom_rules;
- struct fib_table *fib_local;
- struct fib_table *fib_main;
- struct fib_table *fib_default;
+ struct fib_table __rcu *fib_local;
+ struct fib_table __rcu *fib_main;
+ struct fib_table __rcu *fib_default;
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
int fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
+ bool fib_offload_disabled;
struct sock *fibnl;
- struct sock **icmp_sk;
+ struct sock * __percpu *icmp_sk;
+ struct sock *mc_autojoin_sk;
+
struct inet_peer_base *peers;
- struct tcpm_hash_bucket *tcp_metrics_hash;
- unsigned int tcp_metrics_hash_log;
+ struct sock * __percpu *tcp_sk;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
struct xt_table *iptable_filter;
@@ -74,12 +78,18 @@ struct netns_ipv4 {
struct local_ports ip_local_ports;
int sysctl_tcp_ecn;
+ int sysctl_tcp_ecn_fallback;
+
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
int sysctl_ip_nonlocal_bind;
int sysctl_fwmark_reflect;
int sysctl_tcp_fwmark_accept;
+ int sysctl_tcp_mtu_probing;
+ int sysctl_tcp_base_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
struct ping_group_range ping_group_range;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 69ae41f..8d93544 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -32,6 +32,9 @@ struct netns_sysctl_ipv6 {
int icmpv6_time;
int anycast_src_echo_reply;
int fwmark_reflect;
+ int idgen_retries;
+ int idgen_delay;
+ int flowlabel_state_ranges;
};
struct netns_ipv6 {
@@ -67,6 +70,7 @@ struct netns_ipv6 {
struct sock *ndisc_sk;
struct sock *tcp_sk;
struct sock *igmp_sk;
+ struct sock *mc_autojoin_sk;
#ifdef CONFIG_IPV6_MROUTE
#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
struct mr6_table *mrt6;
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
new file mode 100644
index 0000000..d292036
--- /dev/null
+++ b/include/net/netns/mpls.h
@@ -0,0 +1,17 @@
+/*
+ * mpls in net namespaces
+ */
+
+#ifndef __NETNS_MPLS_H__
+#define __NETNS_MPLS_H__
+
+struct mpls_route;
+struct ctl_table_header;
+
+struct netns_mpls {
+ size_t platform_labels;
+ struct mpls_route __rcu * __rcu *platform_label;
+ struct ctl_table_header *ctl;
+};
+
+#endif /* __NETNS_MPLS_H__ */
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 8874002..532e4ba 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -1,9 +1,9 @@
#ifndef __NETNS_NETFILTER_H
#define __NETNS_NETFILTER_H
-#include <linux/proc_fs.h>
-#include <linux/netfilter.h>
+#include <linux/netfilter_defs.h>
+struct proc_dir_entry;
struct nf_logger;
struct netns_nf {
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index eee608b..c807811 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,6 +13,7 @@ struct netns_nftables {
struct nft_af_info *inet;
struct nft_af_info *arp;
struct nft_af_info *bridge;
+ struct nft_af_info *netdev;
unsigned int base_seq;
u8 gencursor;
};
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 3573a81..8ba379f 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -31,6 +31,7 @@ struct netns_sctp {
struct list_head addr_waitq;
struct timer_list addr_wq_timer;
struct list_head auto_asconf_splist;
+ /* Lock that protects both addr_waitq and auto_asconf_splist */
spinlock_t addr_wq_lock;
/* Lock that protects the local_addr_list writers */
diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h
index c24060e..c8a7681 100644
--- a/include/net/netns/x_tables.h
+++ b/include/net/netns/x_tables.h
@@ -2,13 +2,14 @@
#define __NETNS_X_TABLES_H
#include <linux/list.h>
-#include <linux/netfilter.h>
+#include <linux/netfilter_defs.h>
struct ebt_table;
struct netns_xt {
struct list_head tables[NFPROTO_NUMPROTO];
bool notrack_deprecated_warning;
+ bool clusterip_deprecated_warning;
#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
struct ebt_table *broute_table;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 14bd0e1..316694d 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -51,8 +51,10 @@ struct nfc_hci_ops {
int (*tm_send)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
int (*check_presence)(struct nfc_hci_dev *hdev,
struct nfc_target *target);
- int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+ int (*event_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
struct sk_buff *skb);
+ void (*cmd_received)(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
int (*discover_se)(struct nfc_hci_dev *dev);
int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
@@ -63,8 +65,10 @@ struct nfc_hci_ops {
};
/* Pipes */
-#define NFC_HCI_INVALID_PIPE 0x80
#define NFC_HCI_DO_NOT_CREATE_PIPE 0x81
+#define NFC_HCI_INVALID_PIPE 0x80
+#define NFC_HCI_INVALID_GATE 0xFF
+#define NFC_HCI_INVALID_HOST 0x80
#define NFC_HCI_LINK_MGMT_PIPE 0x00
#define NFC_HCI_ADMIN_PIPE 0x01
@@ -73,7 +77,17 @@ struct nfc_hci_gate {
u8 pipe;
};
+struct nfc_hci_pipe {
+ u8 gate;
+ u8 dest_host;
+};
+
#define NFC_HCI_MAX_CUSTOM_GATES 50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
+#define NFC_HCI_MAX_PIPES 127
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
@@ -125,6 +139,7 @@ struct nfc_hci_dev {
void *clientdata;
u8 gate2pipe[NFC_HCI_MAX_GATES];
+ struct nfc_hci_pipe pipes[NFC_HCI_MAX_PIPES];
u8 sw_romlib;
u8 sw_patch;
@@ -164,9 +179,18 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev);
void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
+static inline int nfc_hci_set_vendor_cmds(struct nfc_hci_dev *hdev,
+ struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ return nfc_set_vendor_cmds(hdev->ndev, cmds, n_cmds);
+}
+
void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
int nfc_hci_result_to_errno(u8 result);
+void nfc_hci_reset_pipes(struct nfc_hci_dev *dev);
+void nfc_hci_reset_pipes_per_host(struct nfc_hci_dev *hdev, u8 host);
/* Host IDs */
#define NFC_HCI_HOST_CONTROLLER_ID 0x00
@@ -219,6 +243,12 @@ int nfc_hci_result_to_errno(u8 result);
#define NFC_HCI_EVT_POST_DATA 0x02
#define NFC_HCI_EVT_HOT_PLUG 0x03
+/* Generic commands */
+#define NFC_HCI_ANY_SET_PARAMETER 0x01
+#define NFC_HCI_ANY_GET_PARAMETER 0x02
+#define NFC_HCI_ANY_OPEN_PIPE 0x03
+#define NFC_HCI_ANY_CLOSE_PIPE 0x04
+
/* Reader RF gates events */
#define NFC_HCI_EVT_READER_REQUESTED 0x10
#define NFC_HCI_EVT_END_OPERATION 0x11
@@ -249,8 +279,6 @@ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
const u8 *param, size_t param_len,
data_exchange_cb_t cb, void *cb_context);
-int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
- const u8 *param, size_t param_len);
int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
const u8 *param, size_t param_len);
int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate);
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index e7257a4..75d2e18 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -35,6 +35,7 @@
#define NCI_MAX_NUM_RF_CONFIGS 10
#define NCI_MAX_NUM_CONN 10
#define NCI_MAX_PARAM_LEN 251
+#define NCI_MAX_PACKET_SIZE 258
/* NCI Status Codes */
#define NCI_STATUS_OK 0x00
@@ -62,6 +63,25 @@
#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2
#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3
+/* NFCEE Interface/Protocols */
+#define NCI_NFCEE_INTERFACE_APDU 0x00
+#define NCI_NFCEE_INTERFACE_HCI_ACCESS 0x01
+#define NCI_NFCEE_INTERFACE_TYPE3_CMD_SET 0x02
+#define NCI_NFCEE_INTERFACE_TRANSPARENT 0x03
+
+/* Destination type */
+#define NCI_DESTINATION_NFCC_LOOPBACK 0x01
+#define NCI_DESTINATION_REMOTE_NFC_ENDPOINT 0x02
+#define NCI_DESTINATION_NFCEE 0x03
+
+/* Destination-specific parameters type */
+#define NCI_DESTINATION_SPECIFIC_PARAM_RF_TYPE 0x00
+#define NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE 0x01
+
+/* NFCEE Discovery Action */
+#define NCI_NFCEE_DISCOVERY_ACTION_DISABLE 0x00
+#define NCI_NFCEE_DISCOVERY_ACTION_ENABLE 0x01
+
/* NCI RF Technology and Mode */
#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00
#define NCI_NFC_B_PASSIVE_POLL_MODE 0x01
@@ -224,6 +244,28 @@ struct nci_core_set_config_cmd {
struct set_config_param param; /* support 1 param per cmd is enough */
} __packed;
+#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04)
+#define DEST_SPEC_PARAMS_ID_INDEX 0
+#define DEST_SPEC_PARAMS_PROTOCOL_INDEX 1
+struct dest_spec_params {
+ __u8 id;
+ __u8 protocol;
+} __packed;
+
+struct core_conn_create_dest_spec_params {
+ __u8 type;
+ __u8 length;
+ __u8 value[0];
+} __packed;
+
+struct nci_core_conn_create_cmd {
+ __u8 destination_type;
+ __u8 number_destination_params;
+ struct core_conn_create_dest_spec_params params[0];
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x05)
+
#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
struct disc_map_config {
__u8 rf_protocol;
@@ -260,6 +302,19 @@ struct nci_rf_deactivate_cmd {
__u8 type;
} __packed;
+#define NCI_OP_NFCEE_DISCOVER_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_discover_cmd {
+ __u8 discovery_action;
+} __packed;
+
+#define NCI_OP_NFCEE_MODE_SET_CMD nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
+#define NCI_NFCEE_DISABLE 0x00
+#define NCI_NFCEE_ENABLE 0x01
+struct nci_nfcee_mode_set_cmd {
+ __u8 nfcee_id;
+ __u8 nfcee_mode;
+} __packed;
+
/* ----------------------- */
/* ---- NCI Responses ---- */
/* ----------------------- */
@@ -295,6 +350,16 @@ struct nci_core_set_config_rsp {
__u8 params_id[0]; /* variable size array */
} __packed;
+#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
+struct nci_core_conn_create_rsp {
+ __u8 status;
+ __u8 max_ctrl_pkt_payload_len;
+ __u8 credits_cnt;
+ __u8 conn_id;
+} __packed;
+
+#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x05)
+
#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -303,6 +368,13 @@ struct nci_core_set_config_rsp {
#define NCI_OP_RF_DEACTIVATE_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+#define NCI_OP_NFCEE_DISCOVER_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_discover_rsp {
+ __u8 status;
+ __u8 num_nfcee;
+} __packed;
+
+#define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
/* --------------------------- */
/* ---- NCI Notifications ---- */
/* --------------------------- */
@@ -430,4 +502,30 @@ struct nci_rf_deactivate_ntf {
__u8 reason;
} __packed;
+#define NCI_OP_RF_NFCEE_ACTION_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x09)
+struct nci_rf_nfcee_action_ntf {
+ __u8 nfcee_id;
+ __u8 trigger;
+ __u8 supported_data_length;
+ __u8 supported_data[0];
+} __packed;
+
+#define NCI_OP_NFCEE_DISCOVER_NTF nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
+struct nci_nfcee_supported_protocol {
+ __u8 num_protocol;
+ __u8 supported_protocol[0];
+} __packed;
+
+struct nci_nfcee_information_tlv {
+ __u8 num_tlv;
+ __u8 information_tlv[0];
+} __packed;
+
+struct nci_nfcee_discover_ntf {
+ __u8 nfcee_id;
+ __u8 nfcee_status;
+ struct nci_nfcee_supported_protocol supported_protocols;
+ struct nci_nfcee_information_tlv information_tlv;
+} __packed;
+
#endif /* __NCI_H */
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 9e51bb4..01fc8c5 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/skbuff.h>
+#include <linux/tty.h>
#include <net/nfc/nfc.h>
#include <net/nfc/nci.h>
@@ -66,11 +67,19 @@ enum nci_state {
struct nci_dev;
+struct nci_prop_ops {
+ __u16 opcode;
+ int (*rsp)(struct nci_dev *dev, struct sk_buff *skb);
+ int (*ntf)(struct nci_dev *dev, struct sk_buff *skb);
+};
+
struct nci_ops {
+ int (*init)(struct nci_dev *ndev);
int (*open)(struct nci_dev *ndev);
int (*close)(struct nci_dev *ndev);
int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
int (*setup)(struct nci_dev *ndev);
+ int (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
__u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
int (*discover_se)(struct nci_dev *ndev);
int (*disable_se)(struct nci_dev *ndev, u32 se_idx);
@@ -78,15 +87,115 @@ struct nci_ops {
int (*se_io)(struct nci_dev *ndev, u32 se_idx,
u8 *apdu, size_t apdu_length,
se_io_cb_t cb, void *cb_context);
+ int (*hci_load_session)(struct nci_dev *ndev);
+ void (*hci_event_received)(struct nci_dev *ndev, u8 pipe, u8 event,
+ struct sk_buff *skb);
+ void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
+ struct sk_buff *skb);
+
+ struct nci_prop_ops *prop_ops;
+ size_t n_prop_ops;
};
#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
#define NCI_MAX_DISCOVERED_TARGETS 10
+#define NCI_MAX_NUM_NFCEE 255
+#define NCI_MAX_CONN_ID 7
+#define NCI_MAX_PROPRIETARY_CMD 64
+
+struct nci_conn_info {
+ struct list_head list;
+ __u8 id; /* can be an RF Discovery ID or an NFCEE ID */
+ __u8 conn_id;
+ __u8 max_pkt_payload_len;
+
+ atomic_t credits_cnt;
+ __u8 initial_num_credits;
+
+ data_exchange_cb_t data_exchange_cb;
+ void *data_exchange_cb_context;
+
+ struct sk_buff *rx_skb;
+};
+
+#define NCI_INVALID_CONN_ID 0x80
+
+#define NCI_HCI_ANY_OPEN_PIPE 0x03
+
+/* Gates */
+#define NCI_HCI_ADMIN_GATE 0x00
+#define NCI_HCI_LINK_MGMT_GATE 0x06
+
+/* Pipes */
+#define NCI_HCI_LINK_MGMT_PIPE 0x00
+#define NCI_HCI_ADMIN_PIPE 0x01
+
+/* Generic responses */
+#define NCI_HCI_ANY_OK 0x00
+#define NCI_HCI_ANY_E_NOT_CONNECTED 0x01
+#define NCI_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02
+#define NCI_HCI_ANY_E_NOK 0x03
+#define NCI_HCI_ANY_E_PIPES_FULL 0x04
+#define NCI_HCI_ANY_E_REG_PAR_UNKNOWN 0x05
+#define NCI_HCI_ANY_E_PIPE_NOT_OPENED 0x06
+#define NCI_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07
+#define NCI_HCI_ANY_E_INHIBITED 0x08
+#define NCI_HCI_ANY_E_TIMEOUT 0x09
+#define NCI_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
+#define NCI_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
+
+#define NCI_HCI_DO_NOT_OPEN_PIPE 0x81
+#define NCI_HCI_INVALID_PIPE 0x80
+#define NCI_HCI_INVALID_GATE 0xFF
+#define NCI_HCI_INVALID_HOST 0x80
+
+#define NCI_HCI_MAX_CUSTOM_GATES 50
+/*
+ * According to specification 102 622 chapter 4.4 Pipes,
+ * the pipe identifier is 7 bits long.
+ */
+#define NCI_HCI_MAX_PIPES 127
+
+struct nci_hci_gate {
+ u8 gate;
+ u8 pipe;
+ u8 dest_host;
+} __packed;
+
+struct nci_hci_pipe {
+ u8 gate;
+ u8 host;
+} __packed;
+
+struct nci_hci_init_data {
+ u8 gate_count;
+ struct nci_hci_gate gates[NCI_HCI_MAX_CUSTOM_GATES];
+ char session_id[9];
+};
+
+#define NCI_HCI_MAX_GATES 256
+
+struct nci_hci_dev {
+ u8 nfcee_id;
+ struct nci_dev *ndev;
+ struct nci_conn_info *conn_info;
+
+ struct nci_hci_init_data init_data;
+ struct nci_hci_pipe pipes[NCI_HCI_MAX_PIPES];
+ u8 gate2pipe[NCI_HCI_MAX_GATES];
+ int expected_pipes;
+ int count_pipes;
+
+ struct sk_buff_head rx_hcp_frags;
+ struct work_struct msg_rx_work;
+ struct sk_buff_head msg_rx_queue;
+};
/* NCI Core structures */
struct nci_dev {
struct nfc_dev *nfc_dev;
struct nci_ops *ops;
+ struct nci_hci_dev *hci_dev;
int tx_headroom;
int tx_tailroom;
@@ -95,7 +204,10 @@ struct nci_dev {
unsigned long flags;
atomic_t cmd_cnt;
- atomic_t credits_cnt;
+ __u8 cur_conn_id;
+
+ struct list_head conn_info_list;
+ struct nci_conn_info *rf_conn_info;
struct timer_list cmd_timer;
struct timer_list data_timer;
@@ -141,13 +253,10 @@ struct nci_dev {
__u8 manufact_id;
__u32 manufact_specific_info;
- /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */
- __u8 max_data_pkt_payload_size;
- __u8 initial_num_credits;
+ /* Save RF Discovery ID or NFCEE ID under conn_create */
+ __u8 cur_id;
/* stored during nci_data_exchange */
- data_exchange_cb_t data_exchange_cb;
- void *data_exchange_cb_context;
struct sk_buff *rx_data_reassembly;
/* stored during intf_activated_ntf */
@@ -163,9 +272,38 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
void nci_free_device(struct nci_dev *ndev);
int nci_register_device(struct nci_dev *ndev);
void nci_unregister_device(struct nci_dev *ndev);
+int nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev,
+ unsigned long opt),
+ unsigned long opt, __u32 timeout);
+int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
+int nci_nfcee_discover(struct nci_dev *ndev, u8 action);
+int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode);
+int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
+ u8 number_destination_params,
+ size_t params_len,
+ struct core_conn_create_dest_spec_params *params);
+int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
+
+struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
+ const u8 *param, size_t param_len);
+int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
+ u8 cmd, const u8 *param, size_t param_len,
+ struct sk_buff **skb);
+int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe);
+int nci_hci_connect_gate(struct nci_dev *ndev, u8 dest_host,
+ u8 dest_gate, u8 pipe);
+int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ const u8 *param, size_t param_len);
+int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ struct sk_buff **skb);
+int nci_hci_dev_session_init(struct nci_dev *ndev);
+
static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
unsigned int len,
gfp_t how)
@@ -194,13 +332,26 @@ static inline void *nci_get_drvdata(struct nci_dev *ndev)
return ndev->driver_data;
}
+static inline int nci_set_vendor_cmds(struct nci_dev *ndev,
+ struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ return nfc_set_vendor_cmds(ndev->nfc_dev, cmds, n_cmds);
+}
+
void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb);
void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
+ struct sk_buff *skb);
+int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+ struct sk_buff *skb);
void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
- int err);
+ __u8 conn_id, int err);
+void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err);
+
void nci_clear_target_list(struct nci_dev *ndev);
/* ----- NCI requests ----- */
@@ -209,6 +360,8 @@ void nci_clear_target_list(struct nci_dev *ndev);
#define NCI_REQ_CANCELED 2
void nci_req_complete(struct nci_dev *ndev, int result);
+struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
+ int conn_id);
/* ----- NCI status code ----- */
int nci_to_errno(__u8 code);
@@ -239,4 +392,50 @@ int nci_spi_send(struct nci_spi *nspi,
struct sk_buff *skb);
struct sk_buff *nci_spi_read(struct nci_spi *nspi);
+/* ----- NCI UART ---- */
+
+/* Ioctl */
+#define NCIUARTSETDRIVER _IOW('U', 0, char *)
+
+enum nci_uart_driver {
+ NCI_UART_DRIVER_MARVELL = 0,
+ NCI_UART_DRIVER_MAX
+};
+
+struct nci_uart;
+
+struct nci_uart_ops {
+ int (*open)(struct nci_uart *nci_uart);
+ void (*close)(struct nci_uart *nci_uart);
+ int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb);
+ int (*recv_buf)(struct nci_uart *nci_uart, const u8 *data, char *flags,
+ int count);
+ int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb);
+ void (*tx_start)(struct nci_uart *nci_uart);
+ void (*tx_done)(struct nci_uart *nci_uart);
+};
+
+struct nci_uart {
+ struct module *owner;
+ struct nci_uart_ops ops;
+ const char *name;
+ enum nci_uart_driver driver;
+
+ /* Dynamic data */
+ struct nci_dev *ndev;
+ spinlock_t rx_lock;
+ struct work_struct write_work;
+ struct tty_struct *tty;
+ unsigned long tx_state;
+ struct sk_buff_head tx_q;
+ struct sk_buff *tx_skb;
+ struct sk_buff *rx_skb;
+ int rx_packet_len;
+ void *drv_data;
+};
+
+int nci_uart_register(struct nci_uart *nu);
+void nci_uart_unregister(struct nci_uart *nu);
+void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl);
+
#endif /* __NCI_CORE_H */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 12adb81..f9e58ae 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -135,11 +135,42 @@ struct nfc_se {
u16 state;
};
+/**
+ * nfc_evt_transaction - A struct for NFC secure element event transaction.
+ *
+ * @aid: The application identifier triggering the event
+ *
+ * @aid_len: The application identifier length [5:16]
+ *
+ * @params: The application parameters transmitted during the transaction
+ *
+ * @params_len: The application parameters length [0:255]
+ *
+ */
+#define NFC_MIN_AID_LENGTH 5
+#define NFC_MAX_AID_LENGTH 16
+#define NFC_MAX_PARAMS_LENGTH 255
+
+#define NFC_EVT_TRANSACTION_AID_TAG 0x81
+#define NFC_EVT_TRANSACTION_PARAMS_TAG 0x82
+struct nfc_evt_transaction {
+ u32 aid_len;
+ u8 aid[NFC_MAX_AID_LENGTH];
+ u8 params_len;
+ u8 params[0];
+} __packed;
+
struct nfc_genl_data {
u32 poll_req_portid;
struct mutex genl_data_mutex;
};
+struct nfc_vendor_cmd {
+ __u32 vendor_id;
+ __u32 subcmd;
+ int (*doit)(struct nfc_dev *dev, void *data, size_t data_len);
+};
+
struct nfc_dev {
int idx;
u32 target_next_idx;
@@ -168,6 +199,9 @@ struct nfc_dev {
struct rfkill *rfkill;
+ struct nfc_vendor_cmd *vendor_cmds;
+ int n_vendor_cmds;
+
struct nfc_ops *ops;
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -262,6 +296,8 @@ int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
void nfc_driver_failure(struct nfc_dev *dev, int err);
+int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx,
+ struct nfc_evt_transaction *evt_transaction);
int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
@@ -269,4 +305,17 @@ struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
u8 payload_type, u8 direction);
+static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
+ struct nfc_vendor_cmd *cmds,
+ int n_cmds)
+{
+ if (dev->vendor_cmds || dev->n_vendor_cmds)
+ return -EINVAL;
+
+ dev->vendor_cmds = cmds;
+ dev->n_vendor_cmds = n_cmds;
+
+ return 0;
+}
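A hedged sketch of how a driver might register vendor commands through the helper above; the vendor id, subcommand number and handler are entirely hypothetical:

	static int example_vendor_doit(struct nfc_dev *dev, void *data, size_t data_len)
	{
		return 0;		/* vendor-specific handling would go here */
	}

	static struct nfc_vendor_cmd example_vendor_cmds[] = {
		{
			.vendor_id = 0x0042,		/* hypothetical OUI */
			.subcmd    = 1,
			.doit      = example_vendor_doit,
		},
	};

	/* e.g. from a driver's probe path:
	 *	nfc_set_vendor_cmds(dev, example_vendor_cmds,
	 *			    ARRAY_SIZE(example_vendor_cmds));
	 */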
+
#endif /* __NET_NFC_H */
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 6dbd406..b0ab530 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -82,7 +82,7 @@ enum nl802154_attrs {
NL802154_ATTR_TX_POWER,
NL802154_ATTR_CCA_MODE,
- NL802154_ATTR_CCA_MODE3_AND,
+ NL802154_ATTR_CCA_OPT,
NL802154_ATTR_CCA_ED_LEVEL,
NL802154_ATTR_MAX_FRAME_RETRIES,
@@ -100,6 +100,10 @@ enum nl802154_attrs {
NL802154_ATTR_EXTENDED_ADDR,
+ NL802154_ATTR_WPAN_PHY_CAPS,
+
+ NL802154_ATTR_SUPPORTED_COMMANDS,
+
/* add attributes here, update the policy in nl802154.c */
__NL802154_ATTR_AFTER_LAST,
@@ -119,4 +123,124 @@ enum nl802154_iftype {
NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1
};
+/**
+ * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes
+ *
+ * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved
+ * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
+ * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
+ * nl802154_wpan_phy_tx_power
+ * @NL802154_CAP_ATTR_CCA_ED_LEVELS: a nested attribute with the supported
+ *	cca_ed_level values
+ * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
+ * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
+ * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
+ * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
+ * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
+ * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value
+ * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
+ * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
+ * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
+ * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value
+ * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags
+ * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags
+ * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined
+ * @__NL802154_CAP_ATTR_AFTER_LAST: internal use
+ */
+enum nl802154_wpan_phy_capability_attr {
+ __NL802154_CAP_ATTR_INVALID,
+
+ NL802154_CAP_ATTR_IFTYPES,
+
+ NL802154_CAP_ATTR_CHANNELS,
+ NL802154_CAP_ATTR_TX_POWERS,
+
+ NL802154_CAP_ATTR_CCA_ED_LEVELS,
+ NL802154_CAP_ATTR_CCA_MODES,
+ NL802154_CAP_ATTR_CCA_OPTS,
+
+ NL802154_CAP_ATTR_MIN_MINBE,
+ NL802154_CAP_ATTR_MAX_MINBE,
+
+ NL802154_CAP_ATTR_MIN_MAXBE,
+ NL802154_CAP_ATTR_MAX_MAXBE,
+
+ NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+ NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+
+ NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+ NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+
+ NL802154_CAP_ATTR_LBT,
+
+ /* keep last */
+ __NL802154_CAP_ATTR_AFTER_LAST,
+ NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl802154_cca_modes - cca modes
+ *
+ * @__NL802154_CCA_INVALID: cca mode number 0 is reserved
+ * @NL802154_CCA_ENERGY: Energy above threshold
+ * @NL802154_CCA_CARRIER: Carrier sense only
+ * @NL802154_CCA_ENERGY_CARRIER: Carrier sense with energy above threshold
+ * @NL802154_CCA_ALOHA: CCA shall always report an idle medium
+ * @NL802154_CCA_UWB_SHR: UWB preamble sense based on the SHR of a frame
+ * @NL802154_CCA_UWB_MULTIPLEXED: UWB preamble sense based on the packet with
+ * the multiplexed preamble
+ * @__NL802154_CCA_ATTR_AFTER_LAST: Internal
+ * @NL802154_CCA_ATTR_MAX: Maximum CCA attribute number
+ */
+enum nl802154_cca_modes {
+ __NL802154_CCA_INVALID,
+ NL802154_CCA_ENERGY,
+ NL802154_CCA_CARRIER,
+ NL802154_CCA_ENERGY_CARRIER,
+ NL802154_CCA_ALOHA,
+ NL802154_CCA_UWB_SHR,
+ NL802154_CCA_UWB_MULTIPLEXED,
+
+ /* keep last */
+ __NL802154_CCA_ATTR_AFTER_LAST,
+ NL802154_CCA_ATTR_MAX = __NL802154_CCA_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl802154_cca_opts - additional options for cca modes
+ *
+ * @NL802154_CCA_OPT_ENERGY_CARRIER_OR: NL802154_CCA_ENERGY_CARRIER with OR
+ * @NL802154_CCA_OPT_ENERGY_CARRIER_AND: NL802154_CCA_ENERGY_CARRIER with AND
+ */
+enum nl802154_cca_opts {
+ NL802154_CCA_OPT_ENERGY_CARRIER_AND,
+ NL802154_CCA_OPT_ENERGY_CARRIER_OR,
+
+ /* keep last */
+ __NL802154_CCA_OPT_ATTR_AFTER_LAST,
+ NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl802154_supported_bool_states - bool states for bool capability entry
+ *
+ * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false
+ * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true
+ * @__NL802154_SUPPORTED_BOOL_INVALD: reserved
+ * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false
+ * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal
+ * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states
+ */
+enum nl802154_supported_bool_states {
+ NL802154_SUPPORTED_BOOL_FALSE,
+ NL802154_SUPPORTED_BOOL_TRUE,
+ /* to handle them in a mask */
+ __NL802154_SUPPORTED_BOOL_INVALD,
+ NL802154_SUPPORTED_BOOL_BOTH,
+
+ /* keep last */
+ __NL802154_SUPPORTED_BOOL_AFTER_LAST,
+ NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
+};
+
#endif /* __NL802154_H */
diff --git a/include/net/ping.h b/include/net/ping.h
index f074060..ac80cb4 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -59,7 +59,7 @@ extern struct pingv6_ops pingv6_ops;
struct pingfakehdr {
struct icmphdr icmph;
- struct iovec *iov;
+ struct msghdr *msg;
sa_family_t family;
__wsum wcheck;
};
@@ -75,12 +75,11 @@ void ping_err(struct sk_buff *skb, int offset, u32 info);
int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
struct sk_buff *);
-int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int noblock, int flags, int *addr_len);
+int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
-int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len);
+int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
bool ping_rcv(struct sk_buff *skb);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 27a3383..2342bf1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -3,6 +3,7 @@
#include <linux/jiffies.h>
#include <linux/ktime.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
struct qdisc_walker {
@@ -114,6 +115,17 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res);
+static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
+{
+ /* We need to take extra care in case the skb came via
+ * vlan accelerated path. In that case, use skb->vlan_proto
+ * as the original vlan header was already stripped.
+ */
+ if (skb_vlan_tag_present(skb))
+ return skb->vlan_proto;
+ return skb->protocol;
+}
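A hedged sketch of the intended use in a classifier: compare against the on-wire ethertype rather than skb->protocol (the function name is illustrative):

	static bool example_classify_ipv4(const struct sk_buff *skb)
	{
		/* for a frame that arrived via the VLAN-accelerated path this returns
		 * the VLAN ethertype from skb->vlan_proto, matching what the
		 * non-accelerated path would have seen in skb->protocol
		 */
		return tc_skb_protocol(skb) == htons(ETH_P_IP);
	}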
+
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index b776d72..ebc5a2e 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -147,6 +147,24 @@ struct regulatory_request {
* NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
* NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
* includes any modes unsupported for enforcement checking.
+ * @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific
+ * regdom management. These devices will ignore all regdom changes not
+ * originating from their own wiphy.
+ * A self-managed wiphy only employs regulatory information obtained from
+ * the FW and driver and does not use other cfg80211 sources like
+ * beacon-hints, country-code IEs and hints from other devices on the same
+ * system. Conversely, a self-managed wiphy does not share its regulatory
+ * hints with other devices in the system. If a system contains several
+ * devices, one or more of which are self-managed, there might be
+ * contradictory regulatory settings between them. Usage of flag is
+ * generally discouraged. Only use it if the FW/driver is incompatible
+ * with non-locally originated hints.
+ * This flag is incompatible with the flags: %REGULATORY_CUSTOM_REG,
+ * %REGULATORY_STRICT_REG, %REGULATORY_COUNTRY_IE_FOLLOW_POWER,
+ * %REGULATORY_COUNTRY_IE_IGNORE and %REGULATORY_DISABLE_BEACON_HINTS.
+ * Mixing any of the above flags with this flag will result in a failure
+ * to register the wiphy. This flag implies
+ * %REGULATORY_DISABLE_BEACON_HINTS and %REGULATORY_COUNTRY_IE_IGNORE.
*/
enum ieee80211_regulatory_flags {
REGULATORY_CUSTOM_REG = BIT(0),
@@ -156,6 +174,7 @@ enum ieee80211_regulatory_flags {
REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
REGULATORY_IGNORE_STALE_KICKOFF = BIT(6),
+ REGULATORY_WIPHY_SELF_MANAGED = BIT(7),
};
struct ieee80211_freq_range {
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 7f830ff..87935ca 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -39,8 +39,7 @@ struct request_sock_ops {
void (*send_reset)(struct sock *sk,
struct sk_buff *skb);
void (*destructor)(struct request_sock *req);
- void (*syn_ack_timeout)(struct sock *sk,
- struct request_sock *req);
+ void (*syn_ack_timeout)(const struct request_sock *req);
};
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
@@ -49,7 +48,11 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
*/
struct request_sock {
struct sock_common __req_common;
+#define rsk_refcnt __req_common.skc_refcnt
+#define rsk_hash __req_common.skc_hash
+
struct request_sock *dl_next;
+ struct sock *rsk_listener;
u16 mss;
u8 num_retrans; /* number of retransmits */
u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
@@ -58,32 +61,58 @@ struct request_sock {
u32 window_clamp; /* window clamp at creation time */
u32 rcv_wnd; /* rcv_wnd offered first time */
u32 ts_recent;
- unsigned long expires;
+ struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
+ u32 *saved_syn;
u32 secid;
u32 peer_secid;
};
-static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
{
struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
- if (req != NULL)
+ if (req) {
req->rsk_ops = ops;
-
+ sock_hold(sk_listener);
+ req->rsk_listener = sk_listener;
+ req->saved_syn = NULL;
+ /* Following is temporary. It is coupled with debugging
+ * helpers in reqsk_put() & reqsk_free()
+ */
+ atomic_set(&req->rsk_refcnt, 0);
+ }
return req;
}
-static inline void __reqsk_free(struct request_sock *req)
+static inline struct request_sock *inet_reqsk(struct sock *sk)
{
- kmem_cache_free(req->rsk_ops->slab, req);
+ return (struct request_sock *)sk;
+}
+
+static inline struct sock *req_to_sk(struct request_sock *req)
+{
+ return (struct sock *)req;
}
static inline void reqsk_free(struct request_sock *req)
{
+ /* temporary debugging */
+ WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
+
req->rsk_ops->destructor(req);
- __reqsk_free(req);
+ if (req->rsk_listener)
+ sock_put(req->rsk_listener);
+ kfree(req->saved_syn);
+ kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_put(struct request_sock *req)
+{
+ if (atomic_dec_and_test(&req->rsk_refcnt))
+ reqsk_free(req);
}
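A hedged sketch of the new reference-counted lifecycle; the atomic_set() mirrors what the temporary debug code above expects, and the names are illustrative:

	static struct request_sock *example_new_req(const struct request_sock_ops *ops,
						    struct sock *listener)
	{
		struct request_sock *req = reqsk_alloc(ops, listener);

		if (req)
			atomic_set(&req->rsk_refcnt, 1);	/* creator's reference */
		return req;
	}

	/* every holder eventually drops its reference; the last reqsk_put()
	 * ends up in reqsk_free(), which also releases rsk_listener
	 */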
extern int sysctl_max_syn_backlog;
@@ -93,12 +122,16 @@ extern int sysctl_max_syn_backlog;
* @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
*/
struct listen_sock {
- u8 max_qlen_log;
+ int qlen_inc; /* protected by listener lock */
+ int young_inc;/* protected by listener lock */
+
+ /* following fields can be updated by timer */
+ atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
+ atomic_t young_dec;
+
+ u8 max_qlen_log ____cacheline_aligned_in_smp;
u8 synflood_warned;
/* 2 bytes hole, try to use */
- int qlen;
- int qlen_young;
- int clock_hand;
u32 hash_rnd;
u32 nr_table_entries;
struct request_sock *syn_table[0];
@@ -142,18 +175,11 @@ struct fastopen_queue {
* %syn_wait_lock is necessary only to avoid proc interface having to grab the main
* lock sock while browsing the listening hash (otherwise it's deadlock prone).
*
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
*/
struct request_sock_queue {
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
- rwlock_t syn_wait_lock;
u8 rskq_defer_accept;
- /* 3 bytes hole, try to pack */
struct listen_sock *listen_opt;
struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
* enabled on this listener. Check
@@ -161,6 +187,9 @@ struct request_sock_queue {
* to determine if TFO is enabled
* right at this moment.
*/
+
+ /* temporary alignment, our goal is to get rid of this lock */
+ spinlock_t syn_wait_lock ____cacheline_aligned_in_smp;
};
int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -185,15 +214,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue)
return queue->rskq_accept_head == NULL;
}
-static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
- struct request_sock *req,
- struct request_sock **prev_req)
-{
- write_lock(&queue->syn_wait_lock);
- *prev_req = req->dl_next;
- write_unlock(&queue->syn_wait_lock);
-}
-
static inline void reqsk_queue_add(struct request_sock_queue *queue,
struct request_sock *req,
struct sock *parent,
@@ -224,57 +244,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
return req;
}
-static inline int reqsk_queue_removed(struct request_sock_queue *queue,
- struct request_sock *req)
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+ const struct request_sock *req)
{
struct listen_sock *lopt = queue->listen_opt;
if (req->num_timeout == 0)
- --lopt->qlen_young;
-
- return --lopt->qlen;
+ atomic_inc(&lopt->young_dec);
+ atomic_inc(&lopt->qlen_dec);
}
-static inline int reqsk_queue_added(struct request_sock_queue *queue)
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
struct listen_sock *lopt = queue->listen_opt;
- const int prev_qlen = lopt->qlen;
- lopt->qlen_young++;
- lopt->qlen++;
- return prev_qlen;
+ lopt->young_inc++;
+ lopt->qlen_inc++;
}
-static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+static inline int listen_sock_qlen(const struct listen_sock *lopt)
{
- return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+ return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
}
-static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+static inline int listen_sock_young(const struct listen_sock *lopt)
{
- return queue->listen_opt->qlen_young;
+ return lopt->young_inc - atomic_read(&lopt->young_dec);
}
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
- return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+ const struct listen_sock *lopt = queue->listen_opt;
+
+ return lopt ? listen_sock_qlen(lopt) : 0;
}
-static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
- u32 hash, struct request_sock *req,
- unsigned long timeout)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
- struct listen_sock *lopt = queue->listen_opt;
-
- req->expires = jiffies + timeout;
- req->num_retrans = 0;
- req->num_timeout = 0;
- req->sk = NULL;
- req->dl_next = lopt->syn_table[hash];
+ return listen_sock_young(queue->listen_opt);
+}
- write_lock(&queue->syn_wait_lock);
- lopt->syn_table[hash] = req;
- write_unlock(&queue->syn_wait_lock);
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+ return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
}
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ u32 hash, struct request_sock *req,
+ unsigned long timeout);
+
#endif /* _REQUEST_SOCK_H */
diff --git a/include/net/route.h b/include/net/route.h
index b17cf28..fe22d03 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -46,6 +46,7 @@
struct fib_nh;
struct fib_info;
+struct uncached_list;
struct rtable {
struct dst_entry dst;
@@ -64,6 +65,7 @@ struct rtable {
u32 rt_pmtu;
struct list_head rt_uncached;
+ struct uncached_list *rt_uncached_list;
};
static inline bool rt_is_input_route(const struct rtable *rt)
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index e21b9f9..343d922 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -46,6 +46,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
* to create when creating a new device.
* @get_num_rx_queues: Function to determine number of receive queues
* to create when creating a new device.
+ * @get_link_net: Function to get the i/o netns of the device
*/
struct rtnl_link_ops {
struct list_head list;
@@ -93,6 +94,7 @@ struct rtnl_link_ops {
int (*fill_slave_info)(struct sk_buff *skb,
const struct net_device *dev,
const struct net_device *slave_dev);
+ struct net *(*get_link_net)(const struct net_device *dev);
};
int __rtnl_link_register(struct rtnl_link_ops *ops);
@@ -135,7 +137,7 @@ void rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-struct net_device *rtnl_create_link(struct net *net, char *ifname,
+struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
struct nlattr *tb[]);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3d282cb..2738f6f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -79,6 +79,9 @@ struct Qdisc {
struct netdev_queue *dev_queue;
struct gnet_stats_rate_est64 rate_est;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+
struct Qdisc *next_sched;
struct sk_buff *gso_skb;
/*
@@ -86,15 +89,9 @@ struct Qdisc {
*/
unsigned long state;
struct sk_buff_head q;
- union {
- struct gnet_stats_basic_packed bstats;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- } __packed;
+ struct gnet_stats_basic_packed bstats;
unsigned int __state;
- union {
- struct gnet_stats_queue qstats;
- struct gnet_stats_queue __percpu *cpu_qstats;
- } __packed;
+ struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
int padded;
atomic_t refcnt;
@@ -216,7 +213,7 @@ struct tcf_proto_ops {
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- void (*destroy)(struct tcf_proto*);
+ bool (*destroy)(struct tcf_proto*, bool);
unsigned long (*get)(struct tcf_proto*, u32 handle);
int (*change)(struct net *net, struct sk_buff *,
@@ -402,7 +399,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
-void tcf_destroy(struct tcf_proto *tp);
+bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
/* Reset all TX qdiscs greater then index of a device. */
@@ -504,12 +501,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return sch->enqueue(skb, sch);
}
-static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
-{
- qdisc_skb_cb(skb)->pkt_len = skb->len;
- return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
-}
-
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
return q->flags & TCQ_F_CPUSTATS;
@@ -748,23 +739,6 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
return rtab->data[slot];
}
-#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
- int action)
-{
- struct sk_buff *n;
-
- n = skb_clone(skb, gfp_mask);
-
- if (n) {
- n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
- n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
- n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
- }
- return n;
-}
-#endif
-
struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 856f01c..ce13cf2 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -166,6 +166,9 @@ void sctp_remaddr_proc_exit(struct net *net);
*/
extern struct kmem_cache *sctp_chunk_cachep __read_mostly;
extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
+extern long sysctl_sctp_mem[3];
+extern int sysctl_sctp_rmem[3];
+extern int sysctl_sctp_wmem[3];
/*
* Section: Macros, externs, and inlines
@@ -571,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
/* Map v4 address to v4-mapped v6 address */
static inline void sctp_v4_map_v6(union sctp_addr *addr)
{
+ __be16 port;
+
+ port = addr->v4.sin_port;
+ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ addr->v6.sin6_port = port;
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_scope_id = 0;
- addr->v6.sin6_port = addr->v4.sin_port;
- addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
addr->v6.sin6_addr.s6_addr32[0] = 0;
addr->v6.sin6_addr.s6_addr32[1] = 0;
addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2bb2fcf..495c87e 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -223,6 +223,10 @@ struct sctp_sock {
atomic_t pd_mode;
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
+
+ /* These must be the last fields, as they will be skipped on copies,
+ * such as on accept and peeloff operations
+ */
struct list_head auto_asconf_list;
int do_auto_asconf;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index 2210fec..05a8c1a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -57,7 +57,6 @@
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
-#include <linux/aio.h>
#include <linux/sched.h>
#include <linux/filter.h>
@@ -67,6 +66,7 @@
#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
+#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
struct cgroup;
@@ -184,21 +184,22 @@ struct sock_common {
unsigned char skc_reuse:4;
unsigned char skc_reuseport:1;
unsigned char skc_ipv6only:1;
+ unsigned char skc_net_refcnt:1;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
struct hlist_nulls_node skc_portaddr_node;
};
struct proto *skc_prot;
-#ifdef CONFIG_NET_NS
- struct net *skc_net;
-#endif
+ possible_net_t skc_net;
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr skc_v6_daddr;
struct in6_addr skc_v6_rcv_saddr;
#endif
+ atomic64_t skc_cookie;
+
/*
* fields between dontcopy_begin/dontcopy_end
* are not copied in sock_copy()
@@ -276,7 +277,6 @@ struct cg_proto;
* @sk_incoming_cpu: record cpu processing incoming packets
* @sk_txhash: computed flow hash for use on transmit
* @sk_filter: socket filtering instructions
- * @sk_protinfo: private area, net family specific, when not using slab
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
* @sk_tsflags: SO_TIMESTAMPING socket options
@@ -323,12 +323,14 @@ struct sock {
#define sk_reuse __sk_common.skc_reuse
#define sk_reuseport __sk_common.skc_reuseport
#define sk_ipv6only __sk_common.skc_ipv6only
+#define sk_net_refcnt __sk_common.skc_net_refcnt
#define sk_bound_dev_if __sk_common.skc_bound_dev_if
#define sk_bind_node __sk_common.skc_bind_node
#define sk_prot __sk_common.skc_prot
#define sk_net __sk_common.skc_net
#define sk_v6_daddr __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+#define sk_cookie __sk_common.skc_cookie
socket_lock_t sk_lock;
struct sk_buff_head sk_receive_queue;
@@ -403,8 +405,8 @@ struct sock {
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
- unsigned short sk_ack_backlog;
- unsigned short sk_max_ack_backlog;
+ u32 sk_ack_backlog;
+ u32 sk_max_ack_backlog;
__u32 sk_priority;
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
__u32 sk_cgrp_prioidx;
@@ -413,7 +415,6 @@ struct sock {
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
long sk_sndtimeo;
- void *sk_protinfo;
struct timer_list sk_timer;
ktime_t sk_stamp;
u16 sk_tsflags;
@@ -857,18 +858,6 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
#endif
}
-static inline void sock_rps_reset_flow_hash(__u32 hash)
-{
-#ifdef CONFIG_RPS
- struct rps_sock_flow_table *sock_flow_table;
-
- rcu_read_lock();
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_reset_sock_flow(sock_flow_table, hash);
- rcu_read_unlock();
-#endif
-}
-
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
@@ -876,28 +865,18 @@ static inline void sock_rps_record_flow(const struct sock *sk)
#endif
}
-static inline void sock_rps_reset_flow(const struct sock *sk)
-{
-#ifdef CONFIG_RPS
- sock_rps_reset_flow_hash(sk->sk_rxhash);
-#endif
-}
-
static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (unlikely(sk->sk_rxhash != skb->hash)) {
- sock_rps_reset_flow(sk);
+ if (unlikely(sk->sk_rxhash != skb->hash))
sk->sk_rxhash = skb->hash;
- }
#endif
}
static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
- sock_rps_reset_flow(sk);
sk->sk_rxhash = 0;
#endif
}
@@ -945,7 +924,6 @@ static inline void sk_prot_clear_nulls(struct sock *sk, int size)
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
- * transport -> network interface is defined by struct inet_proto
*/
struct proto {
void (*close)(struct sock *sk,
@@ -980,10 +958,9 @@ struct proto {
int (*compat_ioctl)(struct sock *sk,
unsigned int cmd, unsigned long arg);
#endif
- int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg, size_t len);
- int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
- struct msghdr *msg,
+ int (*sendmsg)(struct sock *sk, struct msghdr *msg,
+ size_t len);
+ int (*recvmsg)(struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags,
int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
@@ -1099,11 +1076,6 @@ static inline bool memcg_proto_active(struct cg_proto *cg_proto)
return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}
-static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
-{
- return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
-}
-
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
@@ -1374,29 +1346,6 @@ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
#define SOCK_BINDADDR_LOCK 4
#define SOCK_BINDPORT_LOCK 8
-/* sock_iocb: used to kick off async processing of socket ios */
-struct sock_iocb {
- struct list_head list;
-
- int flags;
- int size;
- struct socket *sock;
- struct sock *sk;
- struct scm_cookie *scm;
- struct msghdr *msg, async_msg;
- struct kiocb *kiocb;
-};
-
-static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
-{
- return (struct sock_iocb *)iocb->private;
-}
-
-static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
-{
- return si->kiocb;
-}
-
struct socket_alloc {
struct socket socket;
struct inode vfs_inode;
@@ -1416,7 +1365,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
* Functions for memory accounting
*/
int __sk_mem_schedule(struct sock *sk, int size, int kind);
-void __sk_mem_reclaim(struct sock *sk);
+void __sk_mem_reclaim(struct sock *sk, int amount);
#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1457,7 +1406,7 @@ static inline void sk_mem_reclaim(struct sock *sk)
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk);
+ __sk_mem_reclaim(sk, sk->sk_forward_alloc);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
@@ -1465,7 +1414,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk)
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk);
+ __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1564,9 +1513,9 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
+ struct proto *prot, int kern);
void sk_free(struct sock *sk);
-void sk_release_kernel(struct sock *sk);
+void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
@@ -1612,9 +1561,8 @@ int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
-int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
-int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
- int);
+int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
@@ -1626,8 +1574,8 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
*/
int sock_common_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
-int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t size, int flags);
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
int compat_sock_common_getsockopt(struct socket *sock, int level,
@@ -1676,7 +1624,7 @@ static inline void sock_put(struct sock *sk)
sk_free(sk);
}
/* Generic version of sock_put(), dealing with all sockets
- * (TCP_TIMEWAIT, ESTABLISHED...)
+ * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
*/
void sock_gen_put(struct sock *sk);
@@ -1812,6 +1760,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+bool sk_mc_loop(struct sock *sk);
+
static inline bool sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
@@ -1826,27 +1776,25 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
}
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
- char __user *from, char *to,
+ struct iov_iter *from, char *to,
int copy, int offset)
{
if (skb->ip_summed == CHECKSUM_NONE) {
- int err = 0;
- __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
- if (err)
- return err;
+ __wsum csum = 0;
+ if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+ return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, offset);
} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
- if (!access_ok(VERIFY_READ, from, copy) ||
- __copy_from_user_nocache(to, from, copy))
+ if (copy_from_iter_nocache(to, copy, from) != copy)
return -EFAULT;
- } else if (copy_from_user(to, from, copy))
+ } else if (copy_from_iter(to, copy, from) != copy)
return -EFAULT;
return 0;
}
static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
- char __user *from, int copy)
+ struct iov_iter *from, int copy)
{
int err, offset = skb->len;
@@ -1858,7 +1806,7 @@ static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
return err;
}
-static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
struct sk_buff *skb,
struct page *page,
int off, int copy)
@@ -2075,7 +2023,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
}
}
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule);
/**
* sk_page_frag - return an appropriate page_frag
@@ -2130,6 +2079,29 @@ static inline int sock_intr_errno(long timeo)
return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
+struct sock_skb_cb {
+ u32 dropcount;
+};
+
+/* Store sock_skb_cb at the end of skb->cb[] so protocol families
+ * using skb->cb[] would keep using it directly and utilize its
+ * alignment guarantee.
+ */
+#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
+ sizeof(struct sock_skb_cb)))
+
+#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
+ SOCK_SKB_CB_OFFSET))
+
+#define sock_skb_cb_check_size(size) \
+ BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
+
+static inline void
+sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
+{
+ SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
+}
+
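The SOCK_SKB_CB() macros above park a small per-socket control block at the tail of skb->cb[] so that protocol families owning the front of cb[] keep their layout and alignment. A standalone sketch of the same offset arithmetic; CB_SIZE, fake_skb and proto_cb are stand-ins (48 bytes matching the usual cb[] size):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CB_SIZE 48                      /* matches the usual sk_buff cb[] size */

struct fake_skb { _Alignas(8) char cb[CB_SIZE]; };
struct sock_skb_cb { uint32_t dropcount; };
struct proto_cb { uint32_t seq, end_seq; };     /* pretend per-protocol area */

#define SOCK_SKB_CB_OFFSET (CB_SIZE - sizeof(struct sock_skb_cb))
#define SOCK_SKB_CB(skb)   ((struct sock_skb_cb *)((skb)->cb + SOCK_SKB_CB_OFFSET))

/* Equivalent of sock_skb_cb_check_size(): the front part must not overlap. */
static_assert(sizeof(struct proto_cb) <= SOCK_SKB_CB_OFFSET,
              "protocol cb spills into the tail block");

int main(void)
{
        struct fake_skb skb;

        memset(&skb, 0, sizeof(skb));
        SOCK_SKB_CB(&skb)->dropcount = 7;
        printf("dropcount stored at offset %zu: %u\n",
               (size_t)SOCK_SKB_CB_OFFSET, SOCK_SKB_CB(&skb)->dropcount);
        return 0;
}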
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2220,22 +2192,6 @@ void sock_net_set(struct sock *sk, struct net *net)
write_pnet(&sk->sk_net, net);
}
-/*
- * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
- * They should not hold a reference to a namespace in order to allow
- * to stop it.
- * Sockets after sk_change_net should be released using sk_release_kernel
- */
-static inline void sk_change_net(struct sock *sk, struct net *net)
-{
- struct net *current_net = sock_net(sk);
-
- if (!net_eq(current_net, net)) {
- put_net(current_net);
- sock_net_set(sk, hold_net(net));
- }
-}
-
static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
if (skb->sk) {
@@ -2248,6 +2204,14 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
return NULL;
}
+/* This helper checks if a socket is a full socket,
+ * ie _not_ a timewait or request socket.
+ */
+static inline bool sk_fullsock(const struct sock *sk)
+{
+ return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
+}
+
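sk_fullsock() above tests set membership with a single mask instead of comparing against each minimal-socket state. A hedged standalone illustration; the enum values mirror net/tcp_states.h but are hard-coded here:

#include <stdbool.h>
#include <stdio.h>

enum { ST_ESTABLISHED = 1, ST_TIME_WAIT = 6, ST_LISTEN = 10, ST_NEW_SYN_RECV = 12 };

#define F(state) (1 << (state))

static bool is_full_socket(int state)
{
        /* A "full" socket is anything that is not a minimal timewait or
         * request socket, tested with one mask instead of two compares.
         */
        return F(state) & ~(F(ST_TIME_WAIT) | F(ST_NEW_SYN_RECV));
}

int main(void)
{
        printf("ESTABLISHED full? %d\n", is_full_socket(ST_ESTABLISHED));   /* 1 */
        printf("NEW_SYN_RECV full? %d\n", is_full_socket(ST_NEW_SYN_RECV)); /* 0 */
        return 0;
}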
void sock_enable_timestamp(struct sock *sk, int flag);
int sock_get_timestamp(struct sock *, struct timeval __user *);
int sock_get_timestampns(struct sock *, struct timespec __user *);
@@ -2262,6 +2226,7 @@ bool sk_net_capable(const struct sock *sk, int cap);
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
+extern int sysctl_tstamp_allow_data;
extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 8a6d164..d5671f1 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -1,6 +1,7 @@
/*
* include/net/switchdev.h - Switch device API
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,23 +12,261 @@
#define _LINUX_SWITCHDEV_H_
#include <linux/netdevice.h>
+#include <linux/notifier.h>
+
+#define SWITCHDEV_F_NO_RECURSE BIT(0)
+
+enum switchdev_trans {
+ SWITCHDEV_TRANS_NONE,
+ SWITCHDEV_TRANS_PREPARE,
+ SWITCHDEV_TRANS_ABORT,
+ SWITCHDEV_TRANS_COMMIT,
+};
+
+enum switchdev_attr_id {
+ SWITCHDEV_ATTR_UNDEFINED,
+ SWITCHDEV_ATTR_PORT_PARENT_ID,
+ SWITCHDEV_ATTR_PORT_STP_STATE,
+ SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+};
+
+struct switchdev_attr {
+ enum switchdev_attr_id id;
+ enum switchdev_trans trans;
+ u32 flags;
+ union {
+ struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
+ u8 stp_state; /* PORT_STP_STATE */
+ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
+ } u;
+};
+
+struct fib_info;
+
+enum switchdev_obj_id {
+ SWITCHDEV_OBJ_UNDEFINED,
+ SWITCHDEV_OBJ_PORT_VLAN,
+ SWITCHDEV_OBJ_IPV4_FIB,
+ SWITCHDEV_OBJ_PORT_FDB,
+};
+
+struct switchdev_obj {
+ enum switchdev_obj_id id;
+ enum switchdev_trans trans;
+ int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
+ union {
+ struct switchdev_obj_vlan { /* PORT_VLAN */
+ u16 flags;
+ u16 vid_begin;
+ u16 vid_end;
+ } vlan;
+ struct switchdev_obj_ipv4_fib { /* IPV4_FIB */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 nlflags;
+ u32 tb_id;
+ } ipv4_fib;
+ struct switchdev_obj_fdb { /* PORT_FDB */
+ const unsigned char *addr;
+ u16 vid;
+ } fdb;
+ } u;
+};
+
+/**
+ * struct switchdev_ops - switchdev operations
+ *
+ * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
+ *
+ * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
+ *
+ * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
+ *
+ * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
+ *
+ * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
+ */
+struct switchdev_ops {
+ int (*switchdev_port_attr_get)(struct net_device *dev,
+ struct switchdev_attr *attr);
+ int (*switchdev_port_attr_set)(struct net_device *dev,
+ struct switchdev_attr *attr);
+ int (*switchdev_port_obj_add)(struct net_device *dev,
+ struct switchdev_obj *obj);
+ int (*switchdev_port_obj_del)(struct net_device *dev,
+ struct switchdev_obj *obj);
+ int (*switchdev_port_obj_dump)(struct net_device *dev,
+ struct switchdev_obj *obj);
+};
+
+enum switchdev_notifier_type {
+ SWITCHDEV_FDB_ADD = 1,
+ SWITCHDEV_FDB_DEL,
+};
+
+struct switchdev_notifier_info {
+ struct net_device *dev;
+};
+
+struct switchdev_notifier_fdb_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const unsigned char *addr;
+ u16 vid;
+};
+
+static inline struct net_device *
+switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
+{
+ return info->dev;
+}
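The switchdev_trans values above describe a two-phase protocol: a driver may fail during PREPARE (for example, no room left in its hardware tables), after which the core either issues COMMIT, which is expected to succeed, or ABORT to release what was reserved. A userspace analog of that flow, with invented names:

#include <stdbool.h>
#include <stdio.h>

enum trans { TRANS_PREPARE, TRANS_COMMIT, TRANS_ABORT };

/* Hypothetical driver hook: may refuse in PREPARE, must succeed in COMMIT. */
static int port_obj_add(enum trans t, int vid, bool have_room)
{
        switch (t) {
        case TRANS_PREPARE:
                return have_room ? 0 : -1;      /* reserve or reject */
        case TRANS_COMMIT:
                printf("vlan %d programmed\n", vid);
                return 0;
        case TRANS_ABORT:
                printf("vlan %d reservation released\n", vid);
                return 0;
        }
        return -1;
}

int main(void)
{
        int vid = 100;

        if (port_obj_add(TRANS_PREPARE, vid, true) == 0)
                port_obj_add(TRANS_COMMIT, vid, true);
        else
                port_obj_add(TRANS_ABORT, vid, true);
        return 0;
}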
#ifdef CONFIG_NET_SWITCHDEV
-int netdev_switch_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid);
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
+int switchdev_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr);
+int switchdev_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr);
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
+int register_switchdev_notifier(struct notifier_block *nb);
+int unregister_switchdev_notifier(struct notifier_block *nb);
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info);
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags);
+int switchdev_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int switchdev_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 nlflags, u32 tb_id);
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id);
+void switchdev_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 nlm_flags);
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid);
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int idx);
#else
-static inline int netdev_switch_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+static inline int switchdev_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_del(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int register_switchdev_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_switchdev_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int call_switchdev_notifiers(unsigned long val,
+ struct net_device *dev,
+ struct switchdev_notifier_info *info)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
+ u32 seq, struct net_device *dev,
+ u32 filter_mask, int nlflags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type,
+ u32 nlflags, u32 tb_id)
+{
+ return 0;
+}
+
+static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id)
+{
+ return 0;
+}
+
+static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
+{
+}
+
+static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u16 nlm_flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
{
return -EOPNOTSUPP;
}
-static inline int netdev_switch_port_stp_update(struct net_device *dev,
- u8 state)
+static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev,
+ int idx)
{
return -EOPNOTSUPP;
}
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
new file mode 100644
index 0000000..a152e98
--- /dev/null
+++ b/include/net/tc_act/tc_bpf.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_BPF_H
+#define __NET_TC_BPF_H
+
+#include <linux/filter.h>
+#include <net/act_api.h>
+
+struct tcf_bpf {
+ struct tcf_common common;
+ struct bpf_prog *filter;
+ union {
+ u32 bpf_fd;
+ u16 bpf_num_ops;
+ };
+ struct sock_filter *bpf_ops;
+ const char *bpf_name;
+};
+#define to_bpf(a) \
+ container_of(a->priv, struct tcf_bpf, common)
+
+#endif /* __NET_TC_BPF_H */
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
new file mode 100644
index 0000000..5c1104c
--- /dev/null
+++ b/include/net/tc_act/tc_connmark.h
@@ -0,0 +1,14 @@
+#ifndef __NET_TC_CONNMARK_H
+#define __NET_TC_CONNMARK_H
+
+#include <net/act_api.h>
+
+struct tcf_connmark_info {
+ struct tcf_common common;
+ u16 zone;
+};
+
+#define to_connmark(a) \
+ container_of(a->priv, struct tcf_connmark_info, common)
+
+#endif /* __NET_TC_CONNMARK_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f50f29faf..950cfec 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -65,7 +65,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_MIN_MSS 88U
/* The least MTU to use for probing */
-#define TCP_BASE_MSS 512
+#define TCP_BASE_MSS 1024
+
+/* probing interval, defaults to 10 minutes as per RFC4821 */
+#define TCP_PROBE_INTERVAL 600
+
+/* Specify interval when tcp mtu probing will stop */
+#define TCP_PROBE_THRESHOLD 8
/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3
@@ -173,6 +179,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
+#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
* experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -188,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
+#define TCPOLEN_FASTOPEN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
/* But this is what stacks really send out. */
@@ -262,8 +270,6 @@ extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
-extern int sysctl_tcp_mtu_probing;
-extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
@@ -274,11 +280,20 @@ extern int sysctl_tcp_challenge_ack_limit;
extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_autocorking;
+extern int sysctl_tcp_invalid_ratelimit;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return !!sk->sk_cgrp->memory_pressure;
+
+ return tcp_memory_pressure;
+}
/*
* The next routines deal with comparing 32 bit unsigned ints
* and worry about wraparound (automatic with unsigned arithmetic).
@@ -304,6 +319,8 @@ static inline bool tcp_out_of_memory(struct sock *sk)
return false;
}
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -319,18 +336,6 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
bool tcp_check_oom(struct sock *sk, int shift);
-/* syncookies: remember time of last synqueue overflow */
-static inline void tcp_synq_overflow(struct sock *sk)
-{
- tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
-}
-
-/* syncookies: no recent synqueue overflow on this listening socket? */
-static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
-{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
-}
extern struct proto tcp_prot;
@@ -350,8 +355,7 @@ void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t size);
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
void tcp_release_cb(struct sock *sk);
@@ -402,8 +406,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb,
const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req, struct request_sock **prev,
- bool fastopen);
+ struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
@@ -430,9 +433,9 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
+void tcp_syn_ack_timeout(const struct request_sock *req);
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
@@ -444,10 +447,12 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
+void tcp_req_err(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(struct sock *sk,
struct request_sock *req,
struct sk_buff *skb);
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
@@ -464,6 +469,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -476,13 +484,35 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
* the counter advances immediately after a cookie is generated).
*/
-#define MAX_SYNCOOKIE_AGE 2
+#define MAX_SYNCOOKIE_AGE 2
+#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
+#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
+
+/* syncookies: remember time of last synqueue overflow
+ * But do not dirty this field too often (once per second is enough)
+ */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+ unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ unsigned long now = jiffies;
+
+ if (time_after(now, last_overflow + HZ))
+ tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+ unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+
+ return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+}
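A hedged userspace sketch of the rate-limiting idea above: the shared last-overflow timestamp is rewritten at most once per second, while the validity test still compares against the full syncookie lifetime. Plain '>' stands in for the kernel's wrap-safe time_after():

#include <stdio.h>

#define HZ              100                    /* illustrative tick rate */
#define COOKIE_VALID    (2 * 60 * HZ)          /* 2 * 60 s, as in the header */

static unsigned long last_overflow;            /* jiffies-like counter */

static void note_synq_overflow(unsigned long now)
{
        if (now > last_overflow + HZ)           /* dirty at most once per sec */
                last_overflow = now;
}

static int no_recent_overflow(unsigned long now)
{
        return now > last_overflow + COOKIE_VALID;
}

int main(void)
{
        note_synq_overflow(1000);
        note_synq_overflow(1050);               /* ignored: less than 1 s later */
        printf("last_overflow=%lu recent=%d\n",
               last_overflow, !no_recent_overflow(1050));
        return 0;
}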
static inline u32 tcp_cookie_time(void)
{
u64 val = get_jiffies_64();
- do_div(val, 60 * HZ);
+ do_div(val, TCP_SYNCOOKIE_PERIOD);
return val;
}
@@ -520,12 +550,10 @@ int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
-int tcp_write_wakeup(struct sock *);
+int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
-bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
- const char *proto);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
@@ -571,7 +599,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}
/* tcp.c */
-void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -636,6 +664,11 @@ static inline u32 tcp_rto_min_us(struct sock *sk)
return jiffies_to_usecs(tcp_rto_min(sk));
}
+static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
+{
+ return dst_metric_locked(dst, RTAX_CC_ALGO);
+}
+
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
* than the offered window.
@@ -682,6 +715,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80
+#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+
/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission code.
* We also store the host-order sequence numbers in here too.
@@ -695,11 +730,14 @@ struct tcp_skb_cb {
/* Note : tcp_tw_isn is used in input path only
* (isn chosen by tcp_timewait_state_process())
*
- * tcp_gso_segs is used in write queue only,
- * cf tcp_skb_pcount()
+ * tcp_gso_segs/size are used in write queue only,
+ * cf tcp_skb_pcount()/tcp_skb_mss()
*/
__u32 tcp_tw_isn;
- __u32 tcp_gso_segs;
+ struct {
+ u16 tcp_gso_segs;
+ u16 tcp_gso_size;
+ };
};
__u8 tcp_flags; /* TCP header flags. (tcp[13]) */
@@ -755,10 +793,10 @@ static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}
-/* This is valid iff tcp_skb_pcount() > 1. */
+/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
- return skb_shinfo(skb)->gso_size;
+ return TCP_SKB_CB(skb)->tcp_gso_size;
}
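Since tcp_gso_segs and tcp_gso_size now share the four bytes that previously held a single __u32, the packing can be checked with a static assertion; a minimal standalone version under assumed fixed-width typedefs:

#include <assert.h>
#include <stdint.h>

union cb_slot {
        uint32_t tcp_tw_isn;            /* input path only */
        struct {
                uint16_t tcp_gso_segs;  /* write queue only */
                uint16_t tcp_gso_size;
        };
};

static_assert(sizeof(union cb_slot) == sizeof(uint32_t),
              "gso pair must not grow the control block");

int main(void) { return 0; }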
/* Events passed to congestion control interface */
@@ -787,14 +825,19 @@ enum tcp_ca_ack_event_flags {
#define TCP_CA_MAX 128
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
+#define TCP_CA_UNSPEC 0
+
/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2
+union tcp_cc_info;
+
struct tcp_congestion_ops {
struct list_head list;
- unsigned long flags;
+ u32 key;
+ u32 flags;
/* initialize private data (optional) */
void (*init)(struct sock *sk);
@@ -816,7 +859,8 @@ struct tcp_congestion_ops {
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
/* get info for inet_diag (optional) */
- void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
+ size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info);
char name[TCP_CA_NAME_MAX];
struct module *owner;
@@ -834,13 +878,24 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
-void tcp_slow_start(struct tcp_sock *tp, u32 acked);
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
+struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
+u32 tcp_ca_get_key_by_name(const char *name);
+#ifdef CONFIG_INET
+char *tcp_ca_get_name_by_key(u32 key, char *buffer);
+#else
+static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
+{
+ return NULL;
+}
+#endif
+
static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1016,14 +1071,31 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
return tp->is_cwnd_limited;
}
-static inline void tcp_check_probe_timer(struct sock *sk)
+/* Something is really bad, we could not queue an additional packet,
+ * because qdisc is full or receiver sent a 0 window.
+ * We do not want to add fuel to the fire, or abort too early,
+ * so make sure the timer we arm now is at least 200ms in the future,
+ * regardless of current icsk_rto value (as it could be ~2ms)
+ */
+static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- const struct inet_connection_sock *icsk = inet_csk(sk);
+ return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
+}
+
+/* Variant of inet_csk_rto_backoff() used for zero window probes */
+static inline unsigned long tcp_probe0_when(const struct sock *sk,
+ unsigned long max_when)
+{
+ u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
+
+ return (unsigned long)min_t(u64, when, max_when);
+}
- if (!tp->packets_out && !icsk->icsk_pending)
+static inline void tcp_check_probe_timer(struct sock *sk)
+{
+ if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- icsk->icsk_rto, TCP_RTO_MAX);
+ tcp_probe0_base(sk), TCP_RTO_MAX);
}
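tcp_probe0_when() above widens base << backoff to 64 bits so the shift cannot wrap before it is clamped to max_when, and tcp_probe0_base() floors the base at RTO_MIN. An illustrative check with pretend tick values:

#include <stdint.h>
#include <stdio.h>

#define RTO_MIN   20UL          /* pretend 200 ms expressed in ticks */
#define RTO_MAX   12000UL       /* pretend 120 s */

static unsigned long probe0_when(unsigned long rto, unsigned int backoff)
{
        unsigned long base = rto > RTO_MIN ? rto : RTO_MIN;   /* floor */
        uint64_t when = (uint64_t)base << backoff;            /* no wrap */

        return when < RTO_MAX ? (unsigned long)when : RTO_MAX;
}

int main(void)
{
        /* Even a tiny rto with a huge backoff stays within [RTO_MIN, RTO_MAX]. */
        printf("%lu\n", probe0_when(1, 40));
        return 0;
}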
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -1113,30 +1185,6 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk->sk_rcvbuf);
}
-static inline void tcp_openreq_init(struct request_sock *req,
- struct tcp_options_received *rx_opt,
- struct sk_buff *skb, struct sock *sk)
-{
- struct inet_request_sock *ireq = inet_rsk(req);
-
- req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
- req->cookie_ts = 0;
- tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
- tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- tcp_rsk(req)->snt_synack = tcp_time_stamp;
- req->mss = rx_opt->mss_clamp;
- req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
- ireq->tstamp_ok = rx_opt->tstamp_ok;
- ireq->sack_ok = rx_opt->sack_ok;
- ireq->snd_wscale = rx_opt->snd_wscale;
- ireq->wscale_ok = rx_opt->wscale_ok;
- ireq->acked = 0;
- ireq->ecn_ok = 0;
- ireq->ir_rmt_port = tcp_hdr(skb)->source;
- ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
- ireq->ir_mark = inet_request_mark(sk, skb);
-}
-
extern void tcp_openreq_init_rwin(struct request_sock *req,
struct sock *sk, struct dst_entry *dst);
@@ -1216,6 +1264,9 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
return true;
}
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+ int mib_idx, u32 *last_oow_ack_time);
+
static inline void tcp_mib_init(struct net *net)
{
/* See RFC 2012 */
@@ -1293,15 +1344,14 @@ struct tcp_md5sig_pool {
};
/* - functions */
-int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- const struct sock *sk, const struct request_sock *req,
- const struct sk_buff *skb);
+int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+ const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
int family);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
- struct sock *addr_sk);
+ const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1337,7 +1387,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie, int *syn_loss,
unsigned long *last_syn_loss);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
- struct tcp_fastopen_cookie *cookie, bool syn_lost);
+ struct tcp_fastopen_cookie *cookie, bool syn_lost,
+ u16 try_exp);
struct tcp_fastopen_request {
/* Fast Open cookie. Size 0 means a cookie request */
struct tcp_fastopen_cookie cookie;
@@ -1612,28 +1663,26 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
- struct sock *addr_sk);
- int (*calc_md5_hash) (char *location,
- struct tcp_md5sig_key *md5,
- const struct sock *sk,
- const struct request_sock *req,
- const struct sk_buff *skb);
- int (*md5_parse) (struct sock *sk,
- char __user *optval,
- int optlen);
+ const struct sock *addr_sk);
+ int (*calc_md5_hash)(char *location,
+ const struct tcp_md5sig_key *md5,
+ const struct sock *sk,
+ const struct sk_buff *skb);
+ int (*md5_parse)(struct sock *sk,
+ char __user *optval,
+ int optlen);
#endif
};
struct tcp_request_sock_ops {
u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
- struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
- struct request_sock *req);
- int (*calc_md5_hash) (char *location,
- struct tcp_md5sig_key *md5,
- const struct sock *sk,
- const struct request_sock *req,
- const struct sk_buff *skb);
+ struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk,
+ const struct sock *addr_sk);
+ int (*calc_md5_hash) (char *location,
+ const struct tcp_md5sig_key *md5,
+ const struct sock *sk,
+ const struct sk_buff *skb);
#endif
void (*init_req)(struct request_sock *req, struct sock *sk,
struct sk_buff *skb);
@@ -1693,4 +1742,19 @@ static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
return dopt;
}
+/* locally generated TCP pure ACKs have skb->truesize == 2
+ * (check tcp_send_ack() in net/ipv4/tcp_output.c )
+ * This is much faster than dissecting the packet to find out.
+ * (Think of GRE encapsulations, IPv4, IPv6, ...)
+ */
+static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
+{
+ return skb->truesize == 2;
+}
+
+static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
+{
+ skb->truesize = 2;
+}
+
#endif /* _TCP_H */
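skb_is_tcp_pure_ack() above relies on truesize == 2 being impossible for any real packet, turning an existing field into a cheap tag. The same sentinel idea in a standalone sketch with a hypothetical buffer type:

#include <stdbool.h>
#include <stdio.h>

#define PURE_ACK_TRUESIZE 2     /* impossible for any real buffer */

struct buf { unsigned int truesize; unsigned int len; };

static void mark_pure_ack(struct buf *b)     { b->truesize = PURE_ACK_TRUESIZE; }
static bool is_pure_ack(const struct buf *b) { return b->truesize == PURE_ACK_TRUESIZE; }

int main(void)
{
        struct buf ack  = { .truesize = 0,    .len = 0 };
        struct buf data = { .truesize = 2048, .len = 1460 };

        mark_pure_ack(&ack);
        printf("ack tagged: %d, data tagged: %d\n",
               is_pure_ack(&ack), is_pure_ack(&data));
        return 0;
}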
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
index b0b6459..50e78a7 100644
--- a/include/net/tcp_states.h
+++ b/include/net/tcp_states.h
@@ -25,6 +25,7 @@ enum {
TCP_LAST_ACK,
TCP_LISTEN,
TCP_CLOSING, /* Now a valid state */
+ TCP_NEW_SYN_RECV,
TCP_MAX_STATES /* Leave at the end! */
};
@@ -44,7 +45,8 @@ enum {
TCPF_CLOSE_WAIT = (1 << 8),
TCPF_LAST_ACK = (1 << 9),
TCPF_LISTEN = (1 << 10),
- TCPF_CLOSING = (1 << 11)
+ TCPF_CLOSING = (1 << 11),
+ TCPF_NEW_SYN_RECV = (1 << 12),
};
#endif /* _LINUX_TCP_STATES_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 07f9b70..6d4ed18 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -194,6 +194,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
int (*)(const struct sock *, const struct sock *),
unsigned int hash2_nulladdr);
+u32 udp_flow_hashrnd(void);
+
static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
int min, int max, bool use_eth)
{
@@ -205,12 +207,19 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
}
hash = skb_get_hash(skb);
- if (unlikely(!hash) && use_eth) {
- /* Can't find a normal hash, caller has indicated an Ethernet
- * packet so use that to compute a hash.
- */
- hash = jhash(skb->data, 2 * ETH_ALEN,
- (__force u32) skb->protocol);
+ if (unlikely(!hash)) {
+ if (use_eth) {
+ /* Can't find a normal hash, caller has indicated an
+ * Ethernet packet so use that to compute a hash.
+ */
+ hash = jhash(skb->data, 2 * ETH_ALEN,
+ (__force u32) skb->protocol);
+ } else {
+ /* Can't derive any sort of hash for the packet, set
+ * to some consistent random value.
+ */
+ hash = udp_flow_hashrnd();
+ }
}
/* Since this is being sent on the wire obfuscate hash a bit
@@ -229,8 +238,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
void udp_err(struct sk_buff *, u32);
-int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- size_t len);
+int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
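The udp_flow_src_port() change above adds a last-resort fallback: when neither a flow hash nor Ethernet headers are available, a process-wide random value (udp_flow_hashrnd()) is used so such packets still get a consistent source port. A hedged sketch of that selection order, with invented helpers and a simplified port mapping:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t flow_hashrnd(void)
{
        static uint32_t rnd;
        static int init;

        if (!init) {            /* generated once, then reused */
                rnd = (uint32_t)rand();
                init = 1;
        }
        return rnd;
}

static uint16_t flow_src_port(uint32_t skb_hash, int use_eth,
                              uint32_t eth_hash, int min, int max)
{
        uint32_t hash = skb_hash;

        if (!hash)
                hash = use_eth ? eth_hash : flow_hashrnd();

        hash ^= hash << 16;     /* obfuscate before putting it on the wire */
        return (uint16_t)(min + (hash % (uint32_t)(max - min + 1)));
}

int main(void)
{
        printf("port=%u\n", flow_src_port(0, 0, 0, 49152, 65535));
        return 0;
}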
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 2a50a70..c491c12 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -77,17 +77,18 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
struct udp_tunnel_sock_cfg *sock_cfg);
/* Transmit the skb using UDP encapsulation. */
-int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
- struct sk_buff *skb, __be32 src, __be32 dst,
- __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
- __be16 dst_port, bool xnet);
+int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl,
+ __be16 df, __be16 src_port, __be16 dst_port,
+ bool xnet, bool nocheck);
#if IS_ENABLED(CONFIG_IPV6)
-int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
- struct sk_buff *skb, struct net_device *dev,
- struct in6_addr *saddr, struct in6_addr *daddr,
+int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev, struct in6_addr *saddr,
+ struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be16 src_port,
- __be16 dst_port);
+ __be16 dst_port, bool nocheck);
#endif
void udp_tunnel_sock_release(struct socket *sock);
diff --git a/include/net/udplite.h b/include/net/udplite.h
index ae7c8d1..8076193 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -20,8 +20,7 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb)
{
struct msghdr *msg = from;
- /* XXX: stripping const */
- return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len);
+ return copy_from_iter(to, len, &msg->msg_iter) != len ? -EFAULT : 0;
}
/* Designate sk as UDP-Lite socket */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 903461a..0082b5d 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -11,14 +11,97 @@
#define VNI_HASH_BITS 10
#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
-/* VXLAN protocol header */
+/*
+ * VXLAN Group Based Policy Extension:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1|-|-|-|1|-|-|-|R|D|R|R|A|R|R|R| Group Policy ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * D = Don't Learn bit. When set, this bit indicates that the egress
+ * VTEP MUST NOT learn the source address of the encapsulated frame.
+ *
+ * A = Indicates that the group policy has already been applied to
+ * this packet. Policies MUST NOT be applied by devices when the
+ * A bit is set.
+ *
+ * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy
+ */
+struct vxlanhdr_gbp {
+ __u8 vx_flags;
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ __u8 reserved_flags1:3,
+ policy_applied:1,
+ reserved_flags2:2,
+ dont_learn:1,
+ reserved_flags3:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 reserved_flags1:1,
+ dont_learn:1,
+ reserved_flags2:2,
+ policy_applied:1,
+ reserved_flags3:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be16 policy_id;
+ __be32 vx_vni;
+};
+
+#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF)
+
+/* skb->mark mapping
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|R|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define VXLAN_GBP_DONT_LEARN (BIT(6) << 16)
+#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
+#define VXLAN_GBP_ID_MASK (0xFFFF)
+
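The VXLAN_GBP_* masks above map the GBP header's D and A flags into the upper half of skb->mark, leaving the low 16 bits for the policy ID; the round trip can be verified directly:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)                  (1u << (n))
#define GBP_DONT_LEARN          (BIT(6) << 16)
#define GBP_POLICY_APPLIED      (BIT(3) << 16)
#define GBP_ID_MASK             0xFFFFu

int main(void)
{
        /* Flags live above the 16-bit policy ID, so they cannot collide. */
        static_assert((GBP_DONT_LEARN & GBP_ID_MASK) == 0, "overlap");
        static_assert((GBP_POLICY_APPLIED & GBP_ID_MASK) == 0, "overlap");

        uint32_t mark = GBP_POLICY_APPLIED | (0x1234u & GBP_ID_MASK);

        printf("policy id 0x%04x, applied=%d, dont_learn=%d\n",
               (unsigned)(mark & GBP_ID_MASK),
               !!(mark & GBP_POLICY_APPLIED),
               !!(mark & GBP_DONT_LEARN));
        return 0;
}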
+/* VXLAN protocol header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |G|R|R|R|I|R|R|C| Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * G = 1 Group Policy (VXLAN-GBP)
+ * I = 1 VXLAN Network Identifier (VNI) present
+ * C = 1 Remote checksum offload (RCO)
+ */
struct vxlanhdr {
__be32 vx_flags;
__be32 vx_vni;
};
+/* VXLAN header flags. */
+#define VXLAN_HF_RCO BIT(24)
+#define VXLAN_HF_VNI BIT(27)
+#define VXLAN_HF_GBP BIT(31)
+
+/* Remote checksum offload header option */
+#define VXLAN_RCO_MASK 0x7f /* Last byte of vni field */
+#define VXLAN_RCO_UDP 0x80 /* Indicates UDP RCO (TCP when not set) */
+#define VXLAN_RCO_SHIFT 1 /* Left shift of start */
+#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
+#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
+
+#define VXLAN_N_VID (1u << 24)
+#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
+#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
+struct vxlan_metadata {
+ __be32 vni;
+ u32 gbp;
+};
+
struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
+typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
+ struct vxlan_metadata *md);
/* per UDP socket information */
struct vxlan_sock {
@@ -31,6 +114,7 @@ struct vxlan_sock {
struct hlist_head vni_list[VNI_HASH_SIZE];
atomic_t refcnt;
struct udp_offload udp_offloads;
+ u32 flags;
};
#define VXLAN_F_LEARN 0x01
@@ -42,6 +126,18 @@ struct vxlan_sock {
#define VXLAN_F_UDP_CSUM 0x40
#define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80
#define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100
+#define VXLAN_F_REMCSUM_TX 0x200
+#define VXLAN_F_REMCSUM_RX 0x400
+#define VXLAN_F_GBP 0x800
+#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
+
+/* Flags that are used in the receive path. These flags must match in
+ * order for a socket to be shareable
+ */
+#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX | \
+ VXLAN_F_REMCSUM_RX | \
+ VXLAN_F_REMCSUM_NOPARTIAL)
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
vxlan_rcv_t *rcv, void *data,
@@ -49,10 +145,10 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
void vxlan_sock_release(struct vxlan_sock *vs);
-int vxlan_xmit_skb(struct vxlan_sock *vs,
- struct rtable *rt, struct sk_buff *skb,
+int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
+ __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
+ bool xnet, u32 vxflags);
static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
netdev_features_t features)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index dc4865e..f0ee97e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -126,9 +126,7 @@ struct xfrm_state_walk {
/* Full description of state of transformer. */
struct xfrm_state {
-#ifdef CONFIG_NET_NS
- struct net *xs_net;
-#endif
+ possible_net_t xs_net;
union {
struct hlist_node gclist;
struct hlist_node bydst;
@@ -170,6 +168,7 @@ struct xfrm_state {
struct xfrm_algo *ealg;
struct xfrm_algo *calg;
struct xfrm_algo_aead *aead;
+ const char *geniv;
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
@@ -334,7 +333,7 @@ struct xfrm_state_afinfo {
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
int (*output)(struct sock *sk, struct sk_buff *skb);
- int (*output_finish)(struct sk_buff *skb);
+ int (*output_finish)(struct sock *sk, struct sk_buff *skb);
int (*extract_input)(struct xfrm_state *x,
struct sk_buff *skb);
int (*extract_output)(struct xfrm_state *x,
@@ -522,9 +521,7 @@ struct xfrm_policy_queue {
};
struct xfrm_policy {
-#ifdef CONFIG_NET_NS
- struct net *xp_net;
-#endif
+ possible_net_t xp_net;
struct hlist_node bydst;
struct hlist_node byidx;
@@ -1029,7 +1026,7 @@ xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
case AF_INET:
return addr->a4 == 0;
case AF_INET6:
- return ipv6_addr_any((struct in6_addr *)&addr->a6);
+ return ipv6_addr_any(&addr->in6);
}
return 0;
}
@@ -1242,8 +1239,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl,
memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
break;
case AF_INET6:
- *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
- *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
+ saddr->in6 = fl->u.ip6.saddr;
+ daddr->in6 = fl->u.ip6.daddr;
break;
}
}
@@ -1318,6 +1315,7 @@ static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
* xfrm algorithm information
*/
struct xfrm_algo_aead_info {
+ char *geniv;
u16 icv_truncbits;
};
@@ -1327,6 +1325,7 @@ struct xfrm_algo_auth_info {
};
struct xfrm_algo_encr_info {
+ char *geniv;
u16 blockbits;
u16 defkeybits;
};
@@ -1507,7 +1506,7 @@ int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
int xfrm_output_resume(struct sk_buff *skb, int err);
-int xfrm_output(struct sk_buff *skb);
+int xfrm_output(struct sock *sk, struct sk_buff *skb);
int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
void xfrm_local_error(struct sk_buff *skb, int mtu);
int xfrm4_extract_header(struct sk_buff *skb);
@@ -1528,7 +1527,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_output(struct sock *sk, struct sk_buff *skb);
-int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1553,7 +1552,7 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_output(struct sock *sk, struct sk_buff *skb);
-int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr);
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 79abb9c..1443d79 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/cper.h>
+#include <linux/mm.h>
/*
* MCE Extended Error Log trace event
@@ -232,6 +233,90 @@ TRACE_EVENT(aer_event,
__print_flags(__entry->status, "|", aer_uncorrectable_errors))
);
+/*
+ * memory-failure recovery action result event
+ *
+ * unsigned long pfn - Page Frame Number of the corrupted page
+ * int type - Page types of the corrupted page
+ * int result - Result of recovery action
+ */
+
+#ifdef CONFIG_MEMORY_FAILURE
+#define MF_ACTION_RESULT \
+ EM ( MF_IGNORED, "Ignored" ) \
+ EM ( MF_FAILED, "Failed" ) \
+ EM ( MF_DELAYED, "Delayed" ) \
+ EMe ( MF_RECOVERED, "Recovered" )
+
+#define MF_PAGE_TYPE \
+ EM ( MF_MSG_KERNEL, "reserved kernel page" ) \
+ EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
+ EM ( MF_MSG_SLAB, "kernel slab page" ) \
+ EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \
+ EM ( MF_MSG_POISONED_HUGE, "huge page already hardware poisoned" ) \
+ EM ( MF_MSG_HUGE, "huge page" ) \
+ EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
+ EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
+ EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \
+ EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \
+ EM ( MF_MSG_DIRTY_MLOCKED_LRU, "dirty mlocked LRU page" ) \
+ EM ( MF_MSG_CLEAN_MLOCKED_LRU, "clean mlocked LRU page" ) \
+ EM ( MF_MSG_DIRTY_UNEVICTABLE_LRU, "dirty unevictable LRU page" ) \
+ EM ( MF_MSG_CLEAN_UNEVICTABLE_LRU, "clean unevictable LRU page" ) \
+ EM ( MF_MSG_DIRTY_LRU, "dirty LRU page" ) \
+ EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
+ EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
+ EM ( MF_MSG_BUDDY, "free buddy page" ) \
+ EM ( MF_MSG_BUDDY_2ND, "free buddy page (2nd try)" ) \
+ EMe ( MF_MSG_UNKNOWN, "unknown page" )
+
+/*
+ * First define the enums in MM_ACTION_RESULT to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+MF_ACTION_RESULT
+MF_PAGE_TYPE
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(memory_failure_event,
+ TP_PROTO(unsigned long pfn,
+ int type,
+ int result),
+
+ TP_ARGS(pfn, type, result),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(int, type)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->type = type;
+ __entry->result = result;
+ ),
+
+ TP_printk("pfn %#lx: recovery action for %s: %s",
+ __entry->pfn,
+ __print_symbolic(__entry->type, MF_PAGE_TYPE),
+ __print_symbolic(__entry->result, MF_ACTION_RESULT)
+ )
+);
+#endif /* CONFIG_MEMORY_FAILURE */
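The two-pass EM()/EMe() trick above first emits a TRACE_DEFINE_ENUM() for every value (so the enum names resolve in userspace) and then reuses the same lists as the { value, string } tables consumed by __print_symbolic(). As an illustration of the second pass (not additional code in the patch), MF_ACTION_RESULT effectively expands to:

	{ MF_IGNORED,   "Ignored"   },
	{ MF_FAILED,    "Failed"    },
	{ MF_DELAYED,   "Delayed"   },
	{ MF_RECOVERED, "Recovered" }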
#endif /* _TRACE_HW_EVENT_MC_H */
/* This part must be outside protection */
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ce55906..fde33ac 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -111,8 +111,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
int rdma_addr_size(struct sockaddr *addr);
int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
-int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac,
- u16 *vlan_id);
+int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
+ u8 *smac, u16 *vlan_id);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
{
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
}
/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
-static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
{
if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
struct sockaddr_in *out_in = (struct sockaddr_in *)out;
@@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
out_in->sin6_family = AF_INET6;
memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
}
- return 0;
}
static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index ad9a3c2..bd92130 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -64,10 +64,10 @@ int ib_get_cached_gid(struct ib_device *device,
* ib_find_cached_gid() searches for the specified GID value in
* the local software cache.
*/
-int ib_find_cached_gid(struct ib_device *device,
- union ib_gid *gid,
- u8 *port_num,
- u16 *index);
+int ib_find_cached_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ u8 *port_num,
+ u16 *index);
/**
* ib_get_cached_pkey - Returns a cached PKey table entry
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0e3ff30..39ed2d2 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -105,7 +105,8 @@ enum ib_cm_data_size {
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
IB_CM_SIDR_REP_INFO_LENGTH = 72,
- IB_CM_COMPARE_SIZE = 64
+ /* compare done u32 at a time */
+ IB_CM_COMPARE_SIZE = (64 / sizeof(u32))
};
struct ib_cm_id;
@@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
struct ib_cm_compare_data {
- u8 data[IB_CM_COMPARE_SIZE];
- u8 mask[IB_CM_COMPARE_SIZE];
+ u32 data[IB_CM_COMPARE_SIZE];
+ u32 mask[IB_CM_COMPARE_SIZE];
};
/**
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 9bb99e9..c8422d5 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -42,8 +42,11 @@
#include <rdma/ib_verbs.h>
#include <uapi/rdma/ib_user_mad.h>
-/* Management base version */
+/* Management base versions */
#define IB_MGMT_BASE_VERSION 1
+#define OPA_MGMT_BASE_VERSION 0x80
+
+#define OPA_SMP_CLASS_VERSION 0x80
/* Management classes */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
@@ -135,6 +138,10 @@ enum {
IB_MGMT_SA_DATA = 200,
IB_MGMT_DEVICE_HDR = 64,
IB_MGMT_DEVICE_DATA = 192,
+ IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA,
+ OPA_MGMT_MAD_DATA = 2024,
+ OPA_MGMT_RMPP_DATA = 2012,
+ OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA,
};
struct ib_mad_hdr {
@@ -181,12 +188,23 @@ struct ib_mad {
u8 data[IB_MGMT_MAD_DATA];
};
+struct opa_mad {
+ struct ib_mad_hdr mad_hdr;
+ u8 data[OPA_MGMT_MAD_DATA];
+};
+
struct ib_rmpp_mad {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
u8 data[IB_MGMT_RMPP_DATA];
};
+struct opa_rmpp_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ u8 data[OPA_MGMT_RMPP_DATA];
+};
+
struct ib_sa_mad {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
@@ -235,7 +253,10 @@ struct ib_class_port_info {
* includes the common MAD, RMPP, and class specific headers.
* @data_len: Indicates the total size of user-transferred data.
* @seg_count: The number of RMPP segments allocated for this send.
- * @seg_size: Size of each RMPP segment.
+ * @seg_size: Size of the data in each RMPP segment. This does not include
+ * class specific headers.
+ * @seg_rmpp_size: Size of each RMPP segment including the class specific
+ * headers.
* @timeout_ms: Time to wait for a response.
* @retries: Number of times to retry a request for a response. For MADs
* using RMPP, this applies per window. On completion, returns the number
@@ -255,6 +276,7 @@ struct ib_mad_send_buf {
int data_len;
int seg_count;
int seg_size;
+ int seg_rmpp_size;
int timeout_ms;
int retries;
};
@@ -263,7 +285,7 @@ struct ib_mad_send_buf {
* ib_response_mad - Returns if the specified MAD has been generated in
* response to a sent request or trap.
*/
-int ib_response_mad(struct ib_mad *mad);
+int ib_response_mad(const struct ib_mad_hdr *hdr);
/**
* ib_get_rmpp_resptime - Returns the RMPP response time.
@@ -401,7 +423,10 @@ struct ib_mad_send_wc {
struct ib_mad_recv_buf {
struct list_head list;
struct ib_grh *grh;
- struct ib_mad *mad;
+ union {
+ struct ib_mad *mad;
+ struct opa_mad *opa_mad;
+ };
};
/**
@@ -410,6 +435,7 @@ struct ib_mad_recv_buf {
* @recv_buf: Specifies the location of the received data buffer(s).
* @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
* @mad_len: The length of the received MAD, without duplicated headers.
+ * @mad_seg_size: The size of individual MAD segments
*
* For received response, the wr_id contains a pointer to the ib_mad_send_buf
* for the corresponding send request.
@@ -419,6 +445,7 @@ struct ib_mad_recv_wc {
struct ib_mad_recv_buf recv_buf;
struct list_head rmpp_list;
int mad_len;
+ size_t mad_seg_size;
};
/**
@@ -618,6 +645,7 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
* automatically adjust the allocated buffer size to account for any
* additional padding that may be necessary.
* @gfp_mask: GFP mask used for the memory allocation.
+ * @base_version: Base Version of this MAD
*
* This routine allocates a MAD for sending. The returned MAD send buffer
* will reference a data buffer usable for sending a MAD, along
@@ -633,7 +661,8 @@ struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
u32 remote_qpn, u16 pkey_index,
int rmpp_active,
int hdr_len, int data_len,
- gfp_t gfp_mask);
+ gfp_t gfp_mask,
+ u8 base_version);
/**
* ib_is_mad_class_rmpp - returns whether given management class
@@ -675,6 +704,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
* @agent: the agent in question
* @return: true if agent is performing rmpp, false otherwise.
*/
-int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent);
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);
#endif /* IB_MAD_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0d74f1d..b0f898e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -81,6 +81,13 @@ enum rdma_transport_type {
RDMA_TRANSPORT_USNIC_UDP
};
+enum rdma_protocol_type {
+ RDMA_PROTOCOL_IB,
+ RDMA_PROTOCOL_IBOE,
+ RDMA_PROTOCOL_IWARP,
+ RDMA_PROTOCOL_USNIC_UDP
+};
+
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);
@@ -166,6 +173,16 @@ struct ib_odp_caps {
} per_transport_caps;
};
+enum ib_cq_creation_flags {
+ IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+};
+
+struct ib_cq_init_attr {
+ unsigned int cqe;
+ int comp_vector;
+ u32 flags;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -210,6 +227,8 @@ struct ib_device_attr {
int sig_prot_cap;
int sig_guard_cap;
struct ib_odp_caps odp_caps;
+ uint64_t timestamp_mask;
+ uint64_t hca_core_clock; /* in KHZ */
};
enum ib_mtu {
@@ -346,6 +365,42 @@ union rdma_protocol_stats {
struct iw_protocol_stats iw;
};
+/* Define bits for the various kinds of functionality this port needs the
+ * core to support.
+ */
+/* Management 0x00000FFF */
+#define RDMA_CORE_CAP_IB_MAD 0x00000001
+#define RDMA_CORE_CAP_IB_SMI 0x00000002
+#define RDMA_CORE_CAP_IB_CM 0x00000004
+#define RDMA_CORE_CAP_IW_CM 0x00000008
+#define RDMA_CORE_CAP_IB_SA 0x00000010
+#define RDMA_CORE_CAP_OPA_MAD 0x00000020
+
+/* Address format 0x000FF000 */
+#define RDMA_CORE_CAP_AF_IB 0x00001000
+#define RDMA_CORE_CAP_ETH_AH 0x00002000
+
+/* Protocol 0xFFF00000 */
+#define RDMA_CORE_CAP_PROT_IB 0x00100000
+#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
+#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
+
+#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
+ | RDMA_CORE_CAP_IB_MAD \
+ | RDMA_CORE_CAP_IB_SMI \
+ | RDMA_CORE_CAP_IB_CM \
+ | RDMA_CORE_CAP_IB_SA \
+ | RDMA_CORE_CAP_AF_IB)
+#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
+ | RDMA_CORE_CAP_IB_MAD \
+ | RDMA_CORE_CAP_IB_CM \
+ | RDMA_CORE_CAP_AF_IB \
+ | RDMA_CORE_CAP_ETH_AH)
+#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
+ | RDMA_CORE_CAP_IW_CM)
+#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
+ | RDMA_CORE_CAP_OPA_MAD)
+
struct ib_port_attr {
enum ib_port_state state;
enum ib_mtu max_mtu;
@@ -412,6 +467,8 @@ enum ib_event_type {
IB_EVENT_GID_CHANGE,
};
+__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
+
struct ib_event {
struct ib_device *device;
union {
@@ -663,6 +720,8 @@ enum ib_wc_status {
IB_WC_GENERAL_ERR
};
+__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
+
enum ib_wc_opcode {
IB_WC_SEND,
IB_WC_RDMA_WRITE,
@@ -1407,7 +1466,7 @@ struct ib_flow {
struct ib_uobject *uobject;
};
-struct ib_mad;
+struct ib_mad_hdr;
struct ib_grh;
enum ib_process_mad_flags {
@@ -1474,6 +1533,13 @@ struct ib_dma_mapping_ops {
struct iw_cm_verbs;
+struct ib_port_immutable {
+ int pkey_tbl_len;
+ int gid_tbl_len;
+ u32 core_cap_flags;
+ u32 max_mad_size;
+};
+
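The per-port data that used to live in the pkey_tbl_len/gid_tbl_len arrays of struct ib_device (removed below) is now grouped here and filled once, at registration time, by the driver's get_port_immutable() callback added near the end of struct ib_device. A minimal sketch of such a callback for a plain IB HCA, using the composite capability masks defined above (illustrative only, not taken from this patch):

static int example_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len   = attr.pkey_tbl_len;
	immutable->gid_tbl_len    = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size   = IB_MGMT_MAD_SIZE;
	return 0;
}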
struct ib_device {
struct device *dma_device;
@@ -1487,8 +1553,10 @@ struct ib_device {
struct list_head client_data_list;
struct ib_cache cache;
- int *pkey_tbl_len;
- int *gid_tbl_len;
+ /**
+ * port_immutable is indexed by port number
+ */
+ struct ib_port_immutable *port_immutable;
int num_comp_vectors;
@@ -1497,7 +1565,8 @@ struct ib_device {
int (*get_protocol_stats)(struct ib_device *device,
union rdma_protocol_stats *stats);
int (*query_device)(struct ib_device *device,
- struct ib_device_attr *device_attr);
+ struct ib_device_attr *device_attr,
+ struct ib_udata *udata);
int (*query_port)(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr);
@@ -1561,8 +1630,8 @@ struct ib_device {
int (*post_recv)(struct ib_qp *qp,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
- struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
- int comp_vector,
+ struct ib_cq * (*create_cq)(struct ib_device *device,
+ const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
@@ -1637,10 +1706,13 @@ struct ib_device {
int (*process_mad)(struct ib_device *device,
int process_mad_flags,
u8 port_num,
- struct ib_wc *in_wc,
- struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad);
+ const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
struct ib_ucontext *ucontext,
struct ib_udata *udata);
@@ -1673,8 +1745,17 @@ struct ib_device {
char node_desc[64];
__be64 node_guid;
u32 local_dma_lkey;
+ u16 is_switch:1;
u8 node_type;
u8 phys_port_cnt;
+
+ /**
+ * The following mandatory functions are used only at device
+ * registration. Keep functions such as these at the end of this
+ * structure to avoid cache line misses when accessing struct ib_device
+ * in fast paths.
+ */
+ int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
};
struct ib_client {
@@ -1707,10 +1788,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
- size_t copy_sz;
-
- copy_sz = min_t(size_t, len, udata->outlen);
- return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
+ return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
/**
@@ -1746,6 +1824,297 @@ int ib_query_port(struct ib_device *device,
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
u8 port_num);
+/**
+ * rdma_cap_ib_switch - Check if the device is IB switch
+ * @device: Device to check
+ *
+ * The device driver is responsible for setting the is_switch bit in the
+ * ib_device structure at init time.
+ *
+ * Return: true if the device is IB switch.
+ */
+static inline bool rdma_cap_ib_switch(const struct ib_device *device)
+{
+ return device->is_switch;
+}
+
+/**
+ * rdma_start_port - Return the first valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return start port number
+ */
+static inline u8 rdma_start_port(const struct ib_device *device)
+{
+ return rdma_cap_ib_switch(device) ? 0 : 1;
+}
+
+/**
+ * rdma_end_port - Return the last valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return last port number
+ */
+static inline u8 rdma_end_port(const struct ib_device *device)
+{
+ return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
+}
+
+static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
+}
+
+static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
+}
+
+static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
+}
+
+static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags &
+ (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
+}
+
+/**
+ * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
+ * Management Datagrams.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Management Datagrams (MAD) are a required part of the InfiniBand
+ * specification and are supported on all InfiniBand devices. A slightly
+ * extended version is also supported on OPA interfaces.
+ *
+ * Return: true if the port supports sending/receiving of MAD packets.
+ */
+static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
+}
+
+/**
+ * rdma_cap_opa_mad - Check if the port of device provides support for OPA
+ * Management Datagrams.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Intel OmniPath devices extend and/or replace the InfiniBand Management
+ * datagrams with their own versions. These OPA MADs share many but not all of
+ * the characteristics of InfiniBand MADs.
+ *
+ * OPA MADs differ in the following ways:
+ *
+ * 1) MADs are variable size up to 2K
+ * IBTA defined MADs remain fixed at 256 bytes
+ * 2) OPA SMPs must carry valid PKeys
+ * 3) OPA SMP packets are a different format
+ *
+ * Return: true if the port supports OPA MAD packet formats.
+ */
+static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
+{
+ return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
+ == RDMA_CORE_CAP_OPA_MAD;
+}
+
+/**
+ * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
+ * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Each InfiniBand node is required to provide a Subnet Management Agent
+ * that the subnet manager can access. Prior to the fabric being fully
+ * configured by the subnet manager, the SMA is accessed via a well known
+ * interface called the Subnet Management Interface (SMI). This interface
+ * uses directed route packets to communicate with the SM to get around the
+ * chicken and egg problem of the SM needing to know what's on the fabric
+ * in order to configure the fabric, and needing to configure the fabric in
+ * order to send packets to the devices on the fabric. These directed
+ * route packets do not need the fabric fully configured in order to reach
+ * their destination. The SMI is the only method allowed to send
+ * directed route packets on an InfiniBand fabric.
+ *
+ * Return: true if the port provides an SMI.
+ */
+static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
+}
+
+/**
+ * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * The InfiniBand Communication Manager is one of many pre-defined General
+ * Service Agents (GSA) that are accessed via the General Service
+ * Interface (GSI). Its role is to facilitate the establishment of connections
+ * between nodes as well as other management related tasks for established
+ * connections.
+ *
+ * Return: true if the port supports an IB CM (this does not guarantee that
+ * a CM is actually running however).
+ */
+static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
+}
+
+/**
+ * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Similar to above, but specific to iWARP connections which have a different
+ * management protocol than InfiniBand.
+ *
+ * Return: true if the port supports an iWARP CM (this does not guarantee that
+ * a CM is actually running however).
+ */
+static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
+}
+
+/**
+ * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
+ * Subnet Administration.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * An InfiniBand Subnet Administration (SA) service is a pre-defined General
+ * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
+ * fabrics, devices should resolve routes to other hosts by contacting the
+ * SA to query the proper route.
+ *
+ * Return: true if the port should act as a client to the fabric Subnet
+ * Administration interface. This does not imply that the SA service is
+ * running locally.
+ */
+static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
+}
+
+/**
+ * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
+ * Multicast.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand multicast registration is more complex than normal IPv4 or
+ * IPv6 multicast registration. Each Host Channel Adapter must register
+ * with the Subnet Manager when it wishes to join a multicast group. It
+ * should do so only once regardless of how many queue pairs it subscribes
+ * to this group. And it should leave the group only after all queue pairs
+ * attached to the group have been detached.
+ *
+ * Return: true if the port must undertake the additional administrative
+ * overhead of registering/unregistering with the SM and tracking of the
+ * total number of queue pairs attached to the multicast group.
+ */
+static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
+{
+ return rdma_cap_ib_sa(device, port_num);
+}
+
+/**
+ * rdma_cap_af_ib - Check if the port of device has the capability
+ * Native Infiniband Address.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
+ * GID. RoCE uses a different mechanism, but still generates a GID via
+ * a prescribed mechanism and port specific data.
+ *
+ * Return: true if the port uses a GID address to identify devices on the
+ * network.
+ */
+static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
+}
+
+/**
+ * rdma_cap_eth_ah - Check if the port of device has the capability
+ * Ethernet Address Handle.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
+ * to fabricate GIDs over Ethernet/IP specific addresses native to the
+ * port. Normally, packet headers are generated by the sending host
+ * adapter, but when sending connectionless datagrams, we must manually
+ * inject the proper headers for the fabric we are communicating over.
+ *
+ * Return: true if we are running as a RoCE port and must force the
+ * addition of a Global Route Header built from our Ethernet Address
+ * Handle into our header list for connectionless packets.
+ */
+static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
+}
+
+/**
+ * rdma_cap_read_multi_sge - Check if the port of device has the capability
+ * RDMA Read Multiple Scatter-Gather Entries.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * iWARP has a restriction that RDMA READ requests may only have a single
+ * Scatter/Gather Entry (SGE) in the work request.
+ *
+ * NOTE: although the Linux kernel currently assumes all devices either
+ * allow only a single SGE on RDMA READs or have identical SGE maximums for
+ * RDMA READs and WRITEs, according to Tom Talpey this is not accurate.
+ * Some devices support more than a single SGE on RDMA READ requests, but
+ * not the same number of SGEs as they do on RDMA WRITE requests. The
+ * Linux kernel would need rearchitecting to support such imbalanced
+ * READ/WRITE SGE limits. So, for now, we assume either the device supports
+ * the same number of READ and WRITE SGEs, or it gets only one READ SGE.
+ *
+ * Return: true for any device that allows more than one SGE in RDMA READ
+ * requests.
+ */
+static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
+ u8 port_num)
+{
+ return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
+}
+
+/**
+ * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
+ *
+ * @device: Device
+ * @port_num: Port number
+ *
+ * This MAD size includes the MAD headers and MAD payload. No other headers
+ * are included.
+ *
+ * Return the max MAD size required by the port. Returns 0 if the port
+ * does not support MADs.
+ */
+static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
+{
+ return device->port_immutable[port_num].max_mad_size;
+}
+
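Taken together, these helpers let core code branch on per-port capabilities rather than on node type or link layer. A usage sketch (illustrative only):

/* Illustrative only: walk a device's ports and report the MAD-capable ones. */
static void example_scan_ports(struct ib_device *device)
{
	u8 port;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); port++) {
		if (!rdma_cap_ib_mad(device, port))
			continue;		/* no MAD support on this port */

		/* OPA ports need the larger 2K MAD buffers. */
		pr_info("port %u: %s MADs, max size %zu\n", port,
			rdma_cap_opa_mad(device, port) ? "OPA" : "IB",
			rdma_max_mad_size(device, port));
	}
}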
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid);
@@ -1802,8 +2171,9 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
- struct ib_grh *grh, struct ib_ah_attr *ah_attr);
+int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct ib_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -1817,8 +2187,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
* The address handle is used to reference a local or global destination
* in all UD QP post sends.
*/
-struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
- struct ib_grh *grh, u8 port_num);
+struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
+ const struct ib_grh *grh, u8 port_num);
/**
* ib_modify_ah - Modifies the address vector associated with an address
@@ -2014,16 +2384,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
* asynchronous event not associated with a completion occurs on the CQ.
* @cq_context: Context associated with the CQ returned to the user via
* the associated completion and event handlers.
- * @cqe: The minimum size of the CQ.
- * @comp_vector - Completion vector used to signal completion events.
- * Must be >= 0 and < context->num_comp_vectors.
+ * @cq_attr: The attributes the CQ should be created upon.
*
* Users can examine the cq structure to determine the actual CQ size.
*/
struct ib_cq *ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler,
void (*event_handler)(struct ib_event *, void *),
- void *cq_context, int cqe, int comp_vector);
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr);
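Callers that previously passed cqe and comp_vector directly now describe the CQ up front, which is also how IB_CQ_FLAGS_TIMESTAMP_COMPLETION is requested. A minimal sketch (illustrative only; the handlers are supplied by the caller):

static struct ib_cq *example_create_cq(struct ib_device *device,
				       ib_comp_handler comp,
				       void (*event)(struct ib_event *, void *),
				       void *cq_context)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe         = 256,
		.comp_vector = 0,
		.flags       = IB_CQ_FLAGS_TIMESTAMP_COMPLETION,
	};

	/* Returns an ERR_PTR() on failure, as before. */
	return ib_create_cq(device, comp, event, cq_context, &cq_attr);
}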
/**
* ib_resize_cq - Modifies the capacity of the CQ.
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 1017e0b..036bd27 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -91,6 +91,7 @@ struct iw_cm_id {
/* Used by provider to add and remove refs on IW cm_id */
void (*add_ref)(struct iw_cm_id *);
void (*rem_ref)(struct iw_cm_id *);
+ u8 tos;
};
struct iw_cm_conn_param {
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index 928b277..fda3167 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -148,6 +148,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *);
int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *);
/**
+ * iwpm_remote_info_cb - Process remote connecting peer address info, which
+ * the port mapper has received from the connecting peer
+ *
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Stores the IPv4/IPv6 address info in a hash table
+ */
+int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *);
+
+/**
* iwpm_mapping_error_cb - Process port mapper notification for error
*
* @skb:
@@ -175,6 +185,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
/**
+ * iwpm_get_remote_info - Get the remote connecting peer address info
+ *
+ * @mapped_loc_addr: Mapped local address of the listening peer
+ * @mapped_rem_addr: Mapped remote address of the connecting peer
+ * @remote_addr: To store the remote address of the connecting peer
+ * @nl_client: The index of the netlink client
+ *
+ * The remote address info is retrieved and provided to the client in
+ * the remote_addr. After that it is removed from the hash table
+ */
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr, u8 nl_client);
+
+/**
* iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
* info in a hash table
* @local_addr: Local ip/tcp address
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
new file mode 100644
index 0000000..29063e8
--- /dev/null
+++ b/include/rdma/opa_smi.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(OPA_SMI_H)
+#define OPA_SMI_H
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+
+#define OPA_SMP_LID_DATA_SIZE 2016
+#define OPA_SMP_DR_DATA_SIZE 1872
+#define OPA_SMP_MAX_PATH_HOPS 64
+
+#define OPA_SMI_CLASS_VERSION 0x80
+
+#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
+
+struct opa_smp {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ u8 hop_ptr;
+ u8 hop_cnt;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 mkey;
+ union {
+ struct {
+ uint8_t data[OPA_SMP_LID_DATA_SIZE];
+ } lid;
+ struct {
+ __be32 dr_slid;
+ __be32 dr_dlid;
+ u8 initial_path[OPA_SMP_MAX_PATH_HOPS];
+ u8 return_path[OPA_SMP_MAX_PATH_HOPS];
+ u8 reserved[8];
+ u8 data[OPA_SMP_DR_DATA_SIZE];
+ } dr;
+ } route;
+} __packed;
+
+
+static inline u8
+opa_get_smp_direction(struct opa_smp *smp)
+{
+ return ib_get_smp_direction((struct ib_smp *)smp);
+}
+
+static inline u8 *opa_get_smp_data(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return smp->route.dr.data;
+
+ return smp->route.lid.data;
+}
+
+static inline size_t opa_get_smp_data_size(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return sizeof(smp->route.dr.data);
+
+ return sizeof(smp->route.lid.data);
+}
+
+static inline size_t opa_get_smp_header_size(struct opa_smp *smp)
+{
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return sizeof(*smp) - sizeof(smp->route.dr.data);
+
+ return sizeof(*smp) - sizeof(smp->route.lid.data);
+}
+
+#endif /* OPA_SMI_H */
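A quick size check on the layout above, worked out here for illustration: the packed common header is 1+1+1+1+2+1+1+8+2+2+4+8 = 32 bytes, and both union arms are 2016 bytes, so sizeof(struct opa_smp) is 2048 bytes, matching OPA_MGMT_MAD_SIZE (IB_MGMT_MAD_HDR 24 + OPA_MGMT_MAD_DATA 2024 = 2048). opa_get_smp_header_size() therefore returns 32 for LID-routed SMPs and 2048 - 1872 = 176 for directed-route SMPs.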
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 1ed2088..c92522c 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -62,6 +62,8 @@ enum rdma_cm_event_type {
RDMA_CM_EVENT_TIMEWAIT_EXIT
};
+__attribute_const__ const char *rdma_event_msg(enum rdma_cm_event_type event);
+
enum rdma_port_space {
RDMA_PS_SDP = 0x0001,
RDMA_PS_IPOIB = 0x0002,
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index f2902ef..4dce116 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -47,7 +47,8 @@ struct rxrpc_header {
#define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */
#define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection security response (CLNT->SRVR) */
#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */
-#define RXRPC_N_PACKET_TYPES 9 /* number of packet types (incl type 0) */
+#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */
+#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */
uint8_t flags; /* packet flags */
#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8a7f8ad..e0a3398 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -1,9 +1,6 @@
/*
* This header file contains public constants and structures used by
- * the scsi code for linux.
- *
- * For documentation on the OPCODES, MESSAGES, and SENSE values,
- * please consult the SCSI standard.
+ * the SCSI initiator code.
*/
#ifndef _SCSI_SCSI_H
#define _SCSI_SCSI_H
@@ -11,6 +8,8 @@
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
struct scsi_cmnd;
@@ -49,184 +48,6 @@ enum scsi_timeouts {
*/
#define SCAN_WILD_CARD ~0
-/*
- * SCSI opcodes
- */
-
-#define TEST_UNIT_READY 0x00
-#define REZERO_UNIT 0x01
-#define REQUEST_SENSE 0x03
-#define FORMAT_UNIT 0x04
-#define READ_BLOCK_LIMITS 0x05
-#define REASSIGN_BLOCKS 0x07
-#define INITIALIZE_ELEMENT_STATUS 0x07
-#define READ_6 0x08
-#define WRITE_6 0x0a
-#define SEEK_6 0x0b
-#define READ_REVERSE 0x0f
-#define WRITE_FILEMARKS 0x10
-#define SPACE 0x11
-#define INQUIRY 0x12
-#define RECOVER_BUFFERED_DATA 0x14
-#define MODE_SELECT 0x15
-#define RESERVE 0x16
-#define RELEASE 0x17
-#define COPY 0x18
-#define ERASE 0x19
-#define MODE_SENSE 0x1a
-#define START_STOP 0x1b
-#define RECEIVE_DIAGNOSTIC 0x1c
-#define SEND_DIAGNOSTIC 0x1d
-#define ALLOW_MEDIUM_REMOVAL 0x1e
-
-#define READ_FORMAT_CAPACITIES 0x23
-#define SET_WINDOW 0x24
-#define READ_CAPACITY 0x25
-#define READ_10 0x28
-#define WRITE_10 0x2a
-#define SEEK_10 0x2b
-#define POSITION_TO_ELEMENT 0x2b
-#define WRITE_VERIFY 0x2e
-#define VERIFY 0x2f
-#define SEARCH_HIGH 0x30
-#define SEARCH_EQUAL 0x31
-#define SEARCH_LOW 0x32
-#define SET_LIMITS 0x33
-#define PRE_FETCH 0x34
-#define READ_POSITION 0x34
-#define SYNCHRONIZE_CACHE 0x35
-#define LOCK_UNLOCK_CACHE 0x36
-#define READ_DEFECT_DATA 0x37
-#define MEDIUM_SCAN 0x38
-#define COMPARE 0x39
-#define COPY_VERIFY 0x3a
-#define WRITE_BUFFER 0x3b
-#define READ_BUFFER 0x3c
-#define UPDATE_BLOCK 0x3d
-#define READ_LONG 0x3e
-#define WRITE_LONG 0x3f
-#define CHANGE_DEFINITION 0x40
-#define WRITE_SAME 0x41
-#define UNMAP 0x42
-#define READ_TOC 0x43
-#define READ_HEADER 0x44
-#define GET_EVENT_STATUS_NOTIFICATION 0x4a
-#define LOG_SELECT 0x4c
-#define LOG_SENSE 0x4d
-#define XDWRITEREAD_10 0x53
-#define MODE_SELECT_10 0x55
-#define RESERVE_10 0x56
-#define RELEASE_10 0x57
-#define MODE_SENSE_10 0x5a
-#define PERSISTENT_RESERVE_IN 0x5e
-#define PERSISTENT_RESERVE_OUT 0x5f
-#define VARIABLE_LENGTH_CMD 0x7f
-#define REPORT_LUNS 0xa0
-#define SECURITY_PROTOCOL_IN 0xa2
-#define MAINTENANCE_IN 0xa3
-#define MAINTENANCE_OUT 0xa4
-#define MOVE_MEDIUM 0xa5
-#define EXCHANGE_MEDIUM 0xa6
-#define READ_12 0xa8
-#define SERVICE_ACTION_OUT_12 0xa9
-#define WRITE_12 0xaa
-#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
-#define SERVICE_ACTION_IN_12 0xab
-#define WRITE_VERIFY_12 0xae
-#define VERIFY_12 0xaf
-#define SEARCH_HIGH_12 0xb0
-#define SEARCH_EQUAL_12 0xb1
-#define SEARCH_LOW_12 0xb2
-#define SECURITY_PROTOCOL_OUT 0xb5
-#define READ_ELEMENT_STATUS 0xb8
-#define SEND_VOLUME_TAG 0xb6
-#define WRITE_LONG_2 0xea
-#define EXTENDED_COPY 0x83
-#define RECEIVE_COPY_RESULTS 0x84
-#define ACCESS_CONTROL_IN 0x86
-#define ACCESS_CONTROL_OUT 0x87
-#define READ_16 0x88
-#define COMPARE_AND_WRITE 0x89
-#define WRITE_16 0x8a
-#define READ_ATTRIBUTE 0x8c
-#define WRITE_ATTRIBUTE 0x8d
-#define VERIFY_16 0x8f
-#define SYNCHRONIZE_CACHE_16 0x91
-#define WRITE_SAME_16 0x93
-#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
-#define SERVICE_ACTION_IN_16 0x9e
-#define SERVICE_ACTION_OUT_16 0x9f
-/* values for service action in */
-#define SAI_READ_CAPACITY_16 0x10
-#define SAI_GET_LBA_STATUS 0x12
-#define SAI_REPORT_REFERRALS 0x13
-/* values for VARIABLE_LENGTH_CMD service action codes
- * see spc4r17 Section D.3.5, table D.7 and D.8 */
-#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
-/* values for maintenance in */
-#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
-#define MI_REPORT_TARGET_PGS 0x0a
-#define MI_REPORT_ALIASES 0x0b
-#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
-#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
-#define MI_REPORT_PRIORITY 0x0e
-#define MI_REPORT_TIMESTAMP 0x0f
-#define MI_MANAGEMENT_PROTOCOL_IN 0x10
-/* value for MI_REPORT_TARGET_PGS ext header */
-#define MI_EXT_HDR_PARAM_FMT 0x20
-/* values for maintenance out */
-#define MO_SET_IDENTIFYING_INFORMATION 0x06
-#define MO_SET_TARGET_PGS 0x0a
-#define MO_CHANGE_ALIASES 0x0b
-#define MO_SET_PRIORITY 0x0e
-#define MO_SET_TIMESTAMP 0x0f
-#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
-/* values for variable length command */
-#define XDREAD_32 0x03
-#define XDWRITE_32 0x04
-#define XPWRITE_32 0x06
-#define XDWRITEREAD_32 0x07
-#define READ_32 0x09
-#define VERIFY_32 0x0a
-#define WRITE_32 0x0b
-#define WRITE_SAME_32 0x0d
-
-/* Values for T10/04-262r7 */
-#define ATA_16 0x85 /* 16-byte pass-thru */
-#define ATA_12 0xa1 /* 12-byte pass-thru */
-
-/*
- * SCSI command lengths
- */
-
-#define SCSI_MAX_VARLEN_CDB_SIZE 260
-
-/* defined in T10 SCSI Primary Commands-2 (SPC2) */
-struct scsi_varlen_cdb_hdr {
- __u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
- __u8 control;
- __u8 misc[5];
- __u8 additional_cdb_length; /* total cdb length - 8 */
- __be16 service_action;
- /* service specific data follows */
-};
-
-static inline unsigned
-scsi_varlen_cdb_length(const void *hdr)
-{
- return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
-}
-
-extern const unsigned char scsi_command_size_tbl[8];
-#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
-
-static inline unsigned
-scsi_command_size(const unsigned char *cmnd)
-{
- return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
- scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
-}
-
#ifdef CONFIG_ACPI
struct acpi_bus_type;
@@ -237,22 +58,6 @@ extern void
scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus);
#endif
-/*
- * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
- * T10/1561-D Revision 4 Draft dated 7th November 2002.
- */
-#define SAM_STAT_GOOD 0x00
-#define SAM_STAT_CHECK_CONDITION 0x02
-#define SAM_STAT_CONDITION_MET 0x04
-#define SAM_STAT_BUSY 0x08
-#define SAM_STAT_INTERMEDIATE 0x10
-#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
-#define SAM_STAT_TASK_SET_FULL 0x28
-#define SAM_STAT_ACA_ACTIVE 0x30
-#define SAM_STAT_TASK_ABORTED 0x40
-
/** scsi_status_is_good - check the status return.
*
* @status: the status passed up from the driver (including host and
@@ -276,86 +81,6 @@ static inline int scsi_status_is_good(int status)
(status == SAM_STAT_COMMAND_TERMINATED));
}
-/*
- * Status codes. These are deprecated as they are shifted 1 bit right
- * from those found in the SCSI standards. This causes confusion for
- * applications that are ported to several OSes. Prefer SAM Status codes
- * above.
- */
-
-#define GOOD 0x00
-#define CHECK_CONDITION 0x01
-#define CONDITION_GOOD 0x02
-#define BUSY 0x04
-#define INTERMEDIATE_GOOD 0x08
-#define INTERMEDIATE_C_GOOD 0x0a
-#define RESERVATION_CONFLICT 0x0c
-#define COMMAND_TERMINATED 0x11
-#define QUEUE_FULL 0x14
-#define ACA_ACTIVE 0x18
-#define TASK_ABORTED 0x20
-
-#define STATUS_MASK 0xfe
-
-/*
- * SENSE KEYS
- */
-
-#define NO_SENSE 0x00
-#define RECOVERED_ERROR 0x01
-#define NOT_READY 0x02
-#define MEDIUM_ERROR 0x03
-#define HARDWARE_ERROR 0x04
-#define ILLEGAL_REQUEST 0x05
-#define UNIT_ATTENTION 0x06
-#define DATA_PROTECT 0x07
-#define BLANK_CHECK 0x08
-#define COPY_ABORTED 0x0a
-#define ABORTED_COMMAND 0x0b
-#define VOLUME_OVERFLOW 0x0d
-#define MISCOMPARE 0x0e
-
-
-/*
- * DEVICE TYPES
- * Please keep them in 0x%02x format for $MODALIAS to work
- */
-
-#define TYPE_DISK 0x00
-#define TYPE_TAPE 0x01
-#define TYPE_PRINTER 0x02
-#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
-#define TYPE_WORM 0x04 /* Treated as ROM by our system */
-#define TYPE_ROM 0x05
-#define TYPE_SCANNER 0x06
-#define TYPE_MOD 0x07 /* Magneto-optical disk -
- * - treated as TYPE_DISK */
-#define TYPE_MEDIUM_CHANGER 0x08
-#define TYPE_COMM 0x09 /* Communications device */
-#define TYPE_RAID 0x0c
-#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
-#define TYPE_RBC 0x0e
-#define TYPE_OSD 0x11
-#define TYPE_ZBC 0x14
-#define TYPE_WLUN 0x1e /* well-known logical unit */
-#define TYPE_NO_LUN 0x7f
-
-/* SCSI protocols; these are taken from SPC-3 section 7.5 */
-enum scsi_protocol {
- SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
- SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
- SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
- SCSI_PROTOCOL_SBP = 3, /* firewire */
- SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
- SCSI_PROTOCOL_ISCSI = 5,
- SCSI_PROTOCOL_SAS = 6,
- SCSI_PROTOCOL_ADT = 7, /* Media Changers */
- SCSI_PROTOCOL_ATA = 8,
- SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
-};
-
-/* Returns a human-readable name for the device */
-extern const char * scsi_device_type(unsigned type);
/*
* standard mode-select header prepended to all mode-select commands
@@ -377,13 +102,6 @@ struct ccs_modesel_head {
};
/*
- * ScsiLun: 8 byte LUN.
- */
-struct scsi_lun {
- __u8 scsi_lun[8];
-};
-
-/*
* The Well Known LUNS (SAM-3) in our int representation of a LUN
*/
#define SCSI_W_LUN_BASE 0xc100
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
new file mode 100644
index 0000000..676b03b
--- /dev/null
+++ b/include/scsi/scsi_common.h
@@ -0,0 +1,64 @@
+/*
+ * Functions used by both the SCSI initiator code and the SCSI target code.
+ */
+
+#ifndef _SCSI_COMMON_H_
+#define _SCSI_COMMON_H_
+
+#include <linux/types.h>
+#include <scsi/scsi_proto.h>
+
+static inline unsigned
+scsi_varlen_cdb_length(const void *hdr)
+{
+ return ((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8;
+}
+
+extern const unsigned char scsi_command_size_tbl[8];
+#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]
+
+static inline unsigned
+scsi_command_size(const unsigned char *cmnd)
+{
+ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
+ scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
+}
+
+/* Returns a human-readable name for the device */
+extern const char *scsi_device_type(unsigned type);
+
+extern void int_to_scsilun(u64, struct scsi_lun *);
+extern u64 scsilun_to_int(struct scsi_lun *);
+
+/*
+ * This is a slightly modified SCSI sense "descriptor" format header.
+ * The addition is to allow the 0x70 and 0x71 response codes. The idea
+ * is to place the salient data from either "fixed" or "descriptor" sense
+ * format into one structure to ease application processing.
+ *
+ * The original sense buffer should be kept around for those cases
+ * in which more information is required (e.g. the LBA of a MEDIUM ERROR).
+ */
+struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
+ u8 response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
+ u8 sense_key;
+ u8 asc;
+ u8 ascq;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ u8 additional_length; /* always 0 for fixed sense format */
+};
+
+static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
+{
+ if (!sshdr)
+ return false;
+
+ return (sshdr->response_code & 0x70) == 0x70;
+}
+
+extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
+ struct scsi_sense_hdr *sshdr);
+
+#endif /* _SCSI_COMMON_H_ */
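scsi_normalize_sense() and scsi_sense_valid(), now exported from here rather than from scsi_eh.h, are the usual pair for consuming raw sense data. A minimal usage sketch (illustrative only; UNIT_ATTENTION comes from scsi_proto.h):

/* Illustrative only: decode a raw sense buffer into the normalized header. */
static bool example_is_unit_attention(const u8 *sense, int sense_len)
{
	struct scsi_sense_hdr sshdr;

	if (!scsi_normalize_sense(sense, sense_len, &sshdr) ||
	    !scsi_sense_valid(&sshdr))
		return false;

	return sshdr.sense_key == UNIT_ATTENTION;
}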
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 7982795..f8170e9 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -5,8 +5,11 @@ struct scsi_cmnd;
struct scsi_device;
struct scsi_sense_hdr;
+#define SCSI_LOG_BUFSIZE 128
+
extern void scsi_print_command(struct scsi_cmnd *);
-extern void __scsi_print_command(const unsigned char *, size_t);
+extern size_t __scsi_format_command(char *, size_t,
+ const unsigned char *, size_t);
extern void scsi_show_extd_sense(const struct scsi_device *, const char *,
unsigned char, unsigned char);
extern void scsi_show_sense_hdr(const struct scsi_device *, const char *,
@@ -17,12 +20,73 @@ extern void scsi_print_sense(const struct scsi_cmnd *);
extern void __scsi_print_sense(const struct scsi_device *, const char *name,
const unsigned char *sense_buffer,
int sense_len);
-extern void scsi_print_result(struct scsi_cmnd *, const char *, int);
-extern const char *scsi_hostbyte_string(int);
-extern const char *scsi_driverbyte_string(int);
-extern const char *scsi_mlreturn_string(int);
+extern void scsi_print_result(const struct scsi_cmnd *, const char *, int);
+
+#ifdef CONFIG_SCSI_CONSTANTS
+extern bool scsi_opcode_sa_name(int, int, const char **, const char **);
extern const char *scsi_sense_key_string(unsigned char);
extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
const char **);
+extern const char *scsi_mlreturn_string(int);
+extern const char *scsi_hostbyte_string(int);
+extern const char *scsi_driverbyte_string(int);
+#else
+static inline bool
+scsi_opcode_sa_name(int cmd, int sa,
+ const char **cdb_name, const char **sa_name)
+{
+ *cdb_name = NULL;
+ switch (cmd) {
+ case VARIABLE_LENGTH_CMD:
+ case MAINTENANCE_IN:
+ case MAINTENANCE_OUT:
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ case SERVICE_ACTION_IN_12:
+ case SERVICE_ACTION_OUT_12:
+ case SERVICE_ACTION_BIDIRECTIONAL:
+ case SERVICE_ACTION_IN_16:
+ case SERVICE_ACTION_OUT_16:
+ case EXTENDED_COPY:
+ case RECEIVE_COPY_RESULTS:
+ *sa_name = NULL;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline const char *
+scsi_sense_key_string(unsigned char key)
+{
+ return NULL;
+}
+
+static inline const char *
+scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
+{
+ *fmt = NULL;
+ return NULL;
+}
+
+static inline const char *
+scsi_mlreturn_string(int result)
+{
+ return NULL;
+}
+
+static inline const char *
+scsi_hostbyte_string(int result)
+{
+ return NULL;
+}
+
+static inline const char *
+scsi_driverbyte_string(int result)
+{
+ return NULL;
+}
+
+#endif
#endif /* _SCSI_SCSI_DBG_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 3a4edd1..ae84b22 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -230,9 +230,6 @@ struct scsi_dh_data {
#define transport_class_to_sdev(class_dev) \
to_scsi_device(class_dev->parent)
-#define sdev_printk(prefix, sdev, fmt, a...) \
- dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)
-
#define sdev_dbg(sdev, fmt, a...) \
dev_dbg(&(sdev)->sdev_gendev, fmt, ##a)
@@ -240,16 +237,15 @@ struct scsi_dh_data {
* like scmd_printk, but the device name is passed in
* as a string pointer
*/
-#define sdev_prefix_printk(l, sdev, p, fmt, a...) \
- (p) ? \
- sdev_printk(l, sdev, "[%s] " fmt, p, ##a) : \
- sdev_printk(l, sdev, fmt, ##a)
+__printf(4, 5) void
+sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
+ const char *, ...);
+
+#define sdev_printk(l, sdev, fmt, a...) \
+ sdev_prefix_printk(l, sdev, NULL, fmt, ##a)
-#define scmd_printk(prefix, scmd, fmt, a...) \
- (scmd)->request->rq_disk ? \
- sdev_printk(prefix, (scmd)->device, "[%s] " fmt, \
- (scmd)->request->rq_disk->disk_name, ##a) : \
- sdev_printk(prefix, (scmd)->device, fmt, ##a)
+__printf(3, 4) void
+scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
#define scmd_dbg(scmd, fmt, a...) \
do { \
@@ -417,8 +413,6 @@ extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *, enum scsi_device_state);
extern void scsi_remove_target(struct device *);
-extern void int_to_scsilun(u64, struct scsi_lun *);
-extern u64 scsilun_to_int(struct scsi_lun *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 183eaab..96e3f56 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -36,5 +36,6 @@
for sequential scan */
#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
+#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
#endif
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 1e1421b..4942710 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -7,43 +7,12 @@
struct scsi_device;
struct Scsi_Host;
-/*
- * This is a slightly modified SCSI sense "descriptor" format header.
- * The addition is to allow the 0x70 and 0x71 response codes. The idea
- * is to place the salient data from either "fixed" or "descriptor" sense
- * format into one structure to ease application processing.
- *
- * The original sense buffer should be kept around for those cases
- * in which more information is required (e.g. the LBA of a MEDIUM ERROR).
- */
-struct scsi_sense_hdr { /* See SPC-3 section 4.5 */
- u8 response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
- u8 sense_key;
- u8 asc;
- u8 ascq;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- u8 additional_length; /* always 0 for fixed sense format */
-};
-
-static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr)
-{
- if (!sshdr)
- return false;
-
- return (sshdr->response_code & 0x70) == 0x70;
-}
-
-
extern void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
struct list_head *done_q);
extern void scsi_eh_flush_done_q(struct list_head *done_q);
extern void scsi_report_bus_reset(struct Scsi_Host *, int);
extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);
-extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
- struct scsi_sense_hdr *sshdr);
extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr);
@@ -59,6 +28,7 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out);
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
+extern void scsi_set_sense_information(u8 *buf, u64 info);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 019e668..e113c75 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -402,6 +402,9 @@ struct scsi_host_template {
*/
unsigned char present;
+ /* If use block layer to manage tags, this is tag allocation policy */
+ int tag_alloc_policy;
+
/*
* Let the block layer assigns tags to all commands.
*/
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
new file mode 100644
index 0000000..a9fbf1b
--- /dev/null
+++ b/include/scsi/scsi_proto.h
@@ -0,0 +1,281 @@
+/*
+ * This header file contains public constants and structures used by
+ * both the SCSI initiator and the SCSI target code.
+ *
+ * For documentation on the OPCODES, MESSAGES, and SENSE values,
+ * please consult the SCSI standard.
+ */
+
+#ifndef _SCSI_PROTO_H_
+#define _SCSI_PROTO_H_
+
+#include <linux/types.h>
+
+/*
+ * SCSI opcodes
+ */
+
+#define TEST_UNIT_READY 0x00
+#define REZERO_UNIT 0x01
+#define REQUEST_SENSE 0x03
+#define FORMAT_UNIT 0x04
+#define READ_BLOCK_LIMITS 0x05
+#define REASSIGN_BLOCKS 0x07
+#define INITIALIZE_ELEMENT_STATUS 0x07
+#define READ_6 0x08
+#define WRITE_6 0x0a
+#define SEEK_6 0x0b
+#define READ_REVERSE 0x0f
+#define WRITE_FILEMARKS 0x10
+#define SPACE 0x11
+#define INQUIRY 0x12
+#define RECOVER_BUFFERED_DATA 0x14
+#define MODE_SELECT 0x15
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define COPY 0x18
+#define ERASE 0x19
+#define MODE_SENSE 0x1a
+#define START_STOP 0x1b
+#define RECEIVE_DIAGNOSTIC 0x1c
+#define SEND_DIAGNOSTIC 0x1d
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+
+#define READ_FORMAT_CAPACITIES 0x23
+#define SET_WINDOW 0x24
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define WRITE_10 0x2a
+#define SEEK_10 0x2b
+#define POSITION_TO_ELEMENT 0x2b
+#define WRITE_VERIFY 0x2e
+#define VERIFY 0x2f
+#define SEARCH_HIGH 0x30
+#define SEARCH_EQUAL 0x31
+#define SEARCH_LOW 0x32
+#define SET_LIMITS 0x33
+#define PRE_FETCH 0x34
+#define READ_POSITION 0x34
+#define SYNCHRONIZE_CACHE 0x35
+#define LOCK_UNLOCK_CACHE 0x36
+#define READ_DEFECT_DATA 0x37
+#define MEDIUM_SCAN 0x38
+#define COMPARE 0x39
+#define COPY_VERIFY 0x3a
+#define WRITE_BUFFER 0x3b
+#define READ_BUFFER 0x3c
+#define UPDATE_BLOCK 0x3d
+#define READ_LONG 0x3e
+#define WRITE_LONG 0x3f
+#define CHANGE_DEFINITION 0x40
+#define WRITE_SAME 0x41
+#define UNMAP 0x42
+#define READ_TOC 0x43
+#define READ_HEADER 0x44
+#define GET_EVENT_STATUS_NOTIFICATION 0x4a
+#define LOG_SELECT 0x4c
+#define LOG_SENSE 0x4d
+#define XDWRITEREAD_10 0x53
+#define MODE_SELECT_10 0x55
+#define RESERVE_10 0x56
+#define RELEASE_10 0x57
+#define MODE_SENSE_10 0x5a
+#define PERSISTENT_RESERVE_IN 0x5e
+#define PERSISTENT_RESERVE_OUT 0x5f
+#define VARIABLE_LENGTH_CMD 0x7f
+#define REPORT_LUNS 0xa0
+#define SECURITY_PROTOCOL_IN 0xa2
+#define MAINTENANCE_IN 0xa3
+#define MAINTENANCE_OUT 0xa4
+#define MOVE_MEDIUM 0xa5
+#define EXCHANGE_MEDIUM 0xa6
+#define READ_12 0xa8
+#define SERVICE_ACTION_OUT_12 0xa9
+#define WRITE_12 0xaa
+#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */
+#define SERVICE_ACTION_IN_12 0xab
+#define WRITE_VERIFY_12 0xae
+#define VERIFY_12 0xaf
+#define SEARCH_HIGH_12 0xb0
+#define SEARCH_EQUAL_12 0xb1
+#define SEARCH_LOW_12 0xb2
+#define SECURITY_PROTOCOL_OUT 0xb5
+#define READ_ELEMENT_STATUS 0xb8
+#define SEND_VOLUME_TAG 0xb6
+#define WRITE_LONG_2 0xea
+#define EXTENDED_COPY 0x83
+#define RECEIVE_COPY_RESULTS 0x84
+#define ACCESS_CONTROL_IN 0x86
+#define ACCESS_CONTROL_OUT 0x87
+#define READ_16 0x88
+#define COMPARE_AND_WRITE 0x89
+#define WRITE_16 0x8a
+#define READ_ATTRIBUTE 0x8c
+#define WRITE_ATTRIBUTE 0x8d
+#define VERIFY_16 0x8f
+#define SYNCHRONIZE_CACHE_16 0x91
+#define WRITE_SAME_16 0x93
+#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
+#define SERVICE_ACTION_IN_16 0x9e
+#define SERVICE_ACTION_OUT_16 0x9f
+/* values for service action in */
+#define SAI_READ_CAPACITY_16 0x10
+#define SAI_GET_LBA_STATUS 0x12
+#define SAI_REPORT_REFERRALS 0x13
+/* values for VARIABLE_LENGTH_CMD service action codes
+ * see spc4r17 Section D.3.5, table D.7 and D.8 */
+#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
+/* values for maintenance in */
+#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
+#define MI_REPORT_TARGET_PGS 0x0a
+#define MI_REPORT_ALIASES 0x0b
+#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
+#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
+#define MI_REPORT_PRIORITY 0x0e
+#define MI_REPORT_TIMESTAMP 0x0f
+#define MI_MANAGEMENT_PROTOCOL_IN 0x10
+/* value for MI_REPORT_TARGET_PGS ext header */
+#define MI_EXT_HDR_PARAM_FMT 0x20
+/* values for maintenance out */
+#define MO_SET_IDENTIFYING_INFORMATION 0x06
+#define MO_SET_TARGET_PGS 0x0a
+#define MO_CHANGE_ALIASES 0x0b
+#define MO_SET_PRIORITY 0x0e
+#define MO_SET_TIMESTAMP 0x0f
+#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
+/* values for variable length command */
+#define XDREAD_32 0x03
+#define XDWRITE_32 0x04
+#define XPWRITE_32 0x06
+#define XDWRITEREAD_32 0x07
+#define READ_32 0x09
+#define VERIFY_32 0x0a
+#define WRITE_32 0x0b
+#define WRITE_SAME_32 0x0d
+
+/* Values for T10/04-262r7 */
+#define ATA_16 0x85 /* 16-byte pass-thru */
+#define ATA_12 0xa1 /* 12-byte pass-thru */
+
+/* Vendor specific CDBs start here */
+#define VENDOR_SPECIFIC_CDB 0xc0
+
+/*
+ * SCSI command lengths
+ */
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+
+/* defined in T10 SCSI Primary Commands-2 (SPC2) */
+struct scsi_varlen_cdb_hdr {
+ __u8 opcode; /* opcode always == VARIABLE_LENGTH_CMD */
+ __u8 control;
+ __u8 misc[5];
+ __u8 additional_cdb_length; /* total cdb length - 8 */
+ __be16 service_action;
+ /* service specific data follows */
+};
+
+/*
+ * SCSI Architecture Model (SAM) Status codes. Taken from SAM-3 draft
+ * T10/1561-D Revision 4 Draft dated 7th November 2002.
+ */
+#define SAM_STAT_GOOD 0x00
+#define SAM_STAT_CHECK_CONDITION 0x02
+#define SAM_STAT_CONDITION_MET 0x04
+#define SAM_STAT_BUSY 0x08
+#define SAM_STAT_INTERMEDIATE 0x10
+#define SAM_STAT_INTERMEDIATE_CONDITION_MET 0x14
+#define SAM_STAT_RESERVATION_CONFLICT 0x18
+#define SAM_STAT_COMMAND_TERMINATED 0x22 /* obsolete in SAM-3 */
+#define SAM_STAT_TASK_SET_FULL 0x28
+#define SAM_STAT_ACA_ACTIVE 0x30
+#define SAM_STAT_TASK_ABORTED 0x40
+
+/*
+ * Status codes. These are deprecated as they are shifted 1 bit right
+ * from those found in the SCSI standards. This causes confusion for
+ * applications that are ported to several OSes. Prefer SAM Status codes
+ * above.
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define COMMAND_TERMINATED 0x11
+#define QUEUE_FULL 0x14
+#define ACA_ACTIVE 0x18
+#define TASK_ABORTED 0x20
+
+#define STATUS_MASK 0xfe
+
+/*
+ * SENSE KEYS
+ */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define BLANK_CHECK 0x08
+#define COPY_ABORTED 0x0a
+#define ABORTED_COMMAND 0x0b
+#define VOLUME_OVERFLOW 0x0d
+#define MISCOMPARE 0x0e
+
+
+/*
+ * DEVICE TYPES
+ * Please keep them in 0x%02x format for $MODALIAS to work
+ */
+
+#define TYPE_DISK 0x00
+#define TYPE_TAPE 0x01
+#define TYPE_PRINTER 0x02
+#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
+#define TYPE_WORM 0x04 /* Treated as ROM by our system */
+#define TYPE_ROM 0x05
+#define TYPE_SCANNER 0x06
+#define TYPE_MOD 0x07 /* Magneto-optical disk -
+ * - treated as TYPE_DISK */
+#define TYPE_MEDIUM_CHANGER 0x08
+#define TYPE_COMM 0x09 /* Communications device */
+#define TYPE_RAID 0x0c
+#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
+#define TYPE_RBC 0x0e
+#define TYPE_OSD 0x11
+#define TYPE_ZBC 0x14
+#define TYPE_WLUN 0x1e /* well-known logical unit */
+#define TYPE_NO_LUN 0x7f
+
+/* SCSI protocols; these are taken from SPC-3 section 7.5 */
+enum scsi_protocol {
+ SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
+ SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
+ SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
+ SCSI_PROTOCOL_SBP = 3, /* firewire */
+ SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
+ SCSI_PROTOCOL_ISCSI = 5,
+ SCSI_PROTOCOL_SAS = 6,
+ SCSI_PROTOCOL_ADT = 7, /* Media Changers */
+ SCSI_PROTOCOL_ATA = 8,
+ SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
+};
+
+/*
+ * ScsiLun: 8 byte LUN.
+ */
+struct scsi_lun {
+ __u8 scsi_lun[8];
+};
+
+
+#endif /* _SCSI_PROTO_H_ */
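To make the note about the deprecated status codes above concrete: they are the SAM values shifted right by one bit, so a sketch of the conversion (not part of this header) looks like the following, where CHECK_CONDITION (0x01) maps back to SAM_STAT_CHECK_CONDITION (0x02):

#include <linux/types.h>
#include <scsi/scsi_proto.h>

/* Illustrative only: recover the on-the-wire SAM status from the
 * deprecated driver-internal value by shifting left one bit. */
static inline u8 example_to_sam_status(u8 deprecated_status)
{
	return deprecated_status << 1;	/* e.g. CHECK_CONDITION -> SAM_STAT_CHECK_CONDITION */
}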
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index 9708b28..b27977e 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -66,7 +66,8 @@ static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
* devices on the shared host (for libata)
*/
if (!shost->bqt) {
- shost->bqt = blk_init_tags(depth);
+ shost->bqt = blk_init_tags(depth,
+ shost->hostt->tag_alloc_policy);
if (!shost->bqt)
return -ENOMEM;
}
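For illustration (not part of this patch), a low-level driver that wants a non-default tag order would set the new scsi_host_template field, which scsi_init_shared_tag_map() now forwards to blk_init_tags(); the BLK_TAG_ALLOC_* constants are assumed to come from <linux/blkdev.h>:

#include <linux/blkdev.h>
#include <scsi/scsi_host.h>

/* Hypothetical host template: request round-robin tag allocation. */
static struct scsi_host_template example_sht = {
	.name			= "example",
	.can_queue		= 32,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};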
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 007a0bc..784bc2c 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -135,6 +135,7 @@ enum fc_vport_state {
#define FC_PORTSPEED_40GBIT 0x100
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
+#define FC_PORTSPEED_25GBIT 0x800
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index cdb05dd1..d40d3ef 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -119,6 +119,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
extern void srp_rport_del(struct srp_rport *);
extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
int dev_loss_tmo);
+int srp_parse_tmo(int *tmo, const char *buf);
extern int srp_reconnect_rport(struct srp_rport *rport);
extern void srp_start_tl_fail_timers(struct srp_rport *rport);
extern void srp_remove_host(struct Scsi_Host *);
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 1ae84db..5be834d 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -42,6 +42,7 @@
*/
#include <linux/types.h>
+#include <scsi/scsi.h>
enum {
SRP_LOGIN_REQ = 0x00,
@@ -179,7 +180,7 @@ struct srp_tsk_mgmt {
u8 reserved1[6];
u64 tag;
u8 reserved2[4];
- __be64 lun __attribute__((packed));
+ struct scsi_lun lun;
u8 reserved3[2];
u8 tsk_mgmt_func;
u8 reserved4;
@@ -200,7 +201,7 @@ struct srp_cmd {
u8 data_in_desc_cnt;
u64 tag;
u8 reserved2[4];
- __be64 lun __attribute__((packed));
+ struct scsi_lun lun;
u8 reserved3;
u8 task_attr;
u8 reserved4;
@@ -265,7 +266,7 @@ struct srp_aer_req {
__be32 req_lim_delta;
u64 tag;
u32 reserved2;
- __be64 lun;
+ struct scsi_lun lun;
__be32 sense_data_len;
u32 reserved3;
u8 sense_data[0];
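A sketch (not from this patch) of what the type change means for SRP initiators: the lun fields are now filled with int_to_scsilun() rather than by open-coding the big-endian shift the old packed __be64 required; the header providing int_to_scsilun() is an assumption here:

#include <scsi/scsi_common.h>	/* assumed home of int_to_scsilun() after this series */
#include <scsi/scsi_device.h>
#include <scsi/srp.h>

/* Illustrative only: encode the device LUN into an SRP_CMD request. */
static void example_set_srp_lun(struct srp_cmd *cmd, struct scsi_device *sdev)
{
	int_to_scsilun(sdev->lun, &cmd->lun);
}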
diff --git a/include/soc/at91/at91rm9200_sdramc.h b/include/soc/at91/at91rm9200_sdramc.h
deleted file mode 100644
index aa047f45..0000000
--- a/include/soc/at91/at91rm9200_sdramc.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * Memory Controllers (SDRAMC only) - System peripherals registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91RM9200_SDRAMC_H
-#define AT91RM9200_SDRAMC_H
-
-/* SDRAM Controller registers */
-#define AT91RM9200_SDRAMC_MR 0x90 /* Mode Register */
-#define AT91RM9200_SDRAMC_MODE (0xf << 0) /* Command Mode */
-#define AT91RM9200_SDRAMC_MODE_NORMAL (0 << 0)
-#define AT91RM9200_SDRAMC_MODE_NOP (1 << 0)
-#define AT91RM9200_SDRAMC_MODE_PRECHARGE (2 << 0)
-#define AT91RM9200_SDRAMC_MODE_LMR (3 << 0)
-#define AT91RM9200_SDRAMC_MODE_REFRESH (4 << 0)
-#define AT91RM9200_SDRAMC_DBW (1 << 4) /* Data Bus Width */
-#define AT91RM9200_SDRAMC_DBW_32 (0 << 4)
-#define AT91RM9200_SDRAMC_DBW_16 (1 << 4)
-
-#define AT91RM9200_SDRAMC_TR 0x94 /* Refresh Timer Register */
-#define AT91RM9200_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Count */
-
-#define AT91RM9200_SDRAMC_CR 0x98 /* Configuration Register */
-#define AT91RM9200_SDRAMC_NC (3 << 0) /* Number of Column Bits */
-#define AT91RM9200_SDRAMC_NC_8 (0 << 0)
-#define AT91RM9200_SDRAMC_NC_9 (1 << 0)
-#define AT91RM9200_SDRAMC_NC_10 (2 << 0)
-#define AT91RM9200_SDRAMC_NC_11 (3 << 0)
-#define AT91RM9200_SDRAMC_NR (3 << 2) /* Number of Row Bits */
-#define AT91RM9200_SDRAMC_NR_11 (0 << 2)
-#define AT91RM9200_SDRAMC_NR_12 (1 << 2)
-#define AT91RM9200_SDRAMC_NR_13 (2 << 2)
-#define AT91RM9200_SDRAMC_NB (1 << 4) /* Number of Banks */
-#define AT91RM9200_SDRAMC_NB_2 (0 << 4)
-#define AT91RM9200_SDRAMC_NB_4 (1 << 4)
-#define AT91RM9200_SDRAMC_CAS (3 << 5) /* CAS Latency */
-#define AT91RM9200_SDRAMC_CAS_2 (2 << 5)
-#define AT91RM9200_SDRAMC_TWR (0xf << 7) /* Write Recovery Delay */
-#define AT91RM9200_SDRAMC_TRC (0xf << 11) /* Row Cycle Delay */
-#define AT91RM9200_SDRAMC_TRP (0xf << 15) /* Row Precharge Delay */
-#define AT91RM9200_SDRAMC_TRCD (0xf << 19) /* Row to Column Delay */
-#define AT91RM9200_SDRAMC_TRAS (0xf << 23) /* Active to Precharge Delay */
-#define AT91RM9200_SDRAMC_TXSR (0xf << 27) /* Exit Self Refresh to Active Delay */
-
-#define AT91RM9200_SDRAMC_SRR 0x9c /* Self Refresh Register */
-#define AT91RM9200_SDRAMC_LPR 0xa0 /* Low Power Register */
-#define AT91RM9200_SDRAMC_IER 0xa4 /* Interrupt Enable Register */
-#define AT91RM9200_SDRAMC_IDR 0xa8 /* Interrupt Disable Register */
-#define AT91RM9200_SDRAMC_IMR 0xac /* Interrupt Mask Register */
-#define AT91RM9200_SDRAMC_ISR 0xb0 /* Interrupt Status Register */
-
-#endif
diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h
index 0210797..dc10c52 100644
--- a/include/soc/at91/at91sam9_ddrsdr.h
+++ b/include/soc/at91/at91sam9_ddrsdr.h
@@ -92,7 +92,7 @@
#define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */
#define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */
-#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */
+#define AT91_DDRSDRC_MD (7 << 0) /* Memory Device Type */
#define AT91_DDRSDRC_MD_SDR 0
#define AT91_DDRSDRC_MD_LOW_POWER_SDR 1
#define AT91_DDRSDRC_MD_LOW_POWER_DDR 3
diff --git a/include/soc/imx/revision.h b/include/soc/imx/revision.h
new file mode 100644
index 0000000..9ea3469
--- /dev/null
+++ b/include/soc/imx/revision.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_IMX_REVISION_H__
+#define __SOC_IMX_REVISION_H__
+
+#define IMX_CHIP_REVISION_1_0 0x10
+#define IMX_CHIP_REVISION_1_1 0x11
+#define IMX_CHIP_REVISION_1_2 0x12
+#define IMX_CHIP_REVISION_1_3 0x13
+#define IMX_CHIP_REVISION_1_4 0x14
+#define IMX_CHIP_REVISION_1_5 0x15
+#define IMX_CHIP_REVISION_2_0 0x20
+#define IMX_CHIP_REVISION_2_1 0x21
+#define IMX_CHIP_REVISION_2_2 0x22
+#define IMX_CHIP_REVISION_2_3 0x23
+#define IMX_CHIP_REVISION_3_0 0x30
+#define IMX_CHIP_REVISION_3_1 0x31
+#define IMX_CHIP_REVISION_3_2 0x32
+#define IMX_CHIP_REVISION_3_3 0x33
+#define IMX_CHIP_REVISION_UNKNOWN 0xff
+
+int mx27_revision(void);
+int mx31_revision(void);
+int mx35_revision(void);
+int mx51_revision(void);
+int mx53_revision(void);
+
+unsigned int imx_get_soc_revision(void);
+void imx_print_silicon_rev(const char *cpu, int srev);
+
+#endif /* __SOC_IMX_REVISION_H__ */
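A minimal sketch of how a consumer might use these declarations (not part of this header), e.g. gating an erratum workaround on the reported silicon revision:

#include <linux/types.h>
#include <soc/imx/revision.h>

/* Illustrative only: anything older than silicon rev 2.0 gets the workaround. */
static bool example_needs_workaround(void)
{
	return imx_get_soc_revision() < IMX_CHIP_REVISION_2_0;
}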
diff --git a/include/soc/imx/timer.h b/include/soc/imx/timer.h
new file mode 100644
index 0000000..bbbafd6
--- /dev/null
+++ b/include/soc/imx/timer.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_IMX_TIMER_H__
+#define __SOC_IMX_TIMER_H__
+
+enum imx_gpt_type {
+ GPT_TYPE_IMX1, /* i.MX1 */
+ GPT_TYPE_IMX21, /* i.MX21/27 */
+ GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */
+ GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */
+};
+
+/*
+ * This is a stop-gap solution for clock drivers like imx1/imx21 which call
+ * mxc_timer_init() to initialize the timer for non-DT boot. It can be removed
+ * once this legacy non-DT support is converted or dropped.
+ */
+void mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type);
+
+#endif /* __SOC_IMX_TIMER_H__ */
diff --git a/include/soc/sa1100/pwer.h b/include/soc/sa1100/pwer.h
new file mode 100644
index 0000000..15a545b
--- /dev/null
+++ b/include/soc/sa1100/pwer.h
@@ -0,0 +1,15 @@
+#ifndef SOC_SA1100_PWER_H
+#define SOC_SA1100_PWER_H
+
+/*
+ * Copyright (C) 2015, Dmitry Eremin-Solenikov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+int sa11x0_gpio_set_wake(unsigned int gpio, unsigned int on);
+int sa11x0_sc_set_wake(unsigned int irq, unsigned int on);
+
+#endif
diff --git a/include/soc/tegra/emc.h b/include/soc/tegra/emc.h
new file mode 100644
index 0000000..f6db33b
--- /dev/null
+++ b/include/soc/tegra/emc.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2014 NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_TEGRA_EMC_H__
+#define __SOC_TEGRA_EMC_H__
+
+struct tegra_emc;
+
+int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate);
+void tegra_emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate);
+
+#endif /* __SOC_TEGRA_EMC_H__ */
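As a sketch under the obvious assumption about call ordering (not stated in this header, so treat it as illustrative), an EMC rate change would be bracketed by the two calls declared above:

#include <soc/tegra/emc.h>

static void example_emc_set_rate(struct tegra_emc *emc, unsigned long rate)
{
	if (tegra_emc_prepare_timing_change(emc, rate))
		return;
	/* ... hardware-specific register programming would happen here ... */
	tegra_emc_complete_timing_change(emc, rate);
}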
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 8e12494..b019e34 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -21,6 +21,7 @@
#define TEGRA30 0x30
#define TEGRA114 0x35
#define TEGRA124 0x40
+#define TEGRA132 0x13
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
@@ -55,6 +56,7 @@ struct tegra_sku_info {
};
u32 tegra_read_straps(void);
+u32 tegra_read_ram_code(void);
u32 tegra_read_chipid(void);
int tegra_fuse_readl(unsigned long offset, u32 *value);
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 63deb8d..1ab2813 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -20,6 +20,12 @@ struct tegra_smmu_enable {
unsigned int bit;
};
+struct tegra_mc_timing {
+ unsigned long rate;
+
+ u32 *emem_data;
+};
+
/* latency allowance */
struct tegra_mc_la {
unsigned int reg;
@@ -40,6 +46,7 @@ struct tegra_mc_client {
};
struct tegra_smmu_swgroup {
+ const char *name;
unsigned int swgroup;
unsigned int reg;
};
@@ -71,6 +78,7 @@ struct tegra_smmu;
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
const struct tegra_smmu_soc *soc,
struct tegra_mc *mc);
+void tegra_smmu_remove(struct tegra_smmu *smmu);
#else
static inline struct tegra_smmu *
tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc,
@@ -78,13 +86,17 @@ tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc,
{
return NULL;
}
+
+static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
+{
+}
#endif
struct tegra_mc_soc {
const struct tegra_mc_client *clients;
unsigned int num_clients;
- const unsigned int *emem_regs;
+ const unsigned long *emem_regs;
unsigned int num_emem_regs;
unsigned int num_address_bits;
@@ -102,6 +114,12 @@ struct tegra_mc {
const struct tegra_mc_soc *soc;
unsigned long tick;
+
+ struct tegra_mc_timing *timings;
+ unsigned int num_timings;
};
+void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
+unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
+
#endif /* __SOC_TEGRA_MC_H__ */
diff --git a/include/soc/tegra/pm.h b/include/soc/tegra/pm.h
index 30fe207..0390910 100644
--- a/include/soc/tegra/pm.h
+++ b/include/soc/tegra/pm.h
@@ -17,7 +17,7 @@ enum tegra_suspend_mode {
TEGRA_MAX_SUSPEND_MODE,
};
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
enum tegra_suspend_mode
tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode);
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 65a9327..f5c0de4 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -26,8 +26,6 @@
struct clk;
struct reset_control;
-void tegra_pmc_restart(enum reboot_mode mode, const char *cmd);
-
#ifdef CONFIG_PM_SLEEP
enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void);
void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index d315a08..0e9d75b 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -608,7 +608,9 @@ struct ac97_quirk {
int type; /* quirk type above */
};
-int snd_ac97_tune_hardware(struct snd_ac97 *ac97, struct ac97_quirk *quirk, const char *override);
+int snd_ac97_tune_hardware(struct snd_ac97 *ac97,
+ const struct ac97_quirk *quirk,
+ const char *override);
int snd_ac97_set_rate(struct snd_ac97 *ac97, int reg, unsigned int rate);
/*
diff --git a/include/sound/ad1816a.h b/include/sound/ad1816a.h
index abdf609..f2d3a6d 100644
--- a/include/sound/ad1816a.h
+++ b/include/sound/ad1816a.h
@@ -170,10 +170,9 @@ extern int snd_ad1816a_create(struct snd_card *card, unsigned long port,
int irq, int dma1, int dma2,
struct snd_ad1816a *chip);
-extern int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device, struct snd_pcm **rpcm);
+extern int snd_ad1816a_pcm(struct snd_ad1816a *chip, int device);
extern int snd_ad1816a_mixer(struct snd_ad1816a *chip);
-extern int snd_ad1816a_timer(struct snd_ad1816a *chip, int device,
- struct snd_timer **rtimer);
+extern int snd_ad1816a_timer(struct snd_ad1816a *chip, int device);
#ifdef CONFIG_PM
extern void snd_ad1816a_suspend(struct snd_ad1816a *chip);
extern void snd_ad1816a_resume(struct snd_ad1816a *chip);
diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
index 2609048..58c1456 100644
--- a/include/sound/ak4113.h
+++ b/include/sound/ak4113.h
@@ -286,7 +286,8 @@ struct ak4113 {
ak4113_write_t *write;
ak4113_read_t *read;
void *private_data;
- unsigned int init:1;
+ atomic_t wq_processing;
+ struct mutex reinit_mutex;
spinlock_t lock;
unsigned char regmap[AK4113_WRITABLE_REGS];
struct snd_kcontrol *kctls[AK4113_CONTROLS];
@@ -317,5 +318,13 @@ int snd_ak4113_build(struct ak4113 *ak4113,
int snd_ak4113_external_rate(struct ak4113 *ak4113);
int snd_ak4113_check_rate_and_errors(struct ak4113 *ak4113, unsigned int flags);
+#ifdef CONFIG_PM
+void snd_ak4113_suspend(struct ak4113 *chip);
+void snd_ak4113_resume(struct ak4113 *chip);
+#else
+static inline void snd_ak4113_suspend(struct ak4113 *chip) {}
+static inline void snd_ak4113_resume(struct ak4113 *chip) {}
+#endif
+
#endif /* __SOUND_AK4113_H */
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 52f02a6..b6feb7e 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -168,7 +168,8 @@ struct ak4114 {
ak4114_write_t * write;
ak4114_read_t * read;
void * private_data;
- unsigned int init: 1;
+ atomic_t wq_processing;
+ struct mutex reinit_mutex;
spinlock_t lock;
unsigned char regmap[6];
unsigned char txcsb[5];
@@ -199,5 +200,13 @@ int snd_ak4114_build(struct ak4114 *ak4114,
int snd_ak4114_external_rate(struct ak4114 *ak4114);
int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags);
+#ifdef CONFIG_PM
+void snd_ak4114_suspend(struct ak4114 *chip);
+void snd_ak4114_resume(struct ak4114 *chip);
+#else
+static inline void snd_ak4114_suspend(struct ak4114 *chip) {}
+static inline void snd_ak4114_resume(struct ak4114 *chip) {}
+#endif
+
#endif /* __SOUND_AK4114_H */
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 396e8f7..fa1d055 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/sched.h>
+#include <sound/core.h>
#include <sound/compress_offload.h>
#include <sound/asound.h>
#include <sound/pcm.h>
@@ -69,7 +70,7 @@ struct snd_compr_runtime {
* @device: device pointer
* @direction: stream direction, playback/recording
* @metadata_set: metadata set flag, true when set
- * @next_track: has userspace signall next track transistion, true when set
+ * @next_track: has userspace signal next track transition, true when set
* @private_data: pointer to DSP private data
*/
struct snd_compr_stream {
@@ -94,7 +95,7 @@ struct snd_compr_stream {
* and the stream properties
* @get_params: retrieve the codec parameters, mandatory
* @set_metadata: Set the metadata values for a stream
- * @get_metadata: retreives the requested metadata values from stream
+ * @get_metadata: retrieves the requested metadata values from stream
* @trigger: Trigger operations like start, pause, resume, drain, stop.
* This callback is mandatory
* @pointer: Retrieve current h/w pointer information. Mandatory
@@ -134,7 +135,7 @@ struct snd_compr_ops {
/**
* struct snd_compr: Compressed device
* @name: DSP device name
- * @dev: Device pointer
+ * @dev: associated device instance
* @ops: pointer to DSP callbacks
* @private_data: pointer to DSP pvt data
* @card: sound card pointer
@@ -144,7 +145,7 @@ struct snd_compr_ops {
*/
struct snd_compr {
const char *name;
- struct device *dev;
+ struct device dev;
struct snd_compr_ops *ops;
void *private_data;
struct snd_card *card;
diff --git a/include/sound/control.h b/include/sound/control.h
index 0426139..21d047f 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -93,12 +93,17 @@ struct snd_kctl_event {
struct pid;
+enum {
+ SND_CTL_SUBDEV_PCM,
+ SND_CTL_SUBDEV_RAWMIDI,
+ SND_CTL_SUBDEV_ITEMS,
+};
+
struct snd_ctl_file {
struct list_head list; /* list of all control files */
struct snd_card *card;
struct pid *pid;
- int prefer_pcm_subdevice;
- int prefer_rawmidi_subdevice;
+ int preferred_subdevice[SND_CTL_SUBDEV_ITEMS];
wait_queue_head_t change_sleep;
spinlock_t read_lock;
struct fasync_struct *fasync;
@@ -138,6 +143,8 @@ int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn);
#define snd_ctl_unregister_ioctl_compat(fcn)
#endif
+int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
+
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
return id->numid - kctl->id.numid;
@@ -220,7 +227,7 @@ snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave)
* Add a virtual slave control to the given master.
* Unlike snd_ctl_add_slave(), the element added via this function
* is supposed to have volatile values, and get callback is called
- * at each time quried from the master.
+ * at each time queried from the master.
*
* When the control peeks the hardware values directly and the value
* can be changed by other means than the put callback of the element,
@@ -245,7 +252,7 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
* Helper functions for jack-detection controls
*/
struct snd_kcontrol *
-snd_kctl_jack_new(const char *name, int idx, void *private_data);
+snd_kctl_jack_new(const char *name, struct snd_card *card);
void snd_kctl_jack_report(struct snd_card *card,
struct snd_kcontrol *kctl, bool status);
diff --git a/include/sound/core.h b/include/sound/core.h
index 1df3f2f..cdfecaf 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -109,6 +109,7 @@ struct snd_card {
private data */
struct list_head devices; /* devices */
+ struct device ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
struct rw_semaphore controls_rwsem; /* controls list lock */
rwlock_t ctl_files_rwlock; /* ctl_files list lock */
@@ -131,6 +132,7 @@ struct snd_card {
struct completion *release_completion;
struct device *dev; /* device assigned to this card */
struct device card_dev; /* cardX object for sysfs */
+ const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
bool registered; /* card_dev is registered? */
#ifdef CONFIG_PM
@@ -206,43 +208,13 @@ extern struct class *sound_class;
void snd_request_card(int card);
-int snd_register_device_for_dev(int type, struct snd_card *card,
- int dev,
- const struct file_operations *f_ops,
- void *private_data,
- const char *name,
- struct device *device);
+void snd_device_initialize(struct device *dev, struct snd_card *card);
-/**
- * snd_register_device - Register the ALSA device file for the card
- * @type: the device type, SNDRV_DEVICE_TYPE_XXX
- * @card: the card instance
- * @dev: the device index
- * @f_ops: the file operations
- * @private_data: user pointer for f_ops->open()
- * @name: the device file name
- *
- * Registers an ALSA device file for the given card.
- * The operators have to be set in reg parameter.
- *
- * This function uses the card's device pointer to link to the
- * correct &struct device.
- *
- * Return: Zero if successful, or a negative error code on failure.
- */
-static inline int snd_register_device(int type, struct snd_card *card, int dev,
- const struct file_operations *f_ops,
- void *private_data,
- const char *name)
-{
- return snd_register_device_for_dev(type, card, dev, f_ops,
- private_data, name,
- snd_card_get_device_link(card));
-}
-
-int snd_unregister_device(int type, struct snd_card *card, int dev);
+int snd_register_device(int type, struct snd_card *card, int dev,
+ const struct file_operations *f_ops,
+ void *private_data, struct device *device);
+int snd_unregister_device(struct device *dev);
void *snd_lookup_minor_data(unsigned int minor, int type);
-struct device *snd_get_device(int type, struct snd_card *card, int dev);
#ifdef CONFIG_SND_OSSEMUL
int snd_register_oss_device(int type, struct snd_card *card, int dev,
@@ -252,16 +224,13 @@ void *snd_lookup_oss_minor_data(unsigned int minor, int type);
#endif
int snd_minor_info_init(void);
-int snd_minor_info_done(void);
/* sound_oss.c */
#ifdef CONFIG_SND_OSSEMUL
int snd_minor_info_oss_init(void);
-int snd_minor_info_oss_done(void);
#else
static inline int snd_minor_info_oss_init(void) { return 0; }
-static inline int snd_minor_info_oss_done(void) { return 0; }
#endif
/* memory.c */
@@ -290,7 +259,8 @@ int snd_card_free_when_closed(struct snd_card *card);
void snd_card_set_id(struct snd_card *card, const char *id);
int snd_card_register(struct snd_card *card);
int snd_card_info_init(void);
-int snd_card_info_done(void);
+int snd_card_add_dev_attr(struct snd_card *card,
+ const struct attribute_group *group);
int snd_component_add(struct snd_card *card, const char *component);
int snd_card_file_add(struct snd_card *card, struct file *file);
int snd_card_file_remove(struct snd_card *card, struct file *file);
@@ -304,7 +274,8 @@ int snd_device_new(struct snd_card *card, enum snd_device_type type,
void *device_data, struct snd_device_ops *ops);
int snd_device_register(struct snd_card *card, void *device_data);
int snd_device_register_all(struct snd_card *card);
-int snd_device_disconnect_all(struct snd_card *card);
+void snd_device_disconnect(struct snd_card *card, void *device_data);
+void snd_device_disconnect_all(struct snd_card *card);
void snd_device_free(struct snd_card *card, void *device_data);
void snd_device_free_all(struct snd_card *card);
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
index 26f406e..3a8fca9 100644
--- a/include/sound/designware_i2s.h
+++ b/include/sound/designware_i2s.h
@@ -1,5 +1,5 @@
/*
- * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+ * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index eb73a3a..f86ef5e 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -91,11 +91,6 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
*/
#define SND_DMAENGINE_PCM_FLAG_NO_DT BIT(1)
/*
- * The platforms dmaengine driver does not support reporting the amount of
- * bytes that are still left to transfer.
- */
-#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(2)
-/*
* The PCM is half duplex and the DMA channel is shared between capture and
* playback.
*/
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index c46908c..5bd1346 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -33,15 +33,16 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <uapi/sound/emu10k1.h>
/* ------------------- DEFINES -------------------- */
#define EMUPAGESIZE 4096
#define MAXREQVOICES 8
-#define MAXPAGES 8192
+#define MAXPAGES0 4096 /* 32 bit mode */
+#define MAXPAGES1 8192 /* 31 bit mode */
#define RESERVED 0
#define NUM_MIDI 16
#define NUM_G 64 /* use all channels */
@@ -50,8 +51,7 @@
/* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
#define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
-#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
- /* See ALSA bug #1276 - rlrevell */
+#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
#define TMEMSIZE 256*1024
#define TMEMSIZEREG 4
@@ -466,8 +466,11 @@
#define MAPB 0x0d /* Cache map B */
-#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
-#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
+#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
+#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
+
+#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
+#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
/* 0x0e, 0x0f: Not used */
@@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
unsigned short model; /* subsystem id */
unsigned int card_type; /* EMU10K1_CARD_* */
unsigned int ecard_ctrl; /* ecard control bits */
+ unsigned int address_mode; /* address mode */
unsigned long dma_mask; /* PCI DMA mask */
unsigned int delay_pcm_irq; /* in samples */
int max_cache_pages; /* max memory size / PAGE_SIZE */
@@ -1809,17 +1813,17 @@ int snd_emu10k1_create(struct snd_card *card,
uint subsystem,
struct snd_emu10k1 ** remu);
-int snd_emu10k1_pcm(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
-int snd_emu10k1_pcm_mic(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
-int snd_emu10k1_pcm_efx(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
-int snd_p16v_pcm(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
+int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device);
+int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device);
+int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device);
+int snd_p16v_pcm(struct snd_emu10k1 *emu, int device);
int snd_p16v_free(struct snd_emu10k1 * emu);
int snd_p16v_mixer(struct snd_emu10k1 * emu);
-int snd_emu10k1_pcm_multi(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
-int snd_emu10k1_fx8010_pcm(struct snd_emu10k1 * emu, int device, struct snd_pcm ** rpcm);
+int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device);
+int snd_emu10k1_fx8010_pcm(struct snd_emu10k1 *emu, int device);
int snd_emu10k1_mixer(struct snd_emu10k1 * emu, int pcm_device, int multi_device);
int snd_emu10k1_timer(struct snd_emu10k1 * emu, int device);
-int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device, struct snd_hwdep ** rhwdep);
+int snd_emu10k1_fx8010_new(struct snd_emu10k1 *emu, int device);
irqreturn_t snd_emu10k1_interrupt(int irq, void *dev_id);
diff --git a/include/sound/emux_synth.h b/include/sound/emux_synth.h
index fb81f37..a0a40b7 100644
--- a/include/sound/emux_synth.h
+++ b/include/sound/emux_synth.h
@@ -125,7 +125,7 @@ struct snd_emux {
struct snd_util_memhdr *memhdr; /* memory chunk information */
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_SND_PROC_FS
struct snd_info_entry *proc;
#endif
diff --git a/include/sound/es1688.h b/include/sound/es1688.h
index 1d636a2..b34f23a 100644
--- a/include/sound/es1688.h
+++ b/include/sound/es1688.h
@@ -115,8 +115,7 @@ int snd_es1688_create(struct snd_card *card,
int mpu_irq,
int dma8,
unsigned short hardware);
-int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip, int device,
- struct snd_pcm **rpcm);
+int snd_es1688_pcm(struct snd_card *card, struct snd_es1688 *chip, int device);
int snd_es1688_mixer(struct snd_card *card, struct snd_es1688 *chip);
int snd_es1688_reset(struct snd_es1688 *chip);
diff --git a/include/sound/gus.h b/include/sound/gus.h
index 42905d8..07c116fe 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -27,7 +27,7 @@
#include <sound/timer.h>
#include <sound/seq_midi_emul.h>
#include <sound/seq_device.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* IO ports */
@@ -591,7 +591,7 @@ int snd_gf1_new_mixer(struct snd_gus_card * gus);
/* gus_pcm.c */
-int snd_gf1_pcm_new(struct snd_gus_card * gus, int pcm_dev, int control_index, struct snd_pcm ** rpcm);
+int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index);
#ifdef CONFIG_SND_DEBUG
extern void snd_gf1_print_voice_registers(struct snd_gus_card * gus);
@@ -620,7 +620,7 @@ void snd_gus_irq_profile_init(struct snd_gus_card *gus);
/* gus_uart.c */
-int snd_gf1_rawmidi_new(struct snd_gus_card * gus, int device, struct snd_rawmidi **rrawmidi);
+int snd_gf1_rawmidi_new(struct snd_gus_card *gus, int device);
/* gus_dram.c */
int snd_gus_dram_write(struct snd_gus_card *gus, char __user *ptr,
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
new file mode 100644
index 0000000..adb5ba5
--- /dev/null
+++ b/include/sound/hda_i915.h
@@ -0,0 +1,36 @@
+/*
+ * HD-Audio helpers to sync with i915 driver
+ */
+#ifndef __SOUND_HDA_I915_H
+#define __SOUND_HDA_I915_H
+
+#ifdef CONFIG_SND_HDA_I915
+int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
+int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+int snd_hdac_get_display_clk(struct hdac_bus *bus);
+int snd_hdac_i915_init(struct hdac_bus *bus);
+int snd_hdac_i915_exit(struct hdac_bus *bus);
+#else
+static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+{
+ return 0;
+}
+static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+{
+ return 0;
+}
+static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
+{
+ return 0;
+}
+static inline int snd_hdac_i915_init(struct hdac_bus *bus)
+{
+ return -ENODEV;
+}
+static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
+{
+ return 0;
+}
+#endif
+
+#endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
new file mode 100644
index 0000000..ae995e5
--- /dev/null
+++ b/include/sound/hda_register.h
@@ -0,0 +1,244 @@
+/*
+ * HD-audio controller (Azalia) registers and helpers
+ *
+ * For traditional reasons, we still use azx_ prefix here
+ */
+
+#ifndef __SOUND_HDA_REGISTER_H
+#define __SOUND_HDA_REGISTER_H
+
+#include <linux/io.h>
+#include <sound/hdaudio.h>
+
+#define AZX_REG_GCAP 0x00
+#define AZX_GCAP_64OK (1 << 0) /* 64bit address support */
+#define AZX_GCAP_NSDO (3 << 1) /* # of serial data out signals */
+#define AZX_GCAP_BSS (31 << 3) /* # of bidirectional streams */
+#define AZX_GCAP_ISS (15 << 8) /* # of input streams */
+#define AZX_GCAP_OSS (15 << 12) /* # of output streams */
+#define AZX_REG_VMIN 0x02
+#define AZX_REG_VMAJ 0x03
+#define AZX_REG_OUTPAY 0x04
+#define AZX_REG_INPAY 0x06
+#define AZX_REG_GCTL 0x08
+#define AZX_GCTL_RESET (1 << 0) /* controller reset */
+#define AZX_GCTL_FCNTRL (1 << 1) /* flush control */
+#define AZX_GCTL_UNSOL (1 << 8) /* accept unsol. response enable */
+#define AZX_REG_WAKEEN 0x0c
+#define AZX_REG_STATESTS 0x0e
+#define AZX_REG_GSTS 0x10
+#define AZX_GSTS_FSTS (1 << 1) /* flush status */
+#define AZX_REG_GCAP2 0x12
+#define AZX_REG_LLCH 0x14
+#define AZX_REG_OUTSTRMPAY 0x18
+#define AZX_REG_INSTRMPAY 0x1A
+#define AZX_REG_INTCTL 0x20
+#define AZX_REG_INTSTS 0x24
+#define AZX_REG_WALLCLK 0x30 /* 24Mhz source */
+#define AZX_REG_OLD_SSYNC 0x34 /* SSYNC for old ICH */
+#define AZX_REG_SSYNC 0x38
+#define AZX_REG_CORBLBASE 0x40
+#define AZX_REG_CORBUBASE 0x44
+#define AZX_REG_CORBWP 0x48
+#define AZX_REG_CORBRP 0x4a
+#define AZX_CORBRP_RST (1 << 15) /* read pointer reset */
+#define AZX_REG_CORBCTL 0x4c
+#define AZX_CORBCTL_RUN (1 << 1) /* enable DMA */
+#define AZX_CORBCTL_CMEIE (1 << 0) /* enable memory error irq */
+#define AZX_REG_CORBSTS 0x4d
+#define AZX_CORBSTS_CMEI (1 << 0) /* memory error indication */
+#define AZX_REG_CORBSIZE 0x4e
+
+#define AZX_REG_RIRBLBASE 0x50
+#define AZX_REG_RIRBUBASE 0x54
+#define AZX_REG_RIRBWP 0x58
+#define AZX_RIRBWP_RST (1 << 15) /* write pointer reset */
+#define AZX_REG_RINTCNT 0x5a
+#define AZX_REG_RIRBCTL 0x5c
+#define AZX_RBCTL_IRQ_EN (1 << 0) /* enable IRQ */
+#define AZX_RBCTL_DMA_EN (1 << 1) /* enable DMA */
+#define AZX_RBCTL_OVERRUN_EN (1 << 2) /* enable overrun irq */
+#define AZX_REG_RIRBSTS 0x5d
+#define AZX_RBSTS_IRQ (1 << 0) /* response irq */
+#define AZX_RBSTS_OVERRUN (1 << 2) /* overrun irq */
+#define AZX_REG_RIRBSIZE 0x5e
+
+#define AZX_REG_IC 0x60
+#define AZX_REG_IR 0x64
+#define AZX_REG_IRS 0x68
+#define AZX_IRS_VALID (1<<1)
+#define AZX_IRS_BUSY (1<<0)
+
+#define AZX_REG_DPLBASE 0x70
+#define AZX_REG_DPUBASE 0x74
+#define AZX_DPLBASE_ENABLE 0x1 /* Enable position buffer */
+
+/* SD offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
+enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
+
+/* stream register offsets from stream base */
+#define AZX_REG_SD_CTL 0x00
+#define AZX_REG_SD_STS 0x03
+#define AZX_REG_SD_LPIB 0x04
+#define AZX_REG_SD_CBL 0x08
+#define AZX_REG_SD_LVI 0x0c
+#define AZX_REG_SD_FIFOW 0x0e
+#define AZX_REG_SD_FIFOSIZE 0x10
+#define AZX_REG_SD_FORMAT 0x12
+#define AZX_REG_SD_FIFOL 0x14
+#define AZX_REG_SD_BDLPL 0x18
+#define AZX_REG_SD_BDLPU 0x1c
+
+/* Haswell/Broadwell display HD-A controller Extended Mode registers */
+#define AZX_REG_HSW_EM4 0x100c
+#define AZX_REG_HSW_EM5 0x1010
+
+/* PCI space */
+#define AZX_PCIREG_TCSEL 0x44
+
+/*
+ * other constants
+ */
+
+/* max number of fragments - we may use more if allocating more pages for BDL */
+#define BDL_SIZE 4096
+#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16)
+#define AZX_MAX_FRAG 32
+/* max buffer size - no h/w limit, you can increase as you like */
+#define AZX_MAX_BUF_SIZE (1024*1024*1024)
+
+/* RIRB int mask: overrun[2], response[0] */
+#define RIRB_INT_RESPONSE 0x01
+#define RIRB_INT_OVERRUN 0x04
+#define RIRB_INT_MASK 0x05
+
+/* STATESTS int mask: S3,SD2,SD1,SD0 */
+#define STATESTS_INT_MASK ((1 << HDA_MAX_CODECS) - 1)
+
+/* SD_CTL bits */
+#define SD_CTL_STREAM_RESET 0x01 /* stream reset bit */
+#define SD_CTL_DMA_START 0x02 /* stream DMA start bit */
+#define SD_CTL_STRIPE (3 << 16) /* stripe control */
+#define SD_CTL_TRAFFIC_PRIO (1 << 18) /* traffic priority */
+#define SD_CTL_DIR (1 << 19) /* bi-directional stream */
+#define SD_CTL_STREAM_TAG_MASK (0xf << 20)
+#define SD_CTL_STREAM_TAG_SHIFT 20
+
+/* SD_CTL and SD_STS */
+#define SD_INT_DESC_ERR 0x10 /* descriptor error interrupt */
+#define SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */
+#define SD_INT_COMPLETE 0x04 /* completion interrupt */
+#define SD_INT_MASK (SD_INT_DESC_ERR|SD_INT_FIFO_ERR|\
+ SD_INT_COMPLETE)
+
+/* SD_STS */
+#define SD_STS_FIFO_READY 0x20 /* FIFO ready */
+
+/* INTCTL and INTSTS */
+#define AZX_INT_ALL_STREAM 0xff /* all stream interrupts */
+#define AZX_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */
+#define AZX_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */
+
+/* below are so far hardcoded - should read registers in future */
+#define AZX_MAX_CORB_ENTRIES 256
+#define AZX_MAX_RIRB_ENTRIES 256
+
+/* Capability header Structure */
+#define AZX_REG_CAP_HDR 0x0
+#define AZX_CAP_HDR_VER_OFF 28
+#define AZX_CAP_HDR_VER_MASK (0xF << AZX_CAP_HDR_VER_OFF)
+#define AZX_CAP_HDR_ID_OFF 16
+#define AZX_CAP_HDR_ID_MASK (0xFFF << AZX_CAP_HDR_ID_OFF)
+#define AZX_CAP_HDR_NXT_PTR_MASK 0xFFFF
+
+/* registers of Software Position Based FIFO Capability Structure */
+#define AZX_SPB_CAP_ID 0x4
+#define AZX_REG_SPB_BASE_ADDR 0x700
+#define AZX_REG_SPB_SPBFCH 0x00
+#define AZX_REG_SPB_SPBFCCTL 0x04
+/* Base used to calculate the iterating register offset */
+#define AZX_SPB_BASE 0x08
+/* Interval used to calculate the iterating register offset */
+#define AZX_SPB_INTERVAL 0x08
+
+/* registers of Global Time Synchronization Capability Structure */
+#define AZX_GTS_CAP_ID 0x1
+#define AZX_REG_GTS_GTSCH 0x00
+#define AZX_REG_GTS_GTSCD 0x04
+#define AZX_REG_GTS_GTSCTLAC 0x0C
+#define AZX_GTS_BASE 0x20
+#define AZX_GTS_INTERVAL 0x20
+
+/* registers for Processing Pipe Capability Structure */
+#define AZX_PP_CAP_ID 0x3
+#define AZX_REG_PP_PPCH 0x10
+#define AZX_REG_PP_PPCTL 0x04
+#define AZX_PPCTL_PIE (1<<31)
+#define AZX_PPCTL_GPROCEN (1<<30)
+/* _X_ = dma engine # and cannot exceed 29 (per spec max 30 dma engines) */
+#define AZX_PPCTL_PROCEN(_X_) (1<<(_X_))
+
+#define AZX_REG_PP_PPSTS 0x08
+
+#define AZX_PPHC_BASE 0x10
+#define AZX_PPHC_INTERVAL 0x10
+
+#define AZX_REG_PPHCLLPL 0x0
+#define AZX_REG_PPHCLLPU 0x4
+#define AZX_REG_PPHCLDPL 0x8
+#define AZX_REG_PPHCLDPU 0xC
+
+#define AZX_PPLC_BASE 0x10
+#define AZX_PPLC_MULTI 0x10
+#define AZX_PPLC_INTERVAL 0x10
+
+#define AZX_REG_PPLCCTL 0x0
+#define AZX_PPLCCTL_STRM_BITS 4
+#define AZX_PPLCCTL_STRM_SHIFT 20
+#define AZX_REG_MASK(bit_num, offset) \
+ (((1 << (bit_num)) - 1) << (offset))
+#define AZX_PPLCCTL_STRM_MASK \
+ AZX_REG_MASK(AZX_PPLCCTL_STRM_BITS, AZX_PPLCCTL_STRM_SHIFT)
+#define AZX_PPLCCTL_RUN (1<<1)
+#define AZX_PPLCCTL_STRST (1<<0)
+
+#define AZX_REG_PPLCFMT 0x4
+#define AZX_REG_PPLCLLPL 0x8
+#define AZX_REG_PPLCLLPU 0xC
+
+/* registers for Multiple Links Capability Structure */
+#define AZX_ML_CAP_ID 0x2
+#define AZX_REG_ML_MLCH 0x00
+#define AZX_REG_ML_MLCD 0x04
+#define AZX_ML_BASE 0x40
+#define AZX_ML_INTERVAL 0x40
+
+#define AZX_REG_ML_LCAP 0x00
+#define AZX_REG_ML_LCTL 0x04
+#define AZX_REG_ML_LOSIDV 0x08
+#define AZX_REG_ML_LSDIID 0x0C
+#define AZX_REG_ML_LPSOO 0x10
+#define AZX_REG_ML_LPSIO 0x12
+#define AZX_REG_ML_LWALFC 0x18
+#define AZX_REG_ML_LOUTPAY 0x20
+#define AZX_REG_ML_LINPAY 0x30
+
+#define AZX_MLCTL_SPA (1<<16)
+#define AZX_MLCTL_CPA 23
+
+/*
+ * helpers to read the stream position
+ */
+static inline unsigned int
+snd_hdac_stream_get_pos_lpib(struct hdac_stream *stream)
+{
+ return snd_hdac_stream_readl(stream, SD_LPIB);
+}
+
+static inline unsigned int
+snd_hdac_stream_get_pos_posbuf(struct hdac_stream *stream)
+{
+ return le32_to_cpu(*stream->posbuf);
+}
+
+#endif /* __SOUND_HDA_REGISTER_H */
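For illustration (not part of this header), a controller driver would pick between the two position helpers above depending on whether the hardware's DMA position buffer is reliable; "use_posbuf" is a hypothetical driver-private flag:

#include <sound/hda_register.h>

static unsigned int example_stream_pos(struct hdac_stream *s, bool use_posbuf)
{
	return use_posbuf ? snd_hdac_stream_get_pos_posbuf(s)
			  : snd_hdac_stream_get_pos_lpib(s);
}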
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
new file mode 100644
index 0000000..df70590
--- /dev/null
+++ b/include/sound/hda_regmap.h
@@ -0,0 +1,219 @@
+/*
+ * HD-audio regmap helpers
+ */
+
+#ifndef __SOUND_HDA_REGMAP_H
+#define __SOUND_HDA_REGMAP_H
+
+#include <linux/regmap.h>
+#include <sound/core.h>
+#include <sound/hdaudio.h>
+
+#define AC_AMP_FAKE_MUTE 0x10 /* fake mute bit set to amp verbs */
+
+int snd_hdac_regmap_init(struct hdac_device *codec);
+void snd_hdac_regmap_exit(struct hdac_device *codec);
+int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
+ unsigned int verb);
+int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int *val);
+int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int val);
+int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
+ unsigned int mask, unsigned int val);
+
+/**
+ * snd_hdac_regmap_encode_verb - encode the verb to a pseudo register
+ * @nid: widget NID
+ * @verb: codec verb
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_verb(nid, verb) \
+ (((verb) << 8) | 0x80000 | ((unsigned int)(nid) << 20))
+
+/**
+ * snd_hdac_regmap_encode_amp - encode the AMP verb to a pseudo register
+ * @nid: widget NID
+ * @ch: channel (left = 0, right = 1)
+ * @dir: direction (#HDA_INPUT, #HDA_OUTPUT)
+ * @idx: input index value
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_amp(nid, ch, dir, idx) \
+ (snd_hdac_regmap_encode_verb(nid, AC_VERB_GET_AMP_GAIN_MUTE) | \
+ ((ch) ? AC_AMP_GET_RIGHT : AC_AMP_GET_LEFT) | \
+ ((dir) == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT) | \
+ (idx))
+
+/**
+ * snd_hdac_regmap_encode_amp_stereo - encode a pseudo register for stereo AMPs
+ * @nid: widget NID
+ * @dir: direction (#HDA_INPUT, #HDA_OUTPUT)
+ * @idx: input index value
+ *
+ * Returns an encoded pseudo register.
+ */
+#define snd_hdac_regmap_encode_amp_stereo(nid, dir, idx) \
+ (snd_hdac_regmap_encode_verb(nid, AC_VERB_GET_AMP_GAIN_MUTE) | \
+ AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT | /* both bits set! */ \
+ ((dir) == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT) | \
+ (idx))
+
+/**
+ * snd_hdac_regmap_write - Write a verb with caching
+ * @codec: HD-audio codec
+ * @nid: codec NID
+ * @verb: verb to write
+ * @val: value to write
+ *
+ * For writing an amp value, use snd_hdac_regmap_update_amp().
+ */
+static inline int
+snd_hdac_regmap_write(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_write_raw(codec, cmd, val);
+}
+
+/**
+ * snd_hdac_regmap_update - Update a verb value with caching
+ * @codec: HD-audio codec
+ * @nid: codec NID
+ * @verb: verb to update
+ * @mask: bit mask to update
+ * @val: value to update
+ *
+ * For updating an amp value, use snd_hdac_regmap_update_amp().
+ */
+static inline int
+snd_hdac_regmap_update(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int mask,
+ unsigned int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_read - Read a verb with caching
+ * @codec: HD-audio codec
+ * @nid: codec NID
+ * @verb: verb to read
+ * @val: pointer to store the value
+ *
+ * For reading an amp value, use snd_hdac_regmap_get_amp().
+ */
+static inline int
+snd_hdac_regmap_read(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int *val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_verb(nid, verb);
+
+ return snd_hdac_regmap_read_raw(codec, cmd, val);
+}
+
+/**
+ * snd_hdac_regmap_get_amp - Read AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to read the AMP value
+ * @ch: channel (left=0 or right=1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ *
+ * Read the AMP value. The volume is between 0 and 0x7f; 0x80 is the mute bit.
+ * Returns the value or a negative error.
+ */
+static inline int
+snd_hdac_regmap_get_amp(struct hdac_device *codec, hda_nid_t nid,
+ int ch, int dir, int idx)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx);
+ int err, val;
+
+ err = snd_hdac_regmap_read_raw(codec, cmd, &val);
+ return err < 0 ? err : val;
+}
+
+/**
+ * snd_hdac_regmap_update_amp - update the AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to update the AMP value
+ * @ch: channel (left=0 or right=1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Update the AMP value with a bit mask.
+ * Returns 0 if the value is unchanged, 1 if changed, or a negative error.
+ */
+static inline int
+snd_hdac_regmap_update_amp(struct hdac_device *codec, hda_nid_t nid,
+ int ch, int dir, int idx, int mask, int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_get_amp_stereo - Read stereo AMP values
+ * @codec: HD-audio codec
+ * @nid: NID to read the AMP value
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ *
+ * Read stereo AMP values. The lower byte is left, the upper byte is right.
+ * Returns the value or a negative error.
+ */
+static inline int
+snd_hdac_regmap_get_amp_stereo(struct hdac_device *codec, hda_nid_t nid,
+ int dir, int idx)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp_stereo(nid, dir, idx);
+ int err, val;
+
+ err = snd_hdac_regmap_read_raw(codec, cmd, &val);
+ return err < 0 ? err : val;
+}
+
+/**
+ * snd_hdac_regmap_update_amp_stereo - update the stereo AMP value
+ * @codec: HD-audio codec
+ * @nid: NID to update the AMP value
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Update the stereo AMP value with a bit mask.
+ * The lower byte is left, the upper byte is right.
+ * Returns 0 if the value is unchanged, 1 if changed, or a negative error.
+ */
+static inline int
+snd_hdac_regmap_update_amp_stereo(struct hdac_device *codec, hda_nid_t nid,
+ int dir, int idx, int mask, int val)
+{
+ unsigned int cmd = snd_hdac_regmap_encode_amp_stereo(nid, dir, idx);
+
+ return snd_hdac_regmap_update_raw(codec, cmd, mask, val);
+}
+
+/**
+ * snd_hdac_regmap_sync_node - sync the widget node attributes
+ * @codec: HD-audio codec
+ * @nid: NID to sync
+ */
+static inline void
+snd_hdac_regmap_sync_node(struct hdac_device *codec, hda_nid_t nid)
+{
+ regcache_mark_dirty(codec->regmap);
+ regcache_sync_region(codec->regmap, nid << 20, ((nid + 1) << 20) - 1);
+}
+
+#endif /* __SOUND_HDA_REGMAP_H */
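To make the pseudo-register encoding above concrete (illustrative, not part of the header): NID 0x02 with AC_VERB_SET_POWER_STATE (0x705) encodes to (0x02 << 20) | 0x80000 | (0x705 << 8) == 0x2f0500, and a cached write through the regmap layer looks like this:

#include <sound/hda_regmap.h>
#include <sound/hda_verbs.h>

/* Hypothetical example: set a widget's power state through the cached regmap. */
static int example_set_power_state(struct hdac_device *codec, hda_nid_t nid,
				   unsigned int state)
{
	return snd_hdac_regmap_write(codec, nid, AC_VERB_SET_POWER_STATE, state);
}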
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
new file mode 100644
index 0000000..4caf1fd
--- /dev/null
+++ b/include/sound/hdaudio.h
@@ -0,0 +1,550 @@
+/*
+ * HD-audio core stuff
+ */
+
+#ifndef __SOUND_HDAUDIO_H
+#define __SOUND_HDAUDIO_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/timecounter.h>
+#include <sound/core.h>
+#include <sound/memalloc.h>
+#include <sound/hda_verbs.h>
+#include <drm/i915_component.h>
+
+/* codec node id */
+typedef u16 hda_nid_t;
+
+struct hdac_bus;
+struct hdac_stream;
+struct hdac_device;
+struct hdac_driver;
+struct hdac_widget_tree;
+
+/*
+ * exported bus type
+ */
+extern struct bus_type snd_hda_bus_type;
+
+/*
+ * HDA device table
+ */
+struct hda_device_id {
+ __u32 vendor_id;
+ __u32 rev_id;
+ const char *name;
+ unsigned long driver_data;
+};
+
+/*
+ * generic arrays
+ */
+struct snd_array {
+ unsigned int used;
+ unsigned int alloced;
+ unsigned int elem_size;
+ unsigned int alloc_align;
+ void *list;
+};
+
+/*
+ * HD-audio codec base device
+ */
+struct hdac_device {
+ struct device dev;
+ int type;
+ struct hdac_bus *bus;
+ unsigned int addr; /* codec address */
+ struct list_head list; /* list point for bus codec_list */
+
+ hda_nid_t afg; /* AFG node id */
+ hda_nid_t mfg; /* MFG node id */
+
+ /* ids */
+ unsigned int vendor_id;
+ unsigned int subsystem_id;
+ unsigned int revision_id;
+ unsigned int afg_function_id;
+ unsigned int mfg_function_id;
+ unsigned int afg_unsol:1;
+ unsigned int mfg_unsol:1;
+
+ unsigned int power_caps; /* FG power caps */
+
+ const char *vendor_name; /* codec vendor name */
+ const char *chip_name; /* codec chip name */
+
+ /* verb exec op override */
+ int (*exec_verb)(struct hdac_device *dev, unsigned int cmd,
+ unsigned int flags, unsigned int *res);
+
+ /* widgets */
+ unsigned int num_nodes;
+ hda_nid_t start_nid, end_nid;
+
+ /* misc flags */
+ atomic_t in_pm; /* suspend/resume being performed */
+ bool link_power_control:1;
+
+ /* sysfs */
+ struct hdac_widget_tree *widgets;
+
+ /* regmap */
+ struct regmap *regmap;
+ struct snd_array vendor_verbs;
+ bool lazy_cache:1; /* don't wake up for writes */
+ bool caps_overwriting:1; /* caps overwrite being in process */
+ bool cache_coef:1; /* cache COEF read/write too */
+};
+
+/* device/driver type used for matching */
+enum {
+ HDA_DEV_CORE,
+ HDA_DEV_LEGACY,
+ HDA_DEV_ASOC,
+};
+
+/* direction */
+enum {
+ HDA_INPUT, HDA_OUTPUT
+};
+
+#define dev_to_hdac_dev(_dev) container_of(_dev, struct hdac_device, dev)
+
+int snd_hdac_device_init(struct hdac_device *dev, struct hdac_bus *bus,
+ const char *name, unsigned int addr);
+void snd_hdac_device_exit(struct hdac_device *dev);
+int snd_hdac_device_register(struct hdac_device *codec);
+void snd_hdac_device_unregister(struct hdac_device *codec);
+
+int snd_hdac_refresh_widgets(struct hdac_device *codec);
+
+unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int parm);
+int snd_hdac_exec_verb(struct hdac_device *codec, unsigned int cmd,
+ unsigned int flags, unsigned int *res);
+int snd_hdac_read(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int verb, unsigned int parm, unsigned int *res);
+int _snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid, int parm,
+ unsigned int *res);
+int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
+ int parm);
+int snd_hdac_override_parm(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int parm, unsigned int val);
+int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
+ hda_nid_t *conn_list, int max_conns);
+int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
+ hda_nid_t *start_id);
+unsigned int snd_hdac_calc_stream_format(unsigned int rate,
+ unsigned int channels,
+ unsigned int format,
+ unsigned int maxbps,
+ unsigned short spdif_ctls);
+int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
+ u32 *ratesp, u64 *formatsp, unsigned int *bpsp);
+bool snd_hdac_is_supported_format(struct hdac_device *codec, hda_nid_t nid,
+ unsigned int format);
+
+/**
+ * snd_hdac_read_parm - read a codec parameter
+ * @codec: the codec object
+ * @nid: NID to read a parameter
+ * @parm: parameter to read
+ *
+ * Returns -1 for error. If you need to distinguish the error more
+ * strictly, use _snd_hdac_read_parm() directly.
+ */
+static inline int snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid,
+ int parm)
+{
+ unsigned int val;
+
+ return _snd_hdac_read_parm(codec, nid, parm, &val) < 0 ? -1 : val;
+}
+
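+/*
+ * Usage sketch (illustrative only, not part of this header): reading the
+ * vendor ID parameter of the AFG node through the cached helper above;
+ * AC_PAR_VENDOR_ID comes from <sound/hda_verbs.h>.
+ *
+ *	int val = snd_hdac_read_parm(codec, codec->afg, AC_PAR_VENDOR_ID);
+ *	if (val < 0)
+ *		dev_err(&codec->dev, "cannot read vendor id\n");
+ */
+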
+#ifdef CONFIG_PM
+void snd_hdac_power_up(struct hdac_device *codec);
+void snd_hdac_power_down(struct hdac_device *codec);
+void snd_hdac_power_up_pm(struct hdac_device *codec);
+void snd_hdac_power_down_pm(struct hdac_device *codec);
+#else
+static inline void snd_hdac_power_up(struct hdac_device *codec) {}
+static inline void snd_hdac_power_down(struct hdac_device *codec) {}
+static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
+static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
+#endif
+
+/*
+ * HD-audio codec base driver
+ */
+struct hdac_driver {
+ struct device_driver driver;
+ int type;
+ const struct hda_device_id *id_table;
+ int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
+ void (*unsol_event)(struct hdac_device *dev, unsigned int event);
+};
+
+#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
+
+const struct hda_device_id *
+hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv);
+
+/*
+ * Bus verb operators
+ */
+struct hdac_bus_ops {
+ /* send a single command */
+ int (*command)(struct hdac_bus *bus, unsigned int cmd);
+ /* get a response from the last command */
+ int (*get_response)(struct hdac_bus *bus, unsigned int addr,
+ unsigned int *res);
+ /* control the link power */
+ int (*link_power)(struct hdac_bus *bus, bool enable);
+};
+
+/*
+ * Lowlevel I/O operators
+ */
+struct hdac_io_ops {
+ /* mapped register accesses */
+ void (*reg_writel)(u32 value, u32 __iomem *addr);
+ u32 (*reg_readl)(u32 __iomem *addr);
+ void (*reg_writew)(u16 value, u16 __iomem *addr);
+ u16 (*reg_readw)(u16 __iomem *addr);
+ void (*reg_writeb)(u8 value, u8 __iomem *addr);
+ u8 (*reg_readb)(u8 __iomem *addr);
+ /* Allocation ops */
+ int (*dma_alloc_pages)(struct hdac_bus *bus, int type, size_t size,
+ struct snd_dma_buffer *buf);
+ void (*dma_free_pages)(struct hdac_bus *bus,
+ struct snd_dma_buffer *buf);
+};
+
+#define HDA_UNSOL_QUEUE_SIZE 64
+#define HDA_MAX_CODECS 8 /* limit by controller side */
+
+/* HD Audio class code */
+#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
+
+/*
+ * CORB/RIRB
+ *
+ * Each CORB entry is 4 bytes, each RIRB entry is 8 bytes
+ */
+struct hdac_rb {
+ __le32 *buf; /* virtual address of CORB/RIRB buffer */
+ dma_addr_t addr; /* physical address of CORB/RIRB buffer */
+ unsigned short rp, wp; /* RIRB read/write pointers */
+ int cmds[HDA_MAX_CODECS]; /* number of pending requests */
+ u32 res[HDA_MAX_CODECS]; /* last read value */
+};
+
+/*
+ * HD-audio bus base driver
+ */
+struct hdac_bus {
+ struct device *dev;
+ const struct hdac_bus_ops *ops;
+ const struct hdac_io_ops *io_ops;
+
+ /* h/w resources */
+ unsigned long addr;
+ void __iomem *remap_addr;
+ int irq;
+
+ /* codec linked list */
+ struct list_head codec_list;
+ unsigned int num_codecs;
+
+ /* link caddr -> codec */
+ struct hdac_device *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1];
+
+ /* unsolicited event queue */
+ u32 unsol_queue[HDA_UNSOL_QUEUE_SIZE * 2]; /* ring buffer */
+ unsigned int unsol_rp, unsol_wp;
+ struct work_struct unsol_work;
+
+ /* bit flags of detected codecs */
+ unsigned long codec_mask;
+
+ /* bit flags of powered codecs */
+ unsigned long codec_powered;
+
+ /* CORB/RIRB */
+ struct hdac_rb corb;
+ struct hdac_rb rirb;
+ unsigned int last_cmd[HDA_MAX_CODECS]; /* last sent command */
+
+ /* CORB/RIRB and position buffers */
+ struct snd_dma_buffer rb;
+ struct snd_dma_buffer posbuf;
+
+ /* hdac_stream linked list */
+ struct list_head stream_list;
+
+ /* operation state */
+ bool chip_init:1; /* h/w initialized */
+
+ /* behavior flags */
+ bool sync_write:1; /* sync after verb write */
+ bool use_posbuf:1; /* use position buffer */
+ bool snoop:1; /* enable snooping */
+ bool align_bdle_4k:1; /* BDLE align 4K boundary */
+ bool reverse_assign:1; /* assign devices in reverse order */
+ bool corbrp_self_clear:1; /* CORBRP clears itself after reset */
+
+ int bdl_pos_adj; /* BDL position adjustment */
+
+ /* locks */
+ spinlock_t reg_lock;
+ struct mutex cmd_mutex;
+
+ /* i915 component interface */
+ struct i915_audio_component *audio_component;
+ int i915_power_refcount;
+};
+
+int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
+ const struct hdac_bus_ops *ops,
+ const struct hdac_io_ops *io_ops);
+void snd_hdac_bus_exit(struct hdac_bus *bus);
+int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res);
+int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res);
+void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
+
+int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec);
+void snd_hdac_bus_remove_device(struct hdac_bus *bus,
+ struct hdac_device *codec);
+
+static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
+{
+ set_bit(codec->addr, &codec->bus->codec_powered);
+}
+
+static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
+{
+ clear_bit(codec->addr, &codec->bus->codec_powered);
+}
+
+int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
+int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
+ unsigned int *res);
+int snd_hdac_link_power(struct hdac_device *codec, bool enable);
+
+bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset);
+void snd_hdac_bus_stop_chip(struct hdac_bus *bus);
+void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
+void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
+void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
+void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
+
+void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
+void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
+ void (*ack)(struct hdac_bus *,
+ struct hdac_stream *));
+
+int snd_hdac_bus_alloc_stream_pages(struct hdac_bus *bus);
+void snd_hdac_bus_free_stream_pages(struct hdac_bus *bus);
+
+/*
+ * macros for easy use
+ */
+#define _snd_hdac_chip_write(type, chip, reg, value) \
+ ((chip)->io_ops->reg_write ## type(value, (chip)->remap_addr + (reg)))
+#define _snd_hdac_chip_read(type, chip, reg) \
+ ((chip)->io_ops->reg_read ## type((chip)->remap_addr + (reg)))
+
+/* read/write a register, pass without AZX_REG_ prefix */
+#define snd_hdac_chip_writel(chip, reg, value) \
+ _snd_hdac_chip_write(l, chip, AZX_REG_ ## reg, value)
+#define snd_hdac_chip_writew(chip, reg, value) \
+ _snd_hdac_chip_write(w, chip, AZX_REG_ ## reg, value)
+#define snd_hdac_chip_writeb(chip, reg, value) \
+ _snd_hdac_chip_write(b, chip, AZX_REG_ ## reg, value)
+#define snd_hdac_chip_readl(chip, reg) \
+ _snd_hdac_chip_read(l, chip, AZX_REG_ ## reg)
+#define snd_hdac_chip_readw(chip, reg) \
+ _snd_hdac_chip_read(w, chip, AZX_REG_ ## reg)
+#define snd_hdac_chip_readb(chip, reg) \
+ _snd_hdac_chip_read(b, chip, AZX_REG_ ## reg)
+
+/* update a register, pass without AZX_REG_ prefix */
+#define snd_hdac_chip_updatel(chip, reg, mask, val) \
+ snd_hdac_chip_writel(chip, reg, \
+ (snd_hdac_chip_readl(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_chip_updatew(chip, reg, mask, val) \
+ snd_hdac_chip_writew(chip, reg, \
+ (snd_hdac_chip_readw(chip, reg) & ~(mask)) | (val))
+#define snd_hdac_chip_updateb(chip, reg, mask, val) \
+ snd_hdac_chip_writeb(chip, reg, \
+ (snd_hdac_chip_readb(chip, reg) & ~(mask)) | (val))
+
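+/*
+ * Usage sketch (illustrative only, not part of this header): the update
+ * macros perform a read-modify-write of a controller register; AZX_REG_GCTL
+ * and AZX_GCTL_UNSOL are assumed to come from the controller register header.
+ *
+ *	snd_hdac_chip_updatel(chip, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
+ *	if (!(snd_hdac_chip_readl(chip, GCTL) & AZX_GCTL_UNSOL))
+ *		dev_warn(chip->dev, "failed to enable unsol events\n");
+ */
+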
+/*
+ * HD-audio stream
+ */
+struct hdac_stream {
+ struct hdac_bus *bus;
+ struct snd_dma_buffer bdl; /* BDL buffer */
+ __le32 *posbuf; /* position buffer pointer */
+ int direction; /* playback / capture (SNDRV_PCM_STREAM_*) */
+
+ unsigned int bufsize; /* size of the play buffer in bytes */
+ unsigned int period_bytes; /* size of the period in bytes */
+ unsigned int frags; /* number of periods in the play buffer */
+ unsigned int fifo_size; /* FIFO size */
+
+ void __iomem *sd_addr; /* stream descriptor pointer */
+
+ u32 sd_int_sta_mask; /* stream int status mask */
+
+ /* pcm support */
+ struct snd_pcm_substream *substream; /* assigned substream,
+ * set in PCM open
+ */
+ unsigned int format_val; /* format value to be set in the
+ * controller and the codec
+ */
+ unsigned char stream_tag; /* assigned stream */
+ unsigned char index; /* stream index */
+ int assigned_key; /* last device# key assigned to */
+
+ bool opened:1;
+ bool running:1;
+ bool prepared:1;
+ bool no_period_wakeup:1;
+ bool locked:1;
+
+ /* timestamp */
+ unsigned long start_wallclk; /* start + minimum wallclk */
+ unsigned long period_wallclk; /* wallclk for period */
+ struct timecounter tc;
+ struct cyclecounter cc;
+ int delay_negative_threshold;
+
+ struct list_head list;
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+ /* DSP access mutex */
+ struct mutex dsp_mutex;
+#endif
+};
+
+void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
+ int idx, int direction, int tag);
+struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
+ struct snd_pcm_substream *substream);
+void snd_hdac_stream_release(struct hdac_stream *azx_dev);
+
+int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
+void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
+int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev);
+int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
+ unsigned int format_val);
+void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
+void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
+void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
+void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
+void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
+ unsigned int streams, unsigned int reg);
+void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
+ unsigned int streams);
+void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
+ unsigned int streams);
+/*
+ * macros for easy use
+ */
+#define _snd_hdac_stream_write(type, dev, reg, value) \
+ ((dev)->bus->io_ops->reg_write ## type(value, (dev)->sd_addr + (reg)))
+#define _snd_hdac_stream_read(type, dev, reg) \
+ ((dev)->bus->io_ops->reg_read ## type((dev)->sd_addr + (reg)))
+
+/* read/write a register, pass without AZX_REG_ prefix */
+#define snd_hdac_stream_writel(dev, reg, value) \
+ _snd_hdac_stream_write(l, dev, AZX_REG_ ## reg, value)
+#define snd_hdac_stream_writew(dev, reg, value) \
+ _snd_hdac_stream_write(w, dev, AZX_REG_ ## reg, value)
+#define snd_hdac_stream_writeb(dev, reg, value) \
+ _snd_hdac_stream_write(b, dev, AZX_REG_ ## reg, value)
+#define snd_hdac_stream_readl(dev, reg) \
+ _snd_hdac_stream_read(l, dev, AZX_REG_ ## reg)
+#define snd_hdac_stream_readw(dev, reg) \
+ _snd_hdac_stream_read(w, dev, AZX_REG_ ## reg)
+#define snd_hdac_stream_readb(dev, reg) \
+ _snd_hdac_stream_read(b, dev, AZX_REG_ ## reg)
+
+/* update a register, pass without AZX_REG_ prefix */
+#define snd_hdac_stream_updatel(dev, reg, mask, val) \
+ snd_hdac_stream_writel(dev, reg, \
+ (snd_hdac_stream_readl(dev, reg) & \
+ ~(mask)) | (val))
+#define snd_hdac_stream_updatew(dev, reg, mask, val) \
+ snd_hdac_stream_writew(dev, reg, \
+ (snd_hdac_stream_readw(dev, reg) & \
+ ~(mask)) | (val))
+#define snd_hdac_stream_updateb(dev, reg, mask, val) \
+ snd_hdac_stream_writeb(dev, reg, \
+ (snd_hdac_stream_readb(dev, reg) & \
+ ~(mask)) | (val))
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+/* DSP lock helpers */
+#define snd_hdac_dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
+#define snd_hdac_dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
+#define snd_hdac_dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
+#define snd_hdac_stream_is_locked(dev) ((dev)->locked)
+/* DSP loader helpers */
+int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
+ unsigned int byte_size, struct snd_dma_buffer *bufp);
+void snd_hdac_dsp_trigger(struct hdac_stream *azx_dev, bool start);
+void snd_hdac_dsp_cleanup(struct hdac_stream *azx_dev,
+ struct snd_dma_buffer *dmab);
+#else /* CONFIG_SND_HDA_DSP_LOADER */
+#define snd_hdac_dsp_lock_init(dev) do {} while (0)
+#define snd_hdac_dsp_lock(dev) do {} while (0)
+#define snd_hdac_dsp_unlock(dev) do {} while (0)
+#define snd_hdac_stream_is_locked(dev) 0
+
+static inline int
+snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
+ unsigned int byte_size, struct snd_dma_buffer *bufp)
+{
+ return 0;
+}
+
+static inline void snd_hdac_dsp_trigger(struct hdac_stream *azx_dev, bool start)
+{
+}
+
+static inline void snd_hdac_dsp_cleanup(struct hdac_stream *azx_dev,
+ struct snd_dma_buffer *dmab)
+{
+}
+#endif /* CONFIG_SND_HDA_DSP_LOADER */
+
+
+/*
+ * generic array helpers
+ */
+void *snd_array_new(struct snd_array *array);
+void snd_array_free(struct snd_array *array);
+static inline void snd_array_init(struct snd_array *array, unsigned int size,
+ unsigned int align)
+{
+ array->elem_size = size;
+ array->alloc_align = align;
+}
+
+static inline void *snd_array_elem(struct snd_array *array, unsigned int idx)
+{
+ return array->list + idx * array->elem_size;
+}
+
+static inline unsigned int snd_array_index(struct snd_array *array, void *ptr)
+{
+ return (unsigned long)(ptr - array->list) / array->elem_size;
+}
+
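+/*
+ * Usage sketch (illustrative only): a dynamically grown array of u32 verbs;
+ * snd_array_new() extends the array by one element and returns a pointer to
+ * it, snd_array_free() releases the backing storage.
+ *
+ *	struct snd_array verbs;
+ *	u32 *v;
+ *
+ *	snd_array_init(&verbs, sizeof(u32), 16);
+ *	v = snd_array_new(&verbs);
+ *	if (v)
+ *		*v = 0x00170500;
+ *	snd_array_free(&verbs);
+ */
+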
+#endif /* __SOUND_HDAUDIO_H */
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
new file mode 100644
index 0000000..0f89df1
--- /dev/null
+++ b/include/sound/hdaudio_ext.h
@@ -0,0 +1,132 @@
+#ifndef __SOUND_HDAUDIO_EXT_H
+#define __SOUND_HDAUDIO_EXT_H
+
+#include <sound/hdaudio.h>
+
+/**
+ * hdac_ext_bus: HDAC extended bus for extended HDA caps
+ *
+ * @bus: hdac bus
+ * @num_streams: streams supported
+ * @idx: bus index
+ * @ppcap: pp capabilities pointer
+ * @spbcap: SPIB capabilities pointer
+ * @mlcap: MultiLink capabilities pointer
+ * @gtscap: gts capabilities pointer
+ * @hlink_list: linked list of HDA links
+ */
+struct hdac_ext_bus {
+ struct hdac_bus bus;
+ int num_streams;
+ int idx;
+
+ void __iomem *ppcap;
+ void __iomem *spbcap;
+ void __iomem *mlcap;
+ void __iomem *gtscap;
+
+ struct list_head hlink_list;
+};
+
+int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
+ const struct hdac_bus_ops *ops,
+ const struct hdac_io_ops *io_ops);
+
+void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
+int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
+void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
+
+#define ebus_to_hbus(ebus) (&(ebus)->bus)
+#define hbus_to_ebus(_bus) \
+ container_of(_bus, struct hdac_ext_bus, bus)
+
+int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *sbus);
+void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable);
+void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable);
+
+void snd_hdac_ext_stream_spbcap_enable(struct hdac_ext_bus *chip,
+ bool enable, int index);
+
+int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *bus);
+struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *bus,
+ const char *codec_name);
+
+enum hdac_ext_stream_type {
+ HDAC_EXT_STREAM_TYPE_COUPLED = 0,
+ HDAC_EXT_STREAM_TYPE_HOST,
+ HDAC_EXT_STREAM_TYPE_LINK
+};
+
+/**
+ * hdac_ext_stream: HDAC extended stream for extended HDA caps
+ *
+ * @hstream: hdac_stream
+ * @pphc_addr: processing pipe host stream pointer
+ * @pplc_addr: processing pipe link stream pointer
+ * @decoupled: stream host and link are decoupled
+ * @link_locked: link is locked
+ * @link_prepared: link is prepared
+ * @link_substream: link substream
+ */
+struct hdac_ext_stream {
+ struct hdac_stream hstream;
+
+ void __iomem *pphc_addr;
+ void __iomem *pplc_addr;
+
+ bool decoupled:1;
+ bool link_locked:1;
+ bool link_prepared;
+
+ struct snd_pcm_substream *link_substream;
+};
+
+#define hdac_stream(s) (&(s)->hstream)
+#define stream_to_hdac_ext_stream(s) \
+ container_of(s, struct hdac_ext_stream, hstream)
+
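+/*
+ * Usage sketch (illustrative only, not part of this header): converting
+ * between the core and the extended stream objects, e.g. in a controller
+ * callback that receives a struct hdac_stream pointer.
+ *
+ *	struct hdac_ext_stream *estream = stream_to_hdac_ext_stream(s);
+ *	struct hdac_stream *core = hdac_stream(estream);
+ */
+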
+void snd_hdac_ext_stream_init(struct hdac_ext_bus *bus,
+ struct hdac_ext_stream *stream, int idx,
+ int direction, int tag);
+int snd_hdac_ext_stream_init_all(struct hdac_ext_bus *ebus, int start_idx,
+ int num_stream, int dir);
+void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus);
+void snd_hdac_link_free_all(struct hdac_ext_bus *ebus);
+struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_ext_bus *bus,
+ struct snd_pcm_substream *substream,
+ int type);
+void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
+void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus,
+ struct hdac_ext_stream *azx_dev, bool decouple);
+void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
+
+void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
+void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
+void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
+int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *stream, int fmt);
+
+struct hdac_ext_link {
+ struct hdac_bus *bus;
+ int index;
+ void __iomem *ml_addr; /* link output stream reg pointer */
+ u32 lcaps; /* link capabilities */
+ u16 lsdiid; /* link sdi identifier */
+ struct list_head list;
+};
+
+int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
+void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
+ int stream);
+void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
+ int stream);
+
+/* update register macro */
+#define snd_hdac_updatel(addr, reg, mask, val) \
+ writel(((readl(addr + reg) & ~(mask)) | (val)), \
+ addr + reg)
+
+#define snd_hdac_updatew(addr, reg, mask, val) \
+ writew(((readw(addr + reg) & ~(mask)) | (val)), \
+ addr + reg)
+
+#endif /* __SOUND_HDAUDIO_EXT_H */
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index ae04a3e..ab9fcb2 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -68,8 +68,7 @@ struct snd_hwdep {
wait_queue_head_t open_wait;
void *private_data;
void (*private_free) (struct snd_hwdep *hwdep);
- struct device *dev;
- const struct attribute_group **groups;
+ struct device dev;
struct mutex open_mutex;
int used; /* reference counter */
diff --git a/include/sound/info.h b/include/sound/info.h
index 9ca1a49..67390ee 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -23,6 +23,8 @@
*/
#include <linux/poll.h>
+#include <linux/seq_file.h>
+#include <sound/core.h>
/* buffer for information */
struct snd_info_buffer {
@@ -90,16 +92,14 @@ struct snd_info_entry {
struct list_head list;
};
-#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_PROC_FS)
+#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_SND_PROC_FS)
int snd_info_minor_register(void);
-int snd_info_minor_unregister(void);
#else
-#define snd_info_minor_register() /* NOP */
-#define snd_info_minor_unregister() /* NOP */
+#define snd_info_minor_register() 0
#endif
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_SND_PROC_FS
extern struct snd_info_entry *snd_seq_root;
#ifdef CONFIG_SND_OSSEMUL
@@ -110,8 +110,18 @@ void snd_card_info_read_oss(struct snd_info_buffer *buffer);
static inline void snd_card_info_read_oss(struct snd_info_buffer *buffer) {}
#endif
-__printf(2, 3)
-int snd_iprintf(struct snd_info_buffer *buffer, const char *fmt, ...);
+/**
+ * snd_iprintf - printf on the procfs buffer
+ * @buf: the procfs buffer
+ * @fmt: the printf format
+ *
+ * Outputs the string on the procfs buffer just like printf().
+ *
+ * Return: zero for success, or a negative error code.
+ */
+#define snd_iprintf(buf, fmt, args...) \
+ seq_printf((struct seq_file *)(buf)->buffer, fmt, ##args)
+
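+/*
+ * Usage sketch (illustrative only, not part of this header): a text proc
+ * read callback built on snd_iprintf(); such a callback is typically hooked
+ * up with snd_info_set_text_ops() on an entry created by snd_card_proc_new().
+ *
+ *	static void my_proc_read(struct snd_info_entry *entry,
+ *				 struct snd_info_buffer *buffer)
+ *	{
+ *		snd_iprintf(buffer, "hello from the driver\n");
+ *	}
+ */
+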
int snd_info_init(void);
int snd_info_done(void);
@@ -135,8 +145,12 @@ void snd_info_card_id_change(struct snd_card *card);
int snd_info_register(struct snd_info_entry *entry);
/* for card drivers */
-int snd_card_proc_new(struct snd_card *card, const char *name,
- struct snd_info_entry **entryp);
+static inline int snd_card_proc_new(struct snd_card *card, const char *name,
+ struct snd_info_entry **entryp)
+{
+ *entryp = snd_info_create_card_entry(card, name, card->proc_root);
+ return *entryp ? 0 : -ENOMEM;
+}
static inline void snd_info_set_text_ops(struct snd_info_entry *entry,
void *private_data,
@@ -175,7 +189,6 @@ static inline int snd_card_proc_new(struct snd_card *card, const char *name,
static inline void snd_info_set_text_ops(struct snd_info_entry *entry __attribute__((unused)),
void *private_data,
void (*read)(struct snd_info_entry *, struct snd_info_buffer *)) {}
-
static inline int snd_info_check_reserved_words(const char *str) { return 1; }
#endif
@@ -184,7 +197,7 @@ static inline int snd_info_check_reserved_words(const char *str) { return 1; }
* OSS info part
*/
-#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_PROC_FS)
+#if defined(CONFIG_SND_OSSEMUL) && defined(CONFIG_SND_PROC_FS)
#define SNDRV_OSS_INFO_DEV_AUDIO 0
#define SNDRV_OSS_INFO_DEV_SYNTH 1
@@ -197,6 +210,6 @@ static inline int snd_info_check_reserved_words(const char *str) { return 1; }
int snd_oss_info_register(int dev, int num, char *string);
#define snd_oss_info_unregister(dev, num) snd_oss_info_register(dev, num, NULL)
-#endif /* CONFIG_SND_OSSEMUL && CONFIG_PROC_FS */
+#endif /* CONFIG_SND_OSSEMUL && CONFIG_SND_PROC_FS */
#endif /* __SOUND_INFO_H */
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 2182350..23bede1 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -73,6 +73,8 @@ enum snd_jack_types {
struct snd_jack {
struct input_dev *input_dev;
+ struct list_head kctl_list;
+ struct snd_card *card;
int registered;
int type;
const char *id;
@@ -85,7 +87,8 @@ struct snd_jack {
#ifdef CONFIG_SND_JACK
int snd_jack_new(struct snd_card *card, const char *id, int type,
- struct snd_jack **jack);
+ struct snd_jack **jack, bool initial_kctl, bool phantom_jack);
+int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name, int mask);
void snd_jack_set_parent(struct snd_jack *jack, struct device *parent);
int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
int keytype);
@@ -93,9 +96,13 @@ int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
void snd_jack_report(struct snd_jack *jack, int status);
#else
-
static inline int snd_jack_new(struct snd_card *card, const char *id, int type,
- struct snd_jack **jack)
+ struct snd_jack **jack, bool initial_kctl, bool phantom_jack)
+{
+ return 0;
+}
+
+static inline int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name, int mask)
{
return 0;
}
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index b429b73..691e7ee 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -60,6 +60,9 @@ struct snd_pcm_hardware {
struct snd_pcm_substream;
+struct snd_pcm_audio_tstamp_config; /* definitions further down */
+struct snd_pcm_audio_tstamp_report;
+
struct snd_pcm_ops {
int (*open)(struct snd_pcm_substream *substream);
int (*close)(struct snd_pcm_substream *substream);
@@ -71,8 +74,10 @@ struct snd_pcm_ops {
int (*prepare)(struct snd_pcm_substream *substream);
int (*trigger)(struct snd_pcm_substream *substream, int cmd);
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream);
- int (*wall_clock)(struct snd_pcm_substream *substream,
- struct timespec *audio_ts);
+ int (*get_time_info)(struct snd_pcm_substream *substream,
+ struct timespec *system_ts, struct timespec *audio_ts,
+ struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+ struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*copy)(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos,
void __user *buf, snd_pcm_uframes_t count);
@@ -94,9 +99,6 @@ struct snd_pcm_ops {
#define SNDRV_PCM_DEVICES 8
#endif
-#define SNDRV_PCM_IOCTL1_FALSE ((void *)0)
-#define SNDRV_PCM_IOCTL1_TRUE ((void *)1)
-
#define SNDRV_PCM_IOCTL1_RESET 0
#define SNDRV_PCM_IOCTL1_INFO 1
#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
@@ -109,6 +111,7 @@ struct snd_pcm_ops {
#define SNDRV_PCM_TRIGGER_PAUSE_RELEASE 4
#define SNDRV_PCM_TRIGGER_SUSPEND 5
#define SNDRV_PCM_TRIGGER_RESUME 6
+#define SNDRV_PCM_TRIGGER_DRAIN 7
#define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1)
@@ -221,9 +224,10 @@ typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule {
unsigned int cond;
- snd_pcm_hw_rule_func_t func;
int var;
int deps[4];
+
+ snd_pcm_hw_rule_func_t func;
void *private;
};
@@ -270,17 +274,76 @@ struct snd_pcm_hw_constraint_ratdens {
};
struct snd_pcm_hw_constraint_list {
- unsigned int count;
const unsigned int *list;
+ unsigned int count;
+ unsigned int mask;
+};
+
+struct snd_pcm_hw_constraint_ranges {
+ unsigned int count;
+ const struct snd_interval *ranges;
unsigned int mask;
};
struct snd_pcm_hwptr_log;
+/*
+ * userspace-provided audio timestamp config to the kernel;
+ * the structure is for internal use only and is filled by a dedicated unpack routine
+ */
+struct snd_pcm_audio_tstamp_config {
+ /* 5 of max 16 bits used */
+ u32 type_requested:4;
+ u32 report_delay:1; /* add total delay to A/D or D/A */
+};
+
+static inline void snd_pcm_unpack_audio_tstamp_config(__u32 data,
+ struct snd_pcm_audio_tstamp_config *config)
+{
+ config->type_requested = data & 0xF;
+ config->report_delay = (data >> 4) & 1;
+}
+
+/*
+ * kernel-provided audio timestamp report to user-space;
+ * the structure is for internal use only and is read by a dedicated pack routine
+ */
+struct snd_pcm_audio_tstamp_report {
+ /* 6 of max 16 bits used for bit-fields */
+
+ /* for backwards compatibility */
+ u32 valid:1;
+
+ /* actual type if hardware could not support requested timestamp */
+ u32 actual_type:4;
+
+ /* accuracy represented in ns units */
+ u32 accuracy_report:1; /* 0 if accuracy unknown, 1 if accuracy field is valid */
+ u32 accuracy; /* up to 4.29s, will be packed in separate field */
+};
+
+static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy,
+ const struct snd_pcm_audio_tstamp_report *report)
+{
+ u32 tmp;
+
+ tmp = report->accuracy_report;
+ tmp <<= 4;
+ tmp |= report->actual_type;
+ tmp <<= 1;
+ tmp |= report->valid;
+
+ *data &= 0xffff; /* zero-clear MSBs */
+ *data |= (tmp << 16);
+ *accuracy = report->accuracy;
+}
+
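+/*
+ * Worked example (illustrative only): a user-space control word of 0x11
+ * unpacks to type_requested = 1 with report_delay set; packing a report with
+ * valid = 1 and actual_type = 1 stores 0x0003 in the upper 16 bits of the
+ * returned control word.
+ *
+ *	struct snd_pcm_audio_tstamp_config cfg;
+ *
+ *	snd_pcm_unpack_audio_tstamp_config(0x11, &cfg);
+ *	// now cfg.type_requested == 1 and cfg.report_delay == 1
+ */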
+
struct snd_pcm_runtime {
/* -- Status -- */
struct snd_pcm_substream *trigger_master;
struct timespec trigger_tstamp; /* trigger timestamp */
+ bool trigger_tstamp_latched; /* trigger timestamp latched in low-level driver/hardware */
int overrange;
snd_pcm_uframes_t avail_max;
snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
@@ -356,6 +419,11 @@ struct snd_pcm_runtime {
struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */
+ /* -- audio timestamp config -- */
+ struct snd_pcm_audio_tstamp_config audio_tstamp_config;
+ struct snd_pcm_audio_tstamp_report audio_tstamp_report;
+ struct timespec driver_tstamp;
+
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
/* -- OSS things -- */
struct snd_pcm_oss_runtime oss;
@@ -449,6 +517,7 @@ struct snd_pcm_str {
#endif
#endif
struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */
+ struct device dev;
};
struct snd_pcm {
@@ -465,7 +534,6 @@ struct snd_pcm {
wait_queue_head_t open_wait;
void *private_data;
void (*private_free) (struct snd_pcm *pcm);
- struct device *dev; /* actual hw device this belongs to */
bool internal; /* pcm is for internal use only */
bool nonatomic; /* whole PCM operations are in non-atomic context */
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
@@ -520,7 +588,6 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream);
int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, struct file *file,
struct snd_pcm_substream **rsubstream);
void snd_pcm_detach_substream(struct snd_pcm_substream *substream);
-void snd_pcm_vma_notify_data(void *client, void *data);
int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area);
@@ -910,6 +977,8 @@ void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
const struct snd_interval *b, struct snd_interval *c);
int snd_interval_list(struct snd_interval *i, unsigned int count,
const unsigned int *list, unsigned int mask);
+int snd_interval_ranges(struct snd_interval *i, unsigned int count,
+ const struct snd_interval *list, unsigned int mask);
int snd_interval_ratnum(struct snd_interval *i,
unsigned int rats_count, struct snd_ratnum *rats,
unsigned int *nump, unsigned int *denp);
@@ -934,6 +1003,10 @@ int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
const struct snd_pcm_hw_constraint_list *l);
+int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
+ unsigned int cond,
+ snd_pcm_hw_param_t var,
+ const struct snd_pcm_hw_constraint_ranges *r);
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
@@ -986,21 +1059,15 @@ int snd_pcm_format_physical_width(snd_pcm_format_t format); /* in bits */
ssize_t snd_pcm_format_size(snd_pcm_format_t format, size_t samples);
const unsigned char *snd_pcm_format_silence_64(snd_pcm_format_t format);
int snd_pcm_format_set_silence(snd_pcm_format_t format, void *buf, unsigned int frames);
-snd_pcm_format_t snd_pcm_build_linear_format(int width, int unsigned, int big_endian);
void snd_pcm_set_ops(struct snd_pcm * pcm, int direction,
const struct snd_pcm_ops *ops);
void snd_pcm_set_sync(struct snd_pcm_substream *substream);
-int snd_pcm_lib_interleave_len(struct snd_pcm_substream *substream);
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
int snd_pcm_update_state(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime);
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream);
-int snd_pcm_playback_xrun_check(struct snd_pcm_substream *substream);
-int snd_pcm_capture_xrun_check(struct snd_pcm_substream *substream);
-int snd_pcm_playback_xrun_asap(struct snd_pcm_substream *substream);
-int snd_pcm_capture_xrun_asap(struct snd_pcm_substream *substream);
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr);
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream);
snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream,
diff --git a/include/sound/pcm_drm_eld.h b/include/sound/pcm_drm_eld.h
new file mode 100644
index 0000000..93357b2
--- /dev/null
+++ b/include/sound/pcm_drm_eld.h
@@ -0,0 +1,6 @@
+#ifndef __SOUND_PCM_DRM_ELD_H
+#define __SOUND_PCM_DRM_ELD_H
+
+int snd_pcm_hw_constraint_eld(struct snd_pcm_runtime *runtime, void *eld);
+
+#endif
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
new file mode 100644
index 0000000..0eed397
--- /dev/null
+++ b/include/sound/pcm_iec958.h
@@ -0,0 +1,9 @@
+#ifndef __SOUND_PCM_IEC958_H
+#define __SOUND_PCM_IEC958_H
+
+#include <linux/types.h>
+
+int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
+ size_t len);
+
+#endif
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 6b1c78f..c704357 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -38,31 +38,6 @@ int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
#define MASK_OFS(i) ((i) >> 5)
#define MASK_BIT(i) (1U << ((i) & 31))
-static inline unsigned int ld2(u_int32_t v)
-{
- unsigned r = 0;
-
- if (v >= 0x10000) {
- v >>= 16;
- r += 16;
- }
- if (v >= 0x100) {
- v >>= 8;
- r += 8;
- }
- if (v >= 0x10) {
- v >>= 4;
- r += 4;
- }
- if (v >= 4) {
- v >>= 2;
- r += 2;
- }
- if (v >= 2)
- r++;
- return r;
-}
-
static inline size_t snd_mask_sizeof(void)
{
return sizeof(struct snd_mask);
@@ -92,7 +67,7 @@ static inline unsigned int snd_mask_min(const struct snd_mask *mask)
int i;
for (i = 0; i < SNDRV_MASK_SIZE; i++) {
if (mask->bits[i])
- return ffs(mask->bits[i]) - 1 + (i << 5);
+ return __ffs(mask->bits[i]) + (i << 5);
}
return 0;
}
@@ -102,7 +77,7 @@ static inline unsigned int snd_mask_max(const struct snd_mask *mask)
int i;
for (i = SNDRV_MASK_SIZE - 1; i >= 0; i--) {
if (mask->bits[i])
- return ld2(mask->bits[i]) + (i << 5);
+ return __fls(mask->bits[i]) + (i << 5);
}
return 0;
}
@@ -325,45 +300,77 @@ static inline int snd_interval_eq(const struct snd_interval *i1, const struct sn
i1->max == i2->max && i1->openmax == i2->openmax;
}
-static inline unsigned int add(unsigned int a, unsigned int b)
+/**
+ * params_access - get the access type from the hw params
+ * @p: hw params
+ */
+static inline snd_pcm_access_t params_access(const struct snd_pcm_hw_params *p)
{
- if (a >= UINT_MAX - b)
- return UINT_MAX;
- return a + b;
+ return (__force snd_pcm_access_t)snd_mask_min(hw_param_mask_c(p,
+ SNDRV_PCM_HW_PARAM_ACCESS));
}
-static inline unsigned int sub(unsigned int a, unsigned int b)
+/**
+ * params_format - get the sample format from the hw params
+ * @p: hw params
+ */
+static inline snd_pcm_format_t params_format(const struct snd_pcm_hw_params *p)
{
- if (a > b)
- return a - b;
- return 0;
+ return (__force snd_pcm_format_t)snd_mask_min(hw_param_mask_c(p,
+ SNDRV_PCM_HW_PARAM_FORMAT));
}
-#define params_access(p) ((__force snd_pcm_access_t)\
- snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_ACCESS)))
-#define params_format(p) ((__force snd_pcm_format_t)\
- snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_FORMAT)))
-#define params_subformat(p) \
- snd_mask_min(hw_param_mask_c((p), SNDRV_PCM_HW_PARAM_SUBFORMAT))
+/**
+ * params_subformat - get the sample subformat from the hw params
+ * @p: hw params
+ */
+static inline snd_pcm_subformat_t
+params_subformat(const struct snd_pcm_hw_params *p)
+{
+ return (__force snd_pcm_subformat_t)snd_mask_min(hw_param_mask_c(p,
+ SNDRV_PCM_HW_PARAM_SUBFORMAT));
+}
+/**
+ * params_period_bytes - get the period size (in bytes) from the hw params
+ * @p: hw params
+ */
static inline unsigned int
params_period_bytes(const struct snd_pcm_hw_params *p)
{
- return (params_period_size(p) *
- snd_pcm_format_physical_width(params_format(p)) *
- params_channels(p)) / 8;
+ return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_BYTES)->min;
}
-static inline int
-params_width(const struct snd_pcm_hw_params *p)
+/**
+ * params_width - get the number of bits of the sample format from the hw params
+ * @p: hw params
+ *
+ * This function returns the number of bits per sample that the selected sample
+ * format of the hw params has.
+ */
+static inline int params_width(const struct snd_pcm_hw_params *p)
{
return snd_pcm_format_width(params_format(p));
}
-static inline int
-params_physical_width(const struct snd_pcm_hw_params *p)
+/**
+ * params_physical_width - get the storage size of the sample format from the hw params
+ * @p: hw params
+ *
+ * This function returns the number of bits per sample that the selected sample
+ * format of the hw params occupies in memory. This will be equal to or larger than
+ * params_width().
+ */
+static inline int params_physical_width(const struct snd_pcm_hw_params *p)
{
return snd_pcm_format_physical_width(params_format(p));
}
+static inline void
+params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
+{
+ snd_mask_set(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT),
+ (__force int)fmt);
+}
+
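+/*
+ * Usage sketch (illustrative only): deriving the bytes per frame from the
+ * negotiated hw params; params_channels() is provided earlier in this header.
+ *
+ *	unsigned int frame_bytes =
+ *		params_channels(p) * params_physical_width(p) / 8;
+ */
+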
#endif /* __SOUND_PCM_PARAMS_H */
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 311dafe..f6cbef7 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -28,6 +28,7 @@
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/device.h>
#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
#include <sound/seq_device.h>
@@ -139,7 +140,8 @@ struct snd_rawmidi {
struct mutex open_mutex;
wait_queue_head_t open_wait;
- struct snd_info_entry *dev;
+ struct device dev;
+
struct snd_info_entry *proc_entry;
#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 83284ca..4cecd0c 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -55,6 +55,7 @@ struct rsnd_ssi_platform_info {
struct rsnd_src_platform_info {
u32 convert_rate; /* sampling rate convert */
int dma_id; /* for Gen2 SCU */
+ int irq;
};
/*
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index 120d961..22734bc 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -15,17 +15,11 @@ struct rt5645_platform_data {
/* IN2 can optionally be differential */
bool in2_diff;
- bool dmic_en;
unsigned int dmic1_data_pin;
/* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */
unsigned int dmic2_data_pin;
/* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
- unsigned int hp_det_gpio;
- bool gpio_hp_det_active_high;
-
- /* true if codec's jd function is used */
- bool en_jd_func;
unsigned int jd_mode;
};
diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h
index bd31119..b7d6051 100644
--- a/include/sound/rt5670.h
+++ b/include/sound/rt5670.h
@@ -14,6 +14,7 @@
struct rt5670_platform_data {
int jd_mode;
bool in2_diff;
+ bool dev_gpio;
bool dmic_en;
unsigned int dmic1_data_pin;
diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h
index d9eb7d8..a620704 100644
--- a/include/sound/rt5677.h
+++ b/include/sound/rt5677.h
@@ -37,6 +37,9 @@ struct rt5677_platform_data {
OFF, GPIO4, GPIO5 and GPIO6 respectively */
unsigned int jd2_gpio;
unsigned int jd3_gpio;
+
+ /* Set MICBIAS1 VDD 1v8 or 3v3 */
+ bool micbias1_vdd_3v3;
};
#endif
diff --git a/include/sound/sb.h b/include/sound/sb.h
index ba39603..bacefae 100644
--- a/include/sound/sb.h
+++ b/include/sound/sb.h
@@ -25,7 +25,7 @@
#include <sound/pcm.h>
#include <sound/rawmidi.h>
#include <linux/interrupt.h>
-#include <asm/io.h>
+#include <linux/io.h>
enum sb_hw_type {
SB_HW_AUTO,
@@ -308,7 +308,7 @@ void snd_sbmixer_resume(struct snd_sb *chip);
#endif
/* sb8_init.c */
-int snd_sb8dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm);
+int snd_sb8dsp_pcm(struct snd_sb *chip, int device);
/* sb8.c */
irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip);
int snd_sb8_playback_open(struct snd_pcm_substream *substream);
@@ -317,10 +317,10 @@ int snd_sb8_playback_close(struct snd_pcm_substream *substream);
int snd_sb8_capture_close(struct snd_pcm_substream *substream);
/* midi8.c */
irqreturn_t snd_sb8dsp_midi_interrupt(struct snd_sb *chip);
-int snd_sb8dsp_midi(struct snd_sb *chip, int device, struct snd_rawmidi ** rrawmidi);
+int snd_sb8dsp_midi(struct snd_sb *chip, int device);
/* sb16_init.c */
-int snd_sb16dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm);
+int snd_sb16dsp_pcm(struct snd_sb *chip, int device);
const struct snd_pcm_ops *snd_sb16dsp_get_pcm_ops(int direction);
int snd_sb16dsp_configure(struct snd_sb *chip);
/* sb16.c */
diff --git a/include/sound/seq_device.h b/include/sound/seq_device.h
index 2b5f24c..ddc0d50 100644
--- a/include/sound/seq_device.h
+++ b/include/sound/seq_device.h
@@ -25,29 +25,26 @@
* registered device information
*/
-#define ID_LEN 32
-
-/* status flag */
-#define SNDRV_SEQ_DEVICE_FREE 0
-#define SNDRV_SEQ_DEVICE_REGISTERED 1
-
struct snd_seq_device {
/* device info */
struct snd_card *card; /* sound card */
int device; /* device number */
- char id[ID_LEN]; /* driver id */
+ const char *id; /* driver id */
char name[80]; /* device name */
int argsize; /* size of the argument */
void *driver_data; /* private data for driver */
- int status; /* flag - read only */
void *private_data; /* private data for the caller */
void (*private_free)(struct snd_seq_device *device);
- struct list_head list; /* link to next device */
+ struct device dev;
};
+#define to_seq_dev(_dev) \
+ container_of(_dev, struct snd_seq_device, dev)
+
+/* sequencer driver */
/* driver operators
- * init_device:
+ * probe:
* Initialize the device with given parameters.
* Typically,
* 1. call snd_hwdep_new
@@ -55,25 +52,40 @@ struct snd_seq_device {
* 3. call snd_hwdep_register
* 4. store the instance to dev->driver_data pointer.
*
- * free_device:
+ * remove:
* Release the private data.
* Typically, call snd_device_free(dev->card, dev->driver_data)
*/
-struct snd_seq_dev_ops {
- int (*init_device)(struct snd_seq_device *dev);
- int (*free_device)(struct snd_seq_device *dev);
+struct snd_seq_driver {
+ struct device_driver driver;
+ char *id;
+ int argsize;
};
+#define to_seq_drv(_drv) \
+ container_of(_drv, struct snd_seq_driver, driver)
+
/*
* prototypes
*/
+#ifdef CONFIG_MODULES
void snd_seq_device_load_drivers(void);
-int snd_seq_device_new(struct snd_card *card, int device, char *id, int argsize, struct snd_seq_device **result);
-int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry, int argsize);
-int snd_seq_device_unregister_driver(char *id);
+#else
+#define snd_seq_device_load_drivers()
+#endif
+int snd_seq_device_new(struct snd_card *card, int device, const char *id,
+ int argsize, struct snd_seq_device **result);
#define SNDRV_SEQ_DEVICE_ARGPTR(dev) (void *)((char *)(dev) + sizeof(struct snd_seq_device))
+int __must_check __snd_seq_driver_register(struct snd_seq_driver *drv,
+ struct module *mod);
+#define snd_seq_driver_register(drv) \
+ __snd_seq_driver_register(drv, THIS_MODULE)
+void snd_seq_driver_unregister(struct snd_seq_driver *drv);
+
+#define module_snd_seq_driver(drv) \
+ module_driver(drv, snd_seq_driver_register, snd_seq_driver_unregister)
/*
* id strings for generic devices
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index eea5400..feb58d4 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -27,11 +27,8 @@
typedef struct snd_seq_real_time snd_seq_real_time_t;
typedef union snd_seq_timestamp snd_seq_timestamp_t;
-/* maximum number of events dequeued per schedule interval */
-#define SNDRV_SEQ_MAX_DEQUEUE 50
-
/* maximum number of queues */
-#define SNDRV_SEQ_MAX_QUEUES 8
+#define SNDRV_SEQ_MAX_QUEUES 32
/* max number of concurrent clients */
#define SNDRV_SEQ_MAX_CLIENTS 192
@@ -42,9 +39,6 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
/* max number of events in memory pool */
#define SNDRV_SEQ_MAX_EVENTS 2000
-/* default number of events in memory chunk */
-#define SNDRV_SEQ_DEFAULT_CHUNK_EVENTS 64
-
/* default number of events in memory pool */
#define SNDRV_SEQ_DEFAULT_EVENTS 500
@@ -70,7 +64,6 @@ struct snd_seq_port_callback {
int (*unuse)(void *private_data, struct snd_seq_port_subscribe *info);
int (*event_input)(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop);
void (*private_free)(void *private_data);
- unsigned int callback_all; /* call subscribe callbacks at each connection/disconnection */
/*...*/
};
@@ -106,13 +99,9 @@ int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp,
int snd_seq_event_port_detach(int client, int port);
#ifdef CONFIG_MODULES
-void snd_seq_autoload_lock(void);
-void snd_seq_autoload_unlock(void);
void snd_seq_autoload_init(void);
-#define snd_seq_autoload_exit() snd_seq_autoload_lock()
+void snd_seq_autoload_exit(void);
#else
-#define snd_seq_autoload_lock()
-#define snd_seq_autoload_unlock()
#define snd_seq_autoload_init()
#define snd_seq_autoload_exit()
#endif
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index 9b0ac77..b9b4f28 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -16,10 +16,10 @@
struct asoc_simple_dai {
const char *name;
- unsigned int fmt;
unsigned int sysclk;
int slots;
int slot_width;
+ struct clk *clk;
};
struct asoc_simple_card_info {
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 89823cf..37d95a8 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <sound/control.h>
+#include <sound/soc-topology.h>
+#include <sound/asoc.h>
struct device;
@@ -107,6 +109,10 @@ struct device;
{ .id = snd_soc_dapm_mux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
+#define SND_SOC_DAPM_DEMUX(wname, wreg, wshift, winvert, wcontrols) \
+{ .id = snd_soc_dapm_demux, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_ARRAY(wname, wreg, wshift, winvert,\
@@ -287,7 +293,7 @@ struct device;
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.tlv.p = (tlv_array), \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
#define SOC_DAPM_ENUM(xname, xenum) \
@@ -378,6 +384,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
const struct snd_soc_pcm_stream *params,
+ unsigned int num_params,
struct snd_soc_dapm_widget *source,
struct snd_soc_dapm_widget *sink);
@@ -405,7 +412,7 @@ int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
struct snd_soc_dapm_update *update);
/* dapm sys fs - used by the core */
-int snd_soc_dapm_sys_add(struct device *dev);
+extern struct attribute *soc_dapm_dev_attrs[];
void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
struct dentry *parent);
@@ -431,7 +438,6 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
const char *pin);
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
const char *pin);
-void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card);
unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
/* Mostly internal - should not normally be used */
@@ -441,15 +447,18 @@ void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list);
-struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol);
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
+int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level);
+
/* dapm widget types */
enum snd_soc_dapm_type {
snd_soc_dapm_input = 0, /* input pin */
snd_soc_dapm_output, /* output pin */
snd_soc_dapm_mux, /* selects 1 analog signal from many inputs */
+ snd_soc_dapm_demux, /* connects the input to one of multiple outputs */
snd_soc_dapm_mixer, /* mixes several analog signals together */
snd_soc_dapm_mixer_named_ctl, /* mixer with named controls */
snd_soc_dapm_pga, /* programmable gain/attenuation (volume) */
@@ -526,13 +535,14 @@ struct snd_soc_dapm_widget {
enum snd_soc_dapm_type id;
const char *name; /* widget name */
const char *sname; /* stream name */
- struct snd_soc_codec *codec;
struct list_head list;
struct snd_soc_dapm_context *dapm;
void *priv; /* widget specific data */
struct regulator *regulator; /* attached regulator */
const struct snd_soc_pcm_stream *params; /* params for dai links */
+ unsigned int num_params; /* number of params for dai links */
+ unsigned int params_select; /* currently selected param for dai link */
/* dapm control */
int reg; /* negative reg = no direct dapm */
@@ -563,6 +573,7 @@ struct snd_soc_dapm_widget {
int num_kcontrols;
const struct snd_kcontrol_new *kcontrol_news;
struct snd_kcontrol **kcontrols;
+ struct snd_soc_dobj dobj;
/* widget input and outputs */
struct list_head sources;
@@ -585,11 +596,13 @@ struct snd_soc_dapm_update {
int val;
};
+struct snd_soc_dapm_wcache {
+ struct snd_soc_dapm_widget *widget;
+};
+
/* DAPM context */
struct snd_soc_dapm_context {
enum snd_soc_bias_level bias_level;
- enum snd_soc_bias_level suspend_bias_level;
- struct delayed_work delayed_work;
unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
/* Go to BIAS_OFF in suspend if the DAPM context is idle */
unsigned int suspend_bias_off:1;
@@ -608,6 +621,9 @@ struct snd_soc_dapm_context {
int (*set_bias_level)(struct snd_soc_dapm_context *dapm,
enum snd_soc_bias_level level);
+ struct snd_soc_dapm_wcache path_sink_cache;
+ struct snd_soc_dapm_wcache path_source_cache;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_dapm;
#endif
@@ -625,4 +641,35 @@ struct snd_soc_dapm_stats {
int neighbour_checks;
};
+/**
+ * snd_soc_dapm_init_bias_level() - Initialize DAPM bias level
+ * @dapm: The DAPM context to initialize
+ * @level: The DAPM level to initialize to
+ *
+ * This function only sets the driver internal state of the DAPM level and will
+ * not modify the state of the device. Hence it should not be used during normal
+ * operation, but only to synchronize the internal state to the device state.
+ * E.g. during driver probe to set the DAPM level to the one corresponding with
+ * the power-on reset state of the device.
+ *
+ * To change the DAPM state of the device use snd_soc_dapm_set_bias_level().
+ */
+static inline void snd_soc_dapm_init_bias_level(
+ struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level)
+{
+ dapm->bias_level = level;
+}
+
+/**
+ * snd_soc_dapm_get_bias_level() - Get current DAPM bias level
+ * @dapm: The context for which to get the bias level
+ *
+ * Returns: The current bias level of the passed DAPM context.
+ */
+static inline enum snd_soc_bias_level snd_soc_dapm_get_bias_level(
+ struct snd_soc_dapm_context *dapm)
+{
+ return dapm->bias_level;
+}
+
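+/*
+ * Usage sketch (illustrative only, not part of this header): a driver whose
+ * device powers up in standby can synchronize the DAPM bookkeeping from its
+ * probe path; the dapm pointer is whichever context the driver owns, no
+ * specific accessor is implied.
+ *
+ *	snd_soc_dapm_init_bias_level(dapm, SND_SOC_BIAS_STANDBY);
+ *	WARN_ON(snd_soc_dapm_get_bias_level(dapm) != SND_SOC_BIAS_STANDBY);
+ */
+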
#endif
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 98f2ade..8060590 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -135,7 +135,7 @@ void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream,
/* internal use only */
int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute);
-int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
+void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
int soc_dpcm_runtime_update(struct snd_soc_card *);
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
new file mode 100644
index 0000000..865a141
--- /dev/null
+++ b/include/sound/soc-topology.h
@@ -0,0 +1,168 @@
+/*
+ * linux/sound/soc-topology.h -- ALSA SoC Firmware Controls and DAPM
+ *
+ * Copyright (C) 2012 Texas Instruments Inc.
+ * Copyright (C) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
+ * algorithms, equalisers, DAIs, widgets, FE caps, BE caps, codec link caps etc.
+ */
+
+#ifndef __LINUX_SND_SOC_TPLG_H
+#define __LINUX_SND_SOC_TPLG_H
+
+#include <sound/asoc.h>
+#include <linux/list.h>
+
+struct firmware;
+struct snd_kcontrol;
+struct snd_soc_tplg_pcm_be;
+struct snd_ctl_elem_value;
+struct snd_ctl_elem_info;
+struct snd_soc_dapm_widget;
+struct snd_soc_component;
+struct snd_soc_tplg_pcm_fe;
+struct snd_soc_dapm_context;
+struct snd_soc_card;
+
+/* objects can be loaded and unloaded in groups with identifying indexes */
+#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
+
+/* dynamic object type */
+enum snd_soc_dobj_type {
+ SND_SOC_DOBJ_NONE = 0, /* object is not dynamic */
+ SND_SOC_DOBJ_MIXER,
+ SND_SOC_DOBJ_ENUM,
+ SND_SOC_DOBJ_BYTES,
+ SND_SOC_DOBJ_PCM,
+ SND_SOC_DOBJ_DAI_LINK,
+ SND_SOC_DOBJ_CODEC_LINK,
+ SND_SOC_DOBJ_WIDGET,
+};
+
+/* dynamic control object */
+struct snd_soc_dobj_control {
+ struct snd_kcontrol *kcontrol;
+ char **dtexts;
+ unsigned long *dvalues;
+};
+
+/* dynamic widget object */
+struct snd_soc_dobj_widget {
+ unsigned int kcontrol_enum:1; /* this widget is an enum kcontrol */
+};
+
+/* dynamic PCM DAI object */
+struct snd_soc_dobj_pcm_dai {
+ struct snd_soc_tplg_pcm_dai *pd;
+ unsigned int count;
+};
+
+/* generic dynamic object - all dynamic objects belong to this struct */
+struct snd_soc_dobj {
+ enum snd_soc_dobj_type type;
+ unsigned int index; /* objects can belong in different groups */
+ struct list_head list;
+ struct snd_soc_tplg_ops *ops;
+ union {
+ struct snd_soc_dobj_control control;
+ struct snd_soc_dobj_widget widget;
+ struct snd_soc_dobj_pcm_dai pcm_dai;
+ };
+ void *private; /* core does not touch this */
+};
+
+/*
+ * Kcontrol operations - used to map handlers onto firmware based controls.
+ */
+struct snd_soc_tplg_kcontrol_ops {
+ u32 id;
+ int (*get)(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+ int (*put)(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+ int (*info)(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+};
+
+/*
+ * DAPM widget event handlers - used to map handlers onto widgets.
+ */
+struct snd_soc_tplg_widget_events {
+ u16 type;
+ int (*event_handler)(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event);
+};
+
+/*
+ * Public API - Used by component drivers to load and unload dynamic objects
+ * and their resources.
+ */
+struct snd_soc_tplg_ops {
+
+ /* external kcontrol init - used for any driver specific init */
+ int (*control_load)(struct snd_soc_component *,
+ struct snd_kcontrol_new *, struct snd_soc_tplg_ctl_hdr *);
+ int (*control_unload)(struct snd_soc_component *,
+ struct snd_soc_dobj *);
+
+ /* external widget init - used for any driver specific init */
+ int (*widget_load)(struct snd_soc_component *,
+ struct snd_soc_dapm_widget *,
+ struct snd_soc_tplg_dapm_widget *);
+ int (*widget_unload)(struct snd_soc_component *,
+ struct snd_soc_dobj *);
+
+ /* FE - used for any driver specific init */
+ int (*pcm_dai_load)(struct snd_soc_component *,
+ struct snd_soc_tplg_pcm_dai *pcm_dai, int num_fe);
+ int (*pcm_dai_unload)(struct snd_soc_component *,
+ struct snd_soc_dobj *);
+
+ /* callback to handle vendor bespoke data */
+ int (*vendor_load)(struct snd_soc_component *,
+ struct snd_soc_tplg_hdr *);
+ int (*vendor_unload)(struct snd_soc_component *,
+ struct snd_soc_tplg_hdr *);
+
+ /* completion - called at completion of firmware loading */
+ void (*complete)(struct snd_soc_component *);
+
+ /* manifest - optional to inform component of manifest */
+ int (*manifest)(struct snd_soc_component *,
+ struct snd_soc_tplg_manifest *);
+
+ /* bespoke kcontrol handlers available for binding */
+ const struct snd_soc_tplg_kcontrol_ops *io_ops;
+ int io_ops_count;
+};
+
+/* gets a pointer to data from the firmware block header */
+static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
+{
+ const void *ptr = hdr;
+
+ return ptr + sizeof(*hdr);
+}
+
+/* Dynamic Object loading and removal for component drivers */
+int snd_soc_tplg_component_load(struct snd_soc_component *comp,
+ struct snd_soc_tplg_ops *ops, const struct firmware *fw,
+ u32 index);
+int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index);
+
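+/*
+ * Usage sketch (illustrative only, not part of this header): a component
+ * driver loading every topology object from a firmware image with its own
+ * ops table (my_tplg_ops below is a driver-local name, not defined here).
+ *
+ *	ret = snd_soc_tplg_component_load(comp, &my_tplg_ops, fw,
+ *					  SND_SOC_TPLG_INDEX_ALL);
+ *	if (ret < 0)
+ *		dev_err(comp->dev, "topology load failed: %d\n", ret);
+ */
+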
+/* Widget removal - widgets are also removed with the component API */
+void snd_soc_tplg_widget_remove(struct snd_soc_dapm_widget *w);
+void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
+ u32 index);
+
+/* Binds event handlers to dynamic widgets */
+int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
+ const struct snd_soc_tplg_widget_events *events, int num_events,
+ u16 event_type);
+
+#endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index b4fca9a..93df8bf 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -27,6 +27,7 @@
#include <sound/compress_driver.h>
#include <sound/control.h>
#include <sound/ac97_codec.h>
+#include <sound/soc-topology.h>
/*
* Convenience kcontrol builders
@@ -190,8 +191,12 @@
#define SOC_VALUE_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xmask, xitems, xtexts, xvalues) \
{ .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \
.mask = xmask, .items = xitems, .texts = xtexts, .values = xvalues}
-#define SOC_VALUE_ENUM_SINGLE(xreg, xshift, xmask, xnitmes, xtexts, xvalues) \
- SOC_VALUE_ENUM_DOUBLE(xreg, xshift, xshift, xmask, xnitmes, xtexts, xvalues)
+#define SOC_VALUE_ENUM_SINGLE(xreg, xshift, xmask, xitems, xtexts, xvalues) \
+ SOC_VALUE_ENUM_DOUBLE(xreg, xshift, xshift, xmask, xitems, xtexts, xvalues)
+#define SOC_VALUE_ENUM_SINGLE_AUTODISABLE(xreg, xshift, xmask, xitems, xtexts, xvalues) \
+{ .reg = xreg, .shift_l = xshift, .shift_r = xshift, \
+ .mask = xmask, .items = xitems, .texts = xtexts, \
+ .values = xvalues, .autodisable = 1}
#define SOC_ENUM_SINGLE_VIRT(xitems, xtexts) \
SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, xitems, xtexts)
#define SOC_ENUM(xname, xenum) \
@@ -312,6 +317,11 @@
ARRAY_SIZE(xtexts), xtexts, xvalues)
#define SOC_VALUE_ENUM_SINGLE_DECL(name, xreg, xshift, xmask, xtexts, xvalues) \
SOC_VALUE_ENUM_DOUBLE_DECL(name, xreg, xshift, xshift, xmask, xtexts, xvalues)
+
+#define SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(name, xreg, xshift, xmask, xtexts, xvalues) \
+ const struct soc_enum name = SOC_VALUE_ENUM_SINGLE_AUTODISABLE(xreg, \
+ xshift, xmask, ARRAY_SIZE(xtexts), xtexts, xvalues)
+
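+/*
+ * Illustrative sketch (hypothetical register, shift, mask, texts and values,
+ * not part of this patch):
+ *
+ *	static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(foo_mux_enum, FOO_MUX_REG,
+ *						       0, 0x3, foo_mux_texts,
+ *						       foo_mux_values);
+ */
+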
#define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \
const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts)
@@ -387,8 +397,20 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
int snd_soc_register_card(struct snd_soc_card *card);
int snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
+#ifdef CONFIG_PM_SLEEP
int snd_soc_suspend(struct device *dev);
int snd_soc_resume(struct device *dev);
+#else
+static inline int snd_soc_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int snd_soc_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
int snd_soc_poweroff(struct device *dev);
int snd_soc_register_platform(struct device *dev,
const struct snd_soc_platform_driver *platform_drv);
@@ -429,6 +451,9 @@ bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd);
void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream);
void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
+int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
+ unsigned int dai_fmt);
+
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params);
@@ -447,8 +472,10 @@ int soc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai);
/* Jack reporting */
-int snd_soc_jack_new(struct snd_soc_codec *codec, const char *id, int type,
- struct snd_soc_jack *jack);
+int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
+ struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins,
+ unsigned int num_pins);
+
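+/*
+ * Illustrative sketch (hypothetical names, not part of this patch): machine
+ * drivers now create jacks against the card rather than a single CODEC, and
+ * can attach DAPM pins in the same call:
+ *
+ *	static struct snd_soc_jack hp_jack;
+ *	static struct snd_soc_jack_pin hp_pins[] = {
+ *		{ .pin = "Headphone", .mask = SND_JACK_HEADPHONE },
+ *	};
+ *
+ *	ret = snd_soc_card_jack_new(rtd->card, "Headphone", SND_JACK_HEADPHONE,
+ *				    &hp_jack, hp_pins, ARRAY_SIZE(hp_pins));
+ */
+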
void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_pin *pins);
@@ -498,6 +525,7 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
unsigned int mask, unsigned int value);
#ifdef CONFIG_SND_SOC_AC97_BUS
+struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
@@ -655,7 +683,7 @@ struct snd_soc_jack_gpio {
struct snd_soc_jack {
struct mutex mutex;
struct snd_jack *jack;
- struct snd_soc_codec *codec;
+ struct snd_soc_card *card;
struct list_head pins;
int status;
struct blocking_notifier_head notifier;
@@ -749,6 +777,9 @@ struct snd_soc_component {
struct mutex io_mutex;
+ /* attached dynamic objects */
+ struct list_head dobj_list;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root;
#endif
@@ -801,7 +832,7 @@ struct snd_soc_codec {
/* component */
struct snd_soc_component component;
- /* dapm */
+ /* Don't access this directly, use snd_soc_codec_get_dapm() */
struct snd_soc_dapm_context dapm;
#ifdef CONFIG_DEBUG_FS
@@ -937,11 +968,30 @@ struct snd_soc_dai_link {
int be_id; /* optional ID for machine driver BE identification */
const struct snd_soc_pcm_stream *params;
+ unsigned int num_params;
unsigned int dai_fmt; /* format to set on init */
enum snd_soc_dpcm_trigger trigger[2]; /* trigger type for DPCM */
+ /* codec/machine specific init - e.g. add machine controls */
+ int (*init)(struct snd_soc_pcm_runtime *rtd);
+
+ /* optional hw_params re-writing for BE and FE sync */
+ int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+
+ /* machine stream operations */
+ const struct snd_soc_ops *ops;
+ const struct snd_soc_compr_ops *compr_ops;
+
+ /* For unidirectional dai links */
+ bool playback_only;
+ bool capture_only;
+
+ /* Mark this pcm with non atomic ops */
+ bool nonatomic;
+
/* Keep DAI active over suspend */
unsigned int ignore_suspend:1;
@@ -960,23 +1010,11 @@ struct snd_soc_dai_link {
unsigned int dpcm_capture:1;
unsigned int dpcm_playback:1;
+ /* DPCM: merge the BE formats into the FE */
+ unsigned int dpcm_merged_format:1;
+
/* pmdown_time is ignored at stop */
unsigned int ignore_pmdown_time:1;
-
- /* codec/machine specific init - e.g. add machine controls */
- int (*init)(struct snd_soc_pcm_runtime *rtd);
-
- /* optional hw_params re-writing for BE and FE sync */
- int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params);
-
- /* machine stream operations */
- const struct snd_soc_ops *ops;
- const struct snd_soc_compr_ops *compr_ops;
-
- /* For unidirectional dai links */
- bool playback_only;
- bool capture_only;
};
struct snd_soc_codec_conf {
@@ -1067,11 +1105,16 @@ struct snd_soc_card {
/*
* Card-specific routes and widgets.
+ * Note: the of_dapm_xxx fields are populated from Device Tree; the others are driver built-in.
*/
const struct snd_soc_dapm_widget *dapm_widgets;
int num_dapm_widgets;
const struct snd_soc_dapm_route *dapm_routes;
int num_dapm_routes;
+ const struct snd_soc_dapm_widget *of_dapm_widgets;
+ int num_of_dapm_widgets;
+ const struct snd_soc_dapm_route *of_dapm_routes;
+ int num_of_dapm_routes;
bool fully_routed;
struct work_struct deferred_resume_work;
@@ -1084,6 +1127,9 @@ struct snd_soc_card {
struct list_head dapm_list;
struct list_head dapm_dirty;
+ /* attached dynamic objects */
+ struct list_head dobj_list;
+
/* Generic DAPM context for the card */
struct snd_soc_dapm_context dapm;
struct snd_soc_dapm_stats dapm_stats;
@@ -1143,6 +1189,7 @@ struct soc_mixer_control {
unsigned int sign_bit;
unsigned int invert:1;
unsigned int autodisable:1;
+ struct snd_soc_dobj dobj;
};
struct soc_bytes {
@@ -1153,6 +1200,8 @@ struct soc_bytes {
struct soc_bytes_ext {
int max;
+ struct snd_soc_dobj dobj;
+
/* used for TLV byte control */
int (*get)(unsigned int __user *bytes, unsigned int size);
int (*put)(const unsigned int __user *bytes, unsigned int size);
@@ -1173,6 +1222,8 @@ struct soc_enum {
unsigned int mask;
const char * const *texts;
const unsigned int *values;
+ unsigned int autodisable:1;
+ struct snd_soc_dobj dobj;
};
/**
@@ -1254,6 +1305,71 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
return component->dapm_ptr;
}
+/**
+ * snd_soc_codec_get_dapm() - Returns the DAPM context for the CODEC
+ * @codec: The CODEC for which to get the DAPM context
+ *
+ * Note: Use this function instead of directly accessing the CODEC's dapm field
+ */
+static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
+ struct snd_soc_codec *codec)
+{
+ return &codec->dapm;
+}
+
+/**
+ * snd_soc_codec_init_bias_level() - Initialize CODEC DAPM bias level
+ * @codec: The CODEC for which to initialize the DAPM bias level
+ * @level: The DAPM level to initialize to
+ *
+ * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
+ */
+static inline void snd_soc_codec_init_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ snd_soc_dapm_init_bias_level(snd_soc_codec_get_dapm(codec), level);
+}
+
+/**
+ * snd_soc_codec_get_bias_level() - Get current CODEC DAPM bias level
+ * @codec: The CODEC for which to get the DAPM bias level
+ *
+ * Returns: The current DAPM bias level of the CODEC.
+ */
+static inline enum snd_soc_bias_level snd_soc_codec_get_bias_level(
+ struct snd_soc_codec *codec)
+{
+ return snd_soc_dapm_get_bias_level(snd_soc_codec_get_dapm(codec));
+}
+
+/**
+ * snd_soc_codec_force_bias_level() - Set the CODEC DAPM bias level
+ * @codec: The CODEC for which to set the level
+ * @level: The level to set to
+ *
+ * Forces the CODEC bias level to a specific state. See
+ * snd_soc_dapm_force_bias_level().
+ */
+static inline int snd_soc_codec_force_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ return snd_soc_dapm_force_bias_level(snd_soc_codec_get_dapm(codec),
+ level);
+}
+
+/**
+ * snd_soc_dapm_kcontrol_codec() - Returns the codec associated with a kcontrol
+ * @kcontrol: The kcontrol
+ *
+ * This function must only be used on DAPM contexts that are known to be part of
+ * a CODEC (e.g. in a CODEC driver). Otherwise the behavior is undefined.
+ */
+static inline struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(
+ struct snd_kcontrol *kcontrol)
+{
+ return snd_soc_dapm_to_codec(snd_soc_dapm_kcontrol_dapm(kcontrol));
+}
+
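+/*
+ * Illustrative sketch (not part of this patch): inside a CODEC kcontrol
+ * get/put handler the helpers above can be combined, e.g. to skip register
+ * work while the CODEC is powered down:
+ *
+ *	struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
+ *
+ *	if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+ *		return 0;
+ */
+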
/* codec IO */
unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg,
@@ -1465,7 +1581,7 @@ static inline struct snd_soc_codec *snd_soc_kcontrol_codec(
}
/**
- * snd_soc_kcontrol_platform() - Returns the platform that registerd the control
+ * snd_soc_kcontrol_platform() - Returns the platform that registered the control
* @kcontrol: The control for which to get the platform
*
* Note: This function will only work correctly if the control has been
diff --git a/include/sound/spear_dma.h b/include/sound/spear_dma.h
index 65aca51..e290de4 100644
--- a/include/sound/spear_dma.h
+++ b/include/sound/spear_dma.h
@@ -1,7 +1,7 @@
/*
* linux/spear_dma.h
*
-* Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+* Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/sound/sta32x.h b/include/sound/sta32x.h
index 8d93b03..a894f7d 100644
--- a/include/sound/sta32x.h
+++ b/include/sound/sta32x.h
@@ -24,12 +24,20 @@
#define STA32X_THERMAL_RECOVERY_ENABLE 2
struct sta32x_platform_data {
- int output_conf;
- int ch1_output_mapping;
- int ch2_output_mapping;
- int ch3_output_mapping;
- int thermal_conf;
+ u8 output_conf;
+ u8 ch1_output_mapping;
+ u8 ch2_output_mapping;
+ u8 ch3_output_mapping;
int needs_esd_watchdog;
+ u8 drop_compensation_ns;
+ unsigned int thermal_warning_recovery:1;
+ unsigned int thermal_warning_adjustment:1;
+ unsigned int fault_detect_recovery:1;
+ unsigned int max_power_use_mpcc:1;
+ unsigned int max_power_correction:1;
+ unsigned int am_reduction_mode:1;
+ unsigned int odd_pwm_speed_mode:1;
+ unsigned int invalid_input_detect_mute:1;
};
#endif /* __LINUX_SND__STA32X_H */
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index e11e179..df97d19 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -31,12 +31,7 @@
* ~(sizeof(unsigned int) - 1)) ....
*/
-#define SNDRV_CTL_TLVT_CONTAINER 0 /* one level down - group of TLVs */
-#define SNDRV_CTL_TLVT_DB_SCALE 1 /* dB scale */
-#define SNDRV_CTL_TLVT_DB_LINEAR 2 /* linear volume */
-#define SNDRV_CTL_TLVT_DB_RANGE 3 /* dB range container */
-#define SNDRV_CTL_TLVT_DB_MINMAX 4 /* dB scale with min/max */
-#define SNDRV_CTL_TLVT_DB_MINMAX_MUTE 5 /* dB scale with min/max with mute */
+#include <uapi/sound/tlv.h>
#define TLV_ITEM(type, ...) \
(type), TLV_LENGTH(__VA_ARGS__), __VA_ARGS__
@@ -90,12 +85,4 @@
#define TLV_DB_GAIN_MUTE -9999999
-/*
- * channel-mapping TLV items
- * TLV length must match with num_channels
- */
-#define SNDRV_CTL_TLVT_CHMAP_FIXED 0x101 /* fixed channel position */
-#define SNDRV_CTL_TLVT_CHMAP_VAR 0x102 /* channels freely swappable */
-#define SNDRV_CTL_TLVT_CHMAP_PAIRED 0x103 /* pair-wise swappable */
-
#endif /* __SOUND_TLV_H */
diff --git a/include/sound/wss.h b/include/sound/wss.h
index 0c7f034..1823e3a 100644
--- a/include/sound/wss.h
+++ b/include/sound/wss.h
@@ -154,8 +154,8 @@ int snd_wss_create(struct snd_card *card,
unsigned short hardware,
unsigned short hwshare,
struct snd_wss **rchip);
-int snd_wss_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm);
-int snd_wss_timer(struct snd_wss *chip, int device, struct snd_timer **rtimer);
+int snd_wss_pcm(struct snd_wss *chip, int device);
+int snd_wss_timer(struct snd_wss *chip, int device);
int snd_wss_mixer(struct snd_wss *chip);
const struct snd_pcm_ops *snd_wss_get_pcm_ops(int direction);
@@ -167,7 +167,7 @@ int snd_cs4236_create(struct snd_card *card,
unsigned short hardware,
unsigned short hwshare,
struct snd_wss **rchip);
-int snd_cs4236_pcm(struct snd_wss *chip, int device, struct snd_pcm **rpcm);
+int snd_cs4236_pcm(struct snd_wss *chip, int device);
int snd_cs4236_mixer(struct snd_wss *chip);
/*
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
new file mode 100644
index 0000000..34117b8
--- /dev/null
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -0,0 +1,893 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION "v4.1.0"
+#define ISCSI_MAX_DATASN_MISSING_COUNT 16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
+#define SECONDS_FOR_ASYNC_LOGOUT 10
+#define SECONDS_FOR_ASYNC_TEXT 10
+#define SECONDS_FOR_LOGOUT_COMP 15
+#define WHITE_SPACE " \t\v\f\n\r"
+#define ISCSIT_MIN_TAGS 16
+#define ISCSIT_EXTRA_TAGS 8
+#define ISCSIT_TCP_BACKLOG 256
+#define ISCSI_RX_THREAD_NAME "iscsi_trx"
+#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT 3
+#define NA_DATAOUT_TIMEOUT_MAX 60
+#define NA_DATAOUT_TIMEOUT_MIX 2
+#define NA_DATAOUT_TIMEOUT_RETRIES 5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+#define NA_NOPIN_TIMEOUT 15
+#define NA_NOPIN_TIMEOUT_MAX 60
+#define NA_NOPIN_TIMEOUT_MIN 3
+#define NA_NOPIN_RESPONSE_TIMEOUT 30
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
+#define NA_RANDOM_R2T_OFFSETS 0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION 1
+#define TA_LOGIN_TIMEOUT 15
+#define TA_LOGIN_TIMEOUT_MAX 30
+#define TA_LOGIN_TIMEOUT_MIN 5
+#define TA_NETIF_TIMEOUT 2
+#define TA_NETIF_TIMEOUT_MAX 15
+#define TA_NETIF_TIMEOUT_MIN 2
+#define TA_GENERATE_NODE_ACLS 0
+#define TA_DEFAULT_CMDSN_DEPTH 64
+#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
+#define TA_CACHE_DYNAMIC_ACLS 0
+/* Enabled by default in demo mode (generate_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT 1
+/* Disabled by default in production mode w/ explicit ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT 0
+#define TA_DEMO_MODE_DISCOVERY 1
+#define TA_DEFAULT_ERL 0
+#define TA_CACHE_CORE_NPS 0
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI 0
+#define TA_DEFAULT_FABRIC_PROT_TYPE 0
+
+#define ISCSI_IOV_DATA_BUFFER 5
+
+enum iscsit_transport_type {
+ ISCSI_TCP = 0,
+ ISCSI_SCTP_TCP = 1,
+ ISCSI_SCTP_UDP = 2,
+ ISCSI_IWARP_TCP = 3,
+ ISCSI_IWARP_SCTP = 4,
+ ISCSI_INFINIBAND = 5,
+};
+
+/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+ TARG_CONN_STATE_FREE = 0x1,
+ TARG_CONN_STATE_XPT_UP = 0x3,
+ TARG_CONN_STATE_IN_LOGIN = 0x4,
+ TARG_CONN_STATE_LOGGED_IN = 0x5,
+ TARG_CONN_STATE_IN_LOGOUT = 0x6,
+ TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
+ TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
+};
+
+/* RFC-3720 7.3.2 Session State Diagram for a Target */
+enum target_sess_state_table {
+ TARG_SESS_STATE_FREE = 0x1,
+ TARG_SESS_STATE_ACTIVE = 0x2,
+ TARG_SESS_STATE_LOGGED_IN = 0x3,
+ TARG_SESS_STATE_FAILED = 0x4,
+ TARG_SESS_STATE_IN_CONTINUE = 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+ ISCSI_RX_DATA = 1,
+ ISCSI_TX_DATA = 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+ DATAIN_COMPLETE_NORMAL = 1,
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+ DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+ DATAIN_WITHIN_COMMAND_RECOVERY = 1,
+ DATAIN_CONNECTION_RECOVERY = 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+ TPG_STATE_FREE = 0,
+ TPG_STATE_ACTIVE = 1,
+ TPG_STATE_INACTIVE = 2,
+ TPG_STATE_COLD_RESET = 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+ TIQN_STATE_ACTIVE = 1,
+ TIQN_STATE_SHUTDOWN = 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+ ICF_GOT_LAST_DATAOUT = 0x00000001,
+ ICF_GOT_DATACK_SNACK = 0x00000002,
+ ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
+ ICF_SENT_LAST_R2T = 0x00000008,
+ ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
+ ICF_CONTIG_MEMORY = 0x00000020,
+ ICF_ATTACHED_TO_RQUEUE = 0x00000040,
+ ICF_OOO_CMDSN = 0x00000080,
+ ICF_SENDTARGETS_ALL = 0x00000100,
+ ICF_SENDTARGETS_SINGLE = 0x00000200,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+ ISTATE_NO_STATE = 0,
+ ISTATE_NEW_CMD = 1,
+ ISTATE_DEFERRED_CMD = 2,
+ ISTATE_UNSOLICITED_DATA = 3,
+ ISTATE_RECEIVE_DATAOUT = 4,
+ ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
+ ISTATE_RECEIVED_LAST_DATAOUT = 6,
+ ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
+ ISTATE_IN_CONNECTION_RECOVERY = 8,
+ ISTATE_RECEIVED_TASKMGT = 9,
+ ISTATE_SEND_ASYNCMSG = 10,
+ ISTATE_SENT_ASYNCMSG = 11,
+ ISTATE_SEND_DATAIN = 12,
+ ISTATE_SEND_LAST_DATAIN = 13,
+ ISTATE_SENT_LAST_DATAIN = 14,
+ ISTATE_SEND_LOGOUTRSP = 15,
+ ISTATE_SENT_LOGOUTRSP = 16,
+ ISTATE_SEND_NOPIN = 17,
+ ISTATE_SENT_NOPIN = 18,
+ ISTATE_SEND_REJECT = 19,
+ ISTATE_SENT_REJECT = 20,
+ ISTATE_SEND_R2T = 21,
+ ISTATE_SENT_R2T = 22,
+ ISTATE_SEND_R2T_RECOVERY = 23,
+ ISTATE_SENT_R2T_RECOVERY = 24,
+ ISTATE_SEND_LAST_R2T = 25,
+ ISTATE_SENT_LAST_R2T = 26,
+ ISTATE_SEND_LAST_R2T_RECOVERY = 27,
+ ISTATE_SENT_LAST_R2T_RECOVERY = 28,
+ ISTATE_SEND_STATUS = 29,
+ ISTATE_SEND_STATUS_BROKEN_PC = 30,
+ ISTATE_SENT_STATUS = 31,
+ ISTATE_SEND_STATUS_RECOVERY = 32,
+ ISTATE_SENT_STATUS_RECOVERY = 33,
+ ISTATE_SEND_TASKMGTRSP = 34,
+ ISTATE_SENT_TASKMGTRSP = 35,
+ ISTATE_SEND_TEXTRSP = 36,
+ ISTATE_SENT_TEXTRSP = 37,
+ ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
+ ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
+ ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
+ ISTATE_REMOVE = 41,
+ ISTATE_FREE = 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+ CMDSN_ERROR_CANNOT_RECOVER = -1,
+ CMDSN_NORMAL_OPERATION = 0,
+ CMDSN_LOWER_THAN_EXP = 1,
+ CMDSN_HIGHER_THAN_EXP = 2,
+ CMDSN_MAXCMDSN_OVERRUN = 3,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immedate_data_ret_table {
+ IMMEDIATE_DATA_CANNOT_RECOVER = -1,
+ IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+ IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+ DATAOUT_CANNOT_RECOVER = -1,
+ DATAOUT_NORMAL = 0,
+ DATAOUT_SEND_R2T = 1,
+ DATAOUT_SEND_TO_TRANSPORT = 2,
+ DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+ NAF_USERID_SET = 0x01,
+ NAF_PASSWORD_SET = 0x02,
+ NAF_USERID_IN_SET = 0x04,
+ NAF_PASSWORD_IN_SET = 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+ ISCSI_TF_RUNNING = 0x01,
+ ISCSI_TF_STOP = 0x02,
+ ISCSI_TF_EXPIRED = 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+ NPF_IP_NETWORK = 0x00,
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+ ISCSI_NP_THREAD_ACTIVE = 1,
+ ISCSI_NP_THREAD_INACTIVE = 2,
+ ISCSI_NP_THREAD_RESET = 3,
+ ISCSI_NP_THREAD_SHUTDOWN = 4,
+ ISCSI_NP_THREAD_EXIT = 5,
+};
+
+struct iscsi_conn_ops {
+ u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
+ u8 DataDigest; /* [0,1] == [None,CRC32C] */
+ u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
+ u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */
+ /*
+ * iSER specific connection parameters
+ */
+ u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
+ u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */
+};
+
+struct iscsi_sess_ops {
+ char InitiatorName[224];
+ char InitiatorAlias[256];
+ char TargetName[224];
+ char TargetAlias[256];
+ char TargetAddress[256];
+ u16 TargetPortalGroupTag; /* [0..65535] */
+ u16 MaxConnections; /* [1..65535] */
+ u8 InitialR2T; /* [0,1] == [No,Yes] */
+ u8 ImmediateData; /* [0,1] == [No,Yes] */
+ u32 MaxBurstLength; /* [512..2**24-1] */
+ u32 FirstBurstLength; /* [512..2**24-1] */
+ u16 DefaultTime2Wait; /* [0..3600] */
+ u16 DefaultTime2Retain; /* [0..3600] */
+ u16 MaxOutstandingR2T; /* [1..65535] */
+ u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
+ u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
+ u8 ErrorRecoveryLevel; /* [0..2] */
+ u8 SessionType; /* [0,1] == [Normal,Discovery]*/
+ /*
+ * iSER specific session parameters
+ */
+ u8 RDMAExtensions; /* [0,1] == [No,Yes] */
+};
+
+struct iscsi_queue_req {
+ int state;
+ struct iscsi_cmd *cmd;
+ struct list_head qr_list;
+};
+
+struct iscsi_data_count {
+ int data_length;
+ int sync_and_steering;
+ enum data_count_type type;
+ u32 iov_count;
+ u32 ss_iov_count;
+ u32 ss_marker_count;
+ struct kvec *iov;
+};
+
+struct iscsi_param_list {
+ bool iser;
+ struct list_head param_list;
+ struct list_head extra_response_list;
+};
+
+struct iscsi_datain_req {
+ enum datain_req_comp_table dr_complete;
+ int generate_recovery_values;
+ enum datain_req_rec_table recovery;
+ u32 begrun;
+ u32 runlength;
+ u32 data_length;
+ u32 data_offset;
+ u32 data_sn;
+ u32 next_burst_len;
+ u32 read_data_done;
+ u32 seq_send_order;
+ struct list_head cmd_datain_node;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+ u16 cid;
+ u32 batch_count;
+ u32 cmdsn;
+ u32 exp_cmdsn;
+ struct iscsi_cmd *cmd;
+ struct list_head ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+ u8 flags;
+ u32 data_sn;
+ u32 length;
+ u32 offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+ int seq_complete;
+ int recovery_r2t;
+ int sent_r2t;
+ u32 r2t_sn;
+ u32 offset;
+ u32 targ_xfer_tag;
+ u32 xfer_len;
+ struct list_head r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+ enum iscsi_timer_flags_table dataout_timer_flags;
+ /* DataOUT timeout retries */
+ u8 dataout_timeout_retries;
+ /* Within command recovery count */
+ u8 error_recovery_count;
+ /* iSCSI dependent state for out of order CmdSNs */
+ enum cmd_i_state_table deferred_i_state;
+ /* iSCSI dependent state */
+ enum cmd_i_state_table i_state;
+ /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+ u8 immediate_cmd;
+ /* Immediate data present */
+ u8 immediate_data;
+ /* iSCSI Opcode */
+ u8 iscsi_opcode;
+ /* iSCSI Response Code */
+ u8 iscsi_response;
+ /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_reason;
+ /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_response;
+ /* MaxCmdSN has been incremented */
+ u8 maxcmdsn_inc;
+ /* Immediate Unsolicited Dataout */
+ u8 unsolicited_data;
+ /* Reject reason code */
+ u8 reject_reason;
+ /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+ u16 logout_cid;
+ /* Command flags */
+ enum cmd_flags_table cmd_flags;
+ /* Initiator Task Tag assigned from Initiator */
+ itt_t init_task_tag;
+ /* Target Transfer Tag assigned from Target */
+ u32 targ_xfer_tag;
+ /* CmdSN assigned from Initiator */
+ u32 cmd_sn;
+ /* ExpStatSN assigned from Initiator */
+ u32 exp_stat_sn;
+ /* StatSN assigned to this ITT */
+ u32 stat_sn;
+ /* DataSN Counter */
+ u32 data_sn;
+ /* R2TSN Counter */
+ u32 r2t_sn;
+ /* Last DataSN acknowledged via DataAck SNACK */
+ u32 acked_data_sn;
+ /* Used for echoing NOPOUT ping data */
+ u32 buf_ptr_size;
+ /* Used to store DataDigest */
+ u32 data_crc;
+ /* Counter for MaxOutstandingR2T */
+ u32 outstanding_r2ts;
+ /* Next R2T Offset when DataSequenceInOrder=Yes */
+ u32 r2t_offset;
+ /* Iovec current and orig count for iscsi_cmd->iov_data */
+ u32 iov_data_count;
+ u32 orig_iov_data_count;
+ /* Number of miscellaneous iovecs used for IP stack calls */
+ u32 iov_misc_count;
+ /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_count;
+ /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+ u32 pdu_send_order;
+ /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_start;
+ /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+ u32 seq_send_order;
+ /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_count;
+ /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_no;
+ /* Lowest offset in current DataOUT sequence */
+ u32 seq_start_offset;
+ /* Highest offset in current DataOUT sequence */
+ u32 seq_end_offset;
+ /* Total size in bytes received so far of READ data */
+ u32 read_data_done;
+ /* Total size in bytes received so far of WRITE data */
+ u32 write_data_done;
+ /* Counter for FirstBurstLength key */
+ u32 first_burst_len;
+ /* Counter for MaxBurstLength key */
+ u32 next_burst_len;
+ /* Transfer size used for IP stack calls */
+ u32 tx_size;
+ /* Buffer used for various purposes */
+ void *buf_ptr;
+ /* Used by SendTargets=[iqn.,eui.] discovery */
+ void *text_in_ptr;
+ /* See include/linux/dma-mapping.h */
+ enum dma_data_direction data_direction;
+ /* iSCSI PDU Header + CRC */
+ unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+ /* Number of times struct iscsi_cmd is present in immediate queue */
+ atomic_t immed_queue_count;
+ atomic_t response_queue_count;
+ spinlock_t datain_lock;
+ spinlock_t dataout_timeout_lock;
+ /* spinlock for protecting struct iscsi_cmd->i_state */
+ spinlock_t istate_lock;
+ /* spinlock for adding within command recovery entries */
+ spinlock_t error_lock;
+ /* spinlock for adding R2Ts */
+ spinlock_t r2t_lock;
+ /* DataIN List */
+ struct list_head datain_list;
+ /* R2T List */
+ struct list_head cmd_r2t_list;
+ /* Timer for DataOUT */
+ struct timer_list dataout_timer;
+ /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+ struct kvec *iov_data;
+ /* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS 5
+ struct kvec iov_misc[ISCSI_MISC_IOVECS];
+ /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_list;
+ /* Current struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_ptr;
+ /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_list;
+ /* Current struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_ptr;
+ /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+ struct iscsi_tmr_req *tmr_req;
+ /* Connection this command is allegiant to */
+ struct iscsi_conn *conn;
+ /* Pointer to connection recovery entry */
+ struct iscsi_conn_recovery *cr;
+ /* Session the command is part of, used for connection recovery */
+ struct iscsi_session *sess;
+ /* list_head for connection list */
+ struct list_head i_conn_node;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd se_cmd;
+ /* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
+ unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+ u32 padding;
+ u8 pad_bytes[4];
+
+ struct scatterlist *first_data_sg;
+ u32 first_data_sg_off;
+ u32 kmapped_nents;
+ sense_reason_t sense_reason;
+} ____cacheline_aligned;
+
+struct iscsi_tmr_req {
+ bool task_reassign:1;
+ u32 exp_data_sn;
+ struct iscsi_cmd *ref_cmd;
+ struct iscsi_conn_recovery *conn_recovery;
+ struct se_tmr_req *se_tmr_req;
+};
+
+struct iscsi_conn {
+ wait_queue_head_t queues_wq;
+ /* Authentication Successful for this connection */
+ u8 auth_complete;
+ /* State connection is currently in */
+ u8 conn_state;
+ u8 conn_logout_reason;
+ u8 network_transport;
+ enum iscsi_timer_flags_table nopin_timer_flags;
+ enum iscsi_timer_flags_table nopin_response_timer_flags;
+ /* Used to know what thread encountered a transport failure */
+ u8 which_thread;
+ /* connection id assigned by the Initiator */
+ u16 cid;
+ /* Remote TCP Port */
+ u16 login_port;
+ u16 local_port;
+ int net_size;
+ int login_family;
+ u32 auth_id;
+ u32 conn_flags;
+ /* Used for iscsi_tx_login_rsp() */
+ itt_t login_itt;
+ u32 exp_statsn;
+ /* Per connection status sequence number */
+ u32 stat_sn;
+#define IPV6_ADDRESS_SPACE 48
+ unsigned char login_ip[IPV6_ADDRESS_SPACE];
+ unsigned char local_ip[IPV6_ADDRESS_SPACE];
+ int conn_usage_count;
+ int conn_waiting_on_uc;
+ atomic_t check_immediate_queue;
+ atomic_t conn_logout_remove;
+ atomic_t connection_exit;
+ atomic_t connection_recovery;
+ atomic_t connection_reinstatement;
+ atomic_t connection_wait_rcfr;
+ atomic_t sleep_on_conn_wait_comp;
+ atomic_t transport_failed;
+ struct completion conn_post_wait_comp;
+ struct completion conn_wait_comp;
+ struct completion conn_wait_rcfr_comp;
+ struct completion conn_waiting_on_uc_comp;
+ struct completion conn_logout_comp;
+ struct completion tx_half_close_comp;
+ struct completion rx_half_close_comp;
+ /* socket used by this connection */
+ struct socket *sock;
+ void (*orig_data_ready)(struct sock *);
+ void (*orig_state_change)(struct sock *);
+#define LOGIN_FLAGS_READ_ACTIVE 1
+#define LOGIN_FLAGS_CLOSED 2
+#define LOGIN_FLAGS_READY 4
+ unsigned long login_flags;
+ struct delayed_work login_work;
+ struct delayed_work login_cleanup_work;
+ struct iscsi_login *login;
+ struct timer_list nopin_timer;
+ struct timer_list nopin_response_timer;
+ struct timer_list transport_timer;
+ struct task_struct *login_kworker;
+ /* Spinlock used for add/deleting cmd's from conn_cmd_list */
+ spinlock_t cmd_lock;
+ spinlock_t conn_usage_lock;
+ spinlock_t immed_queue_lock;
+ spinlock_t nopin_timer_lock;
+ spinlock_t response_queue_lock;
+ spinlock_t state_lock;
+ /* libcrypto RX and TX contexts for crc32c */
+ struct hash_desc conn_rx_hash;
+ struct hash_desc conn_tx_hash;
+ /* Used for scheduling TX and RX connection kthreads */
+ cpumask_var_t conn_cpumask;
+ unsigned int conn_rx_reset_cpumask:1;
+ unsigned int conn_tx_reset_cpumask:1;
+ /* list_head of struct iscsi_cmd for this connection */
+ struct list_head conn_cmd_list;
+ struct list_head immed_queue_list;
+ struct list_head response_queue_list;
+ struct iscsi_conn_ops *conn_ops;
+ struct iscsi_login *conn_login;
+ struct iscsit_transport *conn_transport;
+ struct iscsi_param_list *param_list;
+ /* Used for per connection auth state machine */
+ void *auth_protocol;
+ void *context;
+ struct iscsi_login_thread_s *login_thread;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ /* Pointer to parent session */
+ struct iscsi_session *sess;
+ int bitmap_id;
+ int rx_thread_active;
+ struct task_struct *rx_thread;
+ int tx_thread_active;
+ struct task_struct *tx_thread;
+ /* list_head for session connection list */
+ struct list_head conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+ u16 cid;
+ u32 cmd_count;
+ u32 maxrecvdatasegmentlength;
+ u32 maxxmitdatasegmentlength;
+ int ready_for_reallegiance;
+ struct list_head conn_recovery_cmd_list;
+ spinlock_t conn_recovery_cmd_lock;
+ struct timer_list time2retain_timer;
+ struct iscsi_session *sess;
+ struct list_head cr_list;
+} ____cacheline_aligned;
+
+struct iscsi_session {
+ u8 initiator_vendor;
+ u8 isid[6];
+ enum iscsi_timer_flags_table time2retain_timer_flags;
+ u8 version_active;
+ u16 cid_called;
+ u16 conn_recovery_count;
+ u16 tsih;
+ /* state session is currently in */
+ u32 session_state;
+ /* session wide counter: initiator assigned task tag */
+ itt_t init_task_tag;
+ /* session wide counter: target assigned task tag */
+ u32 targ_xfer_tag;
+ u32 cmdsn_window;
+
+ /* protects cmdsn values */
+ struct mutex cmdsn_mutex;
+ /* session wide counter: expected command sequence number */
+ u32 exp_cmd_sn;
+ /* session wide counter: maximum allowed command sequence number */
+ u32 max_cmd_sn;
+ struct list_head sess_ooo_cmdsn_list;
+
+ /* LIO specific session ID */
+ u32 sid;
+ char auth_type[8];
+ /* unique within the target */
+ int session_index;
+ /* Used for session reference counting */
+ int session_usage_count;
+ int session_waiting_on_uc;
+ atomic_long_t cmd_pdus;
+ atomic_long_t rsp_pdus;
+ atomic_long_t tx_data_octets;
+ atomic_long_t rx_data_octets;
+ atomic_long_t conn_digest_errors;
+ atomic_long_t conn_timeout_errors;
+ u64 creation_time;
+ /* Number of active connections */
+ atomic_t nconn;
+ atomic_t session_continuation;
+ atomic_t session_fall_back_to_erl0;
+ atomic_t session_logout;
+ atomic_t session_reinstatement;
+ atomic_t session_stop_active;
+ atomic_t sleep_on_sess_wait_comp;
+ /* connection list */
+ struct list_head sess_conn_list;
+ struct list_head cr_active_list;
+ struct list_head cr_inactive_list;
+ spinlock_t conn_lock;
+ spinlock_t cr_a_lock;
+ spinlock_t cr_i_lock;
+ spinlock_t session_usage_lock;
+ spinlock_t ttt_lock;
+ struct completion async_msg_comp;
+ struct completion reinstatement_comp;
+ struct completion session_wait_comp;
+ struct completion session_waiting_on_uc_comp;
+ struct timer_list time2retain_timer;
+ struct iscsi_sess_ops *sess_ops;
+ struct se_session *se_sess;
+ struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+ u8 auth_complete;
+ u8 checked_for_existing;
+ u8 current_stage;
+ u8 leading_connection;
+ u8 first_request;
+ u8 version_min;
+ u8 version_max;
+ u8 login_complete;
+ u8 login_failed;
+ bool zero_tsih;
+ char isid[6];
+ u32 cmd_sn;
+ itt_t init_task_tag;
+ u32 initial_exp_statsn;
+ u32 rsp_length;
+ u16 cid;
+ u16 tsih;
+ char req[ISCSI_HDR_LEN];
+ char rsp[ISCSI_HDR_LEN];
+ char *req_buf;
+ char *rsp_buf;
+ struct iscsi_conn *conn;
+ struct iscsi_np *np;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+ u32 dataout_timeout;
+ u32 dataout_timeout_retries;
+ u32 default_erl;
+ u32 nopin_timeout;
+ u32 nopin_response_timeout;
+ u32 random_datain_pdu_offsets;
+ u32 random_datain_seq_offsets;
+ u32 random_r2t_offsets;
+ u32 tmr_cold_reset;
+ u32 tmr_warm_reset;
+ struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+ enum naf_flags_table naf_flags;
+ int authenticate_target;
+ /* Used for iscsit_global->discovery_auth,
+ * set to zero (auth disabled) by default */
+ int enforce_discovery_auth;
+#define MAX_USER_LEN 256
+#define MAX_PASS_LEN 256
+ char userid[MAX_USER_LEN];
+ char password[MAX_PASS_LEN];
+ char userid_mutual[MAX_USER_LEN];
+ char password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+ struct config_group iscsi_sess_stats_group;
+ struct config_group iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+ struct se_node_acl se_node_acl;
+ struct iscsi_node_attrib node_attrib;
+ struct iscsi_node_auth node_auth;
+ struct iscsi_node_stat_grps node_stat_grps;
+};
+
+struct iscsi_tpg_attrib {
+ u32 authentication;
+ u32 login_timeout;
+ u32 netif_timeout;
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 default_cmdsn_depth;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ u32 demo_mode_discovery;
+ u32 default_erl;
+ u8 t10_pi;
+ u32 fabric_prot_type;
+ struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+ int np_network_transport;
+ int np_ip_proto;
+ int np_sock_type;
+ enum np_thread_state_table np_thread_state;
+ bool enabled;
+ enum iscsi_timer_flags_table np_login_timer_flags;
+ u32 np_exports;
+ enum np_flags_table np_flags;
+ unsigned char np_ip[IPV6_ADDRESS_SPACE];
+ u16 np_port;
+ spinlock_t np_thread_lock;
+ struct completion np_restart_comp;
+ struct socket *np_socket;
+ struct __kernel_sockaddr_storage np_sockaddr;
+ struct task_struct *np_thread;
+ struct timer_list np_login_timer;
+ void *np_context;
+ struct iscsit_transport *np_transport;
+ struct list_head np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+ struct iscsi_np *tpg_np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np_parent;
+ struct list_head tpg_np_list;
+ struct list_head tpg_np_child_list;
+ struct list_head tpg_np_parent_list;
+ struct se_tpg_np se_tpg_np;
+ spinlock_t tpg_np_parent_lock;
+ struct completion tpg_np_comp;
+ struct kref tpg_np_kref;
+};
+
+struct iscsi_portal_group {
+ unsigned char tpg_chap_id;
+ /* TPG State */
+ enum tpg_state_table tpg_state;
+ /* Target Portal Group Tag */
+ u16 tpgt;
+ /* Id assigned to target sessions */
+ u16 ntsih;
+ /* Number of active sessions */
+ u32 nsessions;
+ /* Number of Network Portals available for this TPG */
+ u32 num_tpg_nps;
+ /* Per TPG LIO specific session ID. */
+ u32 sid;
+ /* Spinlock for adding/removing Network Portals */
+ spinlock_t tpg_np_lock;
+ spinlock_t tpg_state_lock;
+ struct se_portal_group tpg_se_tpg;
+ struct mutex tpg_access_lock;
+ struct semaphore np_login_sem;
+ struct iscsi_tpg_attrib tpg_attrib;
+ struct iscsi_node_auth tpg_demo_auth;
+ /* Pointer to default list of iSCSI parameters for TPG */
+ struct iscsi_param_list *param_list;
+ struct iscsi_tiqn *tpg_tiqn;
+ struct list_head tpg_gnp_list;
+ struct list_head tpg_list;
+} ____cacheline_aligned;
+
+struct iscsi_wwn_stat_grps {
+ struct config_group iscsi_stat_group;
+ struct config_group iscsi_instance_group;
+ struct config_group iscsi_sess_err_group;
+ struct config_group iscsi_tgt_attr_group;
+ struct config_group iscsi_login_stats_group;
+ struct config_group iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN 224
+ unsigned char tiqn[ISCSI_IQN_LEN];
+ enum tiqn_state_table tiqn_state;
+ int tiqn_access_count;
+ u32 tiqn_active_tpgs;
+ u32 tiqn_ntpgs;
+ u32 tiqn_num_tpg_nps;
+ u32 tiqn_nsessions;
+ struct list_head tiqn_list;
+ struct list_head tiqn_tpg_list;
+ spinlock_t tiqn_state_lock;
+ spinlock_t tiqn_tpg_lock;
+ struct se_wwn tiqn_wwn;
+ struct iscsi_wwn_stat_grps tiqn_stat_grps;
+ int tiqn_index;
+ struct iscsi_sess_err_stats sess_err_stats;
+ struct iscsi_login_stats login_stats;
+ struct iscsi_logout_stats logout_stats;
+} ____cacheline_aligned;
+
+struct iscsit_global {
+ /* In core shutdown */
+ u32 in_shutdown;
+ u32 active_ts;
+ /* Unique identifier used for the authentication daemon */
+ u32 auth_id;
+ u32 inactive_ts;
+#define ISCSIT_BITMAP_BITS 262144
+ /* Thread Set bitmap pointer */
+ unsigned long *ts_bitmap;
+ spinlock_t ts_bitmap_lock;
+ /* Used for iSCSI discovery session authentication */
+ struct iscsi_node_acl discovery_acl;
+ struct iscsi_portal_group *discovery_tpg;
+};
+
+static inline u32 session_get_next_ttt(struct iscsi_session *session)
+{
+ u32 ttt;
+
+ spin_lock_bh(&session->ttt_lock);
+ ttt = session->targ_xfer_tag++;
+ if (ttt == 0xFFFFFFFF)
+ ttt = session->targ_xfer_tag++;
+ spin_unlock_bh(&session->ttt_lock);
+
+ return ttt;
+}
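+
+/*
+ * Illustrative sketch (not part of this patch): callers building an R2T or an
+ * unsolicited NOPIN obtain a non-reserved Target Transfer Tag with the helper
+ * above, which skips the RFC-3720 reserved value 0xFFFFFFFF:
+ *
+ *	cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+ */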
+
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+#endif /* ISCSI_TARGET_CORE_H */
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
new file mode 100644
index 0000000..3ff76b4
--- /dev/null
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -0,0 +1,64 @@
+#ifndef ISCSI_TARGET_STAT_H
+#define ISCSI_TARGET_STAT_H
+
+/*
+ * For struct iscsi_tiqn->tiqn_wwn default groups
+ */
+extern struct config_item_type iscsi_stat_instance_cit;
+extern struct config_item_type iscsi_stat_sess_err_cit;
+extern struct config_item_type iscsi_stat_tgt_attr_cit;
+extern struct config_item_type iscsi_stat_login_cit;
+extern struct config_item_type iscsi_stat_logout_cit;
+
+/*
+ * For struct iscsi_session->se_sess default groups
+ */
+extern struct config_item_type iscsi_stat_sess_cit;
+
+/* iSCSI session error types */
+#define ISCSI_SESS_ERR_UNKNOWN 0
+#define ISCSI_SESS_ERR_DIGEST 1
+#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
+#define ISCSI_SESS_ERR_PDU_FORMAT 3
+
+/* iSCSI session error stats */
+struct iscsi_sess_err_stats {
+ spinlock_t lock;
+ u32 digest_errors;
+ u32 cxn_timeout_errors;
+ u32 pdu_format_errors;
+ u32 last_sess_failure_type;
+ char last_sess_fail_rem_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI login failure types (sub oids) */
+#define ISCSI_LOGIN_FAIL_OTHER 2
+#define ISCSI_LOGIN_FAIL_REDIRECT 3
+#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
+#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
+#define ISCSI_LOGIN_FAIL_NEGOTIATE 6
+
+/* iSCSI login stats */
+struct iscsi_login_stats {
+ spinlock_t lock;
+ u32 accepts;
+ u32 other_fails;
+ u32 redirects;
+ u32 authorize_fails;
+ u32 authenticate_fails;
+ u32 negotiate_fails; /* used for notifications */
+ u64 last_fail_time; /* time stamp (jiffies) */
+ u32 last_fail_type;
+ int last_intr_fail_ip_family;
+ unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+ char last_intr_fail_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI logout stats */
+struct iscsi_logout_stats {
+ spinlock_t lock;
+ u32 normal_logouts;
+ u32 abnormal_logouts;
+} ____cacheline_aligned;
+
+#endif /*** ISCSI_TARGET_STAT_H ***/
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index daef9da..e6bb166 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -1,6 +1,6 @@
#include <linux/module.h>
#include <linux/list.h>
-#include "../../../drivers/target/iscsi/iscsi_target_core.h"
+#include "iscsi_target_core.h"
struct iscsit_transport {
#define ISCSIT_TRANSPORT_NAME 16
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index db81c65..1e5c8f9 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -1,28 +1,15 @@
#ifndef TARGET_CORE_BACKEND_H
#define TARGET_CORE_BACKEND_H
-#define TRANSPORT_PLUGIN_PHBA_PDEV 1
-#define TRANSPORT_PLUGIN_VHBA_PDEV 2
-#define TRANSPORT_PLUGIN_VHBA_VDEV 3
-
-struct target_backend_cits {
- struct config_item_type tb_dev_cit;
- struct config_item_type tb_dev_attrib_cit;
- struct config_item_type tb_dev_pr_cit;
- struct config_item_type tb_dev_wwn_cit;
- struct config_item_type tb_dev_alua_tg_pt_gps_cit;
- struct config_item_type tb_dev_stat_cit;
-};
-
-struct se_subsystem_api {
- struct list_head sub_api_list;
+#define TRANSPORT_FLAG_PASSTHROUGH 1
+struct target_backend_ops {
char name[16];
char inquiry_prod[16];
char inquiry_rev[4];
struct module *owner;
- u8 transport_type;
+ u8 transport_flags;
int (*attach_hba)(struct se_hba *, u32);
void (*detach_hba)(struct se_hba *);
@@ -54,7 +41,7 @@ struct se_subsystem_api {
int (*format_prot)(struct se_device *);
void (*free_prot)(struct se_device *);
- struct target_backend_cits tb_cits;
+ struct configfs_attribute **tb_dev_attrib_attrs;
};
struct sbc_ops {
@@ -62,12 +49,12 @@ struct sbc_ops {
u32, enum dma_data_direction);
sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
- sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd);
- sense_reason_t (*execute_unmap)(struct se_cmd *cmd);
+ sense_reason_t (*execute_unmap)(struct se_cmd *cmd,
+ sector_t lba, sector_t nolb);
};
-int transport_subsystem_register(struct se_subsystem_api *);
-void transport_subsystem_release(struct se_subsystem_api *);
+int transport_backend_register(const struct target_backend_ops *);
+void target_backend_unregister(const struct target_backend_ops *);
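+
+/*
+ * Illustrative sketch (hypothetical backend, not part of this patch): backend
+ * modules now register a const ops table instead of a struct se_subsystem_api:
+ *
+ *	static const struct target_backend_ops foo_ops = {
+ *		.name		= "foo",
+ *		.owner		= THIS_MODULE,
+ *		.attach_hba	= foo_attach_hba,
+ *		.detach_hba	= foo_detach_hba,
+ *	};
+ *
+ *	static int __init foo_module_init(void)
+ *	{
+ *		return transport_backend_register(&foo_ops);
+ *	}
+ */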
void target_complete_cmd(struct se_cmd *, u8);
void target_complete_cmd_with_length(struct se_cmd *, u8, int);
@@ -81,22 +68,19 @@ sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
u32 sbc_get_device_rev(struct se_device *dev);
u32 sbc_get_device_type(struct se_device *dev);
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
-sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
- sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
- sector_t lba, sector_t nolb),
- void *priv);
void sbc_dif_generate(struct se_cmd *);
-sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
+sense_reason_t sbc_dif_verify(struct se_cmd *, sector_t, unsigned int,
unsigned int, struct scatterlist *, int);
-sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
- unsigned int, struct scatterlist *, int);
-sense_reason_t sbc_dif_read_strip(struct se_cmd *);
-
+void sbc_dif_copy_prot(struct se_cmd *, unsigned int, bool,
+ struct scatterlist *, int);
void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
+extern struct configfs_attribute *sbc_attrib_attrs[];
+extern struct configfs_attribute *passthrough_attrib_attrs[];
+
/* core helpers also used by command snooping in pscsi */
void *transport_kmap_data_sg(struct se_cmd *);
void transport_kunmap_data_sg(struct se_cmd *);
@@ -105,37 +89,8 @@ int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
struct scatterlist *, u32, struct scatterlist *, u32);
-void array_free(void *array, int n);
-
-/* From target_core_configfs.c to setup default backend config_item_types */
-void target_core_setup_sub_cits(struct se_subsystem_api *);
-
-/* attribute helpers from target_core_device.c for backend drivers */
-int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-int se_dev_set_unmap_granularity(struct se_device *, u32);
-int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-int se_dev_set_max_write_same_len(struct se_device *, u32);
-int se_dev_set_emulate_model_alias(struct se_device *, int);
-int se_dev_set_emulate_dpo(struct se_device *, int);
-int se_dev_set_emulate_fua_write(struct se_device *, int);
-int se_dev_set_emulate_fua_read(struct se_device *, int);
-int se_dev_set_emulate_write_cache(struct se_device *, int);
-int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-int se_dev_set_emulate_tas(struct se_device *, int);
-int se_dev_set_emulate_tpu(struct se_device *, int);
-int se_dev_set_emulate_tpws(struct se_device *, int);
-int se_dev_set_emulate_caw(struct se_device *, int);
-int se_dev_set_emulate_3pc(struct se_device *, int);
-int se_dev_set_pi_prot_type(struct se_device *, int);
-int se_dev_set_pi_prot_format(struct se_device *, int);
-int se_dev_set_enforce_pr_isids(struct se_device *, int);
-int se_dev_set_force_pr_aptpl(struct se_device *, int);
-int se_dev_set_is_nonrot(struct se_device *, int);
-int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-int se_dev_set_queue_depth(struct se_device *, u32);
-int se_dev_set_max_sectors(struct se_device *, u32);
-int se_dev_set_optimal_sectors(struct se_device *, u32);
-int se_dev_set_block_size(struct se_device *, u32);
+bool target_lun_is_rdonly(struct se_cmd *);
+sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
+ sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
deleted file mode 100644
index 186f7a9..0000000
--- a/include/target/target_core_backend_configfs.h
+++ /dev/null
@@ -1,118 +0,0 @@
-#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
-#define TARGET_CORE_BACKEND_CONFIGFS_H
-
-#include <target/configfs_macros.h>
-
-#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
-static ssize_t _backend##_dev_show_attr_##_name( \
- struct se_dev_attrib *da, \
- char *page) \
-{ \
- return snprintf(page, PAGE_SIZE, "%u\n", \
- (u32)da->da_dev->dev_attrib._name); \
-}
-
-#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
-static ssize_t _backend##_dev_store_attr_##_name( \
- struct se_dev_attrib *da, \
- const char *page, \
- size_t count) \
-{ \
- unsigned long val; \
- int ret; \
- \
- ret = kstrtoul(page, 0, &val); \
- if (ret < 0) { \
- pr_err("kstrtoul() failed with ret: %d\n", ret); \
- return -EINVAL; \
- } \
- ret = se_dev_set_##_name(da->da_dev, (u32)val); \
- \
- return (!ret) ? count : -EINVAL; \
-}
-
-#define DEF_TB_DEV_ATTRIB(_backend, _name) \
-DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
-DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
-
-#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
-DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
-
-CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
-#define TB_DEV_ATTR(_backend, _name, _mode) \
-static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- _backend##_dev_show_attr_##_name, \
- _backend##_dev_store_attr_##_name);
-
-#define TB_DEV_ATTR_RO(_backend, _name) \
-static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- _backend##_dev_show_attr_##_name);
-
-/*
- * Default list of target backend device attributes as defined by
- * struct se_dev_attrib
- */
-
-#define DEF_TB_DEFAULT_ATTRIBS(_backend) \
- DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \
- TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \
- TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \
- TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \
- TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \
- TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \
- TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \
- TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \
- TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \
- TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \
- TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \
- TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \
- TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \
- TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \
- DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \
- TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \
- TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \
- TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \
- TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \
- TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \
- TB_DEV_ATTR_RO(_backend, hw_block_size); \
- DEF_TB_DEV_ATTRIB(_backend, block_size); \
- TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
- TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
- DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
- TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
- TB_DEV_ATTR_RO(_backend, hw_queue_depth); \
- DEF_TB_DEV_ATTRIB(_backend, queue_depth); \
- TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \
- TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \
- TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \
- TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \
- TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
- DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \
- TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
-
-#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 4a8795a..17ae2d6 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -6,34 +6,21 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/percpu_ida.h>
-#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>
-#define TARGET_CORE_MOD_VERSION "v4.1.0"
-#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
+#define TARGET_CORE_VERSION "v5.0"
-/* Maximum Number of LUNs per Target Portal Group */
-/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
-#define TRANSPORT_MAX_LUNS_PER_TPG 256
/*
- * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
- *
- * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
- * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
- * 16-byte CDBs by default and require an extra allocation for
- * 32-byte CDBs to because of legacy issues.
- *
- * Within TCM Core there are no such legacy limitiations, so we go ahead
- * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
- * within all TCM Core and subsystem plugin code.
+ * Maximum size of a CDB that can be stored in se_cmd without allocating
+ * memory dynamically for the CDB.
*/
#define TCM_MAX_COMMAND_SIZE 32
/*
* From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
* defined 96, but the real limit is 252 (or 260 including the header)
*/
-#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
+#define TRANSPORT_SENSE_BUFFER 96
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET 2
#define SPC_ADD_SENSE_LEN_OFFSET 7
@@ -79,12 +66,6 @@
#define DA_MAX_WRITE_SAME_LEN 0
/* Use a model alias based on the configfs backend device name */
#define DA_EMULATE_MODEL_ALIAS 0
-/* Emulation for Direct Page Out */
-#define DA_EMULATE_DPO 0
-/* Emulation for Forced Unit Access WRITEs */
-#define DA_EMULATE_FUA_WRITE 1
-/* Emulation for Forced Unit Access READs */
-#define DA_EMULATE_FUA_READ 0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE 0
/* Emulation for UNIT ATTENTION Interlock Control */
@@ -125,18 +106,6 @@ enum hba_flags_table {
HBA_FLAGS_PSCSI_MODE = 0x02,
};
-/* struct se_lun->lun_status */
-enum transport_lun_status_table {
- TRANSPORT_LUN_STATUS_FREE = 0,
- TRANSPORT_LUN_STATUS_ACTIVE = 1,
-};
-
-/* struct se_portal_group->se_tpg_type */
-enum transport_tpg_type_table {
- TRANSPORT_TPG_TYPE_NORMAL = 0,
- TRANSPORT_TPG_TYPE_DISCOVERY = 1,
-};
-
/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
TRANSPORT_NO_STATE = 0,
@@ -165,18 +134,15 @@ enum se_cmd_flags_table {
SCF_SEND_DELAYED_TAS = 0x00004000,
SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
- SCF_ACK_KREF = 0x00040000,
SCF_COMPARE_AND_WRITE = 0x00080000,
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
- SCF_CMD_XCOPY_PASSTHROUGH = 0x00200000,
+ SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
enum transport_lunflags_table {
- TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00,
- TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01,
- TRANSPORT_LUNFLAGS_READ_ONLY = 0x02,
- TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
+ TRANSPORT_LUNFLAGS_READ_ONLY = 0x01,
+ TRANSPORT_LUNFLAGS_READ_WRITE = 0x02,
};
/*
@@ -325,22 +291,13 @@ struct t10_alua_tg_pt_gp {
struct se_device *tg_pt_gp_dev;
struct config_group tg_pt_gp_group;
struct list_head tg_pt_gp_list;
- struct list_head tg_pt_gp_mem_list;
- struct se_port *tg_pt_gp_alua_port;
+ struct list_head tg_pt_gp_lun_list;
+ struct se_lun *tg_pt_gp_alua_lun;
struct se_node_acl *tg_pt_gp_alua_nacl;
struct delayed_work tg_pt_gp_transition_work;
struct completion *tg_pt_gp_transition_complete;
};
-struct t10_alua_tg_pt_gp_member {
- bool tg_pt_gp_assoc;
- atomic_t tg_pt_gp_mem_ref_cnt;
- spinlock_t tg_pt_gp_mem_lock;
- struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_port *tg_pt;
- struct list_head tg_pt_gp_mem_list;
-};
-
struct t10_vpd {
unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
int protocol_identifier_set;
@@ -385,15 +342,16 @@ struct t10_pr_registration {
int pr_res_scope;
/* Used for fabric initiator WWPNs using a ISID */
bool isid_present_at_reg;
- u32 pr_res_mapped_lun;
- u32 pr_aptpl_target_lun;
+ u64 pr_res_mapped_lun;
+ u64 pr_aptpl_target_lun;
+ u16 tg_pt_sep_rtpi;
u32 pr_res_generation;
u64 pr_reg_bin_isid;
u64 pr_res_key;
atomic_t pr_res_holders;
struct se_node_acl *pr_reg_nacl;
+ /* Used by ALL_TG_PT=1 registration with deve->pr_kref taken */
struct se_dev_entry *pr_reg_deve;
- struct se_lun *pr_reg_tg_pt_lun;
struct list_head pr_reg_list;
struct list_head pr_reg_abort_list;
struct list_head pr_reg_aptpl_list;
@@ -407,7 +365,7 @@ struct t10_reservation {
/* Activate Persistence across Target Power Loss enabled
* for SCSI device */
int pr_aptpl_active;
-#define PR_APTPL_BUF_LEN 8192
+#define PR_APTPL_BUF_LEN 262144
u32 pr_generation;
spinlock_t registration_lock;
spinlock_t aptpl_reg_lock;
@@ -433,7 +391,7 @@ struct se_tmr_req {
u8 response;
int call_transport;
/* Reference to ITT that Task Mgmt should be performed */
- u32 ref_task_tag;
+ u64 ref_task_tag;
void *fabric_tmr_ptr;
struct se_cmd *task_cmd;
struct se_device *tmr_dev;
@@ -486,6 +444,7 @@ struct se_cmd {
u8 scsi_asc;
u8 scsi_ascq;
u16 scsi_sense_length;
+ u64 tag; /* SAM command identifier aka task tag */
/* Delay for ALUA Active/NonOptimized state access in milliseconds */
int alua_nonop_delay;
/* See include/linux/dma-mapping.h */
@@ -504,7 +463,7 @@ struct se_cmd {
/* Total size in bytes associated with command */
u32 data_length;
u32 residual_count;
- u32 orig_fe_lun;
+ u64 orig_fe_lun;
/* Persistent Reservation key */
u64 pr_res_key;
/* Used for sense data */
@@ -512,7 +471,6 @@ struct se_cmd {
struct list_head se_delayed_node;
struct list_head se_qf_node;
struct se_device *se_dev;
- struct se_dev_entry *se_deve;
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
@@ -520,11 +478,10 @@ struct se_cmd {
struct list_head se_cmd_list;
struct completion cmd_wait_comp;
struct kref cmd_kref;
- struct target_core_fabric_ops *se_tfo;
+ const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
- sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
- u32, enum dma_data_direction);
- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
+ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+ void *protocol_data;
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -580,7 +537,6 @@ struct se_cmd {
struct se_ua {
u8 ua_asc;
u8 ua_ascq;
- struct se_node_acl *ua_nacl;
struct list_head ua_nacl_list;
};
@@ -591,14 +547,15 @@ struct se_node_acl {
bool acl_stop:1;
u32 queue_depth;
u32 acl_index;
+ enum target_prot_type saved_prot_type;
#define MAX_ACL_TAG_SIZE 64
char acl_tag[MAX_ACL_TAG_SIZE];
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t acl_pr_ref_count;
- struct se_dev_entry **device_list;
+ struct hlist_head lun_entry_hlist;
struct se_session *nacl_sess;
struct se_portal_group *se_tpg;
- spinlock_t device_list_lock;
+ struct mutex lun_entry_mutex;
spinlock_t nacl_sess_lock;
struct config_group acl_group;
struct config_group acl_attrib_group;
@@ -616,6 +573,7 @@ struct se_session {
unsigned sess_tearing_down:1;
u64 sess_bin_isid;
enum target_prot_op sup_prot_ops;
+ enum target_prot_type sess_prot_type;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
@@ -641,33 +599,37 @@ struct se_ml_stat_grps {
struct se_lun_acl {
char initiatorname[TRANSPORT_IQN_LEN];
- u32 mapped_lun;
+ u64 mapped_lun;
struct se_node_acl *se_lun_nacl;
struct se_lun *se_lun;
- struct list_head lacl_list;
struct config_group se_lun_group;
struct se_ml_stat_grps ml_stat_grps;
};
struct se_dev_entry {
- bool def_pr_registered;
/* See transport_lunflags_table */
- u32 lun_flags;
- u32 mapped_lun;
- u32 total_cmds;
+ u64 mapped_lun;
u64 pr_res_key;
u64 creation_time;
+ u32 lun_flags;
u32 attach_count;
- u64 read_bytes;
- u64 write_bytes;
+ atomic_long_t total_cmds;
+ atomic_long_t read_bytes;
+ atomic_long_t write_bytes;
atomic_t ua_count;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
- atomic_t pr_ref_count;
- struct se_lun_acl *se_lun_acl;
+ struct kref pr_kref;
+ struct completion pr_comp;
+ struct se_lun_acl __rcu *se_lun_acl;
spinlock_t ua_lock;
- struct se_lun *se_lun;
+ struct se_lun __rcu *se_lun;
+#define DEF_PR_REG_ACTIVE 1
+ unsigned long deve_flags;
struct list_head alua_port_list;
+ struct list_head lun_link;
struct list_head ua_list;
+ struct hlist_node link;
+ struct rcu_head rcu_head;
};
struct se_dev_attrib {
@@ -712,25 +674,48 @@ struct se_port_stat_grps {
struct config_group scsi_transport_group;
};
+struct scsi_port_stats {
+ atomic_long_t cmd_pdus;
+ atomic_long_t tx_data_octets;
+ atomic_long_t rx_data_octets;
+};
+
struct se_lun {
+ u64 unpacked_lun;
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
- /* See transport_lun_status_table */
- enum transport_lun_status_table lun_status;
u32 lun_access;
u32 lun_flags;
- u32 unpacked_lun;
+ u32 lun_index;
+
+ /* RELATIVE TARGET PORT IDENTIFIER */
+ u16 lun_rtpi;
atomic_t lun_acl_count;
- spinlock_t lun_acl_lock;
- spinlock_t lun_sep_lock;
- struct completion lun_shutdown_comp;
- struct list_head lun_acl_list;
- struct se_device *lun_se_dev;
- struct se_port *lun_sep;
+ struct se_device __rcu *lun_se_dev;
+
+ struct list_head lun_deve_list;
+ spinlock_t lun_deve_lock;
+
+ /* ALUA state */
+ int lun_tg_pt_secondary_stat;
+ int lun_tg_pt_secondary_write_md;
+ atomic_t lun_tg_pt_secondary_offline;
+ struct mutex lun_tg_pt_md_mutex;
+
+ /* ALUA target port group linkage */
+ struct list_head lun_tg_pt_gp_link;
+ struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
+ spinlock_t lun_tg_pt_gp_lock;
+
+ struct se_portal_group *lun_tpg;
+ struct scsi_port_stats lun_stats;
struct config_group lun_group;
struct se_port_stat_grps port_stat_grps;
struct completion lun_ref_comp;
struct percpu_ref lun_ref;
+ struct list_head lun_dev_link;
+ struct hlist_node link;
+ struct rcu_head rcu_head;
};
struct se_dev_stat_grps {
@@ -753,7 +738,6 @@ struct se_device {
#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004
#define DF_USING_UDEV_PATH 0x00000008
#define DF_USING_ALIAS 0x00000010
- u32 dev_port_count;
/* Physical device queue depth */
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
@@ -770,7 +754,7 @@ struct se_device {
atomic_t dev_ordered_id;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
- int export_count;
+ u32 export_count;
spinlock_t delayed_cmd_lock;
spinlock_t execute_task_lock;
spinlock_t dev_reservation_lock;
@@ -812,12 +796,15 @@ struct se_device {
#define SE_UDEV_PATH_LEN 512 /* must be less than PAGE_SIZE */
unsigned char udev_path[SE_UDEV_PATH_LEN];
/* Pointer to template of function pointers for transport */
- struct se_subsystem_api *transport;
+ const struct target_backend_ops *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
struct se_lun xcopy_lun;
/* Protection Information */
int prot_length;
+ /* For se_lun->lun_se_dev access within RCU read-side critical sections */
+ u32 hba_index;
+ struct rcu_head rcu_head;
};
struct se_hba {
@@ -834,33 +821,7 @@ struct se_hba {
spinlock_t device_lock;
struct config_group hba_group;
struct mutex hba_access_mutex;
- struct se_subsystem_api *transport;
-};
-
-struct scsi_port_stats {
- u64 cmd_pdus;
- u64 tx_data_octets;
- u64 rx_data_octets;
-};
-
-struct se_port {
- /* RELATIVE TARGET PORT IDENTIFER */
- u16 sep_rtpi;
- int sep_tg_pt_secondary_stat;
- int sep_tg_pt_secondary_write_md;
- u32 sep_index;
- struct scsi_port_stats sep_stats;
- /* Used for ALUA Target Port Groups membership */
- atomic_t sep_tg_pt_secondary_offline;
- /* Used for PR ALL_TG_PT=1 */
- atomic_t sep_tg_pt_ref_cnt;
- spinlock_t sep_alua_lock;
- struct mutex sep_tg_pt_md_mutex;
- struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
- struct se_lun *sep_lun;
- struct se_portal_group *sep_tpg;
- struct list_head sep_alua_list;
- struct list_head sep_list;
+ struct target_backend *backend;
};
struct se_tpg_np {
@@ -869,28 +830,30 @@ struct se_tpg_np {
};
struct se_portal_group {
- /* Type of target portal group, see transport_tpg_type_table */
- enum transport_tpg_type_table se_tpg_type;
+ /*
+ * PROTOCOL IDENTIFIER value per SPC-4, 7.5.1.
+ *
+ * Negative values can be used by fabric drivers for internal-use TPGs.
+ */
+ int proto_id;
/* Number of ACLed Initiator Nodes for this TPG */
u32 num_node_acls;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t tpg_pr_ref_count;
/* Spinlock for adding/removing ACLed Nodes */
- spinlock_t acl_node_lock;
+ struct mutex acl_node_mutex;
/* Spinlock for adding/removing sessions */
spinlock_t session_lock;
- spinlock_t tpg_lun_lock;
- /* Pointer to $FABRIC_MOD portal group */
- void *se_tpg_fabric_ptr;
+ struct mutex tpg_lun_mutex;
struct list_head se_tpg_node;
/* linked list for initiator ACL list */
struct list_head acl_node_list;
- struct se_lun **tpg_lun_list;
- struct se_lun tpg_virt_lun0;
+ struct hlist_head tpg_lun_hlist;
+ struct se_lun *tpg_virt_lun0;
/* List of TCM sessions associated wth this TPG */
struct list_head tpg_sess_list;
/* Pointer to $FABRIC_MOD dependent code */
- struct target_core_fabric_ops *se_tpg_tfo;
+ const struct target_core_fabric_ops *se_tpg_tfo;
struct se_wwn *se_tpg_wwn;
struct config_group tpg_group;
struct config_group *tpg_default_groups[7];
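The rewritten TCM_MAX_COMMAND_SIZE comment near the top of this header describes an inline-or-allocate policy for CDB storage in struct se_cmd. The sketch below illustrates that policy only; it is a hypothetical helper, not the actual target_setup_cmd_from_cdb() implementation, and it assumes <linux/slab.h> and <linux/string.h> plus the se_cmd fields declared in this header.

/*
 * Sketch of the inline-vs-allocated CDB pattern described by the
 * TCM_MAX_COMMAND_SIZE comment; hypothetical helper, not core API.
 */
static int example_setup_cdb(struct se_cmd *cmd, const unsigned char *cdb,
			     unsigned int cdb_len)
{
	if (cdb_len <= TCM_MAX_COMMAND_SIZE) {
		/* The CDB fits in the embedded __t_task_cdb[] buffer. */
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	} else {
		/* Larger (e.g. variable-length) CDBs need a dynamic copy. */
		cmd->t_task_cdb = kzalloc(cdb_len, GFP_KERNEL);
		if (!cmd->t_task_cdb)
			return -ENOMEM;
	}
	memcpy(cmd->t_task_cdb, cdb, cdb_len);
	return 0;
}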
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
deleted file mode 100644
index e080138..0000000
--- a/include/target/target_core_configfs.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION
-
-#define TARGET_CORE_CONFIG_ROOT "/sys/kernel/config"
-
-#define TARGET_CORE_NAME_MAX_LEN 64
-#define TARGET_FABRIC_NAME_SIZE 32
-
-extern struct target_fabric_configfs *target_fabric_configfs_init(
- struct module *, const char *);
-extern void target_fabric_configfs_free(struct target_fabric_configfs *);
-extern int target_fabric_configfs_register(struct target_fabric_configfs *);
-extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
-
-struct target_fabric_configfs_template {
- struct config_item_type tfc_discovery_cit;
- struct config_item_type tfc_wwn_cit;
- struct config_item_type tfc_wwn_fabric_stats_cit;
- struct config_item_type tfc_tpg_cit;
- struct config_item_type tfc_tpg_base_cit;
- struct config_item_type tfc_tpg_lun_cit;
- struct config_item_type tfc_tpg_port_cit;
- struct config_item_type tfc_tpg_port_stat_cit;
- struct config_item_type tfc_tpg_np_cit;
- struct config_item_type tfc_tpg_np_base_cit;
- struct config_item_type tfc_tpg_attrib_cit;
- struct config_item_type tfc_tpg_auth_cit;
- struct config_item_type tfc_tpg_param_cit;
- struct config_item_type tfc_tpg_nacl_cit;
- struct config_item_type tfc_tpg_nacl_base_cit;
- struct config_item_type tfc_tpg_nacl_attrib_cit;
- struct config_item_type tfc_tpg_nacl_auth_cit;
- struct config_item_type tfc_tpg_nacl_param_cit;
- struct config_item_type tfc_tpg_nacl_stat_cit;
- struct config_item_type tfc_tpg_mappedlun_cit;
- struct config_item_type tfc_tpg_mappedlun_stat_cit;
-};
-
-struct target_fabric_configfs {
- char tf_name[TARGET_FABRIC_NAME_SIZE];
- atomic_t tf_access_cnt;
- struct list_head tf_list;
- struct config_group tf_group;
- struct config_group tf_disc_group;
- struct config_group *tf_default_groups[2];
- /* Pointer to fabric's config_item */
- struct config_item *tf_fabric;
- /* Passed from fabric modules */
- struct config_item_type *tf_fabric_cit;
- /* Pointer to target core subsystem */
- struct configfs_subsystem *tf_subsys;
- /* Pointer to fabric's struct module */
- struct module *tf_module;
- struct target_core_fabric_ops tf_ops;
- struct target_fabric_configfs_template tf_cit_tmpl;
-};
-
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 22a4e98e..18afef9 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -2,21 +2,13 @@
#define TARGET_CORE_FABRIC_H
struct target_core_fabric_ops {
- struct configfs_subsystem *tf_subsys;
+ struct module *module;
+ const char *name;
+ size_t node_acl_size;
char *(*get_fabric_name)(void);
- u8 (*get_fabric_proto_ident)(struct se_portal_group *);
char *(*tpg_get_wwn)(struct se_portal_group *);
u16 (*tpg_get_tag)(struct se_portal_group *);
u32 (*tpg_get_default_depth)(struct se_portal_group *);
- u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
- struct se_node_acl *,
- struct t10_pr_registration *, int *,
- unsigned char *);
- u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
- struct se_node_acl *,
- struct t10_pr_registration *, int *);
- char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
- const char *, u32 *, char **);
int (*tpg_check_demo_mode)(struct se_portal_group *);
int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
@@ -27,10 +19,14 @@ struct target_core_fabric_ops {
* inquiry response
*/
int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
- struct se_node_acl *(*tpg_alloc_fabric_acl)(
- struct se_portal_group *);
- void (*tpg_release_fabric_acl)(struct se_portal_group *,
- struct se_node_acl *);
+ /*
+ * Optionally used as a configfs tunable to determine when
+ * target-core should signal the PROTECT=1 feature bit for
+ * backends that don't support T10-PI, so that either fabric
+ * HW offload or target-core emulation performs the associated
+ * WRITE_STRIP and READ_INSERT operations.
+ */
+ int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
u32 (*tpg_get_inst_index)(struct se_portal_group *);
/*
* Optional to release struct se_cmd and fabric dependent allocated
@@ -41,7 +37,6 @@ struct target_core_fabric_ops {
*/
int (*check_stop_free)(struct se_cmd *);
void (*release_cmd)(struct se_cmd *);
- void (*put_session)(struct se_session *);
/*
* Called with spin_lock_bh(struct se_portal_group->session_lock held.
*/
@@ -57,7 +52,6 @@ struct target_core_fabric_ops {
int (*write_pending)(struct se_cmd *);
int (*write_pending_status)(struct se_cmd *);
void (*set_default_node_attributes)(struct se_node_acl *);
- u32 (*get_task_tag)(struct se_cmd *);
int (*get_cmd_state)(struct se_cmd *);
int (*queue_data_in)(struct se_cmd *);
int (*queue_status)(struct se_cmd *);
@@ -79,11 +73,28 @@ struct target_core_fabric_ops {
struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
struct config_group *, const char *);
void (*fabric_drop_np)(struct se_tpg_np *);
- struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
- struct config_group *, const char *);
- void (*fabric_drop_nodeacl)(struct se_node_acl *);
+ int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
+ void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
+
+ struct configfs_attribute **tfc_discovery_attrs;
+ struct configfs_attribute **tfc_wwn_attrs;
+ struct configfs_attribute **tfc_tpg_base_attrs;
+ struct configfs_attribute **tfc_tpg_np_base_attrs;
+ struct configfs_attribute **tfc_tpg_attrib_attrs;
+ struct configfs_attribute **tfc_tpg_auth_attrs;
+ struct configfs_attribute **tfc_tpg_param_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_base_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
+ struct configfs_attribute **tfc_tpg_nacl_param_attrs;
};
+int target_register_template(const struct target_core_fabric_ops *fo);
+void target_unregister_template(const struct target_core_fabric_ops *fo);
+
+int target_depend_item(struct config_item *item);
+void target_undepend_item(struct config_item *item);
+
struct se_session *transport_init_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
@@ -95,24 +106,26 @@ void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void target_get_session(struct se_session *);
void target_put_session(struct se_session *);
+ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
-void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
+void transport_init_se_cmd(struct se_cmd *,
+ const struct target_core_fabric_ops *,
struct se_session *, u32, int, int, unsigned char *);
-sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
+sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
- unsigned char *, unsigned char *, u32, u32, int, int, int,
+ unsigned char *, unsigned char *, u64, u32, int, int, int,
struct scatterlist *, u32, struct scatterlist *, u32,
struct scatterlist *, u32);
int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
- unsigned char *, u32, u32, int, int, int);
+ unsigned char *, u64, u32, int, int, int);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *sense, u32 unpacked_lun,
+ unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t, unsigned int, int);
int transport_handle_cdb_direct(struct se_cmd *);
@@ -126,8 +139,8 @@ bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
sense_reason_t, int);
-int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
-int target_put_sess_cmd(struct se_session *, struct se_cmd *);
+int target_get_sess_cmd(struct se_cmd *, bool);
+int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);
@@ -138,52 +151,19 @@ void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
void __target_execute_cmd(struct se_cmd *);
-int transport_lookup_tmr_lun(struct se_cmd *, u32);
+int transport_lookup_tmr_lun(struct se_cmd *, u64);
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
unsigned char *);
-void core_tpg_clear_object_luns(struct se_portal_group *);
-struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *,
- struct se_node_acl *, const char *, u32);
-int core_tpg_del_initiator_node_acl(struct se_portal_group *,
- struct se_node_acl *, int);
int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
unsigned char *, u32, int);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
struct se_node_acl *, const char *);
-int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
- struct se_portal_group *, void *, int);
+int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);
-/* SAS helpers */
-u8 sas_get_fabric_proto_ident(struct se_portal_group *);
-u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
- struct t10_pr_registration *, int *, unsigned char *);
-u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
- struct t10_pr_registration *, int *);
-char *sas_parse_pr_out_transport_id(struct se_portal_group *, const char *,
- u32 *, char **);
-
-/* FC helpers */
-u8 fc_get_fabric_proto_ident(struct se_portal_group *);
-u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
- struct t10_pr_registration *, int *, unsigned char *);
-u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
- struct t10_pr_registration *, int *);
-char *fc_parse_pr_out_transport_id(struct se_portal_group *, const char *,
- u32 *, char **);
-
-/* iSCSI helpers */
-u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
-u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
- struct t10_pr_registration *, int *, unsigned char *);
-u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
- struct t10_pr_registration *, int *);
-char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
- u32 *, char **);
-
/*
* The LIO target core uses DMA_TO_DEVICE to mean that data is going
* to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
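With target_core_configfs.h removed, a fabric module no longer builds a struct target_fabric_configfs by hand; it registers a const struct target_core_fabric_ops with target_register_template(). The skeleton below is a hypothetical minimal sketch of that flow: the "demo" fabric, its callbacks, and the omitted fields are invented or elided for illustration, and only members visible in this header are referenced.

/* Hypothetical fabric module skeleton; only a few ops fields are shown. */
static char *demo_get_fabric_name(void)
{
	return "demo";
}

static const struct target_core_fabric_ops demo_ops = {
	.module			= THIS_MODULE,
	.name			= "demo",
	.get_fabric_name	= demo_get_fabric_name,
	/* remaining mandatory callbacks and tfc_*_attrs arrays go here */
};

static int __init demo_init(void)
{
	return target_register_template(&demo_ops);
}

static void __exit demo_exit(void)
{
	target_unregister_template(&demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);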
diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h
index b32a149..7a0649c 100644
--- a/include/target/target_core_fabric_configfs.h
+++ b/include/target/target_core_fabric_configfs.h
@@ -90,6 +90,11 @@ static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \
_fabric##_tpg_store_##_name);
+#define TF_TPG_BASE_ATTR_RO(_fabric, _name) \
+static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ _fabric##_tpg_show_##_name);
+
CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs);
#define TF_WWN_ATTR(_fabric, _name, _mode) \
static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \
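The new TF_TPG_BASE_ATTR_RO() macro above lets a fabric declare a read-only TPG attribute from a show routine alone. The fragment below is a hypothetical usage sketch: the "demo" fabric and its proto_id attribute are invented, and the show-function signature is assumed to match the one used by the existing TF_TPG_BASE_ATTR() helpers.

/* Hypothetical read-only TPG attribute for a fabric named "demo". */
static ssize_t demo_tpg_show_proto_id(struct se_portal_group *se_tpg,
				      char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", se_tpg->proto_id);
}
TF_TPG_BASE_ATTR_RO(demo, proto_id);

/*
 * The generated demo_tpg_proto_id attribute would then be listed in the
 * fabric's tfc_tpg_base_attrs[] array from target_core_fabric.h.
 */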
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 02e1003..09b3880 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -87,7 +87,8 @@
#define DECLARE_TRACE(name, proto, args)
#ifdef CONFIG_EVENT_TRACING
-#include <trace/ftrace.h>
+#include <trace/trace_events.h>
+#include <trace/perf.h>
#endif
#undef TRACE_EVENT
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index a066636..633ee9e 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -6,76 +6,95 @@
#include <linux/tracepoint.h>
+#define P9_MSG_T \
+ EM( P9_TLERROR, "P9_TLERROR" ) \
+ EM( P9_RLERROR, "P9_RLERROR" ) \
+ EM( P9_TSTATFS, "P9_TSTATFS" ) \
+ EM( P9_RSTATFS, "P9_RSTATFS" ) \
+ EM( P9_TLOPEN, "P9_TLOPEN" ) \
+ EM( P9_RLOPEN, "P9_RLOPEN" ) \
+ EM( P9_TLCREATE, "P9_TLCREATE" ) \
+ EM( P9_RLCREATE, "P9_RLCREATE" ) \
+ EM( P9_TSYMLINK, "P9_TSYMLINK" ) \
+ EM( P9_RSYMLINK, "P9_RSYMLINK" ) \
+ EM( P9_TMKNOD, "P9_TMKNOD" ) \
+ EM( P9_RMKNOD, "P9_RMKNOD" ) \
+ EM( P9_TRENAME, "P9_TRENAME" ) \
+ EM( P9_RRENAME, "P9_RRENAME" ) \
+ EM( P9_TREADLINK, "P9_TREADLINK" ) \
+ EM( P9_RREADLINK, "P9_RREADLINK" ) \
+ EM( P9_TGETATTR, "P9_TGETATTR" ) \
+ EM( P9_RGETATTR, "P9_RGETATTR" ) \
+ EM( P9_TSETATTR, "P9_TSETATTR" ) \
+ EM( P9_RSETATTR, "P9_RSETATTR" ) \
+ EM( P9_TXATTRWALK, "P9_TXATTRWALK" ) \
+ EM( P9_RXATTRWALK, "P9_RXATTRWALK" ) \
+ EM( P9_TXATTRCREATE, "P9_TXATTRCREATE" ) \
+ EM( P9_RXATTRCREATE, "P9_RXATTRCREATE" ) \
+ EM( P9_TREADDIR, "P9_TREADDIR" ) \
+ EM( P9_RREADDIR, "P9_RREADDIR" ) \
+ EM( P9_TFSYNC, "P9_TFSYNC" ) \
+ EM( P9_RFSYNC, "P9_RFSYNC" ) \
+ EM( P9_TLOCK, "P9_TLOCK" ) \
+ EM( P9_RLOCK, "P9_RLOCK" ) \
+ EM( P9_TGETLOCK, "P9_TGETLOCK" ) \
+ EM( P9_RGETLOCK, "P9_RGETLOCK" ) \
+ EM( P9_TLINK, "P9_TLINK" ) \
+ EM( P9_RLINK, "P9_RLINK" ) \
+ EM( P9_TMKDIR, "P9_TMKDIR" ) \
+ EM( P9_RMKDIR, "P9_RMKDIR" ) \
+ EM( P9_TRENAMEAT, "P9_TRENAMEAT" ) \
+ EM( P9_RRENAMEAT, "P9_RRENAMEAT" ) \
+ EM( P9_TUNLINKAT, "P9_TUNLINKAT" ) \
+ EM( P9_RUNLINKAT, "P9_RUNLINKAT" ) \
+ EM( P9_TVERSION, "P9_TVERSION" ) \
+ EM( P9_RVERSION, "P9_RVERSION" ) \
+ EM( P9_TAUTH, "P9_TAUTH" ) \
+ EM( P9_RAUTH, "P9_RAUTH" ) \
+ EM( P9_TATTACH, "P9_TATTACH" ) \
+ EM( P9_RATTACH, "P9_RATTACH" ) \
+ EM( P9_TERROR, "P9_TERROR" ) \
+ EM( P9_RERROR, "P9_RERROR" ) \
+ EM( P9_TFLUSH, "P9_TFLUSH" ) \
+ EM( P9_RFLUSH, "P9_RFLUSH" ) \
+ EM( P9_TWALK, "P9_TWALK" ) \
+ EM( P9_RWALK, "P9_RWALK" ) \
+ EM( P9_TOPEN, "P9_TOPEN" ) \
+ EM( P9_ROPEN, "P9_ROPEN" ) \
+ EM( P9_TCREATE, "P9_TCREATE" ) \
+ EM( P9_RCREATE, "P9_RCREATE" ) \
+ EM( P9_TREAD, "P9_TREAD" ) \
+ EM( P9_RREAD, "P9_RREAD" ) \
+ EM( P9_TWRITE, "P9_TWRITE" ) \
+ EM( P9_RWRITE, "P9_RWRITE" ) \
+ EM( P9_TCLUNK, "P9_TCLUNK" ) \
+ EM( P9_RCLUNK, "P9_RCLUNK" ) \
+ EM( P9_TREMOVE, "P9_TREMOVE" ) \
+ EM( P9_RREMOVE, "P9_RREMOVE" ) \
+ EM( P9_TSTAT, "P9_TSTAT" ) \
+ EM( P9_RSTAT, "P9_RSTAT" ) \
+ EM( P9_TWSTAT, "P9_TWSTAT" ) \
+ EMe(P9_RWSTAT, "P9_RWSTAT" )
+
+/* Define EM() to export the enums to userspace via TRACE_DEFINE_ENUM() */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+P9_MSG_T
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
#define show_9p_op(type) \
- __print_symbolic(type, \
- { P9_TLERROR, "P9_TLERROR" }, \
- { P9_RLERROR, "P9_RLERROR" }, \
- { P9_TSTATFS, "P9_TSTATFS" }, \
- { P9_RSTATFS, "P9_RSTATFS" }, \
- { P9_TLOPEN, "P9_TLOPEN" }, \
- { P9_RLOPEN, "P9_RLOPEN" }, \
- { P9_TLCREATE, "P9_TLCREATE" }, \
- { P9_RLCREATE, "P9_RLCREATE" }, \
- { P9_TSYMLINK, "P9_TSYMLINK" }, \
- { P9_RSYMLINK, "P9_RSYMLINK" }, \
- { P9_TMKNOD, "P9_TMKNOD" }, \
- { P9_RMKNOD, "P9_RMKNOD" }, \
- { P9_TRENAME, "P9_TRENAME" }, \
- { P9_RRENAME, "P9_RRENAME" }, \
- { P9_TREADLINK, "P9_TREADLINK" }, \
- { P9_RREADLINK, "P9_RREADLINK" }, \
- { P9_TGETATTR, "P9_TGETATTR" }, \
- { P9_RGETATTR, "P9_RGETATTR" }, \
- { P9_TSETATTR, "P9_TSETATTR" }, \
- { P9_RSETATTR, "P9_RSETATTR" }, \
- { P9_TXATTRWALK, "P9_TXATTRWALK" }, \
- { P9_RXATTRWALK, "P9_RXATTRWALK" }, \
- { P9_TXATTRCREATE, "P9_TXATTRCREATE" }, \
- { P9_RXATTRCREATE, "P9_RXATTRCREATE" }, \
- { P9_TREADDIR, "P9_TREADDIR" }, \
- { P9_RREADDIR, "P9_RREADDIR" }, \
- { P9_TFSYNC, "P9_TFSYNC" }, \
- { P9_RFSYNC, "P9_RFSYNC" }, \
- { P9_TLOCK, "P9_TLOCK" }, \
- { P9_RLOCK, "P9_RLOCK" }, \
- { P9_TGETLOCK, "P9_TGETLOCK" }, \
- { P9_RGETLOCK, "P9_RGETLOCK" }, \
- { P9_TLINK, "P9_TLINK" }, \
- { P9_RLINK, "P9_RLINK" }, \
- { P9_TMKDIR, "P9_TMKDIR" }, \
- { P9_RMKDIR, "P9_RMKDIR" }, \
- { P9_TRENAMEAT, "P9_TRENAMEAT" }, \
- { P9_RRENAMEAT, "P9_RRENAMEAT" }, \
- { P9_TUNLINKAT, "P9_TUNLINKAT" }, \
- { P9_RUNLINKAT, "P9_RUNLINKAT" }, \
- { P9_TVERSION, "P9_TVERSION" }, \
- { P9_RVERSION, "P9_RVERSION" }, \
- { P9_TAUTH, "P9_TAUTH" }, \
- { P9_RAUTH, "P9_RAUTH" }, \
- { P9_TATTACH, "P9_TATTACH" }, \
- { P9_RATTACH, "P9_RATTACH" }, \
- { P9_TERROR, "P9_TERROR" }, \
- { P9_RERROR, "P9_RERROR" }, \
- { P9_TFLUSH, "P9_TFLUSH" }, \
- { P9_RFLUSH, "P9_RFLUSH" }, \
- { P9_TWALK, "P9_TWALK" }, \
- { P9_RWALK, "P9_RWALK" }, \
- { P9_TOPEN, "P9_TOPEN" }, \
- { P9_ROPEN, "P9_ROPEN" }, \
- { P9_TCREATE, "P9_TCREATE" }, \
- { P9_RCREATE, "P9_RCREATE" }, \
- { P9_TREAD, "P9_TREAD" }, \
- { P9_RREAD, "P9_RREAD" }, \
- { P9_TWRITE, "P9_TWRITE" }, \
- { P9_RWRITE, "P9_RWRITE" }, \
- { P9_TCLUNK, "P9_TCLUNK" }, \
- { P9_RCLUNK, "P9_RCLUNK" }, \
- { P9_TREMOVE, "P9_TREMOVE" }, \
- { P9_RREMOVE, "P9_RREMOVE" }, \
- { P9_TSTAT, "P9_TSTAT" }, \
- { P9_RSTAT, "P9_RSTAT" }, \
- { P9_TWSTAT, "P9_TWSTAT" }, \
- { P9_RWSTAT, "P9_RWSTAT" })
+ __print_symbolic(type, P9_MSG_T)
TRACE_EVENT(9p_client_req,
TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
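The EM()/EMe() construction above is a general tracing idiom rather than something 9p-specific: one X-macro list is expanded twice, first so TRACE_DEFINE_ENUM() exports the enum values to userspace tools, then again as { value, "string" } pairs for __print_symbolic(). A condensed, hypothetical illustration of the same two-pass pattern with an invented state list:

/* Hypothetical symbol list expanded twice, mirroring P9_MSG_T above. */
#define DEMO_STATES				\
	EM( DEMO_IDLE,    "IDLE"    )		\
	EM( DEMO_RUNNING, "RUNNING" )		\
	EMe(DEMO_DONE,    "DONE"    )

/* Pass 1: export the enum values so perf/trace-cmd can resolve them. */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

DEMO_STATES

/* Pass 2: reuse the same list as __print_symbolic() initializers. */
#undef EM
#undef EMe
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }

#define show_demo_state(st)	__print_symbolic(st, DEMO_STATES)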
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 1faecea..0b73af9 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -407,10 +407,10 @@ TRACE_EVENT(btrfs_sync_file,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
__entry->ino = inode->i_ino;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
__entry->datasync = datasync;
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
@@ -962,7 +962,7 @@ TRACE_EVENT(alloc_extent_state,
__entry->ip = IP
),
- TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
+ TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
show_gfp_flags(__entry->mask), (void *)__entry->ip)
);
@@ -982,7 +982,7 @@ TRACE_EVENT(free_extent_state,
__entry->ip = IP
),
- TP_printk(" state=%p; caller = %pF", __entry->state,
+ TP_printk(" state=%p; caller = %pS", __entry->state,
(void *)__entry->ip)
);
@@ -1117,61 +1117,6 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
TP_ARGS(wq)
);
-#define show_oper_type(type) \
- __print_symbolic(type, \
- { BTRFS_QGROUP_OPER_ADD_EXCL, "OPER_ADD_EXCL" }, \
- { BTRFS_QGROUP_OPER_ADD_SHARED, "OPER_ADD_SHARED" }, \
- { BTRFS_QGROUP_OPER_SUB_EXCL, "OPER_SUB_EXCL" }, \
- { BTRFS_QGROUP_OPER_SUB_SHARED, "OPER_SUB_SHARED" })
-
-DECLARE_EVENT_CLASS(btrfs_qgroup_oper,
-
- TP_PROTO(struct btrfs_qgroup_operation *oper),
-
- TP_ARGS(oper),
-
- TP_STRUCT__entry(
- __field( u64, ref_root )
- __field( u64, bytenr )
- __field( u64, num_bytes )
- __field( u64, seq )
- __field( int, type )
- __field( u64, elem_seq )
- ),
-
- TP_fast_assign(
- __entry->ref_root = oper->ref_root;
- __entry->bytenr = oper->bytenr,
- __entry->num_bytes = oper->num_bytes;
- __entry->seq = oper->seq;
- __entry->type = oper->type;
- __entry->elem_seq = oper->elem.seq;
- ),
-
- TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, "
- "seq = %llu, elem.seq = %llu, type = %s",
- (unsigned long long)__entry->ref_root,
- (unsigned long long)__entry->bytenr,
- (unsigned long long)__entry->num_bytes,
- (unsigned long long)__entry->seq,
- (unsigned long long)__entry->elem_seq,
- show_oper_type(__entry->type))
-);
-
-DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account,
-
- TP_PROTO(struct btrfs_qgroup_operation *oper),
-
- TP_ARGS(oper)
-);
-
-DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref,
-
- TP_PROTO(struct btrfs_qgroup_operation *oper),
-
- TP_ARGS(oper)
-);
-
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
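The %pF to %pS conversions in this header (and in the ext3/ext4 headers further down) are not cosmetic: the stored value is a plain text address captured via IP/_RET_IP_, and %pS symbolizes such an address directly, whereas %pF additionally dereferences a function-descriptor pointer on architectures like ia64 and ppc64 and would misprint a raw address there. A one-line sketch of the intended usage, with the surrounding call site assumed:

	/* %pS: symbolize a plain caller address; %pF is only for function pointers. */
	pr_info("caller = %pS\n", (void *)_RET_IP_);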
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
new file mode 100644
index 0000000..7586072
--- /dev/null
+++ b/include/trace/events/clk.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM clk
+
+#if !defined(_TRACE_CLK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CLK_H
+
+#include <linux/tracepoint.h>
+
+struct clk_core;
+
+DECLARE_EVENT_CLASS(clk,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(clk, clk_enable,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_enable_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_disable,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_disable_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_prepare,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_prepare_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_unprepare,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DEFINE_EVENT(clk, clk_unprepare_complete,
+
+ TP_PROTO(struct clk_core *core),
+
+ TP_ARGS(core)
+);
+
+DECLARE_EVENT_CLASS(clk_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field(unsigned long, rate )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->rate = rate;
+ ),
+
+ TP_printk("%s %lu", __get_str(name), (unsigned long)__entry->rate)
+);
+
+DEFINE_EVENT(clk_rate, clk_set_rate,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DEFINE_EVENT(clk_rate, clk_set_rate_complete,
+
+ TP_PROTO(struct clk_core *core, unsigned long rate),
+
+ TP_ARGS(core, rate)
+);
+
+DECLARE_EVENT_CLASS(clk_parent,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __string( pname, parent->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __assign_str(pname, parent->name);
+ ),
+
+ TP_printk("%s %s", __get_str(name), __get_str(pname))
+);
+
+DEFINE_EVENT(clk_parent, clk_set_parent,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent)
+);
+
+DEFINE_EVENT(clk_parent, clk_set_parent_complete,
+
+ TP_PROTO(struct clk_core *core, struct clk_core *parent),
+
+ TP_ARGS(core, parent)
+);
+
+DECLARE_EVENT_CLASS(clk_phase,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field( int, phase )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->phase = phase;
+ ),
+
+ TP_printk("%s %d", __get_str(name), (int)__entry->phase)
+);
+
+DEFINE_EVENT(clk_phase, clk_set_phase,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase)
+);
+
+DEFINE_EVENT(clk_phase, clk_set_phase_complete,
+
+ TP_PROTO(struct clk_core *core, int phase),
+
+ TP_ARGS(core, phase)
+);
+
+#endif /* _TRACE_CLK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
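The new clk.h header only declares the events; nothing fires until the tracepoints are instantiated once with CREATE_TRACE_POINTS and the trace_clk_*() calls are placed around the corresponding operations. The sketch below shows the usual pattern under those assumptions; example_clk_enable() and example_hw_enable() are hypothetical stand-ins (drivers/clk/clk.c is the real call site).

/* In exactly one .c file, instantiate the tracepoints declared above. */
#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

/* Hypothetical stand-in for the real hardware enable callback. */
static int example_hw_enable(struct clk_core *core)
{
	return 0;
}

static int example_clk_enable(struct clk_core *core)
{
	int ret;

	trace_clk_enable(core);
	ret = example_hw_enable(core);
	trace_clk_enable_complete(core);

	return ret;
}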
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
new file mode 100644
index 0000000..d7cd961
--- /dev/null
+++ b/include/trace/events/cma.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cma
+
+#if !defined(_TRACE_CMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CMA_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cma_alloc,
+
+ TP_PROTO(unsigned long pfn, const struct page *page,
+ unsigned int count, unsigned int align),
+
+ TP_ARGS(pfn, page, count, align),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
+ __field(unsigned int, count)
+ __field(unsigned int, align)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->page = page;
+ __entry->count = count;
+ __entry->align = align;
+ ),
+
+ TP_printk("pfn=%lx page=%p count=%u align=%u",
+ __entry->pfn,
+ __entry->page,
+ __entry->count,
+ __entry->align)
+);
+
+TRACE_EVENT(cma_release,
+
+ TP_PROTO(unsigned long pfn, const struct page *page,
+ unsigned int count),
+
+ TP_ARGS(pfn, page, count),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(const struct page *, page)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->page = page;
+ __entry->count = count;
+ ),
+
+ TP_printk("pfn=%lx page=%p count=%u",
+ __entry->pfn,
+ __entry->page,
+ __entry->count)
+);
+
+#endif /* _TRACE_CMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c6814b9..9a6a3fe 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -11,39 +11,55 @@
DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
- TP_PROTO(unsigned long nr_scanned,
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
unsigned long nr_taken),
- TP_ARGS(nr_scanned, nr_taken),
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
TP_STRUCT__entry(
+ __field(unsigned long, start_pfn)
+ __field(unsigned long, end_pfn)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_taken)
),
TP_fast_assign(
+ __entry->start_pfn = start_pfn;
+ __entry->end_pfn = end_pfn;
__entry->nr_scanned = nr_scanned;
__entry->nr_taken = nr_taken;
),
- TP_printk("nr_scanned=%lu nr_taken=%lu",
+ TP_printk("range=(0x%lx ~ 0x%lx) nr_scanned=%lu nr_taken=%lu",
+ __entry->start_pfn,
+ __entry->end_pfn,
__entry->nr_scanned,
__entry->nr_taken)
);
DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
- TP_PROTO(unsigned long nr_scanned,
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
unsigned long nr_taken),
- TP_ARGS(nr_scanned, nr_taken)
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
);
DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
- TP_PROTO(unsigned long nr_scanned,
+
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
unsigned long nr_taken),
- TP_ARGS(nr_scanned, nr_taken)
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
);
TRACE_EVENT(mm_compaction_migratepages,
@@ -85,47 +101,198 @@ TRACE_EVENT(mm_compaction_migratepages,
);
TRACE_EVENT(mm_compaction_begin,
- TP_PROTO(unsigned long zone_start, unsigned long migrate_start,
- unsigned long free_start, unsigned long zone_end),
+ TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
+ unsigned long free_pfn, unsigned long zone_end, bool sync),
- TP_ARGS(zone_start, migrate_start, free_start, zone_end),
+ TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
- __field(unsigned long, migrate_start)
- __field(unsigned long, free_start)
+ __field(unsigned long, migrate_pfn)
+ __field(unsigned long, free_pfn)
__field(unsigned long, zone_end)
+ __field(bool, sync)
),
TP_fast_assign(
__entry->zone_start = zone_start;
- __entry->migrate_start = migrate_start;
- __entry->free_start = free_start;
+ __entry->migrate_pfn = migrate_pfn;
+ __entry->free_pfn = free_pfn;
__entry->zone_end = zone_end;
+ __entry->sync = sync;
),
- TP_printk("zone_start=%lu migrate_start=%lu free_start=%lu zone_end=%lu",
+ TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s",
__entry->zone_start,
- __entry->migrate_start,
- __entry->free_start,
- __entry->zone_end)
+ __entry->migrate_pfn,
+ __entry->free_pfn,
+ __entry->zone_end,
+ __entry->sync ? "sync" : "async")
);
TRACE_EVENT(mm_compaction_end,
- TP_PROTO(int status),
+ TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
+ unsigned long free_pfn, unsigned long zone_end, bool sync,
+ int status),
- TP_ARGS(status),
+ TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status),
TP_STRUCT__entry(
+ __field(unsigned long, zone_start)
+ __field(unsigned long, migrate_pfn)
+ __field(unsigned long, free_pfn)
+ __field(unsigned long, zone_end)
+ __field(bool, sync)
__field(int, status)
),
TP_fast_assign(
+ __entry->zone_start = zone_start;
+ __entry->migrate_pfn = migrate_pfn;
+ __entry->free_pfn = free_pfn;
+ __entry->zone_end = zone_end;
+ __entry->sync = sync;
__entry->status = status;
),
- TP_printk("status=%d", __entry->status)
+ TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s status=%s",
+ __entry->zone_start,
+ __entry->migrate_pfn,
+ __entry->free_pfn,
+ __entry->zone_end,
+ __entry->sync ? "sync" : "async",
+ compaction_status_string[__entry->status])
+);
+
+TRACE_EVENT(mm_compaction_try_to_compact_pages,
+
+ TP_PROTO(
+ int order,
+ gfp_t gfp_mask,
+ enum migrate_mode mode),
+
+ TP_ARGS(order, gfp_mask, mode),
+
+ TP_STRUCT__entry(
+ __field(int, order)
+ __field(gfp_t, gfp_mask)
+ __field(enum migrate_mode, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->order = order;
+ __entry->gfp_mask = gfp_mask;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("order=%d gfp_mask=0x%x mode=%d",
+ __entry->order,
+ __entry->gfp_mask,
+ (int)__entry->mode)
+);
+
+DECLARE_EVENT_CLASS(mm_compaction_suitable_template,
+
+ TP_PROTO(struct zone *zone,
+ int order,
+ int ret),
+
+ TP_ARGS(zone, order, ret),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(char *, name)
+ __field(int, order)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = zone_to_nid(zone);
+ __entry->name = (char *)zone->name;
+ __entry->order = order;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("node=%d zone=%-8s order=%d ret=%s",
+ __entry->nid,
+ __entry->name,
+ __entry->order,
+ compaction_status_string[__entry->ret])
+);
+
+DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_finished,
+
+ TP_PROTO(struct zone *zone,
+ int order,
+ int ret),
+
+ TP_ARGS(zone, order, ret)
+);
+
+DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable,
+
+ TP_PROTO(struct zone *zone,
+ int order,
+ int ret),
+
+ TP_ARGS(zone, order, ret)
+);
+
+#ifdef CONFIG_COMPACTION
+DECLARE_EVENT_CLASS(mm_compaction_defer_template,
+
+ TP_PROTO(struct zone *zone, int order),
+
+ TP_ARGS(zone, order),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(char *, name)
+ __field(int, order)
+ __field(unsigned int, considered)
+ __field(unsigned int, defer_shift)
+ __field(int, order_failed)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = zone_to_nid(zone);
+ __entry->name = (char *)zone->name;
+ __entry->order = order;
+ __entry->considered = zone->compact_considered;
+ __entry->defer_shift = zone->compact_defer_shift;
+ __entry->order_failed = zone->compact_order_failed;
+ ),
+
+ TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu",
+ __entry->nid,
+ __entry->name,
+ __entry->order,
+ __entry->order_failed,
+ __entry->considered,
+ 1UL << __entry->defer_shift)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_deferred,
+
+ TP_PROTO(struct zone *zone, int order),
+
+ TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_compaction,
+
+ TP_PROTO(struct zone *zone, int order),
+
+ TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
+
+ TP_PROTO(struct zone *zone, int order),
+
+ TP_ARGS(zone, order)
);
+#endif
#endif /* _TRACE_COMPACTION_H */
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
index 6797b9d..fc733d2 100644
--- a/include/trace/events/ext3.h
+++ b/include/trace/events/ext3.h
@@ -144,7 +144,7 @@ TRACE_EVENT(ext3_mark_inode_dirty,
__entry->ip = IP;
),
- TP_printk("dev %d,%d ino %lu caller %pF",
+ TP_printk("dev %d,%d ino %lu caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, (void *)__entry->ip)
);
@@ -439,10 +439,10 @@ TRACE_EVENT(ext3_sync_file_enter,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->datasync = datasync;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
),
TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
@@ -710,9 +710,9 @@ TRACE_EVENT(ext3_unlink_enter,
TP_fast_assign(
__entry->parent = parent->i_ino;
- __entry->ino = dentry->d_inode->i_ino;
- __entry->size = dentry->d_inode->i_size;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
+ __entry->size = d_inode(dentry)->i_size;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
),
TP_printk("dev %d,%d ino %lu size %lld parent %ld",
@@ -734,8 +734,8 @@ TRACE_EVENT(ext3_unlink_exit,
),
TP_fast_assign(
- __entry->ino = dentry->d_inode->i_ino;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
__entry->ret = ret;
),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 6cfb841..594b4b2 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -73,6 +73,36 @@ struct extent_status;
{ FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
+TRACE_EVENT(ext4_other_inode_update_time,
+ TP_PROTO(struct inode *inode, ino_t orig_ino),
+
+ TP_ARGS(inode, orig_ino),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ino_t, orig_ino )
+ __field( uid_t, uid )
+ __field( gid_t, gid )
+ __field( __u16, mode )
+ ),
+
+ TP_fast_assign(
+ __entry->orig_ino = orig_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->uid = i_uid_read(inode);
+ __entry->gid = i_gid_read(inode);
+ __entry->mode = inode->i_mode;
+ ),
+
+ TP_printk("dev %d,%d orig_ino %lu ino %lu mode 0%o uid %u gid %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->orig_ino,
+ (unsigned long) __entry->ino, __entry->mode,
+ __entry->uid, __entry->gid)
+);
+
TRACE_EVENT(ext4_free_inode,
TP_PROTO(struct inode *inode),
@@ -210,7 +240,7 @@ TRACE_EVENT(ext4_mark_inode_dirty,
__entry->ip = IP;
),
- TP_printk("dev %d,%d ino %lu caller %pF",
+ TP_printk("dev %d,%d ino %lu caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, (void *)__entry->ip)
);
@@ -842,10 +872,10 @@ TRACE_EVENT(ext4_sync_file_enter,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->datasync = datasync;
- __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->parent = d_inode(dentry->d_parent)->i_ino;
),
TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
@@ -1155,15 +1185,14 @@ TRACE_EVENT(ext4_da_update_reserve_space,
);
TRACE_EVENT(ext4_da_reserve_space,
- TP_PROTO(struct inode *inode, int md_needed),
+ TP_PROTO(struct inode *inode),
- TP_ARGS(inode, md_needed),
+ TP_ARGS(inode),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u64, i_blocks )
- __field( int, md_needed )
__field( int, reserved_data_blocks )
__field( int, reserved_meta_blocks )
__field( __u16, mode )
@@ -1173,18 +1202,17 @@ TRACE_EVENT(ext4_da_reserve_space,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->i_blocks = inode->i_blocks;
- __entry->md_needed = md_needed;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu "
"reserved_data_blocks %d reserved_meta_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->i_blocks,
- __entry->md_needed, __entry->reserved_data_blocks,
+ __entry->reserved_data_blocks,
__entry->reserved_meta_blocks)
);
@@ -1423,10 +1451,10 @@ TRACE_EVENT(ext4_unlink_enter,
),
TP_fast_assign(
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->parent = parent->i_ino;
- __entry->size = dentry->d_inode->i_size;
+ __entry->size = d_inode(dentry)->i_size;
),
TP_printk("dev %d,%d ino %lu size %lld parent %lu",
@@ -1447,8 +1475,8 @@ TRACE_EVENT(ext4_unlink_exit,
),
TP_fast_assign(
- __entry->dev = dentry->d_inode->i_sb->s_dev;
- __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->ino = d_inode(dentry)->i_ino;
__entry->ret = ret;
),
@@ -1732,7 +1760,7 @@ TRACE_EVENT(ext4_journal_start,
__entry->rsv_blocks = rsv_blocks;
),
- TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF",
+ TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
);
@@ -1754,7 +1782,7 @@ TRACE_EVENT(ext4_journal_start_reserved,
__entry->blocks = blocks;
),
- TP_printk("dev %d,%d blocks, %d caller %pF",
+ TP_printk("dev %d,%d blocks, %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->blocks, (void *)__entry->ip)
);
@@ -2448,6 +2476,31 @@ TRACE_EVENT(ext4_collapse_range,
__entry->offset, __entry->len)
);
+TRACE_EVENT(ext4_insert_range,
+ TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
+
+ TP_ARGS(inode, offset, len),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->offset = offset;
+ __entry->len = len;
+ ),
+
+ TP_printk("dev %d,%d ino %lu offset %lld len %lld",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->offset, __entry->len)
+);
+
TRACE_EVENT(ext4_es_shrink,
TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
int nr_skipped, int retried),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index bbc4de9..04856a2 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -9,12 +9,51 @@
#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev)
#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino
+TRACE_DEFINE_ENUM(NODE);
+TRACE_DEFINE_ENUM(DATA);
+TRACE_DEFINE_ENUM(META);
+TRACE_DEFINE_ENUM(META_FLUSH);
+TRACE_DEFINE_ENUM(INMEM);
+TRACE_DEFINE_ENUM(INMEM_DROP);
+TRACE_DEFINE_ENUM(IPU);
+TRACE_DEFINE_ENUM(OPU);
+TRACE_DEFINE_ENUM(CURSEG_HOT_DATA);
+TRACE_DEFINE_ENUM(CURSEG_WARM_DATA);
+TRACE_DEFINE_ENUM(CURSEG_COLD_DATA);
+TRACE_DEFINE_ENUM(CURSEG_HOT_NODE);
+TRACE_DEFINE_ENUM(CURSEG_WARM_NODE);
+TRACE_DEFINE_ENUM(CURSEG_COLD_NODE);
+TRACE_DEFINE_ENUM(NO_CHECK_TYPE);
+TRACE_DEFINE_ENUM(GC_GREEDY);
+TRACE_DEFINE_ENUM(GC_CB);
+TRACE_DEFINE_ENUM(FG_GC);
+TRACE_DEFINE_ENUM(BG_GC);
+TRACE_DEFINE_ENUM(LFS);
+TRACE_DEFINE_ENUM(SSR);
+TRACE_DEFINE_ENUM(__REQ_RAHEAD);
+TRACE_DEFINE_ENUM(__REQ_WRITE);
+TRACE_DEFINE_ENUM(__REQ_SYNC);
+TRACE_DEFINE_ENUM(__REQ_NOIDLE);
+TRACE_DEFINE_ENUM(__REQ_FLUSH);
+TRACE_DEFINE_ENUM(__REQ_FUA);
+TRACE_DEFINE_ENUM(__REQ_PRIO);
+TRACE_DEFINE_ENUM(__REQ_META);
+TRACE_DEFINE_ENUM(CP_UMOUNT);
+TRACE_DEFINE_ENUM(CP_FASTBOOT);
+TRACE_DEFINE_ENUM(CP_SYNC);
+TRACE_DEFINE_ENUM(CP_RECOVERY);
+TRACE_DEFINE_ENUM(CP_DISCARD);
+
#define show_block_type(type) \
__print_symbolic(type, \
{ NODE, "NODE" }, \
{ DATA, "DATA" }, \
{ META, "META" }, \
- { META_FLUSH, "META_FLUSH" })
+ { META_FLUSH, "META_FLUSH" }, \
+ { INMEM, "INMEM" }, \
+ { INMEM_DROP, "INMEM_DROP" }, \
+ { IPU, "IN-PLACE" }, \
+ { OPU, "OUT-OF-PLACE" })
#define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA))
#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
@@ -72,10 +111,13 @@
#define show_cpreason(type) \
__print_symbolic(type, \
{ CP_UMOUNT, "Umount" }, \
+ { CP_FASTBOOT, "Fastboot" }, \
{ CP_SYNC, "Sync" }, \
+ { CP_RECOVERY, "Recovery" }, \
{ CP_DISCARD, "Discard" })
struct victim_sel_policy;
+struct f2fs_map_blocks;
DECLARE_EVENT_CLASS(f2fs__inode,
@@ -148,14 +190,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
TRACE_EVENT(f2fs_sync_file_exit,
- TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret),
+ TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret),
TP_ARGS(inode, need_cp, datasync, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(bool, need_cp)
+ __field(int, need_cp)
__field(int, datasync)
__field(int, ret)
),
@@ -190,7 +232,7 @@ TRACE_EVENT(f2fs_sync_fs,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->dirty = F2FS_SB(sb)->s_dirty;
+ __entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY);
__entry->wait = wait;
),
@@ -440,68 +482,35 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
__entry->err)
);
-TRACE_EVENT_CONDITION(f2fs_submit_page_bio,
-
- TP_PROTO(struct page *page, sector_t blkaddr, int type),
-
- TP_ARGS(page, blkaddr, type),
+TRACE_EVENT(f2fs_map_blocks,
+ TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
- TP_CONDITION(page->mapping),
+ TP_ARGS(inode, map, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(pgoff_t, index)
- __field(sector_t, blkaddr)
- __field(int, type)
- ),
-
- TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->index = page->index;
- __entry->blkaddr = blkaddr;
- __entry->type = type;
- ),
-
- TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "blkaddr = 0x%llx, bio_type = %s%s",
- show_dev_ino(__entry),
- (unsigned long)__entry->index,
- (unsigned long long)__entry->blkaddr,
- show_bio_type(__entry->type))
-);
-
-TRACE_EVENT(f2fs_get_data_block,
- TP_PROTO(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int ret),
-
- TP_ARGS(inode, iblock, bh, ret),
-
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(sector_t, iblock)
- __field(sector_t, bh_start)
- __field(size_t, bh_size)
+ __field(block_t, m_lblk)
+ __field(block_t, m_pblk)
+ __field(unsigned int, m_len)
__field(int, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->iblock = iblock;
- __entry->bh_start = bh->b_blocknr;
- __entry->bh_size = bh->b_size;
+ __entry->m_lblk = map->m_lblk;
+ __entry->m_pblk = map->m_pblk;
+ __entry->m_len = map->m_len;
__entry->ret = ret;
),
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
- "start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d",
+ "start blkaddr = 0x%llx, len = 0x%llx, err = %d",
show_dev_ino(__entry),
- (unsigned long long)__entry->iblock,
- (unsigned long long)__entry->bh_start,
- (unsigned long long)__entry->bh_size,
+ (unsigned long long)__entry->m_lblk,
+ (unsigned long long)__entry->m_pblk,
+ (unsigned long long)__entry->m_len,
__entry->ret)
);
@@ -680,11 +689,63 @@ TRACE_EVENT(f2fs_reserve_new_block,
__entry->ofs_in_node)
);
+DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
+
+ TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+ TP_ARGS(page, fio),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(block_t, blkaddr)
+ __field(int, rw)
+ __field(int, type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = page->mapping->host->i_sb->s_dev;
+ __entry->ino = page->mapping->host->i_ino;
+ __entry->index = page->index;
+ __entry->blkaddr = fio->blk_addr;
+ __entry->rw = fio->rw;
+ __entry->type = fio->type;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+ "blkaddr = 0x%llx, rw = %s%s, type = %s",
+ show_dev_ino(__entry),
+ (unsigned long)__entry->index,
+ (unsigned long long)__entry->blkaddr,
+ show_bio_type(__entry->rw),
+ show_block_type(__entry->type))
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
+
+ TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+ TP_ARGS(page, fio),
+
+ TP_CONDITION(page->mapping)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio,
+
+ TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+ TP_ARGS(page, fio),
+
+ TP_CONDITION(page->mapping)
+);
+
DECLARE_EVENT_CLASS(f2fs__submit_bio,
- TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+ TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+ struct bio *bio),
- TP_ARGS(sb, rw, type, bio),
+ TP_ARGS(sb, fio, bio),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -696,8 +757,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->rw = rw;
- __entry->type = type;
+ __entry->rw = fio->rw;
+ __entry->type = fio->type;
__entry->sector = bio->bi_iter.bi_sector;
__entry->size = bio->bi_iter.bi_size;
),
@@ -712,18 +773,20 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio,
- TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+ TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+ struct bio *bio),
- TP_ARGS(sb, rw, type, bio),
+ TP_ARGS(sb, fio, bio),
TP_CONDITION(bio)
);
DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio,
- TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+ TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+ struct bio *bio),
- TP_ARGS(sb, rw, type, bio),
+ TP_ARGS(sb, fio, bio),
TP_CONDITION(bio)
);
@@ -831,6 +894,13 @@ DEFINE_EVENT(f2fs__page, f2fs_writepage,
TP_ARGS(page, type)
);
+DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
DEFINE_EVENT(f2fs__page, f2fs_readpage,
TP_PROTO(struct page *page, int type),
@@ -852,6 +922,20 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
TP_ARGS(page, type)
);
+DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
+
+ TP_PROTO(struct page *page, int type),
+
+ TP_ARGS(page, type)
+);
+
TRACE_EVENT(f2fs_writepages,
TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type),
@@ -916,38 +1000,6 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_sync)
);
-TRACE_EVENT(f2fs_submit_page_mbio,
-
- TP_PROTO(struct page *page, int rw, int type, block_t blk_addr),
-
- TP_ARGS(page, rw, type, blk_addr),
-
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(ino_t, ino)
- __field(int, rw)
- __field(int, type)
- __field(pgoff_t, index)
- __field(block_t, block)
- ),
-
- TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->rw = rw;
- __entry->type = type;
- __entry->index = page->index;
- __entry->block = blk_addr;
- ),
-
- TP_printk("dev = (%d,%d), ino = %lu, %s%s, %s, index = %lu, blkaddr = 0x%llx",
- show_dev_ino(__entry),
- show_bio_type(__entry->rw),
- show_block_type(__entry->type),
- (unsigned long)__entry->index,
- (unsigned long long)__entry->block)
-);
-
TRACE_EVENT(f2fs_write_checkpoint,
TP_PROTO(struct super_block *sb, int reason, char *msg),
@@ -998,14 +1050,15 @@ TRACE_EVENT(f2fs_issue_discard,
TRACE_EVENT(f2fs_issue_flush,
- TP_PROTO(struct super_block *sb, bool nobarrier, bool flush_merge),
+ TP_PROTO(struct super_block *sb, unsigned int nobarrier,
+ unsigned int flush_merge),
TP_ARGS(sb, nobarrier, flush_merge),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(bool, nobarrier)
- __field(bool, flush_merge)
+ __field(unsigned int, nobarrier)
+ __field(unsigned int, flush_merge)
),
TP_fast_assign(
@@ -1019,6 +1072,140 @@ TRACE_EVENT(f2fs_issue_flush,
__entry->nobarrier ? "skip (nobarrier)" : "issue",
__entry->flush_merge ? " with flush_merge" : "")
);
+
+TRACE_EVENT(f2fs_lookup_extent_tree_start,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs),
+
+ TP_ARGS(inode, pgofs),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u",
+ show_dev_ino(__entry),
+ __entry->pgofs)
+);
+
+TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs,
+ struct extent_node *en),
+
+ TP_ARGS(inode, pgofs, en),
+
+ TP_CONDITION(en),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(unsigned int, fofs)
+ __field(u32, blk)
+ __field(unsigned int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->fofs = en->ei.fofs;
+ __entry->blk = en->ei.blk;
+ __entry->len = en->ei.len;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+ "ext_info(fofs: %u, blk: %u, len: %u)",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->fofs,
+ __entry->blk,
+ __entry->len)
+);
+
+TRACE_EVENT(f2fs_update_extent_tree,
+
+ TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr),
+
+ TP_ARGS(inode, pgofs, blkaddr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, pgofs)
+ __field(u32, blk)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgofs = pgofs;
+ __entry->blk = blkaddr;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, blkaddr = %u",
+ show_dev_ino(__entry),
+ __entry->pgofs,
+ __entry->blk)
+);
+
+TRACE_EVENT(f2fs_shrink_extent_tree,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt,
+ unsigned int tree_cnt),
+
+ TP_ARGS(sbi, node_cnt, tree_cnt),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, node_cnt)
+ __field(unsigned int, tree_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->node_cnt = node_cnt;
+ __entry->tree_cnt = tree_cnt;
+ ),
+
+ TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
+ show_dev(__entry),
+ __entry->node_cnt,
+ __entry->tree_cnt)
+);
+
+TRACE_EVENT(f2fs_destroy_extent_tree,
+
+ TP_PROTO(struct inode *inode, unsigned int node_cnt),
+
+ TP_ARGS(inode, node_cnt),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, node_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->node_cnt = node_cnt;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u",
+ show_dev_ino(__entry),
+ __entry->node_cnt)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
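The f2fs changes above collapse what used to be two near-identical events into one DECLARE_EVENT_CLASS plus two DEFINE_EVENT_CONDITION instances, so the bio and mbio submit paths share a single field layout and are suppressed whenever page->mapping is NULL. A minimal sketch of that pattern, using an invented "sample" event purely for illustration:

	DECLARE_EVENT_CLASS(sample__page_event,
		TP_PROTO(struct page *page, int type),
		TP_ARGS(page, type),
		TP_STRUCT__entry(
			__field(pgoff_t, index)
			__field(int, type)
		),
		TP_fast_assign(
			/* capture plain values only; no pointers are kept */
			__entry->index = page->index;
			__entry->type = type;
		),
		TP_printk("index = 0x%lx, type = %d",
			(unsigned long)__entry->index, __entry->type)
	);

	/* Fires only while the page is still attached to a mapping. */
	DEFINE_EVENT_CONDITION(sample__page_event, sample_page_submitted,
		TP_PROTO(struct page *page, int type),
		TP_ARGS(page, type),
		TP_CONDITION(page->mapping)
	);

The condition is evaluated before TP_fast_assign() runs, which is what makes dereferences such as page->mapping->host in the class body above safe.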
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 0421f49..42febb6 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -18,14 +18,14 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
TP_ARGS(page),
TP_STRUCT__entry(
- __field(struct page *, page)
+ __field(unsigned long, pfn)
__field(unsigned long, i_ino)
__field(unsigned long, index)
__field(dev_t, s_dev)
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->i_ino = page->mapping->host->i_ino;
__entry->index = page->index;
if (page->mapping->host->i_sb)
@@ -37,8 +37,8 @@ DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->index << PAGE_SHIFT)
);
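The filemap conversion above (and the matching kmem and vmscan ones later in this patch) stops storing a raw struct page pointer in the ring buffer: the pointer is only meaningful at the instant the event fires, whereas the pfn stays valid and can be turned back into a pointer with pfn_to_page() when the buffer is read. A trimmed fragment of the store/print pairing, with -1UL used as the "no page" sentinel as in the kmem hunks below (not a complete event definition):

	TP_STRUCT__entry(
		__field(unsigned long, pfn)	/* stable frame number instead of a pointer */
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
	),

	TP_printk("page=%p pfn=%lu",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0)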
diff --git a/include/trace/events/intel-sst.h b/include/trace/events/intel-sst.h
index 76c72d3..edc24e6 100644
--- a/include/trace/events/intel-sst.h
+++ b/include/trace/events/intel-sst.h
@@ -1,6 +1,13 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel-sst
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR intel_sst
+
#if !defined(_TRACE_INTEL_SST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_SST_H
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index a8f5c32..2c7befb 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
TP_ARGS(dev)
);
-DECLARE_EVENT_CLASS(iommu_map_unmap,
+TRACE_EVENT(map,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
TP_STRUCT__entry(
__field(u64, iova)
__field(u64, paddr)
- __field(int, size)
+ __field(size_t, size)
),
TP_fast_assign(
@@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
__entry->size = size;
),
- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x",
+ TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
__entry->iova, __entry->paddr, __entry->size
)
);
-DEFINE_EVENT(iommu_map_unmap, map,
+TRACE_EVENT(unmap,
- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
-
- TP_ARGS(iova, paddr, size)
-);
+ TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
-DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
+ TP_ARGS(iova, size, unmapped_size),
- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+ TP_STRUCT__entry(
+ __field(u64, iova)
+ __field(size_t, size)
+ __field(size_t, unmapped_size)
+ ),
- TP_ARGS(iova, paddr, size),
+ TP_fast_assign(
+ __entry->iova = iova;
+ __entry->size = size;
+ __entry->unmapped_size = unmapped_size;
+ ),
- TP_printk("IOMMU: iova=0x%016llx size=0x%x",
- __entry->iova, __entry->size
+ TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
+ __entry->iova, __entry->size, __entry->unmapped_size
)
);
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 3608beb..ff8f6c0 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -9,19 +9,34 @@
struct irqaction;
struct softirq_action;
-#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
+#define SOFTIRQ_NAME_LIST \
+ softirq_name(HI) \
+ softirq_name(TIMER) \
+ softirq_name(NET_TX) \
+ softirq_name(NET_RX) \
+ softirq_name(BLOCK) \
+ softirq_name(BLOCK_IOPOLL) \
+ softirq_name(TASKLET) \
+ softirq_name(SCHED) \
+ softirq_name(HRTIMER) \
+ softirq_name_end(RCU)
+
+#undef softirq_name
+#undef softirq_name_end
+
+#define softirq_name(sirq) TRACE_DEFINE_ENUM(sirq##_SOFTIRQ);
+#define softirq_name_end(sirq) TRACE_DEFINE_ENUM(sirq##_SOFTIRQ);
+
+SOFTIRQ_NAME_LIST
+
+#undef softirq_name
+#undef softirq_name_end
+
+#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq },
+#define softirq_name_end(sirq) { sirq##_SOFTIRQ, #sirq }
+
#define show_softirq_name(val) \
- __print_symbolic(val, \
- softirq_name(HI), \
- softirq_name(TIMER), \
- softirq_name(NET_TX), \
- softirq_name(NET_RX), \
- softirq_name(BLOCK), \
- softirq_name(BLOCK_IOPOLL), \
- softirq_name(TASKLET), \
- softirq_name(SCHED), \
- softirq_name(HRTIMER), \
- softirq_name(RCU))
+ __print_symbolic(val, SOFTIRQ_NAME_LIST)
/**
* irq_handler_entry - called immediately before the irq action handler
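The softirq_name()/softirq_name_end() rework above is the same two-pass list-macro trick used by the EM()/EMe() conversions later in this patch (migrate, sunrpc, tlb, v4l2, writeback): the list is expanded once so TRACE_DEFINE_ENUM() can export each enum value to user space (otherwise tools parsing the format string cannot resolve the names), then redefined and expanded again to build the { value, "name" } pairs fed to __print_symbolic(). A standalone C sketch of the expansion mechanism itself, with made-up names; in the kernel the first pass registers existing enums rather than defining them:

	#include <stdio.h>

	#define COLOR_LIST \
		EM(RED,   "red")   \
		EM(GREEN, "green") \
		EMe(BLUE, "blue")

	/* First pass: one expansion produces the enum constants. */
	#define EM(sym, str)	sym,
	#define EMe(sym, str)	sym
	enum color { COLOR_LIST };
	#undef EM
	#undef EMe

	/* Second pass: the same list builds a value -> string table. */
	#define EM(sym, str)	{ sym, str },
	#define EMe(sym, str)	{ sym, str }
	static const struct { int val; const char *name; } color_names[] = {
		COLOR_LIST
	};

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < sizeof(color_names) / sizeof(color_names[0]); i++)
			printf("%d -> %s\n", color_names[i].val, color_names[i].name);
		return 0;
	}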
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index aece134..f7554fd 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -140,32 +140,55 @@ DEFINE_EVENT(kmem_free, kfree,
TP_ARGS(call_site, ptr)
);
-DEFINE_EVENT(kmem_free, kmem_cache_free,
+DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
TP_PROTO(unsigned long call_site, const void *ptr),
- TP_ARGS(call_site, ptr)
+ TP_ARGS(call_site, ptr),
+
+ /*
+ * This trace can be potentially called from an offlined cpu.
+ * Since trace points use RCU and RCU should not be used from
+ * offline cpus, filter such calls out.
+ * While this trace can be called from a preemptable section,
+ * it has no impact on the condition since tasks can migrate
+ * only from online cpus to other online cpus. Thus its safe
+ * to use raw_smp_processor_id.
+ */
+ TP_CONDITION(cpu_online(raw_smp_processor_id()))
);
-TRACE_EVENT(mm_page_free,
+TRACE_EVENT_CONDITION(mm_page_free,
TP_PROTO(struct page *page, unsigned int order),
TP_ARGS(page, order),
+
+ /*
+ * This trace can be potentially called from an offlined cpu.
+ * Since trace points use RCU and RCU should not be used from
+ * offline cpus, filter such calls out.
+ * While this trace can be called from a preemptable section,
+ * it has no impact on the condition since tasks can migrate
+ * only from online cpus to other online cpus. Thus its safe
+ * to use raw_smp_processor_id.
+ */
+ TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->order = order;
),
TP_printk("page=%p pfn=%lu order=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->order)
);
@@ -176,18 +199,18 @@ TRACE_EVENT(mm_page_free_batched,
TP_ARGS(page, cold),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( int, cold )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->cold = cold;
),
TP_printk("page=%p pfn=%lu order=0 cold=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->cold)
);
@@ -199,22 +222,22 @@ TRACE_EVENT(mm_page_alloc,
TP_ARGS(page, order, gfp_flags, migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
__field( gfp_t, gfp_flags )
__field( int, migratetype )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->gfp_flags = gfp_flags;
__entry->migratetype = migratetype;
),
TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
- __entry->page,
- __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+ __entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
__entry->migratetype,
show_gfp_flags(__entry->gfp_flags))
@@ -227,20 +250,20 @@ DECLARE_EVENT_CLASS(mm_page,
TP_ARGS(page, order, migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( unsigned int, order )
__field( int, migratetype )
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page ? page_to_pfn(page) : -1UL;
__entry->order = order;
__entry->migratetype = migratetype;
),
TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
- __entry->page,
- __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
+ __entry->pfn != -1UL ? __entry->pfn : 0,
__entry->order,
__entry->migratetype,
__entry->order == 0)
@@ -253,14 +276,37 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
TP_ARGS(page, order, migratetype)
);
-DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
+TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
TP_PROTO(struct page *page, unsigned int order, int migratetype),
TP_ARGS(page, order, migratetype),
+ /*
+ * This trace can be potentially called from an offlined cpu.
+ * Since trace points use RCU and RCU should not be used from
+ * offline cpus, filter such calls out.
+ * While this trace can be called from a preemptable section,
+ * it has no impact on the condition since tasks can migrate
+ * only from online cpus to other online cpus. Thus its safe
+ * to use raw_smp_processor_id.
+ */
+ TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, pfn )
+ __field( unsigned int, order )
+ __field( int, migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = page ? page_to_pfn(page) : -1UL;
+ __entry->order = order;
+ __entry->migratetype = migratetype;
+ ),
+
TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
- __entry->page, page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn), __entry->pfn,
__entry->order, __entry->migratetype)
);
@@ -268,14 +314,14 @@ TRACE_EVENT(mm_page_alloc_extfrag,
TP_PROTO(struct page *page,
int alloc_order, int fallback_order,
- int alloc_migratetype, int fallback_migratetype, int new_migratetype),
+ int alloc_migratetype, int fallback_migratetype),
TP_ARGS(page,
alloc_order, fallback_order,
- alloc_migratetype, fallback_migratetype, new_migratetype),
+ alloc_migratetype, fallback_migratetype),
TP_STRUCT__entry(
- __field( struct page *, page )
+ __field( unsigned long, pfn )
__field( int, alloc_order )
__field( int, fallback_order )
__field( int, alloc_migratetype )
@@ -284,17 +330,18 @@ TRACE_EVENT(mm_page_alloc_extfrag,
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->alloc_order = alloc_order;
__entry->fallback_order = fallback_order;
__entry->alloc_migratetype = alloc_migratetype;
__entry->fallback_migratetype = fallback_migratetype;
- __entry->change_ownership = (new_migratetype == alloc_migratetype);
+ __entry->change_ownership = (alloc_migratetype ==
+ get_pageblock_migratetype(page));
),
TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
__entry->alloc_order,
__entry->fallback_order,
pageblock_order,
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 86b399c..a44062d 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -37,6 +37,25 @@ TRACE_EVENT(kvm_userspace_exit,
__entry->errno < 0 ? -__entry->errno : __entry->reason)
);
+TRACE_EVENT(kvm_vcpu_wakeup,
+ TP_PROTO(__u64 ns, bool waited),
+ TP_ARGS(ns, waited),
+
+ TP_STRUCT__entry(
+ __field( __u64, ns )
+ __field( bool, waited )
+ ),
+
+ TP_fast_assign(
+ __entry->ns = ns;
+ __entry->waited = waited;
+ ),
+
+ TP_printk("%s time %lld ns",
+ __entry->waited ? "wait" : "poll",
+ __entry->ns)
+);
+
#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h
new file mode 100644
index 0000000..8b0fbd9
--- /dev/null
+++ b/include/trace/events/libata.h
@@ -0,0 +1,325 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM libata
+
+#if !defined(_TRACE_LIBATA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LIBATA_H
+
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#define ata_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val) \
+ __print_symbolic(val, \
+ ata_opcode_name(ATA_CMD_DEV_RESET), \
+ ata_opcode_name(ATA_CMD_CHK_POWER), \
+ ata_opcode_name(ATA_CMD_STANDBY), \
+ ata_opcode_name(ATA_CMD_IDLE), \
+ ata_opcode_name(ATA_CMD_EDD), \
+ ata_opcode_name(ATA_CMD_DOWNLOAD_MICRO), \
+ ata_opcode_name(ATA_CMD_DOWNLOAD_MICRO_DMA), \
+ ata_opcode_name(ATA_CMD_NOP), \
+ ata_opcode_name(ATA_CMD_FLUSH), \
+ ata_opcode_name(ATA_CMD_FLUSH_EXT), \
+ ata_opcode_name(ATA_CMD_ID_ATA), \
+ ata_opcode_name(ATA_CMD_ID_ATAPI), \
+ ata_opcode_name(ATA_CMD_SERVICE), \
+ ata_opcode_name(ATA_CMD_READ), \
+ ata_opcode_name(ATA_CMD_READ_EXT), \
+ ata_opcode_name(ATA_CMD_READ_QUEUED), \
+ ata_opcode_name(ATA_CMD_READ_STREAM_EXT), \
+ ata_opcode_name(ATA_CMD_READ_STREAM_DMA_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE), \
+ ata_opcode_name(ATA_CMD_WRITE_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_QUEUED), \
+ ata_opcode_name(ATA_CMD_WRITE_STREAM_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_STREAM_DMA_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_FUA_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_QUEUED_FUA_EXT), \
+ ata_opcode_name(ATA_CMD_FPDMA_READ), \
+ ata_opcode_name(ATA_CMD_FPDMA_WRITE), \
+ ata_opcode_name(ATA_CMD_FPDMA_SEND), \
+ ata_opcode_name(ATA_CMD_FPDMA_RECV), \
+ ata_opcode_name(ATA_CMD_PIO_READ), \
+ ata_opcode_name(ATA_CMD_PIO_READ_EXT), \
+ ata_opcode_name(ATA_CMD_PIO_WRITE), \
+ ata_opcode_name(ATA_CMD_PIO_WRITE_EXT), \
+ ata_opcode_name(ATA_CMD_READ_MULTI), \
+ ata_opcode_name(ATA_CMD_READ_MULTI_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_MULTI), \
+ ata_opcode_name(ATA_CMD_WRITE_MULTI_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_MULTI_FUA_EXT), \
+ ata_opcode_name(ATA_CMD_SET_FEATURES), \
+ ata_opcode_name(ATA_CMD_SET_MULTI), \
+ ata_opcode_name(ATA_CMD_PACKET), \
+ ata_opcode_name(ATA_CMD_VERIFY), \
+ ata_opcode_name(ATA_CMD_VERIFY_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_UNCORR_EXT), \
+ ata_opcode_name(ATA_CMD_STANDBYNOW1), \
+ ata_opcode_name(ATA_CMD_IDLEIMMEDIATE), \
+ ata_opcode_name(ATA_CMD_SLEEP), \
+ ata_opcode_name(ATA_CMD_INIT_DEV_PARAMS), \
+ ata_opcode_name(ATA_CMD_READ_NATIVE_MAX), \
+ ata_opcode_name(ATA_CMD_READ_NATIVE_MAX_EXT), \
+ ata_opcode_name(ATA_CMD_SET_MAX), \
+ ata_opcode_name(ATA_CMD_SET_MAX_EXT), \
+ ata_opcode_name(ATA_CMD_READ_LOG_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_LOG_EXT), \
+ ata_opcode_name(ATA_CMD_READ_LOG_DMA_EXT), \
+ ata_opcode_name(ATA_CMD_WRITE_LOG_DMA_EXT), \
+ ata_opcode_name(ATA_CMD_TRUSTED_NONDATA), \
+ ata_opcode_name(ATA_CMD_TRUSTED_RCV), \
+ ata_opcode_name(ATA_CMD_TRUSTED_RCV_DMA), \
+ ata_opcode_name(ATA_CMD_TRUSTED_SND), \
+ ata_opcode_name(ATA_CMD_TRUSTED_SND_DMA), \
+ ata_opcode_name(ATA_CMD_PMP_READ), \
+ ata_opcode_name(ATA_CMD_PMP_READ_DMA), \
+ ata_opcode_name(ATA_CMD_PMP_WRITE), \
+ ata_opcode_name(ATA_CMD_PMP_WRITE_DMA), \
+ ata_opcode_name(ATA_CMD_CONF_OVERLAY), \
+ ata_opcode_name(ATA_CMD_SEC_SET_PASS), \
+ ata_opcode_name(ATA_CMD_SEC_UNLOCK), \
+ ata_opcode_name(ATA_CMD_SEC_ERASE_PREP), \
+ ata_opcode_name(ATA_CMD_SEC_ERASE_UNIT), \
+ ata_opcode_name(ATA_CMD_SEC_FREEZE_LOCK), \
+ ata_opcode_name(ATA_CMD_SEC_DISABLE_PASS), \
+ ata_opcode_name(ATA_CMD_CONFIG_STREAM), \
+ ata_opcode_name(ATA_CMD_SMART), \
+ ata_opcode_name(ATA_CMD_MEDIA_LOCK), \
+ ata_opcode_name(ATA_CMD_MEDIA_UNLOCK), \
+ ata_opcode_name(ATA_CMD_DSM), \
+ ata_opcode_name(ATA_CMD_CHK_MED_CRD_TYP), \
+ ata_opcode_name(ATA_CMD_CFA_REQ_EXT_ERR), \
+ ata_opcode_name(ATA_CMD_CFA_WRITE_NE), \
+ ata_opcode_name(ATA_CMD_CFA_TRANS_SECT), \
+ ata_opcode_name(ATA_CMD_CFA_ERASE), \
+ ata_opcode_name(ATA_CMD_CFA_WRITE_MULT_NE), \
+ ata_opcode_name(ATA_CMD_REQ_SENSE_DATA), \
+ ata_opcode_name(ATA_CMD_SANITIZE_DEVICE), \
+ ata_opcode_name(ATA_CMD_RESTORE), \
+ ata_opcode_name(ATA_CMD_READ_LONG), \
+ ata_opcode_name(ATA_CMD_READ_LONG_ONCE), \
+ ata_opcode_name(ATA_CMD_WRITE_LONG), \
+ ata_opcode_name(ATA_CMD_WRITE_LONG_ONCE))
+
+#define ata_error_name(result) { result, #result }
+#define show_error_name(val) \
+ __print_symbolic(val, \
+ ata_error_name(ATA_ICRC), \
+ ata_error_name(ATA_UNC), \
+ ata_error_name(ATA_MC), \
+ ata_error_name(ATA_IDNF), \
+ ata_error_name(ATA_MCR), \
+ ata_error_name(ATA_ABORTED), \
+ ata_error_name(ATA_TRK0NF), \
+ ata_error_name(ATA_AMNF))
+
+#define ata_protocol_name(proto) { proto, #proto }
+#define show_protocol_name(val) \
+ __print_symbolic(val, \
+ ata_protocol_name(ATA_PROT_UNKNOWN), \
+ ata_protocol_name(ATA_PROT_NODATA), \
+ ata_protocol_name(ATA_PROT_PIO), \
+ ata_protocol_name(ATA_PROT_DMA), \
+ ata_protocol_name(ATA_PROT_NCQ), \
+ ata_protocol_name(ATAPI_PROT_NODATA), \
+ ata_protocol_name(ATAPI_PROT_PIO), \
+ ata_protocol_name(ATAPI_PROT_DMA))
+
+const char *libata_trace_parse_status(struct trace_seq*, unsigned char);
+#define __parse_status(s) libata_trace_parse_status(p, s)
+
+const char *libata_trace_parse_eh_action(struct trace_seq *, unsigned int);
+#define __parse_eh_action(a) libata_trace_parse_eh_action(p, a)
+
+const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int);
+#define __parse_eh_err_mask(m) libata_trace_parse_eh_err_mask(p, m)
+
+const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int);
+#define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f)
+
+TRACE_EVENT(ata_qc_issue,
+
+ TP_PROTO(struct ata_queued_cmd *qc),
+
+ TP_ARGS(qc),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned char, cmd )
+ __field( unsigned char, dev )
+ __field( unsigned char, lbal )
+ __field( unsigned char, lbam )
+ __field( unsigned char, lbah )
+ __field( unsigned char, nsect )
+ __field( unsigned char, feature )
+ __field( unsigned char, hob_lbal )
+ __field( unsigned char, hob_lbam )
+ __field( unsigned char, hob_lbah )
+ __field( unsigned char, hob_nsect )
+ __field( unsigned char, hob_feature )
+ __field( unsigned char, ctl )
+ __field( unsigned char, proto )
+ __field( unsigned long, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->proto = qc->tf.protocol;
+ __entry->cmd = qc->tf.command;
+ __entry->dev = qc->tf.device;
+ __entry->lbal = qc->tf.lbal;
+ __entry->lbam = qc->tf.lbam;
+ __entry->lbah = qc->tf.lbah;
+ __entry->hob_lbal = qc->tf.hob_lbal;
+ __entry->hob_lbam = qc->tf.hob_lbam;
+ __entry->hob_lbah = qc->tf.hob_lbah;
+ __entry->feature = qc->tf.feature;
+ __entry->hob_feature = qc->tf.hob_feature;
+ __entry->nsect = qc->tf.nsect;
+ __entry->hob_nsect = qc->tf.hob_nsect;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s " \
+ " tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ show_protocol_name(__entry->proto),
+ show_opcode_name(__entry->cmd),
+ __entry->cmd, __entry->feature, __entry->nsect,
+ __entry->lbal, __entry->lbam, __entry->lbah,
+ __entry->hob_feature, __entry->hob_nsect,
+ __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah,
+ __entry->dev)
+);
+
+DECLARE_EVENT_CLASS(ata_qc_complete_template,
+
+ TP_PROTO(struct ata_queued_cmd *qc),
+
+ TP_ARGS(qc),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned char, status )
+ __field( unsigned char, dev )
+ __field( unsigned char, lbal )
+ __field( unsigned char, lbam )
+ __field( unsigned char, lbah )
+ __field( unsigned char, nsect )
+ __field( unsigned char, error )
+ __field( unsigned char, hob_lbal )
+ __field( unsigned char, hob_lbam )
+ __field( unsigned char, hob_lbah )
+ __field( unsigned char, hob_nsect )
+ __field( unsigned char, hob_feature )
+ __field( unsigned char, ctl )
+ __field( unsigned long, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->status = qc->result_tf.command;
+ __entry->dev = qc->result_tf.device;
+ __entry->lbal = qc->result_tf.lbal;
+ __entry->lbam = qc->result_tf.lbam;
+ __entry->lbah = qc->result_tf.lbah;
+ __entry->hob_lbal = qc->result_tf.hob_lbal;
+ __entry->hob_lbam = qc->result_tf.hob_lbam;
+ __entry->hob_lbah = qc->result_tf.hob_lbah;
+ __entry->error = qc->result_tf.feature;
+ __entry->hob_feature = qc->result_tf.hob_feature;
+ __entry->nsect = qc->result_tf.nsect;
+ __entry->hob_nsect = qc->result_tf.hob_nsect;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \
+ " res=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ __parse_qc_flags(__entry->flags),
+ __parse_status(__entry->status),
+ __entry->status, __entry->error, __entry->nsect,
+ __entry->lbal, __entry->lbam, __entry->lbah,
+ __entry->hob_feature, __entry->hob_nsect,
+ __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah,
+ __entry->dev)
+);
+
+DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_internal,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
+DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_failed,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
+DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_done,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
+TRACE_EVENT(ata_eh_link_autopsy,
+
+ TP_PROTO(struct ata_device *dev, unsigned int eh_action, unsigned int eh_err_mask),
+
+ TP_ARGS(dev, eh_action, eh_err_mask),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, eh_action )
+ __field( unsigned int, eh_err_mask)
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = dev->link->ap->print_id;
+ __entry->ata_dev = dev->link->pmp + dev->devno;
+ __entry->eh_action = eh_action;
+ __entry->eh_err_mask = eh_err_mask;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u eh_action=%s err_mask=%s",
+ __entry->ata_port, __entry->ata_dev,
+ __parse_eh_action(__entry->eh_action),
+ __parse_eh_err_mask(__entry->eh_err_mask))
+);
+
+TRACE_EVENT(ata_eh_link_autopsy_qc,
+
+ TP_PROTO(struct ata_queued_cmd *qc),
+
+ TP_ARGS(qc),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned int, qc_flags )
+ __field( unsigned int, eh_err_mask)
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->qc_flags = qc->flags;
+ __entry->eh_err_mask = qc->err_mask;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s err_mask=%s",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ __parse_qc_flags(__entry->qc_flags),
+ __parse_eh_err_mask(__entry->eh_err_mask))
+);
+
+#endif /* _TRACE_LIBATA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
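Beyond the large __print_symbolic() tables, the new libata header also shows the convention for custom pretty-printers: declare a helper that takes the struct trace_seq *p implicitly available inside TP_printk() plus the raw value, and hide it behind a short __parse_*() macro. A hedged sketch of what such a helper typically looks like on the .c side (names and flag bits are invented; the real libata_trace_parse_*() implementations live elsewhere in the driver, not in this header):

	const char *sample_trace_parse_flags(struct trace_seq *, unsigned int);
	#define __parse_flags(f) sample_trace_parse_flags(p, f)

	const char *sample_trace_parse_flags(struct trace_seq *p, unsigned int flags)
	{
		const char *ret = trace_seq_buffer_ptr(p);

		/* append human-readable flag names, then terminate the string */
		trace_seq_printf(p, "%s%s",
				 flags & 0x1 ? "FOO " : "",
				 flags & 0x2 ? "BAR" : "");
		trace_seq_putc(p, 0);

		return ret;
	}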
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index dd2b546..539b25a 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -7,18 +7,40 @@
#include <linux/tracepoint.h>
#define MIGRATE_MODE \
- {MIGRATE_ASYNC, "MIGRATE_ASYNC"}, \
- {MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT"}, \
- {MIGRATE_SYNC, "MIGRATE_SYNC"}
+ EM( MIGRATE_ASYNC, "MIGRATE_ASYNC") \
+ EM( MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT") \
+ EMe(MIGRATE_SYNC, "MIGRATE_SYNC")
+
#define MIGRATE_REASON \
- {MR_COMPACTION, "compaction"}, \
- {MR_MEMORY_FAILURE, "memory_failure"}, \
- {MR_MEMORY_HOTPLUG, "memory_hotplug"}, \
- {MR_SYSCALL, "syscall_or_cpuset"}, \
- {MR_MEMPOLICY_MBIND, "mempolicy_mbind"}, \
- {MR_NUMA_MISPLACED, "numa_misplaced"}, \
- {MR_CMA, "cma"}
+ EM( MR_COMPACTION, "compaction") \
+ EM( MR_MEMORY_FAILURE, "memory_failure") \
+ EM( MR_MEMORY_HOTPLUG, "memory_hotplug") \
+ EM( MR_SYSCALL, "syscall_or_cpuset") \
+ EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
+ EM( MR_NUMA_MISPLACED, "numa_misplaced") \
+ EMe(MR_CMA, "cma")
+
+/*
+ * First define the enums in the above macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+MIGRATE_MODE
+MIGRATE_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
TRACE_EVENT(mm_migrate_pages,
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 81c4c18..28c4599 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
__assign_str(name, mod->name);
),
- TP_printk("%s call_site=%pf refcnt=%d",
+ TP_printk("%s call_site=%ps refcnt=%d",
__get_str(name), (void *)__entry->ip, __entry->refcnt)
);
@@ -121,7 +121,7 @@ TRACE_EVENT(module_request,
__assign_str(name, name);
),
- TP_printk("%s wait=%d call_site=%pf",
+ TP_printk("%s wait=%d call_site=%ps",
__get_str(name), (int)__entry->wait, (void *)__entry->ip)
);
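The %pf/%pF to %ps/%pS switch here (repeated in random.h below) is about how vsprintf resolves the recorded address: the uppercase-F forms expect a function descriptor and dereference it first, which is only correct on descriptor-based ABIs (ia64, ppc64 ELFv1, parisc), while these events store a plain code address such as a saved return IP, for which the direct-symbol %ps/%pS forms are the right choice. A one-line usage sketch, with addr standing in for whatever address the caller recorded:

	/* %pS resolves symbol+offset, %ps the bare symbol name */
	pr_debug("called from %pS (%ps)\n", (void *)addr, (void *)addr);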
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 1de256b..49cc7c3 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -40,9 +40,9 @@ TRACE_EVENT(net_dev_start_xmit,
__assign_str(name, dev->name);
__entry->queue_mapping = skb->queue_mapping;
__entry->skbaddr = skb;
- __entry->vlan_tagged = vlan_tx_tag_present(skb);
+ __entry->vlan_tagged = skb_vlan_tag_present(skb);
__entry->vlan_proto = ntohs(skb->vlan_proto);
- __entry->vlan_tci = vlan_tx_tag_get(skb);
+ __entry->vlan_tci = skb_vlan_tag_get(skb);
__entry->protocol = ntohs(skb->protocol);
__entry->ip_summed = skb->ip_summed;
__entry->len = skb->len;
@@ -174,9 +174,9 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
#endif
__entry->queue_mapping = skb->queue_mapping;
__entry->skbaddr = skb;
- __entry->vlan_tagged = vlan_tx_tag_present(skb);
+ __entry->vlan_tagged = skb_vlan_tag_present(skb);
__entry->vlan_proto = ntohs(skb->vlan_proto);
- __entry->vlan_tci = vlan_tx_tag_get(skb);
+ __entry->vlan_tci = skb_vlan_tag_get(skb);
__entry->protocol = ntohs(skb->protocol);
__entry->ip_summed = skb->ip_summed;
__entry->hash = skb->hash;
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index d19840b..284244e 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -7,7 +7,7 @@
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#define TPS(x) tracepoint_string(x)
@@ -42,45 +42,54 @@ TRACE_EVENT(pstate_sample,
TP_PROTO(u32 core_busy,
u32 scaled_busy,
- u32 state,
+ u32 from,
+ u32 to,
u64 mperf,
u64 aperf,
+ u64 tsc,
u32 freq
),
TP_ARGS(core_busy,
scaled_busy,
- state,
+ from,
+ to,
mperf,
aperf,
+ tsc,
freq
),
TP_STRUCT__entry(
__field(u32, core_busy)
__field(u32, scaled_busy)
- __field(u32, state)
+ __field(u32, from)
+ __field(u32, to)
__field(u64, mperf)
__field(u64, aperf)
+ __field(u64, tsc)
__field(u32, freq)
-
- ),
+ ),
TP_fast_assign(
__entry->core_busy = core_busy;
__entry->scaled_busy = scaled_busy;
- __entry->state = state;
+ __entry->from = from;
+ __entry->to = to;
__entry->mperf = mperf;
__entry->aperf = aperf;
+ __entry->tsc = tsc;
__entry->freq = freq;
),
- TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
+ TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ",
(unsigned long)__entry->core_busy,
(unsigned long)__entry->scaled_busy,
- (unsigned long)__entry->state,
+ (unsigned long)__entry->from,
+ (unsigned long)__entry->to,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
+ (unsigned long long)__entry->tsc,
(unsigned long)__entry->freq
)
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
index 805af6d..4684de3 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -22,7 +22,7 @@ TRACE_EVENT(add_device_randomness,
__entry->IP = IP;
),
- TP_printk("bytes %d caller %pF",
+ TP_printk("bytes %d caller %pS",
__entry->bytes, (void *)__entry->IP)
);
@@ -43,7 +43,7 @@ DECLARE_EVENT_CLASS(random__mix_pool_bytes,
__entry->IP = IP;
),
- TP_printk("%s pool: bytes %d caller %pF",
+ TP_printk("%s pool: bytes %d caller %pS",
__entry->pool_name, __entry->bytes, (void *)__entry->IP)
);
@@ -82,7 +82,7 @@ TRACE_EVENT(credit_entropy_bits,
),
TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
- "caller %pF", __entry->pool_name, __entry->bits,
+ "caller %pS", __entry->pool_name, __entry->bits,
__entry->entropy_count, __entry->entropy_total,
(void *)__entry->IP)
);
@@ -207,7 +207,7 @@ DECLARE_EVENT_CLASS(random__get_random_bytes,
__entry->IP = IP;
),
- TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+ TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP)
);
DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
@@ -242,7 +242,7 @@ DECLARE_EVENT_CLASS(random__extract_entropy,
__entry->IP = IP;
),
- TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+ TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
__entry->pool_name, __entry->nbytes, __entry->entropy_count,
(void *)__entry->IP)
);
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
deleted file mode 100644
index 23d5615..0000000
--- a/include/trace/events/regmap.h
+++ /dev/null
@@ -1,252 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM regmap
-
-#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_REGMAP_H
-
-#include <linux/ktime.h>
-#include <linux/tracepoint.h>
-
-struct device;
-struct regmap;
-
-/*
- * Log register events
- */
-DECLARE_EVENT_CLASS(regmap_reg,
-
- TP_PROTO(struct device *dev, unsigned int reg,
- unsigned int val),
-
- TP_ARGS(dev, reg, val),
-
- TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __field( unsigned int, reg )
- __field( unsigned int, val )
- ),
-
- TP_fast_assign(
- __assign_str(name, dev_name(dev));
- __entry->reg = reg;
- __entry->val = val;
- ),
-
- TP_printk("%s reg=%x val=%x", __get_str(name),
- (unsigned int)__entry->reg,
- (unsigned int)__entry->val)
-);
-
-DEFINE_EVENT(regmap_reg, regmap_reg_write,
-
- TP_PROTO(struct device *dev, unsigned int reg,
- unsigned int val),
-
- TP_ARGS(dev, reg, val)
-
-);
-
-DEFINE_EVENT(regmap_reg, regmap_reg_read,
-
- TP_PROTO(struct device *dev, unsigned int reg,
- unsigned int val),
-
- TP_ARGS(dev, reg, val)
-
-);
-
-DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
-
- TP_PROTO(struct device *dev, unsigned int reg,
- unsigned int val),
-
- TP_ARGS(dev, reg, val)
-
-);
-
-DECLARE_EVENT_CLASS(regmap_block,
-
- TP_PROTO(struct device *dev, unsigned int reg, int count),
-
- TP_ARGS(dev, reg, count),
-
- TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __field( unsigned int, reg )
- __field( int, count )
- ),
-
- TP_fast_assign(
- __assign_str(name, dev_name(dev));
- __entry->reg = reg;
- __entry->count = count;
- ),
-
- TP_printk("%s reg=%x count=%d", __get_str(name),
- (unsigned int)__entry->reg,
- (int)__entry->count)
-);
-
-DEFINE_EVENT(regmap_block, regmap_hw_read_start,
-
- TP_PROTO(struct device *dev, unsigned int reg, int count),
-
- TP_ARGS(dev, reg, count)
-);
-
-DEFINE_EVENT(regmap_block, regmap_hw_read_done,
-
- TP_PROTO(struct device *dev, unsigned int reg, int count),
-
- TP_ARGS(dev, reg, count)
-);
-
-DEFINE_EVENT(regmap_block, regmap_hw_write_start,
-
- TP_PROTO(struct device *dev, unsigned int reg, int count),
-
- TP_ARGS(dev, reg, count)
-);
-
-DEFINE_EVENT(regmap_block, regmap_hw_write_done,
-
- TP_PROTO(struct device *dev, unsigned int reg, int count),
-
- TP_ARGS(dev, reg, count)
-);
-
-TRACE_EVENT(regcache_sync,
-
- TP_PROTO(struct device *dev, const char *type,
- const char *status),
-
- TP_ARGS(dev, type, status),
-
- TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __string( status, status )
- __string( type, type )
- __field( int, type )
- ),
-
- TP_fast_assign(
- __assign_str(name, dev_name(dev));
- __assign_str(status, status);
- __assign_str(type, type);
- ),
-
- TP_printk("%s type=%s status=%s", __get_str(name),
- __get_str(type), __get_str(status))
-);
-
-DECLARE_EVENT_CLASS(regmap_bool,
-
- TP_PROTO(struct device *dev, bool flag),
-
- TP_ARGS(dev, flag),
-
- TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __field( int, flag )
- ),
-
- TP_fast_assign(
- __assign_str(name, dev_name(dev));
- __entry->flag = flag;
- ),
-
- TP_printk("%s flag=%d", __get_str(name),
- (int)__entry->flag)
-);
-
-DEFINE_EVENT(regmap_bool, regmap_cache_only,
-
- TP_PROTO(struct device *dev, bool flag),
-
- TP_ARGS(dev, flag)
-
-);
-
-DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
-
- TP_PROTO(struct device *dev, bool flag),
-
- TP_ARGS(dev, flag)
-
-);
-
-DECLARE_EVENT_CLASS(regmap_async,
-
- TP_PROTO(struct device *dev),
-
- TP_ARGS(dev),
-
- TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- ),
-
- TP_fast_assign(
- __assign_str(name, dev_name(dev));
- ),
-
- TP_printk("%s", __get_str(name))
-);
-
-DEFINE_EVENT(regmap_block, regmap_async_write_start,
-
- TP_PROTO(struct device *dev, unsigned int reg, int count),
-
- TP_ARGS(dev, reg, count)
-);
-
-DEFINE_EVENT(regmap_async, regmap_async_io_complete,
-
- TP_PROTO(struct device *dev),
-
- TP_ARGS(dev)
-
-);
-
-DEFINE_EVENT(regmap_async, regmap_async_complete_start,
-
- TP_PROTO(struct device *dev),
-
- TP_ARGS(dev)
-
-);
-
-DEFINE_EVENT(regmap_async, regmap_async_complete_done,
-
- TP_PROTO(struct device *dev),
-
- TP_ARGS(dev)
-
-);
-
-TRACE_EVENT(regcache_drop_region,
-
- TP_PROTO(struct device *dev, unsigned int from,
- unsigned int to),
-
- TP_ARGS(dev, from, to),
-
- TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __field( unsigned int, from )
- __field( unsigned int, to )
- ),
-
- TP_fast_assign(
- __assign_str(name, dev_name(dev));
- __entry->from = from;
- __entry->to = to;
- ),
-
- TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from,
- (unsigned int)__entry->to)
-);
-
-#endif /* _TRACE_REGMAP_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 30fedaf..d57a575 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -147,7 +147,8 @@ TRACE_EVENT(sched_switch,
__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
- { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
+ { 128, "K" }, { 256, "W" }, { 512, "P" },
+ { 1024, "N" }) : "R",
__entry->prev_state & TASK_STATE_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index b9c1dc6..fd1a02c 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -179,27 +179,53 @@ DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
);
+/*
+ * First define the enums in the below macros to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+#define RPC_SHOW_SOCKET \
+ EM( SS_FREE, "FREE" ) \
+ EM( SS_UNCONNECTED, "UNCONNECTED" ) \
+ EM( SS_CONNECTING, "CONNECTING," ) \
+ EM( SS_CONNECTED, "CONNECTED," ) \
+ EMe(SS_DISCONNECTING, "DISCONNECTING" )
+
#define rpc_show_socket_state(state) \
- __print_symbolic(state, \
- { SS_FREE, "FREE" }, \
- { SS_UNCONNECTED, "UNCONNECTED" }, \
- { SS_CONNECTING, "CONNECTING," }, \
- { SS_CONNECTED, "CONNECTED," }, \
- { SS_DISCONNECTING, "DISCONNECTING" })
+ __print_symbolic(state, RPC_SHOW_SOCKET)
+
+RPC_SHOW_SOCKET
+
+#define RPC_SHOW_SOCK \
+ EM( TCP_ESTABLISHED, "ESTABLISHED" ) \
+ EM( TCP_SYN_SENT, "SYN_SENT" ) \
+ EM( TCP_SYN_RECV, "SYN_RECV" ) \
+ EM( TCP_FIN_WAIT1, "FIN_WAIT1" ) \
+ EM( TCP_FIN_WAIT2, "FIN_WAIT2" ) \
+ EM( TCP_TIME_WAIT, "TIME_WAIT" ) \
+ EM( TCP_CLOSE, "CLOSE" ) \
+ EM( TCP_CLOSE_WAIT, "CLOSE_WAIT" ) \
+ EM( TCP_LAST_ACK, "LAST_ACK" ) \
+ EM( TCP_LISTEN, "LISTEN" ) \
+ EMe( TCP_CLOSING, "CLOSING" )
#define rpc_show_sock_state(state) \
- __print_symbolic(state, \
- { TCP_ESTABLISHED, "ESTABLISHED" }, \
- { TCP_SYN_SENT, "SYN_SENT" }, \
- { TCP_SYN_RECV, "SYN_RECV" }, \
- { TCP_FIN_WAIT1, "FIN_WAIT1" }, \
- { TCP_FIN_WAIT2, "FIN_WAIT2" }, \
- { TCP_TIME_WAIT, "TIME_WAIT" }, \
- { TCP_CLOSE, "CLOSE" }, \
- { TCP_CLOSE_WAIT, "CLOSE_WAIT" }, \
- { TCP_LAST_ACK, "LAST_ACK" }, \
- { TCP_LISTEN, "LISTEN" }, \
- { TCP_CLOSING, "CLOSING" })
+ __print_symbolic(state, RPC_SHOW_SOCK)
+
+RPC_SHOW_SOCK
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
DECLARE_EVENT_CLASS(xs_socket_event,
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 04c3c6e..50fea66 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -6,7 +6,7 @@
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 0f4f95d..8b1f806 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -77,6 +77,64 @@ TRACE_EVENT(thermal_zone_trip,
__entry->trip_type)
);
+TRACE_EVENT(thermal_power_cpu_get_power,
+ TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
+ size_t load_len, u32 dynamic_power, u32 static_power),
+
+ TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power),
+
+ TP_STRUCT__entry(
+ __bitmask(cpumask, num_possible_cpus())
+ __field(unsigned long, freq )
+ __dynamic_array(u32, load, load_len)
+ __field(size_t, load_len )
+ __field(u32, dynamic_power )
+ __field(u32, static_power )
+ ),
+
+ TP_fast_assign(
+ __assign_bitmask(cpumask, cpumask_bits(cpus),
+ num_possible_cpus());
+ __entry->freq = freq;
+ memcpy(__get_dynamic_array(load), load,
+ load_len * sizeof(*load));
+ __entry->load_len = load_len;
+ __entry->dynamic_power = dynamic_power;
+ __entry->static_power = static_power;
+ ),
+
+ TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d",
+ __get_bitmask(cpumask), __entry->freq,
+ __print_array(__get_dynamic_array(load), __entry->load_len, 4),
+ __entry->dynamic_power, __entry->static_power)
+);
+
+TRACE_EVENT(thermal_power_cpu_limit,
+ TP_PROTO(const struct cpumask *cpus, unsigned int freq,
+ unsigned long cdev_state, u32 power),
+
+ TP_ARGS(cpus, freq, cdev_state, power),
+
+ TP_STRUCT__entry(
+ __bitmask(cpumask, num_possible_cpus())
+ __field(unsigned int, freq )
+ __field(unsigned long, cdev_state)
+ __field(u32, power )
+ ),
+
+ TP_fast_assign(
+ __assign_bitmask(cpumask, cpumask_bits(cpus),
+ num_possible_cpus());
+ __entry->freq = freq;
+ __entry->cdev_state = cdev_state;
+ __entry->power = power;
+ ),
+
+ TP_printk("cpus=%s freq=%u cdev_state=%lu power=%u",
+ __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
+ __entry->power)
+);
+
#endif /* _TRACE_THERMAL_H */
/* This part must be outside protection */
diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h
new file mode 100644
index 0000000..12e1321
--- /dev/null
+++ b/include/trace/events/thermal_power_allocator.h
@@ -0,0 +1,87 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal_power_allocator
+
+#if !defined(_TRACE_THERMAL_POWER_ALLOCATOR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_POWER_ALLOCATOR_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(thermal_power_allocator,
+ TP_PROTO(struct thermal_zone_device *tz, u32 *req_power,
+ u32 total_req_power, u32 *granted_power,
+ u32 total_granted_power, size_t num_actors,
+ u32 power_range, u32 max_allocatable_power,
+ unsigned long current_temp, s32 delta_temp),
+ TP_ARGS(tz, req_power, total_req_power, granted_power,
+ total_granted_power, num_actors, power_range,
+ max_allocatable_power, current_temp, delta_temp),
+ TP_STRUCT__entry(
+ __field(int, tz_id )
+ __dynamic_array(u32, req_power, num_actors )
+ __field(u32, total_req_power )
+ __dynamic_array(u32, granted_power, num_actors)
+ __field(u32, total_granted_power )
+ __field(size_t, num_actors )
+ __field(u32, power_range )
+ __field(u32, max_allocatable_power )
+ __field(unsigned long, current_temp )
+ __field(s32, delta_temp )
+ ),
+ TP_fast_assign(
+ __entry->tz_id = tz->id;
+ memcpy(__get_dynamic_array(req_power), req_power,
+ num_actors * sizeof(*req_power));
+ __entry->total_req_power = total_req_power;
+ memcpy(__get_dynamic_array(granted_power), granted_power,
+ num_actors * sizeof(*granted_power));
+ __entry->total_granted_power = total_granted_power;
+ __entry->num_actors = num_actors;
+ __entry->power_range = power_range;
+ __entry->max_allocatable_power = max_allocatable_power;
+ __entry->current_temp = current_temp;
+ __entry->delta_temp = delta_temp;
+ ),
+
+ TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%lu delta_temperature=%d",
+ __entry->tz_id,
+ __print_array(__get_dynamic_array(req_power),
+ __entry->num_actors, 4),
+ __entry->total_req_power,
+ __print_array(__get_dynamic_array(granted_power),
+ __entry->num_actors, 4),
+ __entry->total_granted_power, __entry->power_range,
+ __entry->max_allocatable_power, __entry->current_temp,
+ __entry->delta_temp)
+);
+
+TRACE_EVENT(thermal_power_allocator_pid,
+ TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral,
+ s64 p, s64 i, s64 d, s32 output),
+ TP_ARGS(tz, err, err_integral, p, i, d, output),
+ TP_STRUCT__entry(
+ __field(int, tz_id )
+ __field(s32, err )
+ __field(s32, err_integral)
+ __field(s64, p )
+ __field(s64, i )
+ __field(s64, d )
+ __field(s32, output )
+ ),
+ TP_fast_assign(
+ __entry->tz_id = tz->id;
+ __entry->err = err;
+ __entry->err_integral = err_integral;
+ __entry->p = p;
+ __entry->i = i;
+ __entry->d = d;
+ __entry->output = output;
+ ),
+
+ TP_printk("thermal_zone_id=%d err=%d err_integral=%d p=%lld i=%lld d=%lld output=%d",
+ __entry->tz_id, __entry->err, __entry->err_integral,
+ __entry->p, __entry->i, __entry->d, __entry->output)
+);
+#endif /* _TRACE_THERMAL_POWER_ALLOCATOR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
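Both new thermal events above record arrays whose length is only known at runtime, which is what the __dynamic_array()/__get_dynamic_array()/__print_array() trio is for: reserve load_len elements in the ring-buffer record, memcpy() the data in during TP_fast_assign(), and render it at read time with an explicit element size. A trimmed fragment mirroring the thermal_power_cpu_get_power fields (not a complete event):

	TP_STRUCT__entry(
		__dynamic_array(u32, load, load_len)
		__field(size_t, load_len)
	),

	TP_fast_assign(
		memcpy(__get_dynamic_array(load), load,
		       load_len * sizeof(*load));
		__entry->load_len = load_len;
	),

	/* the trailing 4 is the element size in bytes (sizeof(u32)) */
	TP_printk("load={%s}",
		__print_array(__get_dynamic_array(load), __entry->load_len, 4))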
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 68c2c20..073b9ac 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -43,15 +43,18 @@ DEFINE_EVENT(timer_class, timer_init,
*/
TRACE_EVENT(timer_start,
- TP_PROTO(struct timer_list *timer, unsigned long expires),
+ TP_PROTO(struct timer_list *timer,
+ unsigned long expires,
+ unsigned int flags),
- TP_ARGS(timer, expires),
+ TP_ARGS(timer, expires, flags),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
__field( unsigned long, now )
+ __field( unsigned int, flags )
),
TP_fast_assign(
@@ -59,11 +62,12 @@ TRACE_EVENT(timer_start,
__entry->function = timer->function;
__entry->expires = expires;
__entry->now = jiffies;
+ __entry->flags = flags;
),
- TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
__entry->timer, __entry->function, __entry->expires,
- (long)__entry->expires - __entry->now)
+ (long)__entry->expires - __entry->now, __entry->flags)
);
/**
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 13391d2..4250f36 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -7,17 +7,39 @@
#include <linux/mm_types.h>
#include <linux/tracepoint.h>
-#define TLB_FLUSH_REASON \
- { TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" }, \
- { TLB_REMOTE_SHOOTDOWN, "remote shootdown" }, \
- { TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \
- { TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" }
-
-TRACE_EVENT(tlb_flush,
+#define TLB_FLUSH_REASON \
+ EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" ) \
+ EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" ) \
+ EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" ) \
+ EMe( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" )
+
+/*
+ * First define the enums in TLB_FLUSH_REASON to be exported to userspace
+ * via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) TRACE_DEFINE_ENUM(a);
+#define EMe(a,b) TRACE_DEFINE_ENUM(a);
+
+TLB_FLUSH_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) { a, b },
+#define EMe(a,b) { a, b }
+
+TRACE_EVENT_CONDITION(tlb_flush,
TP_PROTO(int reason, unsigned long pages),
TP_ARGS(reason, pages),
+ TP_CONDITION(cpu_online(smp_processor_id())),
+
TP_STRUCT__entry(
__field( int, reason)
__field(unsigned long, pages)
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index b9bb1f2..89d0497 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -6,33 +6,58 @@
#include <linux/tracepoint.h>
-#define show_type(type) \
- __print_symbolic(type, \
- { V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" }, \
- { V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" }, \
- { V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" }, \
- { V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" }, \
- { V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" }, \
- { V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" }, \
- { V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" }, \
- { V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" },\
- { V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" },\
- { V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" }, \
- { V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" }, \
- { V4L2_BUF_TYPE_PRIVATE, "PRIVATE" })
+/* Enums require being exported to userspace, for user tool parsing */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+#define show_type(type) \
+ __print_symbolic(type, SHOW_TYPE)
+
+#define SHOW_TYPE \
+ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" ) \
+ EM( V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" ) \
+ EM( V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" ) \
+ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \
+ EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \
+ EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" )
+
+SHOW_TYPE
#define show_field(field) \
- __print_symbolic(field, \
- { V4L2_FIELD_ANY, "ANY" }, \
- { V4L2_FIELD_NONE, "NONE" }, \
- { V4L2_FIELD_TOP, "TOP" }, \
- { V4L2_FIELD_BOTTOM, "BOTTOM" }, \
- { V4L2_FIELD_INTERLACED, "INTERLACED" }, \
- { V4L2_FIELD_SEQ_TB, "SEQ_TB" }, \
- { V4L2_FIELD_SEQ_BT, "SEQ_BT" }, \
- { V4L2_FIELD_ALTERNATE, "ALTERNATE" }, \
- { V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" }, \
- { V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" })
+ __print_symbolic(field, SHOW_FIELD)
+
+#define SHOW_FIELD \
+ EM( V4L2_FIELD_ANY, "ANY" ) \
+ EM( V4L2_FIELD_NONE, "NONE" ) \
+ EM( V4L2_FIELD_TOP, "TOP" ) \
+ EM( V4L2_FIELD_BOTTOM, "BOTTOM" ) \
+ EM( V4L2_FIELD_INTERLACED, "INTERLACED" ) \
+ EM( V4L2_FIELD_SEQ_TB, "SEQ_TB" ) \
+ EM( V4L2_FIELD_SEQ_BT, "SEQ_BT" ) \
+ EM( V4L2_FIELD_ALTERNATE, "ALTERNATE" ) \
+ EM( V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" ) \
+ EMe( V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" )
+
+SHOW_FIELD
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+/* V4L2_TC_TYPE_* are macros, not enums, so they do not need processing */
#define show_timecode_type(type) \
__print_symbolic(type, \
@@ -58,7 +83,8 @@
{ V4L2_BUF_FLAG_TIMESTAMP_MASK, "TIMESTAMP_MASK" }, \
{ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN, "TIMESTAMP_UNKNOWN" }, \
{ V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \
- { V4L2_BUF_FLAG_TIMESTAMP_COPY, "TIMESTAMP_COPY" })
+ { V4L2_BUF_FLAG_TIMESTAMP_COPY, "TIMESTAMP_COPY" }, \
+ { V4L2_BUF_FLAG_LAST, "LAST" })
#define show_timecode_flags(flags) \
__print_flags(flags, "|", \
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 69590b6..f66476b 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -336,18 +336,18 @@ TRACE_EVENT(mm_vmscan_writepage,
TP_ARGS(page, reclaim_flags),
TP_STRUCT__entry(
- __field(struct page *, page)
+ __field(unsigned long, pfn)
__field(int, reclaim_flags)
),
TP_fast_assign(
- __entry->page = page;
+ __entry->pfn = page_to_pfn(page);
__entry->reclaim_flags = reclaim_flags;
),
TP_printk("page=%p pfn=%lu flags=%s",
- __entry->page,
- page_to_pfn(__entry->page),
+ pfn_to_page(__entry->pfn),
+ __entry->pfn,
show_reclaim_flags(__entry->reclaim_flags))
);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index cee02d6..a7aa607 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -18,18 +18,37 @@
{I_FREEING, "I_FREEING"}, \
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
+ {I_DIRTY_TIME, "I_DIRTY_TIME"}, \
+ {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \
{I_REFERENCED, "I_REFERENCED"} \
)
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a,b) TRACE_DEFINE_ENUM(a);
+#define EMe(a,b) TRACE_DEFINE_ENUM(a);
+
#define WB_WORK_REASON \
- {WB_REASON_BACKGROUND, "background"}, \
- {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
- {WB_REASON_SYNC, "sync"}, \
- {WB_REASON_PERIODIC, "periodic"}, \
- {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
- {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
- {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
- {WB_REASON_FORKER_THREAD, "forker_thread"}
+ EM( WB_REASON_BACKGROUND, "background") \
+ EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \
+ EM( WB_REASON_SYNC, "sync") \
+ EM( WB_REASON_PERIODIC, "periodic") \
+ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
+ EM( WB_REASON_FREE_MORE_MEM, "free_more_memory") \
+ EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \
+ EMe(WB_REASON_FORKER_THREAD, "forker_thread")
+
+WB_WORK_REASON
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a,b) { a, b },
+#define EMe(a,b) { a, b }
struct wb_writeback_work;
@@ -47,7 +66,7 @@ TRACE_EVENT(writeback_dirty_page,
TP_fast_assign(
strncpy(__entry->name,
- mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
+ mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
@@ -68,26 +87,36 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
TP_STRUCT__entry (
__array(char, name, 32)
__field(unsigned long, ino)
+ __field(unsigned long, state)
__field(unsigned long, flags)
),
TP_fast_assign(
- struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
strncpy(__entry->name,
bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
__entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
__entry->flags = flags;
),
- TP_printk("bdi %s: ino=%lu flags=%s",
+ TP_printk("bdi %s: ino=%lu state=%s flags=%s",
__entry->name,
__entry->ino,
+ show_inode_state(__entry->state),
show_inode_state(__entry->flags)
)
);
+DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,
+
+ TP_PROTO(struct inode *inode, int flags),
+
+ TP_ARGS(inode, flags)
+);
+
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
TP_PROTO(struct inode *inode, int flags),
@@ -116,7 +145,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_fast_assign(
strncpy(__entry->name,
- dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
),
@@ -156,10 +185,8 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, reason)
),
TP_fast_assign(
- struct device *dev = bdi->dev;
- if (!dev)
- dev = default_backing_dev_info.dev;
- strncpy(__entry->name, dev_name(dev), 32);
+ strncpy(__entry->name,
+ bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -223,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \
DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
-DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -334,7 +360,7 @@ TRACE_EVENT(global_dirty_state,
__entry->nr_written = global_page_state(NR_WRITTEN);
__entry->background_thresh = background_thresh;
__entry->dirty_thresh = dirty_thresh;
- __entry->dirty_limit = global_dirty_limit;
+ __entry->dirty_limit = global_wb_domain.dirty_limit;
),
TP_printk("dirty=%lu writeback=%lu unstable=%lu "
@@ -373,13 +399,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TP_fast_assign(
strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
- __entry->write_bw = KBps(bdi->write_bandwidth);
- __entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
+ __entry->write_bw = KBps(bdi->wb.write_bandwidth);
+ __entry->avg_write_bw = KBps(bdi->wb.avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
- __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
+ __entry->dirty_ratelimit = KBps(bdi->wb.dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->balanced_dirty_ratelimit =
- KBps(bdi->balanced_dirty_ratelimit);
+ KBps(bdi->wb.balanced_dirty_ratelimit);
),
TP_printk("bdi %s: "
@@ -436,8 +462,9 @@ TRACE_EVENT(balance_dirty_pages,
unsigned long freerun = (thresh + bg_thresh) / 2;
strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
- __entry->limit = global_dirty_limit;
- __entry->setpoint = (global_dirty_limit + freerun) / 2;
+ __entry->limit = global_wb_domain.dirty_limit;
+ __entry->setpoint = (global_wb_domain.dirty_limit +
+ freerun) / 2;
__entry->dirty = dirty;
__entry->bdi_setpoint = __entry->setpoint *
bdi_thresh / (thresh + 1);
@@ -598,6 +625,52 @@ DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
TP_ARGS(inode, wbc, nr_to_write)
);
+DECLARE_EVENT_CLASS(writeback_lazytime_template,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field(unsigned long, ino )
+ __field(unsigned long, state )
+ __field( __u16, mode )
+ __field(unsigned long, dirtied_when )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->mode = inode->i_mode;
+ __entry->dirtied_when = inode->dirtied_when;
+ ),
+
+ TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino, __entry->dirtied_when,
+ show_inode_state(__entry->state), __entry->mode)
+);
+
+DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
#endif /* _TRACE_WRITEBACK_H */
/* This part must be outside protection */
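The EM()/EMe() pattern introduced above defines the WB_WORK_REASON table once and expands it twice: first as TRACE_DEFINE_ENUM() statements so the enum values can be resolved for user space, then as { value, "string" } pairs for printing. A hedged sketch of how such a table is typically consumed by an event (the event below is invented for illustration and is not part of this patch):

TRACE_EVENT(wb_reason_demo,
	TP_PROTO(int reason),
	TP_ARGS(reason),
	TP_STRUCT__entry(
		__field(int, reason)
	),
	TP_fast_assign(
		__entry->reason = reason;
	),
	/* WB_WORK_REASON now expands to { value, "string" } pairs, which is
	 * exactly the table shape __print_symbolic() expects. */
	TP_printk("reason=%s",
		  __print_symbolic(__entry->reason, WB_WORK_REASON))
);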
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index d06b6da..bce990f 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -224,7 +224,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
TP_printk("pmdp %p", __entry->pmdp)
);
-#if PAGETABLE_LEVELS >= 4
+#if CONFIG_PGTABLE_LEVELS >= 4
TRACE_EVENT(xen_mmu_set_pud,
TP_PROTO(pud_t *pudp, pud_t pudval),
diff --git a/include/trace/perf.h b/include/trace/perf.h
new file mode 100644
index 0000000..1b5443c
--- /dev/null
+++ b/include/trace/perf.h
@@ -0,0 +1,350 @@
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct trace_event_call event_<call>;
+ *
+ * static void trace_event_raw_event_<call>(void *__data, proto)
+ * {
+ * struct trace_event_file *trace_file = __data;
+ * struct trace_event_call *event_call = trace_file->event_call;
+ * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
+ * unsigned long eflags = trace_file->flags;
+ * enum event_trigger_type __tt = ETT_NONE;
+ * struct ring_buffer_event *event;
+ * struct trace_event_raw_<call> *entry; <-- defined in stage 1
+ * struct ring_buffer *buffer;
+ * unsigned long irq_flags;
+ * int __data_size;
+ * int pc;
+ *
+ * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ * event_triggers_call(trace_file, NULL);
+ * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ * return;
+ * }
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
+ *
+ * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+ * event_<call>->event.type,
+ * sizeof(*entry) + __data_size,
+ * irq_flags, pc);
+ * if (!event)
+ * return;
+ * entry = ring_buffer_event_data(event);
+ *
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
+ *
+ * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ * __tt = event_triggers_call(trace_file, entry);
+ *
+ * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ * &trace_file->flags))
+ * ring_buffer_discard_commit(buffer, event);
+ * else if (!filter_check_discard(trace_file, entry, buffer, event))
+ * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ * if (__tt)
+ * event_triggers_post_call(trace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ * .trace = trace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct trace_event_class __used event_class_<template> = {
+ * .system = "<system>",
+ * .define_fields = trace_event_define_fields_<call>,
+ * .fields = LIST_HEAD_INIT(event_class_##call.fields),
+ * .raw_init = trace_event_raw_init,
+ * .probe = trace_event_raw_event_##call,
+ * .reg = trace_event_reg,
+ * };
+ *
+ * static struct trace_event_call event_<call> = {
+ * .class = event_class_<template>,
+ * {
+ * .tp = &__tracepoint_<call>,
+ * },
+ * .event = &ftrace_event_type_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * .flags = TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // it's only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct trace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto) \
+ static notrace void \
+ perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call) \
+ .perf_probe = perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_buffer fbuffer; \
+ struct trace_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_probe is compiled out; it is only here as a build-time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(trace_event_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __print_array
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static char print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata event_class_##call = { \
+ .system = TRACE_SYSTEM_STRING, \
+ .define_fields = trace_event_define_fields_##call, \
+ .fields = LIST_HEAD_INIT(event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = trace_event_raw_event_##call, \
+ .reg = trace_event_reg, \
+ _TRACE_PERF_INIT(call) \
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##template, \
+ .print_fmt = print_fmt_##template, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+ \
+static char print_fmt_##call[] = print; \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##call, \
+ .print_fmt = print_fmt_##call, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TRACE_SYSTEM_VAR
+
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+ ((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __perf_addr
+#define __perf_addr(a) (__addr = (a))
+
+#undef __perf_count
+#define __perf_count(c) (__count = (c))
+
+#undef __perf_task
+#define __perf_task(t) (__task = (t))
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+ struct trace_event_call *event_call = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_raw_##call *entry; \
+ struct pt_regs *__regs; \
+ u64 __addr = 0, __count = 1; \
+ struct task_struct *__task = NULL; \
+ struct hlist_head *head; \
+ int __entry_size; \
+ int __data_size; \
+ int rctx; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ head = this_cpu_ptr(event_call->perf_events); \
+ if (__builtin_constant_p(!__task) && !__task && \
+ hlist_empty(head)) \
+ return; \
+ \
+ __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+ sizeof(u64)); \
+ __entry_size -= sizeof(u32); \
+ \
+ entry = perf_trace_buf_prepare(__entry_size, \
+ event_call->event.type, &__regs, &rctx); \
+ if (!entry) \
+ return; \
+ \
+ perf_fetch_caller_regs(__regs); \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+ __count, __regs, head, __task); \
+}
+
+/*
+ * This part is compiled out; it is only here as a build-time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void perf_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(perf_trace_##template); \
+}
+
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_PERF_EVENTS */
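For orientation, the Stage-4 macros in the new perf.h only expand when a trace header is pulled back in via TRACE_INCLUDE(TRACE_INCLUDE_FILE). A minimal, hypothetical header of the shape these stages expect (the system and event names are invented for illustration):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_submit,
	TP_PROTO(unsigned long id, int status),
	TP_ARGS(id, status),
	TP_STRUCT__entry(
		__field(unsigned long, id)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->id = id;
		__entry->status = status;
	),
	TP_printk("id=%lu status=%d", __entry->id, __entry->status)
);

#endif /* _TRACE_DEMO_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

Each TRACE_EVENT() in such a header is what trace_event_raw_event_<call>() and perf_trace_<call>() above are generated for.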
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 9674145..7434f0f 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -3,7 +3,7 @@
#include <linux/tracepoint.h>
#include <linux/unistd.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
@@ -29,8 +29,8 @@ struct syscall_metadata {
const char **args;
struct list_head enter_fields;
- struct ftrace_event_call *enter_event;
- struct ftrace_event_call *exit_event;
+ struct trace_event_call *enter_event;
+ struct trace_event_call *exit_event;
};
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
diff --git a/include/trace/ftrace.h b/include/trace/trace_events.h
index 139b506..43be3b0 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/trace_events.h
@@ -3,7 +3,7 @@
*
* Override the macros in <trace/trace_events.h> to include the following:
*
- * struct ftrace_raw_<call> {
+ * struct trace_event_raw_<call> {
* struct trace_entry ent;
* <type> <item>;
* <type2> <item2>[<len>];
@@ -16,7 +16,35 @@
* in the structure.
*/
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
+
+#ifndef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR TRACE_SYSTEM
+#endif
+
+#define __app__(x, y) str__##x##y
+#define __app(x, y) __app__(x, y)
+
+#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
+
+#define TRACE_MAKE_SYSTEM_STR() \
+ static const char TRACE_SYSTEM_STRING[] = \
+ __stringify(TRACE_SYSTEM)
+
+TRACE_MAKE_SYSTEM_STR();
+
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a) \
+ static struct trace_enum_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .enum_string = #a, \
+ .enum_value = a \
+ }; \
+ static struct trace_enum_map __used \
+ __attribute__((section("_ftrace_enum_map"))) \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
/*
* DECLARE_EVENT_CLASS can be used to add a generic function
@@ -67,17 +95,17 @@
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
- struct ftrace_raw_##name { \
+ struct trace_event_raw_##name { \
struct trace_entry ent; \
tstruct \
char __data[0]; \
}; \
\
- static struct ftrace_event_class event_class_##name;
+ static struct trace_event_class event_class_##name;
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((__aligned__(4))) event_##name
#undef DEFINE_EVENT_FN
@@ -105,13 +133,12 @@
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
/*
* Stage 2 of the trace events.
*
* Include the following:
*
- * struct ftrace_data_offsets_<call> {
+ * struct trace_event_data_offsets_<call> {
* u32 <item1>;
* u32 <item2>;
* [...]
@@ -122,6 +149,9 @@
* The size of an array is also encoded, in the higher 16 bits of <item>.
*/
+#undef TRACE_DEFINE_ENUM
+#define TRACE_DEFINE_ENUM(a)
+
#undef __field
#define __field(type, item)
@@ -148,7 +178,7 @@
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- struct ftrace_data_offsets_##call { \
+ struct trace_event_data_offsets_##call { \
tstruct; \
};
@@ -173,10 +203,10 @@
* Override the macros in <trace/trace_events.h> to include the following:
*
* enum print_line_t
- * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
* {
* struct trace_seq *s = &iter->seq;
- * struct ftrace_raw_<call> *field; <-- defined in stage 1
+ * struct trace_event_raw_<call> *field; <-- defined in stage 1
* struct trace_entry *entry;
* struct trace_seq *p = &iter->tmp_seq;
* int ret;
@@ -228,7 +258,7 @@
void *__bitmask = __get_dynamic_array(field); \
unsigned int __bitmask_size; \
__bitmask_size = __get_dynamic_array_len(field); \
- ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
})
#undef __print_flags
@@ -236,7 +266,7 @@
({ \
static const struct trace_print_flags __flags[] = \
{ flag_array, { -1, NULL }}; \
- ftrace_print_flags_seq(p, delim, flag, __flags); \
+ trace_print_flags_seq(p, delim, flag, __flags); \
})
#undef __print_symbolic
@@ -244,7 +274,7 @@
({ \
static const struct trace_print_flags symbols[] = \
{ symbol_array, { -1, NULL }}; \
- ftrace_print_symbols_seq(p, value, symbols); \
+ trace_print_symbols_seq(p, value, symbols); \
})
#undef __print_symbolic_u64
@@ -253,7 +283,7 @@
({ \
static const struct trace_print_flags_u64 symbols[] = \
{ symbol_array, { -1, NULL } }; \
- ftrace_print_symbols_seq_u64(p, value, symbols); \
+ trace_print_symbols_seq_u64(p, value, symbols); \
})
#else
#define __print_symbolic_u64(value, symbol_array...) \
@@ -261,22 +291,30 @@
#endif
#undef __print_hex
-#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
+
+#undef __print_array
+#define __print_array(array, count, el_size) \
+ ({ \
+ BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
+ el_size != 4 && el_size != 8); \
+ trace_print_array_seq(p, array, count, el_size); \
+ })
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
- struct trace_event *trace_event) \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
{ \
struct trace_seq *s = &iter->seq; \
struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
- struct ftrace_raw_##call *field; \
+ struct trace_event_raw_##call *field; \
int ret; \
\
field = (typeof(field))iter->ent; \
\
- ret = ftrace_raw_output_prep(iter, trace_event); \
+ ret = trace_raw_output_prep(iter, trace_event); \
if (ret != TRACE_TYPE_HANDLED) \
return ret; \
\
@@ -284,17 +322,17 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
\
return trace_handle_return(s); \
} \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
- .trace = ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+ .trace = trace_raw_output_##call, \
};
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
struct trace_event *event) \
{ \
- struct ftrace_raw_##template *field; \
+ struct trace_event_raw_##template *field; \
struct trace_entry *entry; \
struct trace_seq *p = &iter->tmp_seq; \
\
@@ -308,10 +346,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
field = (typeof(field))entry; \
\
trace_seq_init(p); \
- return ftrace_output_call(iter, #call, print); \
+ return trace_output_call(iter, #call, print); \
} \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
- .trace = ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+ .trace = trace_raw_output_##call, \
};
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -369,9 +407,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace __init \
-ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+trace_event_define_fields_##call(struct trace_event_call *event_call) \
{ \
- struct ftrace_raw_##call field; \
+ struct trace_event_raw_##call field; \
int ret; \
\
tstruct; \
@@ -447,12 +485,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static inline notrace int ftrace_get_offsets_##call( \
- struct ftrace_data_offsets_##call *__data_offsets, proto) \
+static inline notrace int trace_event_get_offsets_##call( \
+ struct trace_event_data_offsets_##call *__data_offsets, proto) \
{ \
int __data_size = 0; \
int __maybe_unused __item_length; \
- struct ftrace_raw_##call __maybe_unused *entry; \
+ struct trace_event_raw_##call __maybe_unused *entry; \
\
tstruct; \
\
@@ -468,351 +506,3 @@ static inline notrace int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-/*
- * Stage 4 of the trace events.
- *
- * Override the macros in <trace/trace_events.h> to include the following:
- *
- * For those macros defined with TRACE_EVENT:
- *
- * static struct ftrace_event_call event_<call>;
- *
- * static void ftrace_raw_event_<call>(void *__data, proto)
- * {
- * struct ftrace_event_file *ftrace_file = __data;
- * struct ftrace_event_call *event_call = ftrace_file->event_call;
- * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- * unsigned long eflags = ftrace_file->flags;
- * enum event_trigger_type __tt = ETT_NONE;
- * struct ring_buffer_event *event;
- * struct ftrace_raw_<call> *entry; <-- defined in stage 1
- * struct ring_buffer *buffer;
- * unsigned long irq_flags;
- * int __data_size;
- * int pc;
- *
- * if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- * if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
- * event_triggers_call(ftrace_file, NULL);
- * if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
- * return;
- * }
- *
- * local_save_flags(irq_flags);
- * pc = preempt_count();
- *
- * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
- * event_<call>->event.type,
- * sizeof(*entry) + __data_size,
- * irq_flags, pc);
- * if (!event)
- * return;
- * entry = ring_buffer_event_data(event);
- *
- * { <assign>; } <-- Here we assign the entries by the __field and
- * __array macros.
- *
- * if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
- * __tt = event_triggers_call(ftrace_file, entry);
- *
- * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- * &ftrace_file->flags))
- * ring_buffer_discard_commit(buffer, event);
- * else if (!filter_check_discard(ftrace_file, entry, buffer, event))
- * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
- *
- * if (__tt)
- * event_triggers_post_call(ftrace_file, __tt);
- * }
- *
- * static struct trace_event ftrace_event_type_<call> = {
- * .trace = ftrace_raw_output_<call>, <-- stage 2
- * };
- *
- * static const char print_fmt_<call>[] = <TP_printk>;
- *
- * static struct ftrace_event_class __used event_class_<template> = {
- * .system = "<system>",
- * .define_fields = ftrace_define_fields_<call>,
- * .fields = LIST_HEAD_INIT(event_class_##call.fields),
- * .raw_init = trace_event_raw_init,
- * .probe = ftrace_raw_event_##call,
- * .reg = ftrace_event_reg,
- * };
- *
- * static struct ftrace_event_call event_<call> = {
- * .class = event_class_<template>,
- * {
- * .tp = &__tracepoint_<call>,
- * },
- * .event = &ftrace_event_type_<call>,
- * .print_fmt = print_fmt_<call>,
- * .flags = TRACE_EVENT_FL_TRACEPOINT,
- * };
- * // its only safe to use pointers when doing linker tricks to
- * // create an array.
- * static struct ftrace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
- *
- */
-
-#ifdef CONFIG_PERF_EVENTS
-
-#define _TRACE_PERF_PROTO(call, proto) \
- static notrace void \
- perf_trace_##call(void *__data, proto);
-
-#define _TRACE_PERF_INIT(call) \
- .perf_probe = perf_trace_##call,
-
-#else
-#define _TRACE_PERF_PROTO(call, proto)
-#define _TRACE_PERF_INIT(call)
-#endif /* CONFIG_PERF_EVENTS */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
- memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- \
-static notrace void \
-ftrace_raw_event_##call(void *__data, proto) \
-{ \
- struct ftrace_event_file *ftrace_file = __data; \
- struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- struct ftrace_event_buffer fbuffer; \
- struct ftrace_raw_##call *entry; \
- int __data_size; \
- \
- if (ftrace_trigger_soft_disabled(ftrace_file)) \
- return; \
- \
- __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
- \
- entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
- sizeof(*entry) + __data_size); \
- \
- if (!entry) \
- return; \
- \
- tstruct \
- \
- { assign; } \
- \
- ftrace_event_buffer_commit(&fbuffer); \
-}
-/*
- * The ftrace_test_probe is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the ftrace probe will
- * fail to compile unless it too is updated.
- */
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void ftrace_test_probe_##call(void) \
-{ \
- check_trace_callback_type_##call(ftrace_raw_event_##template); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static const char print_fmt_##call[] = print; \
-static struct ftrace_event_class __used __refdata event_class_##call = { \
- .system = __stringify(TRACE_SYSTEM), \
- .define_fields = ftrace_define_fields_##call, \
- .fields = LIST_HEAD_INIT(event_class_##call.fields),\
- .raw_init = trace_event_raw_init, \
- .probe = ftrace_raw_event_##call, \
- .reg = ftrace_event_reg, \
- _TRACE_PERF_INIT(call) \
-};
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
- \
-static struct ftrace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &ftrace_event_type_funcs_##template, \
- .print_fmt = print_fmt_##template, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct ftrace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
- \
-static const char print_fmt_##call[] = print; \
- \
-static struct ftrace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &ftrace_event_type_funcs_##call, \
- .print_fmt = print_fmt_##call, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct ftrace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-
-#ifdef CONFIG_PERF_EVENTS
-
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) (char *)__get_dynamic_array(field)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __perf_addr
-#define __perf_addr(a) (__addr = (a))
-
-#undef __perf_count
-#define __perf_count(c) (__count = (c))
-
-#undef __perf_task
-#define __perf_task(t) (__task = (t))
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static notrace void \
-perf_trace_##call(void *__data, proto) \
-{ \
- struct ftrace_event_call *event_call = __data; \
- struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- struct ftrace_raw_##call *entry; \
- struct pt_regs __regs; \
- u64 __addr = 0, __count = 1; \
- struct task_struct *__task = NULL; \
- struct hlist_head *head; \
- int __entry_size; \
- int __data_size; \
- int rctx; \
- \
- __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
- \
- head = this_cpu_ptr(event_call->perf_events); \
- if (__builtin_constant_p(!__task) && !__task && \
- hlist_empty(head)) \
- return; \
- \
- __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
- sizeof(u64)); \
- __entry_size -= sizeof(u32); \
- \
- perf_fetch_caller_regs(&__regs); \
- entry = perf_trace_buf_prepare(__entry_size, \
- event_call->event.type, &__regs, &rctx); \
- if (!entry) \
- return; \
- \
- tstruct \
- \
- { assign; } \
- \
- perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, &__regs, head, __task); \
-}
-
-/*
- * This part is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the
- * perf probe will fail to compile unless it too is updated.
- */
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void perf_test_probe_##call(void) \
-{ \
- check_trace_callback_type_##call(perf_trace_##template); \
-}
-
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_PERF_EVENTS */
-
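Among the renames, this diff also shows the __print_array() helper in the stage-3 output macros, which only accepts element sizes of 1, 2, 4 or 8 bytes, enforced by the BUILD_BUG_ON. A hedged example of its intended use, with an invented event and a fixed-size __array field:

TRACE_EVENT(demo_readings,
	TP_PROTO(const u32 *vals),
	TP_ARGS(vals),
	TP_STRUCT__entry(
		__array(u32, vals, 4)
	),
	TP_fast_assign(
		memcpy(__entry->vals, vals, sizeof(__entry->vals));
	),
	/* el_size must be 1, 2, 4 or 8; each element here is sizeof(u32) == 4 */
	TP_printk("vals=%s",
		  __print_array(__entry->vals, 4, sizeof(u32)))
);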
diff --git a/include/uapi/asm-generic/errno.h b/include/uapi/asm-generic/errno.h
index 1e1ea6e..88e0914c 100644
--- a/include/uapi/asm-generic/errno.h
+++ b/include/uapi/asm-generic/errno.h
@@ -6,7 +6,16 @@
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
-#define ENOSYS 38 /* Function not implemented */
+
+/*
+ * This error code is special: arch syscall entry code will return
+ * -ENOSYS if users try to call a syscall that doesn't exist. To keep
+ * failures of syscalls that really do exist distinguishable from
+ * failures due to attempts to use a nonexistent syscall, syscall
+ * implementations should refrain from returning -ENOSYS.
+ */
+#define ENOSYS 38 /* Invalid system call number */
+
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
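The expanded comment above reserves -ENOSYS for "this syscall number does not exist"; an implemented syscall should report an unsupported sub-command or flag with a different error. A hedged sketch of that convention (the syscall, its command and its flag mask are hypothetical):

#include <linux/syscalls.h>

#define DEMO_OP_QUERY		0	/* hypothetical sub-command */
#define DEMO_OP_VALID_FLAGS	0x1	/* hypothetical flag mask */

SYSCALL_DEFINE2(demo_op, unsigned int, cmd, unsigned int, flags)
{
	if (flags & ~DEMO_OP_VALID_FLAGS)
		return -EINVAL;

	switch (cmd) {
	case DEMO_OP_QUERY:
		return 0;
	default:
		return -EINVAL;	/* unknown sub-command, not -ENOSYS */
	}
}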
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
new file mode 100644
index 0000000..b6fce90
--- /dev/null
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -0,0 +1,643 @@
+/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef __AMDGPU_DRM_H__
+#define __AMDGPU_DRM_H__
+
+#include <drm/drm.h>
+
+#define DRM_AMDGPU_GEM_CREATE 0x00
+#define DRM_AMDGPU_GEM_MMAP 0x01
+#define DRM_AMDGPU_CTX 0x02
+#define DRM_AMDGPU_BO_LIST 0x03
+#define DRM_AMDGPU_CS 0x04
+#define DRM_AMDGPU_INFO 0x05
+#define DRM_AMDGPU_GEM_METADATA 0x06
+#define DRM_AMDGPU_GEM_WAIT_IDLE 0x07
+#define DRM_AMDGPU_GEM_VA 0x08
+#define DRM_AMDGPU_WAIT_CS 0x09
+#define DRM_AMDGPU_GEM_OP 0x10
+#define DRM_AMDGPU_GEM_USERPTR 0x11
+
+#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
+#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
+#define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
+#define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
+#define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
+#define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
+#define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
+#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
+#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
+#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
+#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
+#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+
+#define AMDGPU_GEM_DOMAIN_CPU 0x1
+#define AMDGPU_GEM_DOMAIN_GTT 0x2
+#define AMDGPU_GEM_DOMAIN_VRAM 0x4
+#define AMDGPU_GEM_DOMAIN_GDS 0x8
+#define AMDGPU_GEM_DOMAIN_GWS 0x10
+#define AMDGPU_GEM_DOMAIN_OA 0x20
+
+/* Flag that CPU access will be required for the case of VRAM domain */
+#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
+/* Flag that CPU access will not work; this VRAM domain is invisible */
+#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
+/* Flag that USWC attributes should be used for GTT */
+#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
+
+struct drm_amdgpu_gem_create_in {
+ /** the requested memory size */
+ uint64_t bo_size;
+ /** physical start_addr alignment in bytes for some HW requirements */
+ uint64_t alignment;
+ /** the requested memory domains */
+ uint64_t domains;
+ /** allocation flags */
+ uint64_t domain_flags;
+};
+
+struct drm_amdgpu_gem_create_out {
+ /** returned GEM object handle */
+ uint32_t handle;
+ uint32_t _pad;
+};
+
+union drm_amdgpu_gem_create {
+ struct drm_amdgpu_gem_create_in in;
+ struct drm_amdgpu_gem_create_out out;
+};
+
+/** Opcode to create new residency list. */
+#define AMDGPU_BO_LIST_OP_CREATE 0
+/** Opcode to destroy previously created residency list */
+#define AMDGPU_BO_LIST_OP_DESTROY 1
+/** Opcode to update resource information in the list */
+#define AMDGPU_BO_LIST_OP_UPDATE 2
+
+struct drm_amdgpu_bo_list_in {
+ /** Type of operation */
+ uint32_t operation;
+ /** Handle of list or 0 if we want to create one */
+ uint32_t list_handle;
+ /** Number of BOs in list */
+ uint32_t bo_number;
+ /** Size of each element describing BO */
+ uint32_t bo_info_size;
+ /** Pointer to array describing BOs */
+ uint64_t bo_info_ptr;
+};
+
+struct drm_amdgpu_bo_list_entry {
+ /** Handle of BO */
+ uint32_t bo_handle;
+ /** New (if specified) BO priority to be used during migration */
+ uint32_t bo_priority;
+};
+
+struct drm_amdgpu_bo_list_out {
+ /** Handle of resource list */
+ uint32_t list_handle;
+ uint32_t _pad;
+};
+
+union drm_amdgpu_bo_list {
+ struct drm_amdgpu_bo_list_in in;
+ struct drm_amdgpu_bo_list_out out;
+};
+
+/* context related */
+#define AMDGPU_CTX_OP_ALLOC_CTX 1
+#define AMDGPU_CTX_OP_FREE_CTX 2
+#define AMDGPU_CTX_OP_QUERY_STATE 3
+
+/* GPU reset status */
+#define AMDGPU_CTX_NO_RESET 0
+/* this context caused it */
+#define AMDGPU_CTX_GUILTY_RESET 1
+/* some other context caused it */
+#define AMDGPU_CTX_INNOCENT_RESET 2
+/* unknown cause */
+#define AMDGPU_CTX_UNKNOWN_RESET 3
+
+struct drm_amdgpu_ctx_in {
+ /** AMDGPU_CTX_OP_* */
+ uint32_t op;
+ /** For future use, no flags defined so far */
+ uint32_t flags;
+ uint32_t ctx_id;
+ uint32_t _pad;
+};
+
+union drm_amdgpu_ctx_out {
+ struct {
+ uint32_t ctx_id;
+ uint32_t _pad;
+ } alloc;
+
+ struct {
+ /** For future use, no flags defined so far */
+ uint64_t flags;
+ /** Number of resets caused by this context so far. */
+ uint32_t hangs;
+ /** Reset status since the last call of the ioctl. */
+ uint32_t reset_status;
+ } state;
+};
+
+union drm_amdgpu_ctx {
+ struct drm_amdgpu_ctx_in in;
+ union drm_amdgpu_ctx_out out;
+};
+
+/*
+ * This is not a reliable API and you should expect it to fail for any
+ * number of reasons and have a fallback path that does not use userptr to
+ * perform any operation.
+ */
+#define AMDGPU_GEM_USERPTR_READONLY (1 << 0)
+#define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1)
+#define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2)
+#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
+
+struct drm_amdgpu_gem_userptr {
+ uint64_t addr;
+ uint64_t size;
+ /* AMDGPU_GEM_USERPTR_* */
+ uint32_t flags;
+ /* Resulting GEM handle */
+ uint32_t handle;
+};
+
+/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
+#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
+#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
+#define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
+#define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
+#define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
+#define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
+#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
+#define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
+#define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
+#define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
+#define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
+#define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
+#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
+#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
+#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
+#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
+
+#define AMDGPU_TILING_SET(field, value) \
+ (((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
+#define AMDGPU_TILING_GET(value, field) \
+ (((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
+
+#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
+#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
+
+/** The same structure is shared for input/output */
+struct drm_amdgpu_gem_metadata {
+ /** GEM Object handle */
+ uint32_t handle;
+ /** Whether we want to get or set metadata */
+ uint32_t op;
+ struct {
+ /** For future use, no flags defined so far */
+ uint64_t flags;
+ /** family specific tiling info */
+ uint64_t tiling_info;
+ uint32_t data_size_bytes;
+ uint32_t data[64];
+ } data;
+};
+
+struct drm_amdgpu_gem_mmap_in {
+ /** the GEM object handle */
+ uint32_t handle;
+ uint32_t _pad;
+};
+
+struct drm_amdgpu_gem_mmap_out {
+ /** mmap offset from the vma offset manager */
+ uint64_t addr_ptr;
+};
+
+union drm_amdgpu_gem_mmap {
+ struct drm_amdgpu_gem_mmap_in in;
+ struct drm_amdgpu_gem_mmap_out out;
+};
+
+struct drm_amdgpu_gem_wait_idle_in {
+ /** GEM object handle */
+ uint32_t handle;
+ /** For future use, no flags defined so far */
+ uint32_t flags;
+ /** Absolute timeout to wait */
+ uint64_t timeout;
+};
+
+struct drm_amdgpu_gem_wait_idle_out {
+ /** BO status: 0 - BO is idle, 1 - BO is busy */
+ uint32_t status;
+ /** Returned current memory domain */
+ uint32_t domain;
+};
+
+union drm_amdgpu_gem_wait_idle {
+ struct drm_amdgpu_gem_wait_idle_in in;
+ struct drm_amdgpu_gem_wait_idle_out out;
+};
+
+struct drm_amdgpu_wait_cs_in {
+ /** Command submission handle */
+ uint64_t handle;
+ /** Absolute timeout to wait */
+ uint64_t timeout;
+ uint32_t ip_type;
+ uint32_t ip_instance;
+ uint32_t ring;
+ uint32_t ctx_id;
+};
+
+struct drm_amdgpu_wait_cs_out {
+ /** CS status: 0 - CS completed, 1 - CS still busy */
+ uint64_t status;
+};
+
+union drm_amdgpu_wait_cs {
+ struct drm_amdgpu_wait_cs_in in;
+ struct drm_amdgpu_wait_cs_out out;
+};
+
+#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
+#define AMDGPU_GEM_OP_SET_PLACEMENT 1
+
+/* Sets or returns a value associated with a buffer. */
+struct drm_amdgpu_gem_op {
+ /** GEM object handle */
+ uint32_t handle;
+ /** AMDGPU_GEM_OP_* */
+ uint32_t op;
+ /** Input or return value */
+ uint64_t value;
+};
+
+#define AMDGPU_VA_OP_MAP 1
+#define AMDGPU_VA_OP_UNMAP 2
+
+/* Delay the page table update till the next CS */
+#define AMDGPU_VM_DELAY_UPDATE (1 << 0)
+
+/* Mapping flags */
+/* readable mapping */
+#define AMDGPU_VM_PAGE_READABLE (1 << 1)
+/* writable mapping */
+#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
+/* executable mapping, new for VI */
+#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
+
+struct drm_amdgpu_gem_va {
+ /** GEM object handle */
+ uint32_t handle;
+ uint32_t _pad;
+ /** AMDGPU_VA_OP_* */
+ uint32_t operation;
+ /** AMDGPU_VM_PAGE_* */
+ uint32_t flags;
+ /** VA address to assign. Must be correctly aligned. */
+ uint64_t va_address;
+ /** Specify offset inside of BO to assign. Must be correctly aligned.*/
+ uint64_t offset_in_bo;
+ /** Specify mapping size. Must be correctly aligned. */
+ uint64_t map_size;
+};
+
+#define AMDGPU_HW_IP_GFX 0
+#define AMDGPU_HW_IP_COMPUTE 1
+#define AMDGPU_HW_IP_DMA 2
+#define AMDGPU_HW_IP_UVD 3
+#define AMDGPU_HW_IP_VCE 4
+#define AMDGPU_HW_IP_NUM 5
+
+#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
+
+#define AMDGPU_CHUNK_ID_IB 0x01
+#define AMDGPU_CHUNK_ID_FENCE 0x02
+#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
+
+struct drm_amdgpu_cs_chunk {
+ uint32_t chunk_id;
+ uint32_t length_dw;
+ uint64_t chunk_data;
+};
+
+struct drm_amdgpu_cs_in {
+ /** Rendering context id */
+ uint32_t ctx_id;
+ /** Handle of resource list associated with CS */
+ uint32_t bo_list_handle;
+ uint32_t num_chunks;
+ uint32_t _pad;
+ /** this points to uint64_t * which points to cs chunks */
+ uint64_t chunks;
+};
+
+struct drm_amdgpu_cs_out {
+ uint64_t handle;
+};
+
+union drm_amdgpu_cs {
+ struct drm_amdgpu_cs_in in;
+ struct drm_amdgpu_cs_out out;
+};
+
+/* Specify flags to be used for IB */
+
+/* This IB should be submitted to CE */
+#define AMDGPU_IB_FLAG_CE (1<<0)
+
+/* CE Preamble */
+#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
+
+struct drm_amdgpu_cs_chunk_ib {
+ uint32_t _pad;
+ /** AMDGPU_IB_FLAG_* */
+ uint32_t flags;
+ /** Virtual address to begin IB execution */
+ uint64_t va_start;
+ /** Size of submission */
+ uint32_t ib_bytes;
+ /** HW IP to submit to */
+ uint32_t ip_type;
+ /** HW IP index of the same type to submit to */
+ uint32_t ip_instance;
+ /** Ring index to submit to */
+ uint32_t ring;
+};
+
+struct drm_amdgpu_cs_chunk_dep {
+ uint32_t ip_type;
+ uint32_t ip_instance;
+ uint32_t ring;
+ uint32_t ctx_id;
+ uint64_t handle;
+};
+
+struct drm_amdgpu_cs_chunk_fence {
+ uint32_t handle;
+ uint32_t offset;
+};
+
+struct drm_amdgpu_cs_chunk_data {
+ union {
+ struct drm_amdgpu_cs_chunk_ib ib_data;
+ struct drm_amdgpu_cs_chunk_fence fence_data;
+ };
+};
+
+/**
+ * Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
+ *
+ */
+#define AMDGPU_IDS_FLAGS_FUSION 0x1
+
+/* indicates whether acceleration is working */
+#define AMDGPU_INFO_ACCEL_WORKING 0x00
+/* get the crtc_id from the mode object id? */
+#define AMDGPU_INFO_CRTC_FROM_ID 0x01
+/* query hw IP info */
+#define AMDGPU_INFO_HW_IP_INFO 0x02
+/* query hw IP instance count for the specified type */
+#define AMDGPU_INFO_HW_IP_COUNT 0x03
+/* timestamp for GL_ARB_timer_query */
+#define AMDGPU_INFO_TIMESTAMP 0x05
+/* Query the firmware version */
+#define AMDGPU_INFO_FW_VERSION 0x0e
+ /* Subquery id: Query VCE firmware version */
+ #define AMDGPU_INFO_FW_VCE 0x1
+ /* Subquery id: Query UVD firmware version */
+ #define AMDGPU_INFO_FW_UVD 0x2
+ /* Subquery id: Query GMC firmware version */
+ #define AMDGPU_INFO_FW_GMC 0x03
+ /* Subquery id: Query GFX ME firmware version */
+ #define AMDGPU_INFO_FW_GFX_ME 0x04
+ /* Subquery id: Query GFX PFP firmware version */
+ #define AMDGPU_INFO_FW_GFX_PFP 0x05
+ /* Subquery id: Query GFX CE firmware version */
+ #define AMDGPU_INFO_FW_GFX_CE 0x06
+ /* Subquery id: Query GFX RLC firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC 0x07
+ /* Subquery id: Query GFX MEC firmware version */
+ #define AMDGPU_INFO_FW_GFX_MEC 0x08
+ /* Subquery id: Query SMC firmware version */
+ #define AMDGPU_INFO_FW_SMC 0x0a
+ /* Subquery id: Query SDMA firmware version */
+ #define AMDGPU_INFO_FW_SDMA 0x0b
+/* number of bytes moved for TTM migration */
+#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
+/* the used VRAM size */
+#define AMDGPU_INFO_VRAM_USAGE 0x10
+/* the used GTT size */
+#define AMDGPU_INFO_GTT_USAGE 0x11
+/* Information about GDS, etc. resource configuration */
+#define AMDGPU_INFO_GDS_CONFIG 0x13
+/* Query information about VRAM and GTT domains */
+#define AMDGPU_INFO_VRAM_GTT 0x14
+/* Query information about register in MMR address space*/
+#define AMDGPU_INFO_READ_MMR_REG 0x15
+/* Query information about device: rev id, family, etc. */
+#define AMDGPU_INFO_DEV_INFO 0x16
+/* visible vram usage */
+#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
+
+#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
+#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
+#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
+#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
+
+/* Input structure for the INFO ioctl */
+struct drm_amdgpu_info {
+ /* Where the return value will be stored */
+ uint64_t return_pointer;
+ /* The size of the return value. Just like "size" in "snprintf",
+ * it limits how many bytes the kernel can write. */
+ uint32_t return_size;
+ /* The query request id. */
+ uint32_t query;
+
+ union {
+ struct {
+ uint32_t id;
+ uint32_t _pad;
+ } mode_crtc;
+
+ struct {
+ /** AMDGPU_HW_IP_* */
+ uint32_t type;
+ /**
+ * Index of the IP if there are more IPs of the same
+ * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
+ */
+ uint32_t ip_instance;
+ } query_hw_ip;
+
+ struct {
+ uint32_t dword_offset;
+ /** number of registers to read */
+ uint32_t count;
+ uint32_t instance;
+ /** For future use, no flags defined so far */
+ uint32_t flags;
+ } read_mmr_reg;
+
+ struct {
+ /** AMDGPU_INFO_FW_* */
+ uint32_t fw_type;
+ /**
+ * Index of the IP if there are more IPs of
+ * the same type.
+ */
+ uint32_t ip_instance;
+ /**
+ * Index of the engine. Whether this is used depends
+ * on the firmware type. (e.g. MEC, SDMA)
+ */
+ uint32_t index;
+ uint32_t _pad;
+ } query_fw;
+ };
+};
+
+struct drm_amdgpu_info_gds {
+ /** GDS GFX partition size */
+ uint32_t gds_gfx_partition_size;
+ /** GDS compute partition size */
+ uint32_t compute_partition_size;
+ /** total GDS memory size */
+ uint32_t gds_total_size;
+ /** GWS size per GFX partition */
+ uint32_t gws_per_gfx_partition;
+ /** GWS size per compute partition */
+ uint32_t gws_per_compute_partition;
+ /** OA size per GFX partition */
+ uint32_t oa_per_gfx_partition;
+ /** OA size per compute partition */
+ uint32_t oa_per_compute_partition;
+ uint32_t _pad;
+};
+
+struct drm_amdgpu_info_vram_gtt {
+ uint64_t vram_size;
+ uint64_t vram_cpu_accessible_size;
+ uint64_t gtt_size;
+};
+
+struct drm_amdgpu_info_firmware {
+ uint32_t ver;
+ uint32_t feature;
+};
+
+#define AMDGPU_VRAM_TYPE_UNKNOWN 0
+#define AMDGPU_VRAM_TYPE_GDDR1 1
+#define AMDGPU_VRAM_TYPE_DDR2 2
+#define AMDGPU_VRAM_TYPE_GDDR3 3
+#define AMDGPU_VRAM_TYPE_GDDR4 4
+#define AMDGPU_VRAM_TYPE_GDDR5 5
+#define AMDGPU_VRAM_TYPE_HBM 6
+#define AMDGPU_VRAM_TYPE_DDR3 7
+
+struct drm_amdgpu_info_device {
+ /** PCI Device ID */
+ uint32_t device_id;
+ /** Internal chip revision: A0, A1, etc. */
+ uint32_t chip_rev;
+ uint32_t external_rev;
+ /** Revision id in PCI Config space */
+ uint32_t pci_rev;
+ uint32_t family;
+ uint32_t num_shader_engines;
+ uint32_t num_shader_arrays_per_engine;
+ /* in KHz */
+ uint32_t gpu_counter_freq;
+ uint64_t max_engine_clock;
+ uint64_t max_memory_clock;
+ /* cu information */
+ uint32_t cu_active_number;
+ uint32_t cu_ao_mask;
+ uint32_t cu_bitmap[4][4];
+ /** Render backend pipe mask. One render backend is CB+DB. */
+ uint32_t enabled_rb_pipes_mask;
+ uint32_t num_rb_pipes;
+ uint32_t num_hw_gfx_contexts;
+ uint32_t _pad;
+ uint64_t ids_flags;
+ /** Starting virtual address for UMDs. */
+ uint64_t virtual_address_offset;
+ /** The maximum virtual address */
+ uint64_t virtual_address_max;
+ /** Required alignment of virtual addresses. */
+ uint32_t virtual_address_alignment;
+ /** Page table entry - fragment size */
+ uint32_t pte_fragment_size;
+ uint32_t gart_page_size;
+ /** constant engine ram size */
+ uint32_t ce_ram_size;
+ /** video memory type info */
+ uint32_t vram_type;
+ /** video memory bit width */
+ uint32_t vram_bit_width;
+};
+
+struct drm_amdgpu_info_hw_ip {
+ /** Version of h/w IP */
+ uint32_t hw_ip_version_major;
+ uint32_t hw_ip_version_minor;
+ /** Capabilities */
+ uint64_t capabilities_flags;
+ /** command buffer address start alignment */
+ uint32_t ib_start_alignment;
+ /** command buffer size alignment */
+ uint32_t ib_size_alignment;
+ /** Bitmask of available rings. Bit 0 means ring 0, etc. */
+ uint32_t available_rings;
+ uint32_t _pad;
+};
+
+/*
+ * Supported GPU families
+ */
+#define AMDGPU_FAMILY_UNKNOWN 0
+#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
+#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
+#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
+#define AMDGPU_FAMILY_CZ 135 /* Carrizo */
+
+#endif
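To make the new UAPI concrete, here is a hedged userspace sketch that allocates a small GTT buffer and CPU-maps it using only the ioctls and flags defined above. Error handling is trimmed, and the render node path is an assumption; none of this is part of the patch itself:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/amdgpu_drm.h>	/* the header added by this patch */

int demo_alloc_and_map(void **cpu_ptr)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed device node */
	union drm_amdgpu_gem_create create;
	union drm_amdgpu_gem_mmap map;

	memset(&create, 0, sizeof(create));
	create.in.bo_size = 4096;
	create.in.alignment = 4096;
	create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
	create.in.domain_flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	if (fd < 0 || ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &create))
		return -1;

	memset(&map, 0, sizeof(map));
	map.in.handle = create.out.handle;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &map))
		return -1;

	/* out.addr_ptr is the fake offset to pass to mmap() on the DRM fd */
	*cpu_ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			fd, map.out.addr_ptr);
	return *cpu_ptr == MAP_FAILED ? -1 : fd;
}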
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index b0b8556..3801584 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -630,6 +630,7 @@ struct drm_gem_open {
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -654,6 +655,13 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will expose atomic properties to userspace
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -777,6 +785,9 @@ struct drm_prime_handle {
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
+#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
+#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
+#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
/**
* Device specific ioctls should only be in their respective headers
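A short hedged sketch of how a client would use the two additions above, opting into atomic with DRM_IOCTL_SET_CLIENT_CAP and probing DRM_CAP_ADDFB2_MODIFIERS, assuming an already-open DRM file descriptor:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int demo_enable_atomic(int fd, uint64_t *has_modifiers)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_ATOMIC,
		.value = 1,
	};
	struct drm_get_cap get = { .capability = DRM_CAP_ADDFB2_MODIFIERS };

	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
		return -1;	/* kernel predates atomic or rejected the cap */
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &get))
		return -1;
	*has_modifiers = get.value;	/* nonzero if ADDFB2 accepts modifiers */
	return 0;
}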
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 646ae5f..2f295cd 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -109,9 +109,6 @@
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
-/* special NV12 tiled format */
-#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
-
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
@@ -132,4 +129,97 @@
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
+#define DRM_FORMAT_MOD_VENDOR_NV 0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
+/* add more to the end as needed */
+
+#define fourcc_mod_code(vendor, val) \
+ ((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ */
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are 2Kb)
+ * in row-major layout. Within the tile bytes are laid out row-major, with
+ * a platform-dependent stride. On top of that the memory can apply
+ * platform-dependent swizzling of some higher address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are 2Kb)
+ * in row-major layout. Within the tile bytes are laid out in OWORD (16 bytes)
+ * chunks column-major, with a platform-dependent height. On top of that the
+ * memory can apply platform-dependent swizzling of some higher address bits
+ * into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major layout.
+ * Within the tile pixels are laid out in sixteen 256 byte units / sub-tiles which
+ * are arranged in four groups (two wide, two high) with column-major layout.
+ * Each group therefore consists of four 256 byte units, which are also laid
+ * out as 2x2 column-major.
+ * 256 byte units are made out of four 64 byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the width
+ * in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
+/*
+ * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
+ *
+ * Macroblocks are laid out in a Z-shape, and the pixel data within each
+ * macroblock follows the standard NV12 style.
+ * As for NV12, an image is the result of two frame buffers: one for Y,
+ * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
+ * Alignment requirements are (for each buffer):
+ * - multiple of 128 pixels for the width
+ * - multiple of 32 pixels for the height
+ *
+ * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+
#endif /* DRM_FOURCC_H */
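
The vendor/value split is easy to see in a small sketch. Since fourcc_mod_code() is written with the kernel-internal u64 type, the example below mirrors the documented encoding directly instead of including the header; the constant 0x01 is DRM_FORMAT_MOD_VENDOR_INTEL and the value 2 reproduces I915_FORMAT_MOD_Y_TILED:

/* Sketch: split a format modifier into the vendor id (upper 8 bits) and the
 * vendor-defined value (lower 56 bits), as documented in drm_fourcc.h above.
 */
#include <stdint.h>
#include <stdio.h>

#define MOD_VENDOR(m)	((uint8_t)((m) >> 56))
#define MOD_VALUE(m)	((m) & 0x00ffffffffffffffULL)

int main(void)
{
	/* I915_FORMAT_MOD_Y_TILED == fourcc_mod_code(INTEL, 2) */
	uint64_t mod = ((uint64_t)0x01 << 56) | 2;

	printf("vendor 0x%02x, value %llu\n",
	       MOD_VENDOR(mod), (unsigned long long)MOD_VALUE(mod));
	return 0;
}
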
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 86574b0..359107a 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -272,6 +272,13 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
+/* the PROP_ATOMIC flag is used to hide properties from userspace that
+ * is not aware of atomic properties. This is mostly to work around
+ * older userspace (DDX drivers) that read/write each prop they find,
+ * without being aware that this could be triggering a lengthy modeset.
+ */
+#define DRM_MODE_PROP_ATOMIC 0x80000000
+
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
@@ -329,6 +336,7 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
@@ -338,7 +346,7 @@ struct drm_mode_fb_cmd2 {
/*
* In case of planar formats, this ioctl allows up to 4
- * buffer objects with offets and pitches per plane.
+ * buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
@@ -346,13 +354,21 @@ struct drm_mode_fb_cmd2 {
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
- * So it would consist of Y as offset[0] and UV as
- * offeset[1]. Note that offset[0] will generally
- * be 0.
+ * So it would consist of Y as offsets[0] and UV as
+ * offsets[1]. Note that offsets[0] will generally
+ * be 0 (but this is not required).
+ *
+ * To accommodate tiled, compressed, etc formats, a per-plane
+ * modifier can be specified. The default value of zero
+ * indicates "native" format as specified by the fourcc.
+ * Non-zero values are vendor specific modifier tokens. This allows, for
+ * example, different tiling/swizzling patterns on different planes.
+ * See discussion above of DRM_FORMAT_MOD_xxx.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
+ __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@@ -519,4 +535,47 @@ struct drm_mode_destroy_dumb {
uint32_t handle;
};
+/* page-flip flags are valid, plus: */
+#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+#define DRM_MODE_ATOMIC_FLAGS (\
+ DRM_MODE_PAGE_FLIP_EVENT |\
+ DRM_MODE_PAGE_FLIP_ASYNC |\
+ DRM_MODE_ATOMIC_TEST_ONLY |\
+ DRM_MODE_ATOMIC_NONBLOCK |\
+ DRM_MODE_ATOMIC_ALLOW_MODESET)
+
+struct drm_mode_atomic {
+ __u32 flags;
+ __u32 count_objs;
+ __u64 objs_ptr;
+ __u64 count_props_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u64 reserved;
+ __u64 user_data;
+};
+
+/**
+ * Create a new 'blob' data property, copying length bytes from data pointer,
+ * and returning new blob ID.
+ */
+struct drm_mode_create_blob {
+ /** Pointer to data to copy. */
+ __u64 data;
+ /** Length of data to copy. */
+ __u32 length;
+ /** Return: new property ID. */
+ __u32 blob_id;
+};
+
+/**
+ * Destroy a user-created blob property.
+ */
+struct drm_mode_destroy_blob {
+ __u32 blob_id;
+};
+
#endif
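
To tie the atomic pieces together, here is a heavily simplified sketch of a TEST_ONLY commit: the client enables DRM_CLIENT_CAP_ATOMIC and then submits one property for one object through DRM_IOCTL_MODE_ATOMIC. The obj_id/prop_id/prop_value parameters are hypothetical placeholders that a real client would discover through the property ioctls; this is an illustration, not part of the patch:

/* Sketch only: enable the atomic client cap and test-commit a single
 * property. obj_id/prop_id/prop_value are hypothetical placeholders.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int atomic_test_one_prop(int fd, __u32 obj_id, __u32 prop_id,
				__u64 prop_value)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_ATOMIC,
		.value = 1,
	};
	__u32 objs[1] = { obj_id };
	__u32 count_props[1] = { 1 };	/* one property on this object */
	__u32 props[1] = { prop_id };
	__u64 values[1] = { prop_value };
	struct drm_mode_atomic req;

	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) < 0)
		return -1;	/* kernel without atomic support */

	memset(&req, 0, sizeof(req));
	req.flags = DRM_MODE_ATOMIC_TEST_ONLY;
	req.count_objs = 1;
	req.objs_ptr = (__u64)(uintptr_t)objs;
	req.count_props_ptr = (__u64)(uintptr_t)count_props;
	req.props_ptr = (__u64)(uintptr_t)props;
	req.prop_values_ptr = (__u64)(uintptr_t)values;

	return ioctl(fd, DRM_IOCTL_MODE_ATOMIC, &req);
}
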
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 2502622..6e1a2ed 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -171,8 +171,12 @@ typedef struct _drm_i915_sarea {
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
-/* I915 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
+/*
+ * i915 specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
+ * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
+ * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
@@ -224,6 +228,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
+#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
+#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -268,13 +274,15 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
-#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -341,6 +349,11 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
+#define I915_PARAM_MMAP_VERSION 30
+#define I915_PARAM_HAS_BSD2 31
+#define I915_PARAM_REVISION 32
+#define I915_PARAM_SUBSLICE_TOTAL 33
+#define I915_PARAM_EU_TOTAL 34
typedef struct drm_i915_getparam {
int param;
@@ -488,6 +501,14 @@ struct drm_i915_gem_mmap {
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * Added in version 2.
+ */
+ __u64 flags;
+#define I915_MMAP_WC 0x1
};
struct drm_i915_gem_mmap_gtt {
@@ -737,7 +758,13 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_HANDLE_LUT (1<<12)
-#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
+/** Used for switching BSD rings on the platforms with two BSD rings */
+#define I915_EXEC_BSD_MASK (3<<13)
+#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
+#define I915_EXEC_BSD_RING1 (1<<13)
+#define I915_EXEC_BSD_RING2 (2<<13)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -973,6 +1000,7 @@ struct drm_intel_overlay_put_image {
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
@@ -1073,4 +1101,12 @@ struct drm_i915_gem_userptr {
__u32 handle;
};
+struct drm_i915_gem_context_param {
+ __u32 ctx_id;
+ __u32 size;
+ __u64 param;
+#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+ __u64 value;
+};
+
#endif /* _UAPI_I915_DRM_H_ */
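
A short sketch of the new context parameter interface: reading a context's ban period with DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM. Passing ctx_id 0 to address the default per-fd context is an assumption of this sketch, not something stated by the header above:

/* Sketch: query I915_CONTEXT_PARAM_BAN_PERIOD for a context. */
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int i915_get_ban_period(int fd, __u32 ctx_id, __u64 *period)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;		/* 0 assumed to mean the default context */
	p.param = I915_CONTEXT_PARAM_BAN_PERIOD;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) < 0)
		return -1;

	*period = p.value;
	return 0;
}
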
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 0664c31..75a232b 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -23,7 +23,7 @@
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints:
- * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit
+ * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
* user/kernel compatibility
* 2) Keep fields aligned to their size
* 3) Because of how drm_ioctl() works, we can add new fields at
@@ -44,8 +44,8 @@
* same as 'struct timespec' but 32/64b ABI safe.
*/
struct drm_msm_timespec {
- int64_t tv_sec; /* seconds */
- int64_t tv_nsec; /* nanoseconds */
+ __s64 tv_sec; /* seconds */
+ __s64 tv_nsec; /* nanoseconds */
};
#define MSM_PARAM_GPU_ID 0x01
@@ -53,9 +53,9 @@ struct drm_msm_timespec {
#define MSM_PARAM_CHIP_ID 0x03
struct drm_msm_param {
- uint32_t pipe; /* in, MSM_PIPE_x */
- uint32_t param; /* in, MSM_PARAM_x */
- uint64_t value; /* out (get_param) or in (set_param) */
+ __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 param; /* in, MSM_PARAM_x */
+ __u64 value; /* out (get_param) or in (set_param) */
};
/*
@@ -77,15 +77,15 @@ struct drm_msm_param {
MSM_BO_UNCACHED)
struct drm_msm_gem_new {
- uint64_t size; /* in */
- uint32_t flags; /* in, mask of MSM_BO_x */
- uint32_t handle; /* out */
+ __u64 size; /* in */
+ __u32 flags; /* in, mask of MSM_BO_x */
+ __u32 handle; /* out */
};
struct drm_msm_gem_info {
- uint32_t handle; /* in */
- uint32_t pad;
- uint64_t offset; /* out, offset to pass to mmap() */
+ __u32 handle; /* in */
+ __u32 pad;
+ __u64 offset; /* out, offset to pass to mmap() */
};
#define MSM_PREP_READ 0x01
@@ -95,13 +95,13 @@ struct drm_msm_gem_info {
#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
struct drm_msm_gem_cpu_prep {
- uint32_t handle; /* in */
- uint32_t op; /* in, mask of MSM_PREP_x */
+ __u32 handle; /* in */
+ __u32 op; /* in, mask of MSM_PREP_x */
struct drm_msm_timespec timeout; /* in */
};
struct drm_msm_gem_cpu_fini {
- uint32_t handle; /* in */
+ __u32 handle; /* in */
};
/*
@@ -120,11 +120,11 @@ struct drm_msm_gem_cpu_fini {
* otherwise EINVAL.
*/
struct drm_msm_gem_submit_reloc {
- uint32_t submit_offset; /* in, offset from submit_bo */
- uint32_t or; /* in, value OR'd with result */
- int32_t shift; /* in, amount of left shift (can be negative) */
- uint32_t reloc_idx; /* in, index of reloc_bo buffer */
- uint64_t reloc_offset; /* in, offset from start of reloc_bo */
+ __u32 submit_offset; /* in, offset from submit_bo */
+ __u32 or; /* in, value OR'd with result */
+ __s32 shift; /* in, amount of left shift (can be negative) */
+ __u32 reloc_idx; /* in, index of reloc_bo buffer */
+ __u64 reloc_offset; /* in, offset from start of reloc_bo */
};
/* submit-types:
@@ -139,13 +139,13 @@ struct drm_msm_gem_submit_reloc {
#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
struct drm_msm_gem_submit_cmd {
- uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */
- uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */
- uint32_t submit_offset; /* in, offset into submit_bo */
- uint32_t size; /* in, cmdstream size */
- uint32_t pad;
- uint32_t nr_relocs; /* in, number of submit_reloc's */
- uint64_t __user relocs; /* in, ptr to array of submit_reloc's */
+ __u32 type; /* in, one of MSM_SUBMIT_CMD_x */
+ __u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
+ __u32 submit_offset; /* in, offset into submit_bo */
+ __u32 size; /* in, cmdstream size */
+ __u32 pad;
+ __u32 nr_relocs; /* in, number of submit_reloc's */
+ __u64 __user relocs; /* in, ptr to array of submit_reloc's */
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -165,9 +165,9 @@ struct drm_msm_gem_submit_cmd {
#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
struct drm_msm_gem_submit_bo {
- uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */
- uint32_t handle; /* in, GEM handle */
- uint64_t presumed; /* in/out, presumed buffer address */
+ __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
+ __u32 handle; /* in, GEM handle */
+ __u64 presumed; /* in/out, presumed buffer address */
};
/* Each cmdstream submit consists of a table of buffers involved, and
@@ -175,12 +175,12 @@ struct drm_msm_gem_submit_bo {
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
struct drm_msm_gem_submit {
- uint32_t pipe; /* in, MSM_PIPE_x */
- uint32_t fence; /* out */
- uint32_t nr_bos; /* in, number of submit_bo's */
- uint32_t nr_cmds; /* in, number of submit_cmd's */
- uint64_t __user bos; /* in, ptr to array of submit_bo's */
- uint64_t __user cmds; /* in, ptr to array of submit_cmd's */
+ __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 fence; /* out */
+ __u32 nr_bos; /* in, number of submit_bo's */
+ __u32 nr_cmds; /* in, number of submit_cmd's */
+ __u64 __user bos; /* in, ptr to array of submit_bo's */
+ __u64 __user cmds; /* in, ptr to array of submit_cmd's */
};
/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -191,8 +191,8 @@ struct drm_msm_gem_submit {
* APIs without requiring a dummy bo to synchronize on.
*/
struct drm_msm_wait_fence {
- uint32_t fence; /* in */
- uint32_t pad;
+ __u32 fence; /* in */
+ __u32 pad;
struct drm_msm_timespec timeout; /* in */
};
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 0d7608d..5507eea 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -39,6 +39,7 @@
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 50d0fb4..1ef7666 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1034,6 +1034,12 @@ struct drm_radeon_cs {
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
+#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
+#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
+#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
+#define RADEON_INFO_READ_REG 0x24
+#define RADEON_INFO_VA_UNMAP_WORKING 0x25
+#define RADEON_INFO_GPU_RESET_COUNTER 0x26
struct drm_radeon_info {
uint32_t request;
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index c15d781..5391780 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -36,7 +36,8 @@ struct drm_tegra_gem_create {
struct drm_tegra_gem_mmap {
__u32 handle;
- __u32 offset;
+ __u32 pad;
+ __u64 offset;
};
struct drm_tegra_syncpt_read {
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 00b10002..1ff9942 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -6,6 +6,7 @@ header-y += caif/
header-y += dvb/
header-y += hdlc/
header-y += hsi/
+header-y += iio/
header-y += isdn/
header-y += mmc/
header-y += nfsd/
@@ -35,6 +36,7 @@ header-y += adfs_fs.h
header-y += affs_hardblocks.h
header-y += agpgart.h
header-y += aio_abi.h
+header-y += am437x-vpfe.h
header-y += apm_bios.h
header-y += arcfb.h
header-y += atalk.h
@@ -136,6 +138,7 @@ header-y += genetlink.h
header-y += gen_stats.h
header-y += gfs2_ondisk.h
header-y += gigaset_dev.h
+header-y += gsmmux.h
header-y += hdlcdrv.h
header-y += hdlc.h
header-y += hdreg.h
@@ -269,6 +272,7 @@ header-y += ncp_fs.h
header-y += ncp.h
header-y += ncp_mount.h
header-y += ncp_no.h
+header-y += ndctl.h
header-y += neighbour.h
header-y += netconf.h
header-y += netdevice.h
@@ -283,6 +287,7 @@ header-y += net.h
header-y += netlink_diag.h
header-y += netlink.h
header-y += netrom.h
+header-y += net_namespace.h
header-y += net_tstamp.h
header-y += nfc.h
header-y += nfs2.h
@@ -349,6 +354,7 @@ header-y += rtc.h
header-y += rtnetlink.h
header-y += scc.h
header-y += sched.h
+header-y += scif_ioctl.h
header-y += screen_info.h
header-y += sctp.h
header-y += sdla.h
@@ -368,7 +374,6 @@ header-y += snmp.h
header-y += sock_diag.h
header-y += socket.h
header-y += sockios.h
-header-y += som.h
header-y += sonet.h
header-y += sonypi.h
header-y += soundcard.h
@@ -428,7 +433,9 @@ header-y += virtio_balloon.h
header-y += virtio_blk.h
header-y += virtio_config.h
header-y += virtio_console.h
+header-y += virtio_gpu.h
header-y += virtio_ids.h
+header-y += virtio_input.h
header-y += virtio_net.h
header-y += virtio_pci.h
header-y += virtio_ring.h
@@ -445,5 +452,6 @@ header-y += wireless.h
header-y += x25.h
header-y += xattr.h
header-y += xfrm.h
+header-y += xilinx-v4l2-controls.h
header-y += zorro.h
header-y += zorro_ids.h
diff --git a/include/uapi/linux/am437x-vpfe.h b/include/uapi/linux/am437x-vpfe.h
new file mode 100644
index 0000000..d757743
--- /dev/null
+++ b/include/uapi/linux/am437x-vpfe.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef AM437X_VPFE_USER_H
+#define AM437X_VPFE_USER_H
+
+#include <linux/videodev2.h>
+
+enum vpfe_ccdc_data_size {
+ VPFE_CCDC_DATA_16BITS = 0,
+ VPFE_CCDC_DATA_15BITS,
+ VPFE_CCDC_DATA_14BITS,
+ VPFE_CCDC_DATA_13BITS,
+ VPFE_CCDC_DATA_12BITS,
+ VPFE_CCDC_DATA_11BITS,
+ VPFE_CCDC_DATA_10BITS,
+ VPFE_CCDC_DATA_8BITS,
+};
+
+/* enum for number of pixels per line to be averaged in Black Clamping */
+enum vpfe_ccdc_sample_length {
+ VPFE_CCDC_SAMPLE_1PIXELS = 0,
+ VPFE_CCDC_SAMPLE_2PIXELS,
+ VPFE_CCDC_SAMPLE_4PIXELS,
+ VPFE_CCDC_SAMPLE_8PIXELS,
+ VPFE_CCDC_SAMPLE_16PIXELS,
+};
+
+/* enum for number of lines in Black Clamping */
+enum vpfe_ccdc_sample_line {
+ VPFE_CCDC_SAMPLE_1LINES = 0,
+ VPFE_CCDC_SAMPLE_2LINES,
+ VPFE_CCDC_SAMPLE_4LINES,
+ VPFE_CCDC_SAMPLE_8LINES,
+ VPFE_CCDC_SAMPLE_16LINES,
+};
+
+/* enum for Alaw gamma width */
+enum vpfe_ccdc_gamma_width {
+ VPFE_CCDC_GAMMA_BITS_15_6 = 0, /* use bits 15-6 for gamma */
+ VPFE_CCDC_GAMMA_BITS_14_5,
+ VPFE_CCDC_GAMMA_BITS_13_4,
+ VPFE_CCDC_GAMMA_BITS_12_3,
+ VPFE_CCDC_GAMMA_BITS_11_2,
+ VPFE_CCDC_GAMMA_BITS_10_1,
+ VPFE_CCDC_GAMMA_BITS_09_0, /* use bits 9-0 for gamma */
+};
+
+/* structure for ALaw */
+struct vpfe_ccdc_a_law {
+ /* Enable/disable A-Law */
+ unsigned char enable;
+ /* Gamma Width Input */
+ enum vpfe_ccdc_gamma_width gamma_wd;
+};
+
+/* structure for Black Clamping */
+struct vpfe_ccdc_black_clamp {
+ unsigned char enable;
+ /* only if bClampEnable is TRUE */
+ enum vpfe_ccdc_sample_length sample_pixel;
+ /* only if bClampEnable is TRUE */
+ enum vpfe_ccdc_sample_line sample_ln;
+ /* only if bClampEnable is TRUE */
+ unsigned short start_pixel;
+ /* only if bClampEnable is TRUE */
+ unsigned short sgain;
+ /* only if bClampEnable is FALSE */
+ unsigned short dc_sub;
+};
+
+/* structure for Black Level Compensation */
+struct vpfe_ccdc_black_compensation {
+ /* Constant value to subtract from Red component */
+ char r;
+ /* Constant value to subtract from Gr component */
+ char gr;
+ /* Constant value to subtract from Blue component */
+ char b;
+ /* Constant value to subtract from Gb component */
+ char gb;
+};
+
+/* Structure for CCDC configuration parameters for raw capture mode passed
+ * by application
+ */
+struct vpfe_ccdc_config_params_raw {
+ /* data size value from 8 to 16 bits */
+ enum vpfe_ccdc_data_size data_sz;
+ /* Structure for Optional A-Law */
+ struct vpfe_ccdc_a_law alaw;
+ /* Structure for Optical Black Clamp */
+ struct vpfe_ccdc_black_clamp blk_clamp;
+ /* Structure for Black Compensation */
+ struct vpfe_ccdc_black_compensation blk_comp;
+};
+
+/*
+ * Private IOCTL
+ * VIDIOC_AM437X_CCDC_CFG - Set CCDC configuration for raw capture
+ * This is an experimental ioctl that will change in future kernels. So use
+ * this ioctl with care!
+ */
+#define VIDIOC_AM437X_CCDC_CFG \
+ _IOW('V', BASE_VIDIOC_PRIVATE + 1, void *)
+
+#endif /* AM437X_VPFE_USER_H */
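
For illustration only, a capture application could describe a raw-capture configuration with the structures above roughly as follows; the chosen values are arbitrary, and the exact way this block is handed to the experimental VIDIOC_AM437X_CCDC_CFG ioctl is left to the driver documentation:

/* Sketch: one plausible CCDC raw-capture parameter block. The values are
 * illustrative only; real settings depend on the attached sensor.
 */
#include <linux/am437x-vpfe.h>

static const struct vpfe_ccdc_config_params_raw example_cfg = {
	.data_sz = VPFE_CCDC_DATA_12BITS,
	.alaw = {
		.enable   = 1,
		.gamma_wd = VPFE_CCDC_GAMMA_BITS_11_2,
	},
	.blk_clamp = {
		.enable       = 1,
		.sample_pixel = VPFE_CCDC_SAMPLE_4PIXELS,
		.sample_ln    = VPFE_CCDC_SAMPLE_2LINES,
		.start_pixel  = 0,
		.sgain        = 0,
	},
	.blk_comp = {
		.r = 0, .gr = 0, .b = 0, .gb = 0,
	},
};
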
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 45da7ec..29ef6f9 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -113,13 +113,19 @@ enum bpf_map_type {
BPF_MAP_TYPE_UNSPEC,
BPF_MAP_TYPE_HASH,
BPF_MAP_TYPE_ARRAY,
+ BPF_MAP_TYPE_PROG_ARRAY,
};
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
+ BPF_PROG_TYPE_KPROBE,
+ BPF_PROG_TYPE_SCHED_CLS,
+ BPF_PROG_TYPE_SCHED_ACT,
};
+#define BPF_PSEUDO_MAP_FD 1
+
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
@@ -151,6 +157,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
+ __u32 kern_version; /* checked when prog_type=kprobe */
};
} __attribute__((aligned(8)));
@@ -162,7 +169,106 @@ enum bpf_func_id {
BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
+ BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */
+ BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */
+ BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
+ BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
+ BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
+
+ /**
+ * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
+ * @skb: pointer to skb
+ * @offset: offset within packet from skb->mac_header
+ * @from: pointer where to copy bytes from
+ * @len: number of bytes to store into packet
+ * @flags: bit 0 - if true, recompute skb->csum
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_skb_store_bytes,
+
+ /**
+ * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
+ * @skb: pointer to skb
+ * @offset: offset within packet where IP checksum is located
+ * @from: old value of header field
+ * @to: new value of header field
+ * @flags: bits 0-3 - size of header field
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_l3_csum_replace,
+
+ /**
+ * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
+ * @skb: pointer to skb
+ * @offset: offset within packet where TCP/UDP checksum is located
+ * @from: old value of header field
+ * @to: new value of header field
+ * @flags: bits 0-3 - size of header field
+ * bit 4 - is pseudo header
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_l4_csum_replace,
+
+ /**
+ * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
+ * @ctx: context pointer passed to next program
+	 * @prog_array_map: pointer to a map whose type is BPF_MAP_TYPE_PROG_ARRAY
+ * @index: index inside array that selects specific program to run
+ * Return: 0 on success
+ */
+ BPF_FUNC_tail_call,
+
+ /**
+ * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
+ * @skb: pointer to skb
+ * @ifindex: ifindex of the net device
+ * @flags: bit 0 - if set, redirect to ingress instead of egress
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_clone_redirect,
+
+ /**
+ * u64 bpf_get_current_pid_tgid(void)
+ * Return: current->tgid << 32 | current->pid
+ */
+ BPF_FUNC_get_current_pid_tgid,
+
+ /**
+ * u64 bpf_get_current_uid_gid(void)
+ * Return: current_gid << 32 | current_uid
+ */
+ BPF_FUNC_get_current_uid_gid,
+
+ /**
+ * bpf_get_current_comm(char *buf, int size_of_buf)
+ * stores current->comm into buf
+ * Return: 0 on success
+ */
+ BPF_FUNC_get_current_comm,
__BPF_FUNC_MAX_ID,
};
+/* user accessible mirror of in-kernel sk_buff.
+ * new fields can only be added to the end of this structure
+ */
+struct __sk_buff {
+ __u32 len;
+ __u32 pkt_type;
+ __u32 mark;
+ __u32 queue_mapping;
+ __u32 protocol;
+ __u32 vlan_present;
+ __u32 vlan_tci;
+ __u32 vlan_proto;
+ __u32 priority;
+ __u32 ingress_ifindex;
+ __u32 ifindex;
+ __u32 tc_index;
+ __u32 cb[5];
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
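
A hedged sketch of how the new kern_version field and BPF_PROG_TYPE_KPROBE fit together: load a trivial "return 0" program through the bpf(2) syscall. The raw instruction encoding and the LINUX_VERSION_CODE check are assumptions that only hold when the installed headers match the running kernel:

/* Sketch: load a nop kprobe program via the bpf(2) syscall. kern_version
 * must match the running kernel for kprobe programs.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include <linux/version.h>

static int load_nop_kprobe_prog(void)
{
	struct bpf_insn insns[] = {
		/* r0 = 0 */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
		/* exit */
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_KPROBE;
	attr.insns = (__u64)(uintptr_t)insns;
	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
	attr.license = (__u64)(uintptr_t)"GPL";
	attr.kern_version = LINUX_VERSION_CODE;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
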
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 611e1c5..b6dec05 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -495,8 +495,7 @@ struct btrfs_ioctl_send_args {
/* Error codes as returned by the kernel */
enum btrfs_err_code {
- notused,
- BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
+ BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1,
BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 41892f7..9692cda 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -95,11 +95,17 @@ typedef __u32 can_err_mask_t;
* @can_dlc: frame payload length in byte (0 .. 8) aka data length code
* N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
* mapping of the 'data length code' to the real payload length
+ * @__pad: padding
+ * @__res0: reserved / padding
+ * @__res1: reserved / padding
* @data: CAN frame payload (up to 8 byte)
*/
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
+ __u8 __pad; /* padding */
+ __u8 __res0; /* reserved / padding */
+ __u8 __res1; /* reserved / padding */
__u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
};
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 3e6184c..5079b9d 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -78,6 +78,7 @@ enum {
CGW_FILTER, /* specify struct can_filter on source CAN device */
CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */
CGW_LIM_HOPS, /* limit the number of hops of this specific rule */
+ CGW_MOD_UID, /* user defined identifier for modification updates */
__CGW_MAX
};
@@ -162,6 +163,10 @@ enum {
* load time of the can-gw module). This value is used to reduce the number of
* possible hops for this gateway rule to a value smaller than max_hops.
*
+ * CGW_MOD_UID (length 4 bytes):
+ * Optional non-zero user defined routing job identifier to alter existing
+ * modification settings at runtime.
+ *
* CGW_CS_XOR (length 4 bytes):
* Set a simple XOR checksum starting with an initial value into
* data[result-idx] using data[start-idx] .. data[end-idx]
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 78ec76f..8735f108 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -57,6 +57,7 @@ enum {
CAN_RAW_LOOPBACK, /* local loopback (default:on) */
CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
+ CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
};
#endif /* !_UAPI_CAN_RAW_H */
diff --git a/include/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 4abf2ea..2e67bb6 100644
--- a/include/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -25,6 +25,7 @@ enum {
CRYPTO_MSG_DELALG,
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
+ CRYPTO_MSG_DELRNG,
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@@ -43,6 +44,7 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */
CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */
CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
+ CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -101,5 +103,9 @@ struct crypto_report_rng {
unsigned int seedsize;
};
+struct crypto_report_akcipher {
+ char type[CRYPTO_MAX_NAME];
+};
+
#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
sizeof(struct crypto_report_blkcipher))
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index e711f20..3ea470f 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -78,6 +78,70 @@ struct ieee_maxrate {
__u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
};
+enum dcbnl_cndd_states {
+ DCB_CNDD_RESET = 0,
+ DCB_CNDD_EDGE,
+ DCB_CNDD_INTERIOR,
+ DCB_CNDD_INTERIOR_READY,
+};
+
+/* This structure contains the IEEE 802.1Qau QCN managed object.
+ *
+ *@rpg_enable: enable QCN RP
+ *@rppp_max_rps: maximum number of RPs allowed for this CNPV on this port
+ *@rpg_time_reset: time between rate increases if no CNMs received.
+ * given in u-seconds
+ *@rpg_byte_reset: transmitted data between rate increases if no CNMs received.
+ * given in Bytes
+ *@rpg_threshold: The number of times rpByteStage or rpTimeStage can count
+ * before RP rate control state machine advances states
+ *@rpg_max_rate: the maximum rate, in Mbits per second,
+ * at which an RP can transmit
+ *@rpg_ai_rate: The rate, in Mbits per second,
+ *	 used to increase rpTargetRate in the RPR_ACTIVE_INCREASE state
+ *@rpg_hai_rate: The rate, in Mbits per second,
+ * used to increase rpTargetRate in the RPR_HYPER_INCREASE state
+ *@rpg_gd: Upon CNM receive, flow rate is limited to (Fb/Gd)*CurrentRate.
+ * rpgGd is given as log2(Gd), where Gd may only be powers of 2
+ *@rpg_min_dec_fac: The minimum factor by which the current transmit rate
+ * can be changed by reception of a CNM.
+ * value is given as percentage (1-100)
+ *@rpg_min_rate: The minimum value, in bits per second, for rate to limit
+ *@cndd_state_machine: The state of the congestion notification domain
+ *	 defense state machine, as defined by IEEE 802.1Qau
+ * section 32.1.1. In the interior ready state,
+ * the QCN capable hardware may add CN-TAG TLV to the
+ * outgoing traffic, to specifically identify outgoing
+ * flows.
+ */
+
+struct ieee_qcn {
+ __u8 rpg_enable[IEEE_8021QAZ_MAX_TCS];
+ __u32 rppp_max_rps[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_time_reset[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_byte_reset[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_threshold[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_max_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_ai_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_hai_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_gd[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_min_dec_fac[IEEE_8021QAZ_MAX_TCS];
+ __u32 rpg_min_rate[IEEE_8021QAZ_MAX_TCS];
+ __u32 cndd_state_machine[IEEE_8021QAZ_MAX_TCS];
+};
+
+/* This structure contains the IEEE 802.1Qau QCN statistics.
+ *
+ *@rppp_rp_centiseconds: the number of RP-centiseconds accumulated
+ * by RPs at this priority level on this Port
+ *@rppp_created_rps: number of active RPs(flows) that react to CNMs
+ */
+
+struct ieee_qcn_stats {
+ __u64 rppp_rp_centiseconds[IEEE_8021QAZ_MAX_TCS];
+ __u32 rppp_created_rps[IEEE_8021QAZ_MAX_TCS];
+};
+
/* This structure contains the IEEE 802.1Qaz PFC managed object
*
* @pfc_cap: Indicates the number of traffic classes on the local device
@@ -143,8 +207,7 @@ struct cee_pfc {
#define IEEE_8021QAZ_APP_SEL_ANY 4
/* This structure contains the IEEE 802.1Qaz APP managed object. This
- * object is also used for the CEE std as well. There is no difference
- * between the objects.
+ * object is also used for the CEE std.
*
* @selector: protocol identifier type
* @protocol: protocol of type indicated
@@ -152,13 +215,18 @@ struct cee_pfc {
* 8-bit 802.1p user priority bitmap for CEE
*
* ----
- * Selector field values
+ * Selector field values for IEEE 802.1Qaz
* 0 Reserved
* 1 Ethertype
* 2 Well known port number over TCP or SCTP
* 3 Well known port number over UDP or DCCP
* 4 Well known port number over TCP, SCTP, UDP, or DCCP
* 5-7 Reserved
+ *
+ * Selector field values for CEE
+ * 0 Ethertype
+ * 1 Well known port number over TCP or UDP
+ * 2-3 Reserved
*/
struct dcb_app {
__u8 selector;
@@ -334,6 +402,8 @@ enum ieee_attrs {
DCB_ATTR_IEEE_PEER_PFC,
DCB_ATTR_IEEE_PEER_APP,
DCB_ATTR_IEEE_MAXRATE,
+ DCB_ATTR_IEEE_QCN,
+ DCB_ATTR_IEEE_QCN_STATS,
__DCB_ATTR_IEEE_MAX
};
#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index a570d7b..061aca3 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 29
+#define DM_VERSION_MINOR 32
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2014-10-28)"
+#define DM_VERSION_EXTRA "-ioctl (2015-6-26)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h
index b4fb650..427e489 100644
--- a/include/uapi/linux/dvb/dmx.h
+++ b/include/uapi/linux/dvb/dmx.h
@@ -32,7 +32,7 @@
#define DMX_FILTER_SIZE 16
-typedef enum
+enum dmx_output
{
DMX_OUT_DECODER, /* Streaming directly to decoder. */
DMX_OUT_TAP, /* Output going to a memory buffer */
@@ -41,10 +41,11 @@ typedef enum
/* (to be retrieved by reading from the */
/* logical DVR device). */
DMX_OUT_TSDEMUX_TAP /* Like TS_TAP but retrieved from the DMX device */
-} dmx_output_t;
+};
+typedef enum dmx_output dmx_output_t;
-typedef enum
+typedef enum dmx_input
{
DMX_IN_FRONTEND, /* Input from a front-end device. */
DMX_IN_DVR /* Input from the logical DVR device. */
@@ -122,7 +123,7 @@ typedef struct dmx_caps {
int num_decoders;
} dmx_caps_t;
-typedef enum {
+typedef enum dmx_source {
DMX_SOURCE_FRONT0 = 0,
DMX_SOURCE_FRONT1,
DMX_SOURCE_FRONT2,
@@ -139,7 +140,6 @@ struct dmx_stc {
__u64 stc; /* output: stc in 'base'*90 kHz units */
};
-
#define DMX_START _IO('o', 41)
#define DMX_STOP _IO('o', 42)
#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index c56d77c..00a20cd 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -28,15 +28,14 @@
#include <linux/types.h>
-typedef enum fe_type {
+enum fe_type {
FE_QPSK,
FE_QAM,
FE_OFDM,
FE_ATSC
-} fe_type_t;
-
+};
-typedef enum fe_caps {
+enum fe_caps {
FE_IS_STUPID = 0,
FE_CAN_INVERSION_AUTO = 0x1,
FE_CAN_FEC_1_2 = 0x2,
@@ -68,12 +67,11 @@ typedef enum fe_caps {
FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */
FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */
FE_CAN_MUTE_TS = 0x80000000 /* frontend can stop spurious TS data output */
-} fe_caps_t;
-
+};
struct dvb_frontend_info {
char name[128];
- fe_type_t type; /* DEPRECATED. Use DTV_ENUM_DELSYS instead */
+ enum fe_type type; /* DEPRECATED. Use DTV_ENUM_DELSYS instead */
__u32 frequency_min;
__u32 frequency_max;
__u32 frequency_stepsize;
@@ -82,7 +80,7 @@ struct dvb_frontend_info {
__u32 symbol_rate_max;
__u32 symbol_rate_tolerance; /* ppm */
__u32 notifier_delay; /* DEPRECATED */
- fe_caps_t caps;
+ enum fe_caps caps;
};
@@ -95,32 +93,27 @@ struct dvb_diseqc_master_cmd {
__u8 msg_len; /* valid values are 3...6 */
};
-
struct dvb_diseqc_slave_reply {
__u8 msg [4]; /* { framing, data [3] } */
__u8 msg_len; /* valid values are 0...4, 0 means no msg */
int timeout; /* return from ioctl after timeout ms with */
}; /* errorcode when no message was received */
-
-typedef enum fe_sec_voltage {
+enum fe_sec_voltage {
SEC_VOLTAGE_13,
SEC_VOLTAGE_18,
SEC_VOLTAGE_OFF
-} fe_sec_voltage_t;
-
+};
-typedef enum fe_sec_tone_mode {
+enum fe_sec_tone_mode {
SEC_TONE_ON,
SEC_TONE_OFF
-} fe_sec_tone_mode_t;
-
+};
-typedef enum fe_sec_mini_cmd {
+enum fe_sec_mini_cmd {
SEC_MINI_A,
SEC_MINI_B
-} fe_sec_mini_cmd_t;
-
+};
/**
* enum fe_status - enumerates the possible frontend status
@@ -133,8 +126,7 @@ typedef enum fe_sec_mini_cmd {
* @FE_REINIT: frontend was reinitialized, application is recommended
* to reset DiSEqC, tone and parameters
*/
-
-typedef enum fe_status {
+enum fe_status {
FE_HAS_SIGNAL = 0x01,
FE_HAS_CARRIER = 0x02,
FE_HAS_VITERBI = 0x04,
@@ -142,16 +134,15 @@ typedef enum fe_status {
FE_HAS_LOCK = 0x10,
FE_TIMEDOUT = 0x20,
FE_REINIT = 0x40,
-} fe_status_t;
+};
-typedef enum fe_spectral_inversion {
+enum fe_spectral_inversion {
INVERSION_OFF,
INVERSION_ON,
INVERSION_AUTO
-} fe_spectral_inversion_t;
-
+};
-typedef enum fe_code_rate {
+enum fe_code_rate {
FEC_NONE = 0,
FEC_1_2,
FEC_2_3,
@@ -165,10 +156,9 @@ typedef enum fe_code_rate {
FEC_3_5,
FEC_9_10,
FEC_2_5,
-} fe_code_rate_t;
-
+};
-typedef enum fe_modulation {
+enum fe_modulation {
QPSK,
QAM_16,
QAM_32,
@@ -183,9 +173,9 @@ typedef enum fe_modulation {
APSK_32,
DQPSK,
QAM_4_NR,
-} fe_modulation_t;
+};
-typedef enum fe_transmit_mode {
+enum fe_transmit_mode {
TRANSMISSION_MODE_2K,
TRANSMISSION_MODE_8K,
TRANSMISSION_MODE_AUTO,
@@ -195,21 +185,9 @@ typedef enum fe_transmit_mode {
TRANSMISSION_MODE_32K,
TRANSMISSION_MODE_C1,
TRANSMISSION_MODE_C3780,
-} fe_transmit_mode_t;
-
-#if defined(__DVB_CORE__) || !defined (__KERNEL__)
-typedef enum fe_bandwidth {
- BANDWIDTH_8_MHZ,
- BANDWIDTH_7_MHZ,
- BANDWIDTH_6_MHZ,
- BANDWIDTH_AUTO,
- BANDWIDTH_5_MHZ,
- BANDWIDTH_10_MHZ,
- BANDWIDTH_1_712_MHZ,
-} fe_bandwidth_t;
-#endif
+};
-typedef enum fe_guard_interval {
+enum fe_guard_interval {
GUARD_INTERVAL_1_32,
GUARD_INTERVAL_1_16,
GUARD_INTERVAL_1_8,
@@ -221,16 +199,15 @@ typedef enum fe_guard_interval {
GUARD_INTERVAL_PN420,
GUARD_INTERVAL_PN595,
GUARD_INTERVAL_PN945,
-} fe_guard_interval_t;
-
+};
-typedef enum fe_hierarchy {
+enum fe_hierarchy {
HIERARCHY_NONE,
HIERARCHY_1,
HIERARCHY_2,
HIERARCHY_4,
HIERARCHY_AUTO
-} fe_hierarchy_t;
+};
enum fe_interleaving {
INTERLEAVING_NONE,
@@ -239,51 +216,6 @@ enum fe_interleaving {
INTERLEAVING_720,
};
-#if defined(__DVB_CORE__) || !defined (__KERNEL__)
-struct dvb_qpsk_parameters {
- __u32 symbol_rate; /* symbol rate in Symbols per second */
- fe_code_rate_t fec_inner; /* forward error correction (see above) */
-};
-
-struct dvb_qam_parameters {
- __u32 symbol_rate; /* symbol rate in Symbols per second */
- fe_code_rate_t fec_inner; /* forward error correction (see above) */
- fe_modulation_t modulation; /* modulation type (see above) */
-};
-
-struct dvb_vsb_parameters {
- fe_modulation_t modulation; /* modulation type (see above) */
-};
-
-struct dvb_ofdm_parameters {
- fe_bandwidth_t bandwidth;
- fe_code_rate_t code_rate_HP; /* high priority stream code rate */
- fe_code_rate_t code_rate_LP; /* low priority stream code rate */
- fe_modulation_t constellation; /* modulation type (see above) */
- fe_transmit_mode_t transmission_mode;
- fe_guard_interval_t guard_interval;
- fe_hierarchy_t hierarchy_information;
-};
-
-
-struct dvb_frontend_parameters {
- __u32 frequency; /* (absolute) frequency in Hz for QAM/OFDM/ATSC */
- /* intermediate frequency in kHz for QPSK */
- fe_spectral_inversion_t inversion;
- union {
- struct dvb_qpsk_parameters qpsk;
- struct dvb_qam_parameters qam;
- struct dvb_ofdm_parameters ofdm;
- struct dvb_vsb_parameters vsb;
- } u;
-};
-
-struct dvb_frontend_event {
- fe_status_t status;
- struct dvb_frontend_parameters parameters;
-};
-#endif
-
/* S2API Commands */
#define DTV_UNDEFINED 0
#define DTV_TUNE 1
@@ -377,20 +309,20 @@ struct dvb_frontend_event {
#define DTV_MAX_COMMAND DTV_STAT_TOTAL_BLOCK_COUNT
-typedef enum fe_pilot {
+enum fe_pilot {
PILOT_ON,
PILOT_OFF,
PILOT_AUTO,
-} fe_pilot_t;
+};
-typedef enum fe_rolloff {
+enum fe_rolloff {
ROLLOFF_35, /* Implied value in DVB-S, default for DVB-S2 */
ROLLOFF_20,
ROLLOFF_25,
ROLLOFF_AUTO,
-} fe_rolloff_t;
+};
-typedef enum fe_delivery_system {
+enum fe_delivery_system {
SYS_UNDEFINED,
SYS_DVBC_ANNEX_A,
SYS_DVBC_ANNEX_B,
@@ -410,7 +342,7 @@ typedef enum fe_delivery_system {
SYS_DVBT2,
SYS_TURBO,
SYS_DVBC_ANNEX_C,
-} fe_delivery_system_t;
+};
/* backward compatibility */
#define SYS_DVBC_ANNEX_AC SYS_DVBC_ANNEX_A
@@ -467,7 +399,7 @@ struct dtv_cmds_h {
* @FE_SCALE_NOT_AVAILABLE: That QoS measure is not available. That
* could indicate a temporary or a permanent
* condition.
- * @FE_SCALE_DECIBEL: The scale is measured in 0.0001 dB steps, typically
+ * @FE_SCALE_DECIBEL: The scale is measured in 0.001 dB steps, typically
* used on signal measures.
* @FE_SCALE_RELATIVE: The scale is a relative percentual measure,
* ranging from 0 (0%) to 0xffff (100%).
@@ -503,20 +435,20 @@ enum fecap_scale_params {
*
* In other words, for ISDB, those values should be filled like:
* u.st.stat.svalue[0] = global statistics;
- * u.st.stat.scale[0] = FE_SCALE_DECIBELS;
+ * u.st.stat.scale[0] = FE_SCALE_DECIBEL;
* u.st.stat.value[1] = layer A statistics;
* u.st.stat.scale[1] = FE_SCALE_NOT_AVAILABLE (if not available);
* u.st.stat.svalue[2] = layer B statistics;
- * u.st.stat.scale[2] = FE_SCALE_DECIBELS;
+ * u.st.stat.scale[2] = FE_SCALE_DECIBEL;
* u.st.stat.svalue[3] = layer C statistics;
- * u.st.stat.scale[3] = FE_SCALE_DECIBELS;
+ * u.st.stat.scale[3] = FE_SCALE_DECIBEL;
* u.st.len = 4;
*/
struct dtv_stats {
__u8 scale; /* enum fecap_scale_params type */
union {
__u64 uvalue; /* for counters and relative scales */
- __s64 svalue; /* for 0.0001 dB measures */
+ __s64 svalue; /* for 0.001 dB measures */
};
} __attribute__ ((packed));
@@ -552,10 +484,88 @@ struct dtv_properties {
struct dtv_property *props;
};
+#if defined(__DVB_CORE__) || !defined (__KERNEL__)
+
+/*
+ * DEPRECATED: The DVBv3 ioctls, structs and enums should not be used in
+ * new programs, as they don't support the second generation of digital
+ * TV standards, nor newer delivery systems.
+ */
+
+enum fe_bandwidth {
+ BANDWIDTH_8_MHZ,
+ BANDWIDTH_7_MHZ,
+ BANDWIDTH_6_MHZ,
+ BANDWIDTH_AUTO,
+ BANDWIDTH_5_MHZ,
+ BANDWIDTH_10_MHZ,
+ BANDWIDTH_1_712_MHZ,
+};
+
+/* This is needed for legacy userspace support */
+typedef enum fe_sec_voltage fe_sec_voltage_t;
+typedef enum fe_caps fe_caps_t;
+typedef enum fe_type fe_type_t;
+typedef enum fe_sec_tone_mode fe_sec_tone_mode_t;
+typedef enum fe_sec_mini_cmd fe_sec_mini_cmd_t;
+typedef enum fe_status fe_status_t;
+typedef enum fe_spectral_inversion fe_spectral_inversion_t;
+typedef enum fe_code_rate fe_code_rate_t;
+typedef enum fe_modulation fe_modulation_t;
+typedef enum fe_transmit_mode fe_transmit_mode_t;
+typedef enum fe_bandwidth fe_bandwidth_t;
+typedef enum fe_guard_interval fe_guard_interval_t;
+typedef enum fe_hierarchy fe_hierarchy_t;
+typedef enum fe_pilot fe_pilot_t;
+typedef enum fe_rolloff fe_rolloff_t;
+typedef enum fe_delivery_system fe_delivery_system_t;
+
+struct dvb_qpsk_parameters {
+ __u32 symbol_rate; /* symbol rate in Symbols per second */
+ fe_code_rate_t fec_inner; /* forward error correction (see above) */
+};
+
+struct dvb_qam_parameters {
+ __u32 symbol_rate; /* symbol rate in Symbols per second */
+ fe_code_rate_t fec_inner; /* forward error correction (see above) */
+ fe_modulation_t modulation; /* modulation type (see above) */
+};
+
+struct dvb_vsb_parameters {
+ fe_modulation_t modulation; /* modulation type (see above) */
+};
+
+struct dvb_ofdm_parameters {
+ fe_bandwidth_t bandwidth;
+ fe_code_rate_t code_rate_HP; /* high priority stream code rate */
+ fe_code_rate_t code_rate_LP; /* low priority stream code rate */
+ fe_modulation_t constellation; /* modulation type (see above) */
+ fe_transmit_mode_t transmission_mode;
+ fe_guard_interval_t guard_interval;
+ fe_hierarchy_t hierarchy_information;
+};
+
+struct dvb_frontend_parameters {
+ __u32 frequency; /* (absolute) frequency in Hz for DVB-C/DVB-T/ATSC */
+ /* intermediate frequency in kHz for DVB-S */
+ fe_spectral_inversion_t inversion;
+ union {
+ struct dvb_qpsk_parameters qpsk; /* DVB-S */
+ struct dvb_qam_parameters qam; /* DVB-C */
+ struct dvb_ofdm_parameters ofdm; /* DVB-T */
+ struct dvb_vsb_parameters vsb; /* ATSC */
+ } u;
+};
+
+struct dvb_frontend_event {
+ fe_status_t status;
+ struct dvb_frontend_parameters parameters;
+};
+#endif
+
#define FE_SET_PROPERTY _IOW('o', 82, struct dtv_properties)
#define FE_GET_PROPERTY _IOR('o', 83, struct dtv_properties)
-
/**
* When set, this flag will disable any zigzagging or other "normal" tuning
* behaviour. Additionally, there will be no automatic monitoring of the lock
@@ -565,7 +575,6 @@ struct dtv_properties {
*/
#define FE_TUNE_MODE_ONESHOT 0x01
-
#define FE_GET_INFO _IOR('o', 61, struct dvb_frontend_info)
#define FE_DISEQC_RESET_OVERLOAD _IO('o', 62)
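
Given the scale semantics documented above (FE_SCALE_DECIBEL in 0.001 dB steps, FE_SCALE_RELATIVE spanning 0..0xffff), a small illustrative helper for printing one dtv_stats entry might look like this; it is a sketch, not part of the patch:

/* Sketch: turn a single struct dtv_stats sample into a printable value,
 * following the scale semantics documented in frontend.h.
 */
#include <stdio.h>
#include <linux/dvb/frontend.h>

static void print_stat(const struct dtv_stats *st)
{
	switch (st->scale) {
	case FE_SCALE_DECIBEL:
		printf("%.3f dB\n", st->svalue / 1000.0);
		break;
	case FE_SCALE_RELATIVE:
		printf("%.1f%%\n", st->uvalue * 100.0 / 0xffff);
		break;
	case FE_SCALE_COUNTER:
		printf("%llu\n", (unsigned long long)st->uvalue);
		break;
	default:
		printf("not available\n");
		break;
	}
}
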
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index ae99f77..b088296 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -25,6 +25,7 @@
#define EM_ARM 40 /* ARM 32 bit */
#define EM_SH 42 /* SuperH */
#define EM_SPARCV9 43 /* SPARC v9 64-bit */
+#define EM_H8_300 46 /* Renesas H8/300 */
#define EM_IA_64 50 /* HP/Intel IA-64 */
#define EM_X86_64 62 /* AMD x86-64 */
#define EM_S390 22 /* IBM S/390 */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 5f66d9c..cd67aec 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -139,6 +139,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
#define ETHTOOL_FWVERS_LEN 32
#define ETHTOOL_BUSINFO_LEN 32
+#define ETHTOOL_EROMVERS_LEN 32
/**
* struct ethtool_drvinfo - general driver and device information
@@ -148,6 +149,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
* not be an empty string.
* @version: Driver version string; may be an empty string
* @fw_version: Firmware version string; may be an empty string
+ * @erom_version: Expansion ROM version string; may be an empty string
* @bus_info: Device bus address. This should match the dev_name()
* string for the underlying bus device, if there is one. May be
* an empty string.
@@ -176,7 +178,7 @@ struct ethtool_drvinfo {
char version[32];
char fw_version[ETHTOOL_FWVERS_LEN];
char bus_info[ETHTOOL_BUSINFO_LEN];
- char reserved1[32];
+ char erom_version[ETHTOOL_EROMVERS_LEN];
char reserved2[12];
__u32 n_priv_flags;
__u32 n_stats;
@@ -213,6 +215,11 @@ enum tunable_id {
ETHTOOL_ID_UNSPEC,
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
+ /*
+	 * Add your fresh new tunable attribute above and remember to update
+ * tunable_strings[] in net/core/ethtool.c
+ */
+ __ETHTOOL_TUNABLE_COUNT,
};
enum tunable_type_id {
@@ -543,6 +550,7 @@ enum ethtool_stringset {
ETH_SS_NTUPLE_FILTERS,
ETH_SS_FEATURES,
ETH_SS_RSS_HASH_FUNCS,
+ ETH_SS_TUNABLES,
};
/**
@@ -794,6 +802,31 @@ struct ethtool_rx_flow_spec {
__u32 location;
};
+/* How rings are laid out when accessing virtual functions or
+ * offloaded queues is device specific. To allow users to do flow
+ * steering and specify these queues the ring cookie is partitioned
+ * into a 32bit queue index with an 8 bit virtual function id.
+ * This also leaves 3 bytes for further specifiers. It is possible
+ * future devices may support more than 256 virtual functions if
+ * devices start supporting PCIe w/ARI. However at the moment I
+ * do not know of any devices that support this so I do not reserve
+ * space for this at this time. If a future patch consumes the next
+ * byte it should be aware of this possibility.
+ */
+#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+ return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+};
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+ return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+};
+
/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
* @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
@@ -1262,15 +1295,19 @@ enum ethtool_sfeatures_retval_bits {
* it was forced up into this mode or autonegotiated.
*/
-/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
+/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
+#define SPEED_5000 5000
#define SPEED_10000 10000
#define SPEED_20000 20000
+#define SPEED_25000 25000
#define SPEED_40000 40000
+#define SPEED_50000 50000
#define SPEED_56000 56000
+#define SPEED_100000 100000
#define SPEED_UNKNOWN -1
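
The ring-cookie helpers are easiest to see in a tiny sketch that builds a cookie for queue 3 with a VF field of 2 and splits it again; whether the VF field is 0- or 1-based is driver policy and not assumed here:

/* Sketch: compose a ring_cookie from a VF field and a queue index using the
 * layout above, then split it with the new helpers.
 */
#include <stdio.h>
#include <linux/ethtool.h>

int main(void)
{
	__u64 cookie = ((__u64)2 << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 3;

	printf("queue %llu, vf field %llu\n",
	       (unsigned long long)ethtool_get_flow_spec_ring(cookie),
	       (unsigned long long)ethtool_get_flow_spec_ring_vf(cookie));
	return 0;
}
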
diff --git a/include/uapi/linux/falloc.h b/include/uapi/linux/falloc.h
index d1197ae..3e445a7 100644
--- a/include/uapi/linux/falloc.h
+++ b/include/uapi/linux/falloc.h
@@ -41,4 +41,21 @@
*/
#define FALLOC_FL_ZERO_RANGE 0x10
+/*
+ * FALLOC_FL_INSERT_RANGE is used to insert space within the file size without
+ * overwriting any existing data. The contents of the file beyond offset are
+ * shifted to the right by len bytes to create a hole. As such, this
+ * operation will increase the size of the file by len bytes.
+ *
+ * Different filesystems may implement different limitations on the granularity
+ * of the operation. Most will limit operations to filesystem block size
+ * boundaries, but this boundary may be larger or smaller depending on
+ * the filesystem and/or the configuration of the filesystem or file.
+ *
+ * Attempting to insert space using this flag at OR beyond the end of
+ * the file is considered an illegal operation - just use ftruncate(2) or
+ * fallocate(2) with mode 0 for such operations.
+ */
+#define FALLOC_FL_INSERT_RANGE 0x20
+
#endif /* _UAPI_FALLOC_H_ */
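
A minimal usage sketch for the new flag: insert len bytes of space at offset, shifting the tail of the file to the right. Alignment to the filesystem block size and an offset strictly inside the file are assumed, per the comment above:

/* Sketch: shift everything from 'offset' onwards to the right by 'len' bytes,
 * growing the file by 'len'. Both values generally need filesystem block size
 * alignment, and offset must lie inside the current file size.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int insert_hole(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len);
}
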
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h
index 47785d5..c97340e 100644
--- a/include/uapi/linux/filter.h
+++ b/include/uapi/linux/filter.h
@@ -77,9 +77,13 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define SKF_AD_VLAN_TAG_PRESENT 48
#define SKF_AD_PAY_OFFSET 52
#define SKF_AD_RANDOM 56
-#define SKF_AD_MAX 60
-#define SKF_NET_OFF (-0x100000)
-#define SKF_LL_OFF (-0x200000)
+#define SKF_AD_VLAN_TPID 60
+#define SKF_AD_MAX 64
+#define SKF_NET_OFF (-0x100000)
+#define SKF_LL_OFF (-0x200000)
+
+#define BPF_NET_OFF SKF_NET_OFF
+#define BPF_LL_OFF SKF_LL_OFF
#endif /* _UAPI__LINUX_FILTER_H__ */
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index 8df0689..d2947c5 100644
--- a/include/uapi/linux/fou.h
+++ b/include/uapi/linux/fou.h
@@ -14,6 +14,7 @@ enum {
FOU_ATTR_AF, /* u8 */
FOU_ATTR_IPPROTO, /* u8 */
FOU_ATTR_TYPE, /* u8 */
+ FOU_ATTR_REMCSUM_NOPARTIAL, /* flag */
__FOU_ATTR_MAX,
};
@@ -24,6 +25,7 @@ enum {
FOU_CMD_UNSPEC,
FOU_CMD_ADD,
FOU_CMD_DEL,
+ FOU_CMD_GET,
__FOU_CMD_MAX,
};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 3735fa0..9b964a5 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -90,6 +90,7 @@ struct inodes_stat_t {
#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define MS_I_VERSION (1<<23) /* Update inode I_version field */
#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
+#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
/* These sb flags are internal to the kernel */
#define MS_NOSEC (1<<28)
@@ -100,7 +101,8 @@ struct inodes_stat_t {
/*
* Superblock flags that can be altered by MS_REMOUNT
*/
-#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION)
+#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
+ MS_LAZYTIME)
/*
* Old magic mount flag and mask
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 25084a0..c9aca04 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -755,4 +755,7 @@ struct fuse_notify_retrieve_in {
uint64_t dummy4;
};
+/* Device ioctls: */
+#define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t)
+
#endif /* _LINUX_FUSE_H */
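
As a hedged sketch of the intended use of FUSE_DEV_IOC_CLONE, a filesystem daemon opens an extra /dev/fuse descriptor and attaches it to an existing session by passing that session's fd; the helper name below is hypothetical:

/* Sketch: clone an existing FUSE session onto a new /dev/fuse fd, e.g. so
 * another worker thread can read requests from its own descriptor.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fuse.h>

static int fuse_clone_session_fd(int session_fd)
{
	uint32_t src = session_fd;
	int clone_fd = open("/dev/fuse", O_RDWR);

	if (clone_fd < 0)
		return -1;

	if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &src) < 0) {
		close(clone_fd);
		return -1;
	}

	return clone_fd;
}
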
diff --git a/include/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index c25e947..c06742d 100644
--- a/include/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -1,6 +1,9 @@
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
+#include <linux/if.h>
+#include <linux/ioctl.h>
+
struct gsm_config
{
unsigned int adaption;
diff --git a/include/uapi/linux/hsi/Kbuild b/include/uapi/linux/hsi/Kbuild
index 30ab3cd..a16a005 100644
--- a/include/uapi/linux/hsi/Kbuild
+++ b/include/uapi/linux/hsi/Kbuild
@@ -1,2 +1,2 @@
# UAPI Header export list
-header-y += hsi_char.h
+header-y += hsi_char.h cs-protocol.h
diff --git a/include/uapi/linux/hsi/cs-protocol.h b/include/uapi/linux/hsi/cs-protocol.h
new file mode 100644
index 0000000..f153d6e
--- /dev/null
+++ b/include/uapi/linux/hsi/cs-protocol.h
@@ -0,0 +1,119 @@
+/*
+ * cmt-speech interface definitions
+ *
+ * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
+ * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef _CS_PROTOCOL_H
+#define _CS_PROTOCOL_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* chardev parameters */
+#define CS_DEV_FILE_NAME "/dev/cmt_speech"
+
+/* user-space API versioning */
+#define CS_IF_VERSION 2
+
+/* APE kernel <-> user space messages */
+#define CS_CMD_SHIFT 28
+#define CS_DOMAIN_SHIFT 24
+
+#define CS_CMD_MASK 0xff000000
+#define CS_PARAM_MASK 0xffffff
+
+#define CS_CMD(id, dom) \
+ (((id) << CS_CMD_SHIFT) | ((dom) << CS_DOMAIN_SHIFT))
+
+#define CS_ERROR CS_CMD(1, 0)
+#define CS_RX_DATA_RECEIVED CS_CMD(2, 0)
+#define CS_TX_DATA_READY CS_CMD(3, 0)
+#define CS_TX_DATA_SENT CS_CMD(4, 0)
+
+/* params to CS_ERROR indication */
+#define CS_ERR_PEER_RESET 0
+
+/* ioctl interface */
+
+/* parameters to CS_CONFIG_BUFS ioctl */
+#define CS_FEAT_TSTAMP_RX_CTRL (1 << 0)
+#define CS_FEAT_ROLLING_RX_COUNTER (2 << 0)
+
+/* parameters to CS_GET_STATE ioctl */
+#define CS_STATE_CLOSED 0
+#define CS_STATE_OPENED 1 /* resource allocated */
+#define CS_STATE_CONFIGURED 2 /* data path active */
+
+/* maximum number of TX/RX buffers */
+#define CS_MAX_BUFFERS_SHIFT 4
+#define CS_MAX_BUFFERS (1 << CS_MAX_BUFFERS_SHIFT)
+
+/* Parameters for setting up the data buffers */
+struct cs_buffer_config {
+ __u32 rx_bufs; /* number of RX buffer slots */
+ __u32 tx_bufs; /* number of TX buffer slots */
+ __u32 buf_size; /* bytes */
+ __u32 flags; /* see CS_FEAT_* */
+ __u32 reserved[4];
+};
+
+/*
+ * struct for monotonic timestamp taken when the
+ * last control command was received
+ */
+struct cs_timestamp {
+ __u32 tv_sec; /* seconds */
+ __u32 tv_nsec; /* nanoseconds */
+};
+
+/*
+ * Struct describing the layout and contents of the driver mmap area.
+ * This information is meant as read-only information for the application.
+ */
+struct cs_mmap_config_block {
+ __u32 reserved1;
+ __u32 buf_size; /* 0=disabled, otherwise the transfer size */
+ __u32 rx_bufs; /* # of RX buffers */
+ __u32 tx_bufs; /* # of TX buffers */
+ __u32 reserved2;
+ /* array of offsets within the mmap area for each RX and TX buffer */
+ __u32 rx_offsets[CS_MAX_BUFFERS];
+ __u32 tx_offsets[CS_MAX_BUFFERS];
+ __u32 rx_ptr;
+ __u32 rx_ptr_boundary;
+ __u32 reserved3[2];
+ /* enabled with CS_FEAT_TSTAMP_RX_CTRL */
+ struct cs_timestamp tstamp_rx_ctrl;
+};
+
+#define CS_IO_MAGIC 'C'
+
+#define CS_IOW(num, dtype) _IOW(CS_IO_MAGIC, num, dtype)
+#define CS_IOR(num, dtype) _IOR(CS_IO_MAGIC, num, dtype)
+#define CS_IOWR(num, dtype) _IOWR(CS_IO_MAGIC, num, dtype)
+#define CS_IO(num) _IO(CS_IO_MAGIC, num)
+
+#define CS_GET_STATE CS_IOR(21, unsigned int)
+#define CS_SET_WAKELINE CS_IOW(23, unsigned int)
+#define CS_GET_IF_VERSION CS_IOR(30, unsigned int)
+#define CS_CONFIG_BUFS CS_IOW(31, struct cs_buffer_config)
+
+#endif /* _CS_PROTOCOL_H */
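
Editor's note: a rough user-space sketch of the ioctl sequence this header implies: open CS_DEV_FILE_NAME, verify the interface version, then describe the data buffers with CS_CONFIG_BUFS. The buffer counts and buf_size below are arbitrary example values, not requirements of the driver:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hsi/cs-protocol.h>

int cs_open_and_configure(void)
{
	struct cs_buffer_config cfg;
	unsigned int version;
	int fd;

	fd = open(CS_DEV_FILE_NAME, O_RDWR);
	if (fd < 0)
		return -1;
	if (ioctl(fd, CS_GET_IF_VERSION, &version) < 0 ||
	    version != CS_IF_VERSION)
		goto err;

	memset(&cfg, 0, sizeof(cfg));
	cfg.rx_bufs = 4;			/* <= CS_MAX_BUFFERS */
	cfg.tx_bufs = 4;
	cfg.buf_size = 1024;			/* example transfer size, bytes */
	cfg.flags = CS_FEAT_TSTAMP_RX_CTRL;
	if (ioctl(fd, CS_CONFIG_BUFS, &cfg) < 0)
		goto err;
	return fd;				/* ready for mmap() of the config block */
err:
	close(fd);
	return -1;
}
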
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index bb1cb73..e4c0a35 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -45,6 +45,11 @@
#define VSS_OP_REGISTER 128
+/*
+ * Daemon code with full handshake support.
+ */
+#define VSS_OP_REGISTER1 129
+
enum hv_vss_op {
VSS_OP_CREATE = 0,
VSS_OP_DELETE,
@@ -100,7 +105,8 @@ struct hv_vss_msg {
*/
#define FCOPY_VERSION_0 0
-#define FCOPY_CURRENT_VERSION FCOPY_VERSION_0
+#define FCOPY_VERSION_1 1
+#define FCOPY_CURRENT_VERSION FCOPY_VERSION_1
#define W_MAX_PATH 260
enum hv_fcopy_op {
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index 0e949cb..b0a7dd6 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -87,6 +87,7 @@ struct i2c_msg {
#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_IGNORE_NAK etc. */
#define I2C_FUNC_SMBUS_PEC 0x00000008
#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
+#define I2C_FUNC_SLAVE 0x00000020
#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
#define I2C_FUNC_SMBUS_QUICK 0x00010000
#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index dea10a8..4318ab1 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -50,6 +50,8 @@ enum {
#define IFA_F_PERMANENT 0x80
#define IFA_F_MANAGETEMPADDR 0x100
#define IFA_F_NOPREFIXROUTE 0x200
+#define IFA_F_MCAUTOJOIN 0x400
+#define IFA_F_STABLE_PRIVACY 0x800
struct ifa_cacheinfo {
__u32 ifa_prefered;
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index b03ee8f..eaaea62 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -125,6 +125,8 @@ enum {
#define BRIDGE_VLAN_INFO_MASTER (1<<0) /* Operate on Bridge device as well */
#define BRIDGE_VLAN_INFO_PVID (1<<1) /* VLAN is PVID, ingress untagged */
#define BRIDGE_VLAN_INFO_UNTAGGED (1<<2) /* VLAN egresses untagged */
+#define BRIDGE_VLAN_INFO_RANGE_BEGIN (1<<3) /* VLAN is start of vlan range */
+#define BRIDGE_VLAN_INFO_RANGE_END (1<<4) /* VLAN is end of vlan range */
struct bridge_vlan_info {
__u16 flags;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index f7d0d2d..2c7e8e3 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -146,6 +146,8 @@ enum {
IFLA_PHYS_PORT_ID,
IFLA_CARRIER_CHANGES,
IFLA_PHYS_SWITCH_ID,
+ IFLA_LINK_NETNSID,
+ IFLA_PHYS_PORT_NAME,
__IFLA_MAX
};
@@ -214,6 +216,7 @@ enum {
enum in6_addr_gen_mode {
IN6_ADDR_GEN_MODE_EUI64,
IN6_ADDR_GEN_MODE_NONE,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY,
};
/* Bridge section */
@@ -223,6 +226,9 @@ enum {
IFLA_BR_FORWARD_DELAY,
IFLA_BR_HELLO_TIME,
IFLA_BR_MAX_AGE,
+ IFLA_BR_AGEING_TIME,
+ IFLA_BR_STP_STATE,
+ IFLA_BR_PRIORITY,
__IFLA_BR_MAX,
};
@@ -246,6 +252,7 @@ enum {
IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
IFLA_BRPORT_PROXYARP, /* proxy ARP */
IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
+ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -370,6 +377,10 @@ enum {
IFLA_VXLAN_UDP_CSUM,
IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+ IFLA_VXLAN_REMCSUM_TX,
+ IFLA_VXLAN_REMCSUM_RX,
+ IFLA_VXLAN_GBP,
+ IFLA_VXLAN_REMCSUM_NOPARTIAL,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -379,6 +390,17 @@ struct ifla_vxlan_port_range {
__be16 high;
};
+/* GENEVE section */
+enum {
+ IFLA_GENEVE_UNSPEC,
+ IFLA_GENEVE_ID,
+ IFLA_GENEVE_REMOTE,
+ IFLA_GENEVE_TTL,
+ IFLA_GENEVE_TOS,
+ __IFLA_GENEVE_MAX
+};
+#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+
/* Bonding section */
enum {
@@ -406,6 +428,9 @@ enum {
IFLA_BOND_AD_LACP_RATE,
IFLA_BOND_AD_SELECT,
IFLA_BOND_AD_INFO,
+ IFLA_BOND_AD_ACTOR_SYS_PRIO,
+ IFLA_BOND_AD_USER_PORT_KEY,
+ IFLA_BOND_AD_ACTOR_SYSTEM,
__IFLA_BOND_MAX,
};
@@ -431,6 +456,8 @@ enum {
IFLA_BOND_SLAVE_PERM_HWADDR,
IFLA_BOND_SLAVE_QUEUE_ID,
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
+ IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
+ IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
__IFLA_BOND_SLAVE_MAX,
};
@@ -454,6 +481,10 @@ enum {
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
IFLA_VF_RATE, /* Min and Max TX Bandwidth Allocation */
+ IFLA_VF_RSS_QUERY_EN, /* RSS Redirection Table and Hash Key query
+ * on/off switch
+ */
+ IFLA_VF_STATS, /* network device statistics */
__IFLA_VF_MAX,
};
@@ -498,6 +529,23 @@ struct ifla_vf_link_state {
__u32 link_state;
};
+struct ifla_vf_rss_query_en {
+ __u32 vf;
+ __u32 setting;
+};
+
+enum {
+ IFLA_VF_STATS_RX_PACKETS,
+ IFLA_VF_STATS_TX_PACKETS,
+ IFLA_VF_STATS_RX_BYTES,
+ IFLA_VF_STATS_TX_BYTES,
+ IFLA_VF_STATS_BROADCAST,
+ IFLA_VF_STATS_MULTICAST,
+ __IFLA_VF_STATS_MAX,
+};
+
+#define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
+
/* VF ports management section
*
* Nested layout of set/get msg is:
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index da2d668..d3d715f8c 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -54,6 +54,7 @@ struct sockaddr_ll {
#define PACKET_FANOUT 18
#define PACKET_TX_HAS_OFF 19
#define PACKET_QDISC_BYPASS 20
+#define PACKET_ROLLOVER_STATS 21
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
@@ -75,6 +76,12 @@ struct tpacket_stats_v3 {
unsigned int tp_freeze_q_cnt;
};
+struct tpacket_rollover_stats {
+ __aligned_u64 tp_all;
+ __aligned_u64 tp_huge;
+ __aligned_u64 tp_failed;
+};
+
union tpacket_stats_u {
struct tpacket_stats stats1;
struct tpacket_stats_v3 stats3;
@@ -99,6 +106,7 @@ struct tpacket_auxdata {
#define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */
#define TP_STATUS_BLK_TMO (1 << 5)
#define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */
+#define TP_STATUS_CSUM_VALID (1 << 7)
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0
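
Editor's note: PACKET_ROLLOVER_STATS is read with getsockopt() on SOL_PACKET and fills the new struct tpacket_rollover_stats. A short sketch (assumes fd is an AF_PACKET socket in a fanout group with rollover enabled):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

void print_rollover_stats(int fd)
{
	struct tpacket_rollover_stats rstats;
	socklen_t len = sizeof(rstats);

	if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
		       &rstats, &len) == 0)
		printf("rollover: all=%llu huge=%llu failed=%llu\n",
		       (unsigned long long)rstats.tp_all,
		       (unsigned long long)rstats.tp_huge,
		       (unsigned long long)rstats.tp_failed);
}
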
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 50ae243..3cb5e1d 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -50,6 +50,12 @@
#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
#define TUNSETVNETLE _IOW('T', 220, int)
#define TUNGETVNETLE _IOR('T', 221, int)
+/* The TUNSETVNETBE and TUNGETVNETBE ioctls are for cross-endian support on
+ * little-endian hosts. Not all kernel configurations support them, but all
+ * configurations that support SET also support GET.
+ */
+#define TUNSETVNETBE _IOW('T', 222, int)
+#define TUNGETVNETBE _IOR('T', 223, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
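
Editor's note: a small sketch of the new cross-endian ioctls; the helper name is made up, and the error path simply reports failure, which is what a kernel built without cross-endian support is expected to do:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>

int tun_set_vnet_be(int fd, int enable)
{
	/* enable != 0 requests big-endian vnet headers on this fd */
	if (ioctl(fd, TUNSETVNETBE, &enable) < 0) {
		perror("TUNSETVNETBE");	/* kernel may lack cross-endian support */
		return -1;
	}
	return 0;
}
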
diff --git a/include/uapi/linux/iio/Kbuild b/include/uapi/linux/iio/Kbuild
new file mode 100644
index 0000000..86f76d8
--- /dev/null
+++ b/include/uapi/linux/iio/Kbuild
@@ -0,0 +1,3 @@
+# UAPI Header export list
+header-y += events.h
+header-y += types.h
diff --git a/include/uapi/linux/iio/events.h b/include/uapi/linux/iio/events.h
new file mode 100644
index 0000000..00bbdae
--- /dev/null
+++ b/include/uapi/linux/iio/events.h
@@ -0,0 +1,42 @@
+/* The industrial I/O - event passing to userspace
+ *
+ * Copyright (c) 2008-2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _UAPI_IIO_EVENTS_H_
+#define _UAPI_IIO_EVENTS_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct iio_event_data - The actual event being pushed to userspace
+ * @id: event identifier
+ * @timestamp: best estimate of time of event occurrence (often from
+ * the interrupt handler)
+ */
+struct iio_event_data {
+ __u64 id;
+ __s64 timestamp;
+};
+
+#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
+
+#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) (((mask) >> 56) & 0xFF)
+
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) (((mask) >> 48) & 0x7F)
+
+#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) (((mask) >> 32) & 0xFF)
+
+/* Event code number extraction depends on which type of event we have.
+ * Perhaps review this in the future. */
+#define IIO_EVENT_CODE_EXTRACT_CHAN(mask) ((__s16)((mask) & 0xFFFF))
+#define IIO_EVENT_CODE_EXTRACT_CHAN2(mask) ((__s16)(((mask) >> 16) & 0xFFFF))
+
+#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) (((mask) >> 40) & 0xFF)
+#define IIO_EVENT_CODE_EXTRACT_DIFF(mask) (((mask) >> 55) & 0x1)
+
+#endif /* _UAPI_IIO_EVENTS_H_ */
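
Editor's note: a sketch of how the event interface above is typically consumed: fetch the anonymous event fd from an IIO character device with IIO_GET_EVENT_FD_IOCTL, read one struct iio_event_data, and decode its id with the extraction macros. The /dev/iio:device0 node name is an assumption for the example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iio/events.h>

void read_one_iio_event(void)
{
	struct iio_event_data ev;
	int dev_fd, ev_fd;

	dev_fd = open("/dev/iio:device0", O_RDONLY);	/* assumed node name */
	if (dev_fd < 0)
		return;
	if (ioctl(dev_fd, IIO_GET_EVENT_FD_IOCTL, &ev_fd) < 0 || ev_fd < 0) {
		close(dev_fd);
		return;
	}

	if (read(ev_fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("chan type %llu, event type %llu, dir %llu at %lld ns\n",
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(ev.id),
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_TYPE(ev.id),
		       (unsigned long long)IIO_EVENT_CODE_EXTRACT_DIR(ev.id),
		       (long long)ev.timestamp);
	close(ev_fd);
	close(dev_fd);
}
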
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
new file mode 100644
index 0000000..2f8b117
--- /dev/null
+++ b/include/uapi/linux/iio/types.h
@@ -0,0 +1,94 @@
+/* industrial I/O data types needed both in and out of kernel
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _UAPI_IIO_TYPES_H_
+#define _UAPI_IIO_TYPES_H_
+
+enum iio_chan_type {
+ IIO_VOLTAGE,
+ IIO_CURRENT,
+ IIO_POWER,
+ IIO_ACCEL,
+ IIO_ANGL_VEL,
+ IIO_MAGN,
+ IIO_LIGHT,
+ IIO_INTENSITY,
+ IIO_PROXIMITY,
+ IIO_TEMP,
+ IIO_INCLI,
+ IIO_ROT,
+ IIO_ANGL,
+ IIO_TIMESTAMP,
+ IIO_CAPACITANCE,
+ IIO_ALTVOLTAGE,
+ IIO_CCT,
+ IIO_PRESSURE,
+ IIO_HUMIDITYRELATIVE,
+ IIO_ACTIVITY,
+ IIO_STEPS,
+ IIO_ENERGY,
+ IIO_DISTANCE,
+ IIO_VELOCITY,
+};
+
+enum iio_modifier {
+ IIO_NO_MOD,
+ IIO_MOD_X,
+ IIO_MOD_Y,
+ IIO_MOD_Z,
+ IIO_MOD_X_AND_Y,
+ IIO_MOD_X_AND_Z,
+ IIO_MOD_Y_AND_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_MOD_X_OR_Y,
+ IIO_MOD_X_OR_Z,
+ IIO_MOD_Y_OR_Z,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_MOD_LIGHT_BOTH,
+ IIO_MOD_LIGHT_IR,
+ IIO_MOD_ROOT_SUM_SQUARED_X_Y,
+ IIO_MOD_SUM_SQUARED_X_Y_Z,
+ IIO_MOD_LIGHT_CLEAR,
+ IIO_MOD_LIGHT_RED,
+ IIO_MOD_LIGHT_GREEN,
+ IIO_MOD_LIGHT_BLUE,
+ IIO_MOD_QUATERNION,
+ IIO_MOD_TEMP_AMBIENT,
+ IIO_MOD_TEMP_OBJECT,
+ IIO_MOD_NORTH_MAGN,
+ IIO_MOD_NORTH_TRUE,
+ IIO_MOD_NORTH_MAGN_TILT_COMP,
+ IIO_MOD_NORTH_TRUE_TILT_COMP,
+ IIO_MOD_RUNNING,
+ IIO_MOD_JOGGING,
+ IIO_MOD_WALKING,
+ IIO_MOD_STILL,
+ IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z,
+ IIO_MOD_I,
+ IIO_MOD_Q,
+};
+
+enum iio_event_type {
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_TYPE_THRESH_ADAPTIVE,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+ IIO_EV_TYPE_CHANGE,
+};
+
+enum iio_event_direction {
+ IIO_EV_DIR_EITHER,
+ IIO_EV_DIR_RISING,
+ IIO_EV_DIR_FALLING,
+ IIO_EV_DIR_NONE,
+};
+
+#endif /* _UAPI_IIO_TYPES_H_ */
+
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index c33a65e..eaf9491 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -19,8 +19,10 @@
#define _UAPI_LINUX_IN_H
#include <linux/types.h>
+#include <linux/libc-compat.h>
#include <linux/socket.h>
+#if __UAPI_DEF_IN_IPPROTO
/* Standard well-defined IP protocols. */
enum {
IPPROTO_IP = 0, /* Dummy protocol for TCP */
@@ -69,16 +71,20 @@ enum {
#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
#define IPPROTO_UDPLITE IPPROTO_UDPLITE
+ IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */
+#define IPPROTO_MPLS IPPROTO_MPLS
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
IPPROTO_MAX
};
+#endif
-
+#if __UAPI_DEF_IN_ADDR
/* Internet address. */
struct in_addr {
__be32 s_addr;
};
+#endif
#define IP_TOS 1
#define IP_TTL 2
@@ -109,6 +115,8 @@ struct in_addr {
#define IP_MINTTL 21
#define IP_NODEFRAG 22
+#define IP_CHECKSUM 23
+#define IP_BIND_ADDRESS_NO_PORT 24
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
@@ -154,6 +162,7 @@ struct in_addr {
/* Request struct for multicast socket ops */
+#if __UAPI_DEF_IP_MREQ
struct ip_mreq {
struct in_addr imr_multiaddr; /* IP multicast address of group */
struct in_addr imr_interface; /* local IP address of interface */
@@ -205,14 +214,18 @@ struct group_filter {
#define GROUP_FILTER_SIZE(numsrc) \
(sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \
+ (numsrc) * sizeof(struct __kernel_sockaddr_storage))
+#endif
+#if __UAPI_DEF_IN_PKTINFO
struct in_pktinfo {
int ipi_ifindex;
struct in_addr ipi_spec_dst;
struct in_addr ipi_addr;
};
+#endif
/* Structure describing an Internet (IP) socket address. */
+#if __UAPI_DEF_SOCKADDR_IN
#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
struct sockaddr_in {
__kernel_sa_family_t sin_family; /* Address family */
@@ -224,8 +237,9 @@ struct sockaddr_in {
sizeof(unsigned short int) - sizeof(struct in_addr)];
};
#define sin_zero __pad /* for BSD UNIX comp. -FvK */
+#endif
-
+#if __UAPI_DEF_IN_CLASS
/*
* Definitions of the bits in an Internet address integer.
* On subnets, host and network parts are found according
@@ -276,7 +290,7 @@ struct sockaddr_in {
#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
-
+#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
#include <asm/byteorder.h>
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index d65c0a0..68a1f71 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -111,9 +111,11 @@ enum {
INET_DIAG_SKMEMINFO,
INET_DIAG_SHUTDOWN,
INET_DIAG_DCTCPINFO,
+ INET_DIAG_PROTOCOL, /* response attribute only */
+ INET_DIAG_SKV6ONLY,
};
-#define INET_DIAG_MAX INET_DIAG_DCTCPINFO
+#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
/* INET_DIAG_MEM */
@@ -143,4 +145,8 @@ struct tcp_dctcp_info {
__u32 dctcp_ab_tot;
};
+union tcp_cc_info {
+ struct tcpvegas_info vegas;
+ struct tcp_dctcp_info dctcp;
+};
#endif /* _UAPI_INET_DIAG_H_ */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index a1d7e93..731417c 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -166,6 +166,7 @@ struct input_keymap_entry {
#define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */
#define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */
#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */
+#define INPUT_PROP_ACCELEROMETER 0x06 /* has accelerometer */
#define INPUT_PROP_MAX 0x1f
#define INPUT_PROP_CNT (INPUT_PROP_MAX + 1)
@@ -368,7 +369,8 @@ struct input_keymap_entry {
#define KEY_MSDOS 151
#define KEY_COFFEE 152 /* AL Terminal Lock/Screensaver */
#define KEY_SCREENLOCK KEY_COFFEE
-#define KEY_DIRECTION 153
+#define KEY_ROTATE_DISPLAY 153 /* Display orientation for e.g. tablets */
+#define KEY_DIRECTION KEY_ROTATE_DISPLAY
#define KEY_CYCLEWINDOWS 154
#define KEY_MAIL 155
#define KEY_BOOKMARKS 156 /* AC Bookmarks */
@@ -701,6 +703,10 @@ struct input_keymap_entry {
#define KEY_NUMERIC_9 0x209
#define KEY_NUMERIC_STAR 0x20a
#define KEY_NUMERIC_POUND 0x20b
+#define KEY_NUMERIC_A 0x20c /* Phone key A - HUT Telephony 0xb9 */
+#define KEY_NUMERIC_B 0x20d
+#define KEY_NUMERIC_C 0x20e
+#define KEY_NUMERIC_D 0x20f
#define KEY_CAMERA_FOCUS 0x210
#define KEY_WPS_BUTTON 0x211 /* WiFi Protected Setup key */
@@ -972,7 +978,8 @@ struct input_keymap_entry {
*/
#define MT_TOOL_FINGER 0
#define MT_TOOL_PEN 1
-#define MT_TOOL_MAX 1
+#define MT_TOOL_PALM 2
+#define MT_TOOL_MAX 2
/*
* Values describing the status of a force-feedback effect
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 4119594..08f894d 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -164,6 +164,7 @@ enum
IPV4_DEVCONF_ROUTE_LOCALNET,
IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+ IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index cabe95d..3199243 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -358,6 +358,8 @@ enum {
IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */
+ IPVS_SVC_ATTR_STATS64, /* nested attribute for service stats */
+
__IPVS_SVC_ATTR_MAX,
};
@@ -387,6 +389,8 @@ enum {
IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */
+ IPVS_DEST_ATTR_STATS64, /* nested attribute for dest stats */
+
__IPVS_DEST_ATTR_MAX,
};
@@ -410,7 +414,8 @@ enum {
/*
* Attributes used to describe service or destination entry statistics
*
- * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
+ * Used inside nested attributes IPVS_SVC_ATTR_STATS, IPVS_DEST_ATTR_STATS,
+ * IPVS_SVC_ATTR_STATS64 and IPVS_DEST_ATTR_STATS64.
*/
enum {
IPVS_STATS_ATTR_UNSPEC = 0,
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index e863d08..5efa54a 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -1,6 +1,7 @@
#ifndef _UAPI_IPV6_H
#define _UAPI_IPV6_H
+#include <linux/libc-compat.h>
#include <linux/types.h>
#include <linux/in6.h>
#include <asm/byteorder.h>
@@ -15,16 +16,19 @@
* *under construction*
*/
-
+#if __UAPI_DEF_IN6_PKTINFO
struct in6_pktinfo {
struct in6_addr ipi6_addr;
int ipi6_ifindex;
};
+#endif
+#if __UAPI_DEF_IP6_MTUINFO
struct ip6_mtuinfo {
struct sockaddr_in6 ip6m_addr;
__u32 ip6m_mtu;
};
+#endif
struct in6_ifreq {
struct in6_addr ifr6_addr;
@@ -165,6 +169,8 @@ enum {
DEVCONF_SUPPRESS_FRAG_NDISC,
DEVCONF_ACCEPT_RA_FROM_LOCAL,
DEVCONF_USE_OPTIMISTIC,
+ DEVCONF_ACCEPT_RA_MTU,
+ DEVCONF_STABLE_SECRET,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index 2be7bd1..f6598d1 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -34,6 +34,7 @@
#define RTF_PREF(pref) ((pref) << 27)
#define RTF_PREF_MASK 0x18000000
+#define RTF_PCPU 0x40000000
#define RTF_LOCAL 0x80000000
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 2f96d23..a6c4962 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -32,6 +32,7 @@
#define KPF_KSM 21
#define KPF_THP 22
#define KPF_BALLOON 23
+#define KPF_ZERO_PAGE 24
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
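
Editor's note: KPF_ZERO_PAGE is a bit index into the 64-bit flag words exported by /proc/kpageflags. A sketch that tests the flag for one page frame number (reading the file normally requires root):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <linux/kernel-page-flags.h>

/* Returns 1 if the pfn maps the zero page, 0 if not, -1 on error. */
int pfn_is_zero_page(uint64_t pfn)
{
	uint64_t flags = 0;
	int ret = -1;
	int fd = open("/proc/kpageflags", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) == sizeof(flags))
		ret = !!(flags & (1ULL << KPF_ZERO_PAGE));
	close(fd);
	return ret;
}
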
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 6925f5b..99048e5 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -55,12 +55,6 @@ struct kexec_segment {
size_t memsz;
};
-/* Load a new kernel image as described by the kexec_segment array
- * consisting of passed number of segments at the entry-point address.
- * The flags allow different useage types.
- */
-extern int kexec_load(void *, size_t, struct kexec_segment *,
- unsigned long int);
#endif /* __KERNEL__ */
#endif /* _UAPILINUX_KEXEC_H */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index af94f31..d683342 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -27,7 +27,7 @@
#include <linux/ioctl.h>
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 0
+#define KFD_IOCTL_MINOR_VERSION 1
struct kfd_ioctl_get_version_args {
uint32_t major_version; /* from KFD */
@@ -128,6 +128,110 @@ struct kfd_ioctl_get_process_apertures_args {
uint32_t pad;
};
+#define MAX_ALLOWED_NUM_POINTS 100
+#define MAX_ALLOWED_AW_BUFF_SIZE 4096
+#define MAX_ALLOWED_WAC_BUFF_SIZE 128
+
+struct kfd_ioctl_dbg_register_args {
+ uint32_t gpu_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_dbg_unregister_args {
+ uint32_t gpu_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_dbg_address_watch_args {
+ uint64_t content_ptr; /* a pointer to the actual content */
+ uint32_t gpu_id; /* to KFD */
+ uint32_t buf_size_in_bytes; /* including gpu_id and buf_size */
+};
+
+struct kfd_ioctl_dbg_wave_control_args {
+ uint64_t content_ptr; /* a pointer to the actual content */
+ uint32_t gpu_id; /* to KFD */
+ uint32_t buf_size_in_bytes; /* including gpu_id and buf_size */
+};
+
+/* Matching HSA_EVENTTYPE */
+#define KFD_IOC_EVENT_SIGNAL 0
+#define KFD_IOC_EVENT_NODECHANGE 1
+#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
+#define KFD_IOC_EVENT_HW_EXCEPTION 3
+#define KFD_IOC_EVENT_SYSTEM_EVENT 4
+#define KFD_IOC_EVENT_DEBUG_EVENT 5
+#define KFD_IOC_EVENT_PROFILE_EVENT 6
+#define KFD_IOC_EVENT_QUEUE_EVENT 7
+#define KFD_IOC_EVENT_MEMORY 8
+
+#define KFD_IOC_WAIT_RESULT_COMPLETE 0
+#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
+#define KFD_IOC_WAIT_RESULT_FAIL 2
+
+#define KFD_SIGNAL_EVENT_LIMIT 256
+
+struct kfd_ioctl_create_event_args {
+ uint64_t event_page_offset; /* from KFD */
+ uint32_t event_trigger_data; /* from KFD - signal events only */
+ uint32_t event_type; /* to KFD */
+ uint32_t auto_reset; /* to KFD */
+ uint32_t node_id; /* to KFD - only valid for certain
+ event types */
+ uint32_t event_id; /* from KFD */
+ uint32_t event_slot_index; /* from KFD */
+};
+
+struct kfd_ioctl_destroy_event_args {
+ uint32_t event_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_set_event_args {
+ uint32_t event_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_reset_event_args {
+ uint32_t event_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_memory_exception_failure {
+ uint32_t NotPresent; /* Page not present or supervisor privilege */
+ uint32_t ReadOnly; /* Write access to a read-only page */
+ uint32_t NoExecute; /* Execute access to a page marked NX */
+ uint32_t pad;
+};
+
+/* memory exception data */
+struct kfd_hsa_memory_exception_data {
+ struct kfd_memory_exception_failure failure;
+ uint64_t va;
+ uint32_t gpu_id;
+ uint32_t pad;
+};
+
+/* Event data */
+struct kfd_event_data {
+ union {
+ struct kfd_hsa_memory_exception_data memory_exception_data;
+ }; /* From KFD */
+ uint64_t kfd_event_data_ext; /* pointer to an extension structure
+ for future exception types */
+ uint32_t event_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_wait_events_args {
+ uint64_t events_ptr; /* points to struct
+ kfd_event_data array, to KFD */
+ uint32_t num_events; /* to KFD */
+ uint32_t wait_for_all; /* to KFD */
+ uint32_t timeout; /* to KFD */
+ uint32_t wait_result; /* from KFD */
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -155,7 +259,34 @@ struct kfd_ioctl_get_process_apertures_args {
#define AMDKFD_IOC_UPDATE_QUEUE \
AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
+#define AMDKFD_IOC_CREATE_EVENT \
+ AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
+
+#define AMDKFD_IOC_DESTROY_EVENT \
+ AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
+
+#define AMDKFD_IOC_SET_EVENT \
+ AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
+
+#define AMDKFD_IOC_RESET_EVENT \
+ AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
+
+#define AMDKFD_IOC_WAIT_EVENTS \
+ AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
+
+#define AMDKFD_IOC_DBG_REGISTER \
+ AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
+
+#define AMDKFD_IOC_DBG_UNREGISTER \
+ AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
+
+#define AMDKFD_IOC_DBG_ADDRESS_WATCH \
+ AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
+
+#define AMDKFD_IOC_DBG_WAVE_CONTROL \
+ AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x08
+#define AMDKFD_COMMAND_END 0x11
#endif
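
Editor's note: a rough sketch of the new event ioctls: create a signal event, then block on it with AMDKFD_IOC_WAIT_EVENTS, which takes a user pointer to an array of struct kfd_event_data. It assumes kfd_fd is an open /dev/kfd descriptor; the timeout unit is assumed to be milliseconds:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int kfd_signal_event_roundtrip(int kfd_fd)
{
	struct kfd_ioctl_create_event_args create;
	struct kfd_ioctl_wait_events_args wait;
	struct kfd_event_data ev;

	memset(&create, 0, sizeof(create));
	create.event_type = KFD_IOC_EVENT_SIGNAL;
	create.auto_reset = 1;
	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &create) < 0)
		return -1;

	memset(&ev, 0, sizeof(ev));
	ev.event_id = create.event_id;

	memset(&wait, 0, sizeof(wait));
	wait.events_ptr = (uintptr_t)&ev;	/* array of one kfd_event_data */
	wait.num_events = 1;
	wait.wait_for_all = 1;
	wait.timeout = 1000;			/* assumed to be milliseconds */
	if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait) < 0)
		return -1;

	return wait.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE ? 0 : -1;
}
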
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a37fd12..716ad4a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -147,6 +147,16 @@ struct kvm_pit_config {
#define KVM_PIT_SPEAKER_DUMMY 1
+struct kvm_s390_skeys {
+ __u64 start_gfn;
+ __u64 count;
+ __u64 skeydata_addr;
+ __u32 flags;
+ __u32 reserved[9];
+};
+#define KVM_S390_GET_SKEYS_NONE 1
+#define KVM_S390_SKEYS_MAX 1048576
+
#define KVM_EXIT_UNKNOWN 0
#define KVM_EXIT_EXCEPTION 1
#define KVM_EXIT_IO 2
@@ -172,6 +182,7 @@ struct kvm_pit_config {
#define KVM_EXIT_S390_TSCH 22
#define KVM_EXIT_EPR 23
#define KVM_EXIT_SYSTEM_EVENT 24
+#define KVM_EXIT_S390_STSI 25
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -191,7 +202,7 @@ struct kvm_run {
__u32 exit_reason;
__u8 ready_for_interrupt_injection;
__u8 if_flag;
- __u8 padding2[2];
+ __u16 flags;
/* in (pre_kvm_run), out (post_kvm_run) */
__u64 cr8;
@@ -309,6 +320,15 @@ struct kvm_run {
__u32 type;
__u64 flags;
} system_event;
+ /* KVM_EXIT_S390_STSI */
+ struct {
+ __u64 addr;
+ __u8 ar;
+ __u8 reserved;
+ __u8 fc;
+ __u8 sel1;
+ __u16 sel2;
+ } s390_stsi;
/* Fix the size of the union. */
char padding[256];
};
@@ -324,7 +344,7 @@ struct kvm_run {
__u64 kvm_dirty_regs;
union {
struct kvm_sync_regs regs;
- char padding[1024];
+ char padding[2048];
} s;
};
@@ -365,6 +385,24 @@ struct kvm_translation {
__u8 pad[5];
};
+/* for KVM_S390_MEM_OP */
+struct kvm_s390_mem_op {
+ /* in */
+ __u64 gaddr; /* the guest address */
+ __u64 flags; /* flags */
+ __u32 size; /* amount of bytes */
+ __u32 op; /* type of operation */
+ __u64 buf; /* buffer in userspace */
+ __u8 ar; /* the access register number */
+ __u8 reserved[31]; /* should be set to 0 */
+};
+/* types for kvm_s390_mem_op->op */
+#define KVM_S390_MEMOP_LOGICAL_READ 0
+#define KVM_S390_MEMOP_LOGICAL_WRITE 1
+/* flags for kvm_s390_mem_op->flags */
+#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
+#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
+
/* for KVM_INTERRUPT */
struct kvm_interrupt {
/* in */
@@ -491,6 +529,11 @@ struct kvm_s390_emerg_info {
__u16 code;
};
+#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
+struct kvm_s390_stop_info {
+ __u32 flags;
+};
+
struct kvm_s390_mchk_info {
__u64 cr14;
__u64 mcic;
@@ -509,11 +552,19 @@ struct kvm_s390_irq {
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
struct kvm_s390_mchk_info mchk;
char reserved[64];
} u;
};
+struct kvm_s390_irq_state {
+ __u64 buf;
+ __u32 flags;
+ __u32 len;
+ __u32 reserved[4];
+};
+
/* for KVM_SET_GUEST_DEBUG */
#define KVM_GUESTDBG_ENABLE 0x00000001
@@ -753,6 +804,19 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_PPC_FIXUP_HCALL 103
#define KVM_CAP_PPC_ENABLE_HCALL 104
#define KVM_CAP_CHECK_EXTENSION_VM 105
+#define KVM_CAP_S390_USER_SIGP 106
+#define KVM_CAP_S390_VECTOR_REGISTERS 107
+#define KVM_CAP_S390_MEM_OP 108
+#define KVM_CAP_S390_USER_STSI 109
+#define KVM_CAP_S390_SKEYS 110
+#define KVM_CAP_MIPS_FPU 111
+#define KVM_CAP_MIPS_MSA 112
+#define KVM_CAP_S390_INJECT_IRQ 113
+#define KVM_CAP_S390_IRQ_STATE 114
+#define KVM_CAP_PPC_HWRNG 115
+#define KVM_CAP_DISABLE_QUIRKS 116
+#define KVM_CAP_X86_SMM 117
+#define KVM_CAP_MULTI_ADDRESS_SPACE 118
#ifdef KVM_CAP_IRQ_ROUTING
@@ -833,7 +897,7 @@ struct kvm_xen_hvm_config {
*
* KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
* the irqfd to operate in resampling mode for level triggered interrupt
- * emlation. See Documentation/virtual/kvm/api.txt.
+ * emulation. See Documentation/virtual/kvm/api.txt.
*/
#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
@@ -952,6 +1016,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_VGIC_V2 KVM_DEV_TYPE_ARM_VGIC_V2
KVM_DEV_TYPE_FLIC,
#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
+ KVM_DEV_TYPE_ARM_VGIC_V3,
+#define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3
KVM_DEV_TYPE_MAX,
};
@@ -1126,6 +1192,18 @@ struct kvm_s390_ucas_mapping {
#define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init)
#define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init)
#define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
+/* Available with KVM_CAP_S390_MEM_OP */
+#define KVM_S390_MEM_OP _IOW(KVMIO, 0xb1, struct kvm_s390_mem_op)
+/* Available with KVM_CAP_S390_SKEYS */
+#define KVM_S390_GET_SKEYS _IOW(KVMIO, 0xb2, struct kvm_s390_skeys)
+#define KVM_S390_SET_SKEYS _IOW(KVMIO, 0xb3, struct kvm_s390_skeys)
+/* Available with KVM_CAP_S390_INJECT_IRQ */
+#define KVM_S390_IRQ _IOW(KVMIO, 0xb4, struct kvm_s390_irq)
+/* Available with KVM_CAP_S390_IRQ_STATE */
+#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
+#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
+/* Available with KVM_CAP_X86_SMM */
+#define KVM_SMI _IO(KVMIO, 0xb7)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
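
Editor's note: a sketch of the new KVM_S390_MEM_OP vcpu ioctl performing a logical read of guest memory into a user buffer. It assumes vcpu_fd is a vcpu file descriptor on a host that advertises KVM_CAP_S390_MEM_OP:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int s390_read_guest(int vcpu_fd, __u64 gaddr, void *buf, __u32 size)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;			/* guest logical address */
	op.size = size;
	op.op = KVM_S390_MEMOP_LOGICAL_READ;
	op.buf = (__u64)(unsigned long)buf;	/* destination in user space */
	op.ar = 0;				/* access register 0 */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}
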
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 21caa26..347ef22 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -178,5 +178,6 @@ enum l2tp_seqmode {
*/
#define L2TP_GENL_NAME "l2tp"
#define L2TP_GENL_VERSION 0x1
+#define L2TP_GENL_MCGROUP "l2tp"
#endif /* _UAPI_LINUX_L2TP_H_ */
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index e28807a..7d024ce 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -56,6 +56,13 @@
/* GLIBC headers included first so don't define anything
* that would already be defined. */
+#define __UAPI_DEF_IN_ADDR 0
+#define __UAPI_DEF_IN_IPPROTO 0
+#define __UAPI_DEF_IN_PKTINFO 0
+#define __UAPI_DEF_IP_MREQ 0
+#define __UAPI_DEF_SOCKADDR_IN 0
+#define __UAPI_DEF_IN_CLASS 0
+
#define __UAPI_DEF_IN6_ADDR 0
/* The exception is the in6_addr macros which must be defined
* if the glibc code didn't define them. This guard matches
@@ -70,12 +77,21 @@
#define __UAPI_DEF_IPV6_MREQ 0
#define __UAPI_DEF_IPPROTO_V6 0
#define __UAPI_DEF_IPV6_OPTIONS 0
+#define __UAPI_DEF_IN6_PKTINFO 0
+#define __UAPI_DEF_IP6_MTUINFO 0
#else
/* Linux headers included first, and we must define everything
* we need. The expectation is that glibc will check the
* __UAPI_DEF_* defines and adjust appropriately. */
+#define __UAPI_DEF_IN_ADDR 1
+#define __UAPI_DEF_IN_IPPROTO 1
+#define __UAPI_DEF_IN_PKTINFO 1
+#define __UAPI_DEF_IP_MREQ 1
+#define __UAPI_DEF_SOCKADDR_IN 1
+#define __UAPI_DEF_IN_CLASS 1
+
#define __UAPI_DEF_IN6_ADDR 1
/* We unconditionally define the in6_addr macros and glibc must
* coordinate. */
@@ -84,6 +100,8 @@
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
#define __UAPI_DEF_IPV6_OPTIONS 1
+#define __UAPI_DEF_IN6_PKTINFO 1
+#define __UAPI_DEF_IP6_MTUINFO 1
#endif /* _NETINET_IN_H */
@@ -99,6 +117,14 @@
* that we need. */
#else /* !defined(__GLIBC__) */
+/* Definitions for in.h */
+#define __UAPI_DEF_IN_ADDR 1
+#define __UAPI_DEF_IN_IPPROTO 1
+#define __UAPI_DEF_IN_PKTINFO 1
+#define __UAPI_DEF_IP_MREQ 1
+#define __UAPI_DEF_SOCKADDR_IN 1
+#define __UAPI_DEF_IN_CLASS 1
+
/* Definitions for in6.h */
#define __UAPI_DEF_IN6_ADDR 1
#define __UAPI_DEF_IN6_ADDR_ALT 1
@@ -106,6 +132,8 @@
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
#define __UAPI_DEF_IPV6_OPTIONS 1
+#define __UAPI_DEF_IN6_PKTINFO 1
+#define __UAPI_DEF_IP6_MTUINFO 1
/* Definitions for xattr.h */
#define __UAPI_DEF_XATTR 1
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 7d664ea..7b1425a 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -58,6 +58,8 @@
#define STACK_END_MAGIC 0x57AC6E9D
+#define TRACEFS_MAGIC 0x74726163
+
#define V9FS_MAGIC 0x01021997
#define BDEVFS_MAGIC 0x62646576
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 23b4090..190d491 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -33,22 +33,32 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x100e */
+/* RGB - next is 0x1018 */
+#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004
+#define MEDIA_BUS_FMT_RGB565_1X16 0x1017
#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005
#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
+#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
+#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
+#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
+#define MEDIA_BUS_FMT_GBR888_1X24 0x1014
#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
+#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
+#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
+#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
-/* YUV (including grey) - next is 0x2024 */
+/* YUV (including grey) - next is 0x2026 */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -65,6 +75,10 @@
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c
#define MEDIA_BUS_FMT_Y12_1X12 0x2013
+#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
+#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
+#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
+#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
@@ -74,16 +88,14 @@
#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b
#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d
#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
-#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
-#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
-#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
-#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
-#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
-#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
+#define MEDIA_BUS_FMT_VUY8_1X24 0x2024
+#define MEDIA_BUS_FMT_YUV8_1X24 0x2025
#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
+#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
+#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
/* Bayer - next is 0x3019 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index d847c76..4e816be 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -50,7 +50,14 @@ struct media_device_info {
#define MEDIA_ENT_T_DEVNODE_V4L (MEDIA_ENT_T_DEVNODE + 1)
#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_T_DEVNODE + 2)
#define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_T_DEVNODE + 3)
-#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_T_DEVNODE + 4)
+#define MEDIA_ENT_T_DEVNODE_DVB_FE (MEDIA_ENT_T_DEVNODE + 4)
+#define MEDIA_ENT_T_DEVNODE_DVB_DEMUX (MEDIA_ENT_T_DEVNODE + 5)
+#define MEDIA_ENT_T_DEVNODE_DVB_DVR (MEDIA_ENT_T_DEVNODE + 6)
+#define MEDIA_ENT_T_DEVNODE_DVB_CA (MEDIA_ENT_T_DEVNODE + 7)
+#define MEDIA_ENT_T_DEVNODE_DVB_NET (MEDIA_ENT_T_DEVNODE + 8)
+
+/* Legacy symbol. Use it to avoid userspace compilation breakages */
+#define MEDIA_ENT_T_DEVNODE_DVB MEDIA_ENT_T_DEVNODE_DVB_FE
#define MEDIA_ENT_T_V4L2_SUBDEV (2 << MEDIA_ENT_TYPE_SHIFT)
#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR (MEDIA_ENT_T_V4L2_SUBDEV + 1)
@@ -59,6 +66,8 @@ struct media_device_info {
/* A converter of analogue video to its digital representation. */
#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER (MEDIA_ENT_T_V4L2_SUBDEV + 4)
+#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER (MEDIA_ENT_T_V4L2_SUBDEV + 5)
+
#define MEDIA_ENT_FL_DEFAULT (1 << 0)
struct media_entity_desc {
@@ -78,17 +87,48 @@ struct media_entity_desc {
struct {
__u32 major;
__u32 minor;
- } v4l;
- struct {
- __u32 major;
- __u32 minor;
- } fb;
+ } dev;
+
+#if 1
+ /*
+ * TODO: this shouldn't have been added without
+ * actual drivers that use this. When the first real driver
+ * appears that sets this information, special attention
+ * should be given to whether this information is 1) enough, and
+ * 2) able to deal with udev rules that rename devices. The struct
+ * dev would not be sufficient for this since that does not
+ * contain the subdevice information. In addition, struct dev
+ * can only refer to a single device, and not to multiple (e.g.
+ * pcm and mixer devices).
+ *
+ * So for now mark this as a to do.
+ */
struct {
__u32 card;
__u32 device;
__u32 subdevice;
} alsa;
+#endif
+
+#if 1
+ /*
+ * DEPRECATED: previous node specifications. Kept just to
+ * avoid breaking compilation, but media_entity_desc.dev
+ * should be used instead. In particular, alsa and dvb
+ * fields below are wrong: for all devnodes, there should
+ * be just major/minor inside the struct, as this is enough
+ * to represent any devnode, no matter what type.
+ */
+ struct {
+ __u32 major;
+ __u32 minor;
+ } v4l;
+ struct {
+ __u32 major;
+ __u32 minor;
+ } fb;
int dvb;
+#endif
/* Sub-device specifications */
/* Nothing needed yet */
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 0d11c3d..9cd8b21 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -67,7 +67,7 @@ enum mpol_rebind_step {
#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2) /* identify policies in rebinding */
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
-#define MPOL_F_MORON (1 << 4) /* Migrate On pte_numa Reference On Node */
+#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
#endif /* _UAPI_LINUX_MEMPOLICY_H */
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
index 6eb4024..302a2ce 100644
--- a/include/uapi/linux/mic_common.h
+++ b/include/uapi/linux/mic_common.h
@@ -80,6 +80,12 @@ struct mic_device_ctrl {
* @h2c_config_db: Host to Card Virtio config doorbell set by card
* @shutdown_status: Card shutdown status set by card
* @shutdown_card: Set to 1 by the host when a card shutdown is initiated
+ * @tot_nodes: Total number of nodes in the SCIF network
+ * @node_id: Unique id of the node
+ * @h2c_scif_db: Host to card SCIF doorbell set by card
+ * @c2h_scif_db: Card to host SCIF doorbell set by host
+ * @scif_host_dma_addr: SCIF host queue pair DMA address
+ * @scif_card_dma_addr: SCIF card queue pair DMA address
*/
struct mic_bootparam {
__le32 magic;
@@ -88,6 +94,12 @@ struct mic_bootparam {
__s8 h2c_config_db;
__u8 shutdown_status;
__u8 shutdown_card;
+ __u8 tot_nodes;
+ __u8 node_id;
+ __u8 h2c_scif_db;
+ __u8 c2h_scif_db;
+ __u64 scif_host_dma_addr;
+ __u64 scif_card_dma_addr;
} __attribute__ ((aligned(8)));
/**
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index bc9abfe..139d4dd 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -31,4 +31,14 @@ struct mpls_label {
#define MPLS_LS_TTL_MASK 0x000000FF
#define MPLS_LS_TTL_SHIFT 0
+/* Reserved labels */
+#define MPLS_LABEL_IPV4NULL 0 /* RFC3032 */
+#define MPLS_LABEL_RTALERT 1 /* RFC3032 */
+#define MPLS_LABEL_IPV6NULL 2 /* RFC3032 */
+#define MPLS_LABEL_IMPLNULL 3 /* RFC3032 */
+#define MPLS_LABEL_ENTROPY 7 /* RFC6790 */
+#define MPLS_LABEL_GAL 13 /* RFC5586 */
+#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */
+#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */
+
#endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index e284ff9..e956704 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -134,7 +134,7 @@ struct fat_boot_sector {
__u8 vol_id[4]; /* volume ID */
__u8 vol_label[11]; /* volume label */
__u8 fs_type[8]; /* file system type */
- /* other fiealds are not added here */
+ /* other fields are not added here */
} fat16;
struct {
@@ -157,7 +157,7 @@ struct fat_boot_sector {
__u8 vol_id[4]; /* volume ID */
__u8 vol_label[11]; /* volume label */
__u8 fs_type[8]; /* file system type */
- /* other fiealds are not added here */
+ /* other fields are not added here */
} fat32;
};
};
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 4f52549..e08e413 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -44,8 +44,6 @@ enum {
/* there is a gap here to match userspace */
#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
-#define nbd_cmd(req) ((req)->cmd[0])
-
/* userspace doesn't need the nbd_device structure */
/* These are sent over the network in the request/reply magic fields */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
new file mode 100644
index 0000000..2b94ea2
--- /dev/null
+++ b/include/uapi/linux/ndctl.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2014-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU Lesser General Public License,
+ * version 2.1, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+ * more details.
+ */
+#ifndef __NDCTL_H__
+#define __NDCTL_H__
+
+#include <linux/types.h>
+
+struct nd_cmd_smart {
+ __u32 status;
+ __u8 data[128];
+} __packed;
+
+struct nd_cmd_smart_threshold {
+ __u32 status;
+ __u8 data[8];
+} __packed;
+
+struct nd_cmd_dimm_flags {
+ __u32 status;
+ __u32 flags;
+} __packed;
+
+struct nd_cmd_get_config_size {
+ __u32 status;
+ __u32 config_size;
+ __u32 max_xfer;
+} __packed;
+
+struct nd_cmd_get_config_data_hdr {
+ __u32 in_offset;
+ __u32 in_length;
+ __u32 status;
+ __u8 out_buf[0];
+} __packed;
+
+struct nd_cmd_set_config_hdr {
+ __u32 in_offset;
+ __u32 in_length;
+ __u8 in_buf[0];
+} __packed;
+
+struct nd_cmd_vendor_hdr {
+ __u32 opcode;
+ __u32 in_length;
+ __u8 in_buf[0];
+} __packed;
+
+struct nd_cmd_vendor_tail {
+ __u32 status;
+ __u32 out_length;
+ __u8 out_buf[0];
+} __packed;
+
+struct nd_cmd_ars_cap {
+ __u64 address;
+ __u64 length;
+ __u32 status;
+ __u32 max_ars_out;
+} __packed;
+
+struct nd_cmd_ars_start {
+ __u64 address;
+ __u64 length;
+ __u16 type;
+ __u8 reserved[6];
+ __u32 status;
+} __packed;
+
+struct nd_cmd_ars_status {
+ __u32 status;
+ __u32 out_length;
+ __u64 address;
+ __u64 length;
+ __u16 type;
+ __u32 num_records;
+ struct nd_ars_record {
+ __u32 handle;
+ __u32 flags;
+ __u64 err_address;
+ __u64 mask;
+ } __packed records[0];
+} __packed;
+
+enum {
+ ND_CMD_IMPLEMENTED = 0,
+
+ /* bus commands */
+ ND_CMD_ARS_CAP = 1,
+ ND_CMD_ARS_START = 2,
+ ND_CMD_ARS_STATUS = 3,
+
+ /* per-dimm commands */
+ ND_CMD_SMART = 1,
+ ND_CMD_SMART_THRESHOLD = 2,
+ ND_CMD_DIMM_FLAGS = 3,
+ ND_CMD_GET_CONFIG_SIZE = 4,
+ ND_CMD_GET_CONFIG_DATA = 5,
+ ND_CMD_SET_CONFIG_DATA = 6,
+ ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7,
+ ND_CMD_VENDOR_EFFECT_LOG = 8,
+ ND_CMD_VENDOR = 9,
+};
+
+static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
+{
+ static const char * const names[] = {
+ [ND_CMD_ARS_CAP] = "ars_cap",
+ [ND_CMD_ARS_START] = "ars_start",
+ [ND_CMD_ARS_STATUS] = "ars_status",
+ };
+
+ if (cmd < ARRAY_SIZE(names) && names[cmd])
+ return names[cmd];
+ return "unknown";
+}
+
+static inline const char *nvdimm_cmd_name(unsigned cmd)
+{
+ static const char * const names[] = {
+ [ND_CMD_SMART] = "smart",
+ [ND_CMD_SMART_THRESHOLD] = "smart_thresh",
+ [ND_CMD_DIMM_FLAGS] = "flags",
+ [ND_CMD_GET_CONFIG_SIZE] = "get_size",
+ [ND_CMD_GET_CONFIG_DATA] = "get_data",
+ [ND_CMD_SET_CONFIG_DATA] = "set_data",
+ [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
+ [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
+ [ND_CMD_VENDOR] = "vendor",
+ };
+
+ if (cmd < ARRAY_SIZE(names) && names[cmd])
+ return names[cmd];
+ return "unknown";
+}
+
+#define ND_IOCTL 'N'
+
+#define ND_IOCTL_SMART _IOWR(ND_IOCTL, ND_CMD_SMART,\
+ struct nd_cmd_smart)
+
+#define ND_IOCTL_SMART_THRESHOLD _IOWR(ND_IOCTL, ND_CMD_SMART_THRESHOLD,\
+ struct nd_cmd_smart_threshold)
+
+#define ND_IOCTL_DIMM_FLAGS _IOWR(ND_IOCTL, ND_CMD_DIMM_FLAGS,\
+ struct nd_cmd_dimm_flags)
+
+#define ND_IOCTL_GET_CONFIG_SIZE _IOWR(ND_IOCTL, ND_CMD_GET_CONFIG_SIZE,\
+ struct nd_cmd_get_config_size)
+
+#define ND_IOCTL_GET_CONFIG_DATA _IOWR(ND_IOCTL, ND_CMD_GET_CONFIG_DATA,\
+ struct nd_cmd_get_config_data_hdr)
+
+#define ND_IOCTL_SET_CONFIG_DATA _IOWR(ND_IOCTL, ND_CMD_SET_CONFIG_DATA,\
+ struct nd_cmd_set_config_hdr)
+
+#define ND_IOCTL_VENDOR _IOWR(ND_IOCTL, ND_CMD_VENDOR,\
+ struct nd_cmd_vendor_hdr)
+
+#define ND_IOCTL_ARS_CAP _IOWR(ND_IOCTL, ND_CMD_ARS_CAP,\
+ struct nd_cmd_ars_cap)
+
+#define ND_IOCTL_ARS_START _IOWR(ND_IOCTL, ND_CMD_ARS_START,\
+ struct nd_cmd_ars_start)
+
+#define ND_IOCTL_ARS_STATUS _IOWR(ND_IOCTL, ND_CMD_ARS_STATUS,\
+ struct nd_cmd_ars_status)
+
+#define ND_DEVICE_DIMM 1 /* nd_dimm: container for "config data" */
+#define ND_DEVICE_REGION_PMEM 2 /* nd_region: (parent of PMEM namespaces) */
+#define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */
+#define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */
+#define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */
+#define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */
+
+enum nd_driver_flags {
+ ND_DRIVER_DIMM = 1 << ND_DEVICE_DIMM,
+ ND_DRIVER_REGION_PMEM = 1 << ND_DEVICE_REGION_PMEM,
+ ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK,
+ ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
+ ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
+ ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
+};
+
+enum {
+ ND_MIN_NAMESPACE_SIZE = 0x00400000,
+};
+#endif /* __NDCTL_H__ */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index f3d77f9..2e35c61 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -25,6 +25,7 @@ enum {
NDA_VNI,
NDA_IFINDEX,
NDA_MASTER,
+ NDA_LINK_NETNSID,
__NDA_MAX
};
@@ -125,6 +126,7 @@ enum {
NDTPA_PROXY_QLEN, /* u32 */
NDTPA_LOCKTIME, /* u64, msecs */
NDTPA_QUEUE_LENBYTES, /* u32 */
+ NDTPA_MCAST_REPROBES, /* u32 */
__NDTPA_MAX
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/uapi/linux/net_namespace.h b/include/uapi/linux/net_namespace.h
new file mode 100644
index 0000000..778cd2c
--- /dev/null
+++ b/include/uapi/linux/net_namespace.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2015 6WIND S.A.
+ * Author: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+#ifndef _UAPI_LINUX_NET_NAMESPACE_H_
+#define _UAPI_LINUX_NET_NAMESPACE_H_
+
+/* Attributes of RTM_NEWNSID/RTM_GETNSID messages */
+enum {
+ NETNSA_NONE,
+#define NETNSA_NSID_NOT_ASSIGNED -1
+ NETNSA_NSID,
+ NETNSA_PID,
+ NETNSA_FD,
+ __NETNSA_MAX,
+};
+
+#define NETNSA_MAX (__NETNSA_MAX - 1)
+
+#endif /* _UAPI_LINUX_NET_NAMESPACE_H_ */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index edbc888..6d1abea 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -24,8 +24,9 @@ enum {
SOF_TIMESTAMPING_TX_SCHED = (1<<8),
SOF_TIMESTAMPING_TX_ACK = (1<<9),
SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
+ SOF_TIMESTAMPING_OPT_TSONLY = (1<<11),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_CMSG,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TSONLY,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
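
Editor's note: SOF_TIMESTAMPING_OPT_TSONLY is passed with the other flags through the SO_TIMESTAMPING socket option and asks the kernel to loop TX timestamps back without the original payload. A minimal sketch (the flag combination is just one plausible example):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

int enable_tx_tsonly(int fd)
{
	int val = SOF_TIMESTAMPING_TX_SOFTWARE |
		  SOF_TIMESTAMPING_SOFTWARE |
		  SOF_TIMESTAMPING_OPT_ID |
		  SOF_TIMESTAMPING_OPT_TSONLY;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}
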
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index 669a1f0..23cbd34 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -15,6 +15,7 @@ enum {
NETCONFA_RP_FILTER,
NETCONFA_MC_FORWARDING,
NETCONFA_PROXY_NEIGH,
+ NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ef1b1f8..d93f949 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -4,7 +4,8 @@
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
-
+#include <linux/in.h>
+#include <linux/in6.h>
/* Responses from hook functions. */
#define NF_DROP 0
@@ -51,11 +52,17 @@ enum nf_inet_hooks {
NF_INET_NUMHOOKS
};
+enum nf_dev_hooks {
+ NF_NETDEV_INGRESS,
+ NF_NETDEV_NUMHOOKS
+};
+
enum {
NFPROTO_UNSPEC = 0,
NFPROTO_INET = 1,
NFPROTO_IPV4 = 2,
NFPROTO_ARP = 3,
+ NFPROTO_NETDEV = 5,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
NFPROTO_DECNET = 12,
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 5ab4e60..63b2e34 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -15,12 +15,12 @@
/* The protocol version */
#define IPSET_PROTOCOL 6
-/* The maximum permissible comment length we will accept over netlink */
-#define IPSET_MAX_COMMENT_SIZE 255
-
/* The max length of strings including NUL: set and type identifiers */
#define IPSET_MAXNAMELEN 32
+/* The maximum permissible comment length we will accept over netlink */
+#define IPSET_MAX_COMMENT_SIZE 255
+
/* Message types and commands */
enum ipset_cmd {
IPSET_CMD_NONE,
diff --git a/include/uapi/linux/netfilter/nf_conntrack_tcp.h b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
index 9993a42..ef9f80f 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
@@ -42,6 +42,9 @@ enum tcp_conntrack {
/* The field td_maxack has been set */
#define IP_CT_TCP_FLAG_MAXACK_SET 0x20
+/* Marks possibility for expected RFC5961 challenge ACK */
+#define IP_CT_EXP_CHALLENGE_ACK 0x40
+
struct nf_ct_tcp_flags {
__u8 flags;
__u8 mask;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 832bc46..a99e6a9 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1,19 +1,49 @@
#ifndef _LINUX_NF_TABLES_H
#define _LINUX_NF_TABLES_H
+#define NFT_TABLE_MAXNAMELEN 32
#define NFT_CHAIN_MAXNAMELEN 32
#define NFT_USERDATA_MAXLEN 256
+/**
+ * enum nft_registers - nf_tables registers
+ *
+ * nf_tables used to have five registers: a verdict register and four data
+ * registers of size 16. The data registers have been changed to 16 registers
+ * of size 4. For compatibility reasons, the NFT_REG_[1-4] registers still
+ * map to areas of size 16, the 4 byte registers are addressed using
+ * NFT_REG32_00 - NFT_REG32_15.
+ */
enum nft_registers {
NFT_REG_VERDICT,
NFT_REG_1,
NFT_REG_2,
NFT_REG_3,
NFT_REG_4,
- __NFT_REG_MAX
+ __NFT_REG_MAX,
+
+ NFT_REG32_00 = 8,
+ NFT_REG32_01,
+ NFT_REG32_02,
+ NFT_REG32_03,
+ NFT_REG32_04,
+ NFT_REG32_05,
+ NFT_REG32_06,
+ NFT_REG32_07,
+ NFT_REG32_08,
+ NFT_REG32_09,
+ NFT_REG32_10,
+ NFT_REG32_11,
+ NFT_REG32_12,
+ NFT_REG32_13,
+ NFT_REG32_14,
+ NFT_REG32_15,
};
#define NFT_REG_MAX (__NFT_REG_MAX - 1)
+#define NFT_REG_SIZE 16
+#define NFT_REG32_SIZE 4
+
/**
* enum nft_verdicts - nf_tables internal verdicts
*
@@ -92,11 +122,13 @@ enum nft_list_attributes {
*
* @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
* @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFTA_HOOK_DEV: netdevice name (NLA_STRING)
*/
enum nft_hook_attributes {
NFTA_HOOK_UNSPEC,
NFTA_HOOK_HOOKNUM,
NFTA_HOOK_PRIORITY,
+ NFTA_HOOK_DEV,
__NFTA_HOOK_MAX
};
#define NFTA_HOOK_MAX (__NFTA_HOOK_MAX - 1)
@@ -207,12 +239,16 @@ enum nft_rule_compat_attributes {
* @NFT_SET_CONSTANT: set contents may not change while bound
* @NFT_SET_INTERVAL: set contains intervals
* @NFT_SET_MAP: set is used as a dictionary
+ * @NFT_SET_TIMEOUT: set uses timeouts
+ * @NFT_SET_EVAL: set contains expressions for evaluation
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
NFT_SET_CONSTANT = 0x2,
NFT_SET_INTERVAL = 0x4,
NFT_SET_MAP = 0x8,
+ NFT_SET_TIMEOUT = 0x10,
+ NFT_SET_EVAL = 0x20,
};
/**
@@ -251,6 +287,8 @@ enum nft_set_desc_attributes {
* @NFTA_SET_POLICY: selection policy (NLA_U32)
* @NFTA_SET_DESC: set description (NLA_NESTED)
* @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
+ * @NFTA_SET_TIMEOUT: default timeout value (NLA_U64)
+ * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -264,6 +302,8 @@ enum nft_set_attributes {
NFTA_SET_POLICY,
NFTA_SET_DESC,
NFTA_SET_ID,
+ NFTA_SET_TIMEOUT,
+ NFTA_SET_GC_INTERVAL,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -283,12 +323,20 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
* @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
* @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
+ * @NFTA_SET_ELEM_TIMEOUT: timeout value (NLA_U64)
+ * @NFTA_SET_ELEM_EXPIRATION: expiration time (NLA_U64)
+ * @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
+ * @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
*/
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
NFTA_SET_ELEM_KEY,
NFTA_SET_ELEM_DATA,
NFTA_SET_ELEM_FLAGS,
+ NFTA_SET_ELEM_TIMEOUT,
+ NFTA_SET_ELEM_EXPIRATION,
+ NFTA_SET_ELEM_USERDATA,
+ NFTA_SET_ELEM_EXPR,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -346,6 +394,9 @@ enum nft_data_attributes {
};
#define NFTA_DATA_MAX (__NFTA_DATA_MAX - 1)
+/* Maximum length of a value */
+#define NFT_DATA_VALUE_MAXLEN 64
+
/**
* enum nft_verdict_attributes - nf_tables verdict netlink attributes
*
@@ -504,6 +555,35 @@ enum nft_lookup_attributes {
};
#define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1)
+enum nft_dynset_ops {
+ NFT_DYNSET_OP_ADD,
+ NFT_DYNSET_OP_UPDATE,
+};
+
+/**
+ * enum nft_dynset_attributes - dynset expression attributes
+ *
+ * @NFTA_DYNSET_SET_NAME: name of the set to add data to (NLA_STRING)
+ * @NFTA_DYNSET_SET_ID: unique identifier of the set in the transaction (NLA_U32)
+ * @NFTA_DYNSET_OP: operation (NLA_U32)
+ * @NFTA_DYNSET_SREG_KEY: source register of the key (NLA_U32)
+ * @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
+ * @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
+ * @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
+ */
+enum nft_dynset_attributes {
+ NFTA_DYNSET_UNSPEC,
+ NFTA_DYNSET_SET_NAME,
+ NFTA_DYNSET_SET_ID,
+ NFTA_DYNSET_OP,
+ NFTA_DYNSET_SREG_KEY,
+ NFTA_DYNSET_SREG_DATA,
+ NFTA_DYNSET_TIMEOUT,
+ NFTA_DYNSET_EXPR,
+ __NFTA_DYNSET_MAX,
+};
+#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
+
/**
* enum nft_payload_bases - nf_tables payload expression offset bases
*
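
Editor's note: the nft_registers comment above says each legacy 128-bit register aliases four of the new 32-bit registers. An illustrative helper (not kernel code) that makes the mapping concrete, e.g. NFT_REG_1 starts at NFT_REG32_00 and NFT_REG_2 at NFT_REG32_04:

#include <linux/netfilter/nf_tables.h>

/* Illustrative only: index of the first 32-bit register that a legacy
 * 128-bit register (NFT_REG_1..NFT_REG_4) maps onto. */
static inline unsigned int nft_legacy_reg_to_reg32(unsigned int reg)
{
	return NFT_REG32_00 +
	       (reg - NFT_REG_1) * (NFT_REG_SIZE / NFT_REG32_SIZE);
}
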
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index 8dd819e..b67a853 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -49,6 +49,7 @@ enum nfqnl_attr_type {
NFQA_EXP, /* nf_conntrack_netlink.h */
NFQA_UID, /* __u32 sk uid */
NFQA_GID, /* __u32 sk gid */
+ NFQA_SECCTX, /* security context string */
__NFQA_MAX
};
@@ -102,7 +103,8 @@ enum nfqnl_attr_config {
#define NFQA_CFG_F_CONNTRACK (1 << 1)
#define NFQA_CFG_F_GSO (1 << 2)
#define NFQA_CFG_F_UID_GID (1 << 3)
-#define NFQA_CFG_F_MAX (1 << 4)
+#define NFQA_CFG_F_SECCTX (1 << 4)
+#define NFQA_CFG_F_MAX (1 << 5)
/* flags for NFQA_SKB_INFO */
/* packet appears to have wrong checksums, but they are ok */
diff --git a/include/uapi/linux/netfilter/xt_socket.h b/include/uapi/linux/netfilter/xt_socket.h
index 6315e2a..87644f8 100644
--- a/include/uapi/linux/netfilter/xt_socket.h
+++ b/include/uapi/linux/netfilter/xt_socket.h
@@ -6,6 +6,7 @@
enum {
XT_SOCKET_TRANSPARENT = 1 << 0,
XT_SOCKET_NOWILDCARD = 1 << 1,
+ XT_SOCKET_RESTORESKMARK = 1 << 2,
};
struct xt_socket_mtinfo1 {
@@ -18,4 +19,11 @@ struct xt_socket_mtinfo2 {
};
#define XT_SOCKET_FLAGS_V2 (XT_SOCKET_TRANSPARENT | XT_SOCKET_NOWILDCARD)
+struct xt_socket_mtinfo3 {
+ __u8 flags;
+};
+#define XT_SOCKET_FLAGS_V3 (XT_SOCKET_TRANSPARENT \
+ | XT_SOCKET_NOWILDCARD \
+ | XT_SOCKET_RESTORESKMARK)
+
#endif /* _XT_SOCKET_H */
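The XT_SOCKET_FLAGS_V3 mask above collects every flag a revision-3 match knows about, so a revision-3 user would be expected to validate its flags against it. A hedged sketch of that check (function name is illustrative):

#include <linux/netfilter/xt_socket.h>

/* Returns 0 when only flags known to revision 3 are set. */
static int xt_socket_v3_check_flags(const struct xt_socket_mtinfo3 *info)
{
        return (info->flags & ~XT_SOCKET_FLAGS_V3) ? -1 : 0;
}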
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index ba99336..fd2ee50 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -6,15 +6,13 @@
*
* ebtables.c,v 2.0, April, 2002
*
- * This code is stongly inspired on the iptables code which is
+ * This code is strongly inspired by the iptables code which is
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
*/
#ifndef _UAPI__LINUX_BRIDGE_EFF_H
#define _UAPI__LINUX_BRIDGE_EFF_H
-#include <linux/if.h>
#include <linux/netfilter_bridge.h>
-#include <linux/if_ether.h>
#define EBT_TABLE_MAXNAMELEN 32
#define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 1a85940..cf6a65c 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -101,13 +101,15 @@ struct nlmsgerr {
struct nlmsghdr msg;
};
-#define NETLINK_ADD_MEMBERSHIP 1
-#define NETLINK_DROP_MEMBERSHIP 2
-#define NETLINK_PKTINFO 3
-#define NETLINK_BROADCAST_ERROR 4
-#define NETLINK_NO_ENOBUFS 5
-#define NETLINK_RX_RING 6
-#define NETLINK_TX_RING 7
+#define NETLINK_ADD_MEMBERSHIP 1
+#define NETLINK_DROP_MEMBERSHIP 2
+#define NETLINK_PKTINFO 3
+#define NETLINK_BROADCAST_ERROR 4
+#define NETLINK_NO_ENOBUFS 5
+#define NETLINK_RX_RING 6
+#define NETLINK_TX_RING 7
+#define NETLINK_LISTEN_ALL_NSID 8
+#define NETLINK_LIST_MEMBERSHIPS 9
struct nl_pktinfo {
__u32 group;
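The two new socket options above are used at the SOL_NETLINK level; NETLINK_LIST_MEMBERSHIPS in particular fills a __u32 bitmap of joined multicast groups and reports the size it would need in optlen. A minimal userspace sketch, assuming an already-open netlink socket fd:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

/* Print the multicast groups the netlink socket 'fd' has joined. */
static void print_memberships(int fd)
{
        __u32 groups[8] = { 0 };
        socklen_t len = sizeof(groups);

        if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS,
                       groups, &len) < 0)
                return;
        if (len > sizeof(groups))       /* kernel reports the size it needs */
                len = sizeof(groups);
        for (unsigned int i = 0; i < len * 8; i++)
                if (groups[i / 32] & (1u << (i % 32)))
                        printf("member of group %u\n", i + 1);
}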
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 8119255..dd3f753 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -86,6 +86,8 @@
* for this event is the application ID (AID).
* @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
* @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element.
+ * @NFC_CMD_VENDOR: Vendor specific command, to be implemented directly
+ * by the driver in order to support hardware specific operations.
*/
enum nfc_commands {
NFC_CMD_UNSPEC,
@@ -117,6 +119,7 @@ enum nfc_commands {
NFC_CMD_GET_SE,
NFC_CMD_SE_IO,
NFC_CMD_ACTIVATE_TARGET,
+ NFC_CMD_VENDOR,
/* private: internal use only */
__NFC_CMD_AFTER_LAST
};
@@ -153,6 +156,10 @@ enum nfc_commands {
* @NFC_ATTR_APDU: Secure element APDU
* @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier
* @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier
+ * @NFC_ATTR_VENDOR_ID: NFC manufacturer unique ID, typically an OUI
+ * @NFC_ATTR_VENDOR_SUBCMD: Vendor specific sub command
+ * @NFC_ATTR_VENDOR_DATA: Vendor specific data, to be optionally passed
+ * to a vendor specific command implementation
*/
enum nfc_attrs {
NFC_ATTR_UNSPEC,
@@ -183,6 +190,10 @@ enum nfc_attrs {
NFC_ATTR_SE_APDU,
NFC_ATTR_TARGET_ISO15693_DSFID,
NFC_ATTR_TARGET_ISO15693_UID,
+ NFC_ATTR_SE_PARAMS,
+ NFC_ATTR_VENDOR_ID,
+ NFC_ATTR_VENDOR_SUBCMD,
+ NFC_ATTR_VENDOR_DATA,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 35f5f4c..2119c7c 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -86,6 +86,10 @@
#define ACL4_SUPPORT_AUDIT_ACL 0x04
#define ACL4_SUPPORT_ALARM_ACL 0x08
+#define NFS4_ACL_AUTO_INHERIT 0x00000001
+#define NFS4_ACL_PROTECTED 0x00000002
+#define NFS4_ACL_DEFAULTED 0x00000004
+
#define NFS4_ACE_FILE_INHERIT_ACE 0x00000001
#define NFS4_ACE_DIRECTORY_INHERIT_ACE 0x00000002
#define NFS4_ACE_NO_PROPAGATE_INHERIT_ACE 0x00000004
@@ -93,6 +97,7 @@
#define NFS4_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010
#define NFS4_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020
#define NFS4_ACE_IDENTIFIER_GROUP 0x00000040
+#define NFS4_ACE_INHERITED_ACE 0x00000080
#define NFS4_ACE_READ_DATA 0x00000001
#define NFS4_ACE_LIST_DIRECTORY 0x00000001
@@ -106,6 +111,8 @@
#define NFS4_ACE_DELETE_CHILD 0x00000040
#define NFS4_ACE_READ_ATTRIBUTES 0x00000080
#define NFS4_ACE_WRITE_ATTRIBUTES 0x00000100
+#define NFS4_ACE_WRITE_RETENTION 0x00000200
+#define NFS4_ACE_WRITE_RETENTION_HOLD 0x00000400
#define NFS4_ACE_DELETE 0x00010000
#define NFS4_ACE_READ_ACL 0x00020000
#define NFS4_ACE_WRITE_ACL 0x00040000
@@ -162,13 +169,6 @@
*/
#define NFS4_MAX_BACK_CHANNEL_OPS 2
-enum nfs4_acl_whotype {
- NFS4_ACL_WHO_NAMED = 0,
- NFS4_ACL_WHO_OWNER,
- NFS4_ACL_WHO_GROUP,
- NFS4_ACL_WHO_EVERYONE,
-};
-
#endif /* _UAPI_LINUX_NFS4_H */
/*
diff --git a/include/uapi/linux/nfs_idmap.h b/include/uapi/linux/nfs_idmap.h
index 8d4b1c7..038e36c 100644
--- a/include/uapi/linux/nfs_idmap.h
+++ b/include/uapi/linux/nfs_idmap.h
@@ -1,5 +1,5 @@
/*
- * include/linux/nfs_idmap.h
+ * include/uapi/linux/nfs_idmap.h
*
* UID and GID to name mapping for clients.
*
diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
index 1fdc95b..28ec6c9 100644
--- a/include/uapi/linux/nfsd/debug.h
+++ b/include/uapi/linux/nfsd/debug.h
@@ -12,14 +12,6 @@
#include <linux/sunrpc/debug.h>
/*
- * Enable debugging for nfsd.
- * Requires RPC_DEBUG.
- */
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define NFSD_DEBUG 1
-#endif
-
-/*
* knfsd debug flags
*/
#define NFSDDBG_SOCK 0x0001
@@ -32,6 +24,7 @@
#define NFSDDBG_REPCACHE 0x0080
#define NFSDDBG_XDR 0x0100
#define NFSDDBG_LOCKD 0x0200
+#define NFSDDBG_PNFS 0x0400
#define NFSDDBG_ALL 0x7FFF
#define NFSDDBG_NOCHANGE 0xFFFF
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index 584b6ef..0df7bd5 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -21,6 +21,9 @@
/*
* Export flags.
+ *
+ * Please update the expflags[] array in fs/nfsd/export.c when adding
+ * a new flag.
*/
#define NFSEXP_READONLY 0x0001
#define NFSEXP_INSECURE_PORT 0x0002
@@ -47,8 +50,10 @@
* exported filesystem.
*/
#define NFSEXP_V4ROOT 0x10000
+#define NFSEXP_PNFS 0x20000
+
/* All flags that we claim to support. (Note we don't support NOACL.) */
-#define NFSEXP_ALLFLAGS 0x1FE7F
+#define NFSEXP_ALLFLAGS 0x3FE7F
/* The flags that may vary depending on security flavor: */
#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index b37bd5a..c0ab6b0 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -25,10 +25,30 @@
*
*/
+/*
+ * This header file defines the userspace API to the wireless stack. Please
+ * be careful not to break things - i.e. don't move anything around or so
+ * unless you can demonstrate that it breaks neither API nor ABI.
+ *
+ * Additions to the API should be accompanied by actual implementations in
+ * an upstream driver, so that example implementations exist in case there
+ * are ever concerns about the precise semantics of the API or changes are
+ * needed, and to ensure that code for dead (no longer implemented) API
+ * can actually be identified and removed.
+ * Nonetheless, semantics should also be documented carefully in this file.
+ */
+
#include <linux/types.h>
#define NL80211_GENL_NAME "nl80211"
+#define NL80211_MULTICAST_GROUP_CONFIG "config"
+#define NL80211_MULTICAST_GROUP_SCAN "scan"
+#define NL80211_MULTICAST_GROUP_REG "regulatory"
+#define NL80211_MULTICAST_GROUP_MLME "mlme"
+#define NL80211_MULTICAST_GROUP_VENDOR "vendor"
+#define NL80211_MULTICAST_GROUP_TESTMODE "testmode"
+
/**
* DOC: Station handling
*
@@ -173,8 +193,8 @@
* %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME.
*
* @NL80211_CMD_GET_INTERFACE: Request an interface's configuration;
- * either a dump request on a %NL80211_ATTR_WIPHY or a specific get
- * on an %NL80211_ATTR_IFINDEX is supported.
+ * either a dump request for all interfaces or a specific get with a
+ * single %NL80211_ATTR_IFINDEX is supported.
* @NL80211_CMD_SET_INTERFACE: Set type of a virtual interface, requires
* %NL80211_ATTR_IFINDEX and %NL80211_ATTR_IFTYPE.
* @NL80211_CMD_NEW_INTERFACE: Newly created virtual interface or response
@@ -252,7 +272,18 @@
* %NL80211_ATTR_IFINDEX.
*
* @NL80211_CMD_GET_REG: ask the wireless core to send us its currently set
- * regulatory domain.
+ * regulatory domain. If %NL80211_ATTR_WIPHY is specified and the device
+ * has a private regulatory domain, it will be returned. Otherwise, the
+ * global regdomain will be returned.
+ * A device will have a private regulatory domain if it uses the
+ * regulatory_hint() API. Even when a private regdomain is used the channel
+ * information will still be mended according to further hints from
+ * the regulatory core to help with compliance. A dump version of this API
+ * is now available which will return the global regdomain as well as
+ * all private regdomains of present wiphys (for those that have it).
+ * If a wiphy is self-managed (%NL80211_ATTR_WIPHY_SELF_MANAGED_REG), then
+ * its private regdomain is the only valid one for it. The regulatory
+ * core is not used to help with compliance in this case.
* @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command
* after being queried by the kernel. CRDA replies by sending a regulatory
* domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our
@@ -306,7 +337,9 @@
* if passed, define which channels should be scanned; if not
* passed, all channels allowed for the current regulatory domain
* are used. Extra IEs can also be passed from the userspace by
- * using the %NL80211_ATTR_IE attribute.
+ * using the %NL80211_ATTR_IE attribute. The first cycle of the
+ * scheduled scan can be delayed by %NL80211_ATTR_SCHED_SCAN_DELAY
+ * if it is supplied.
* @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if
* scheduled scan is not running. The caller may assume that as soon
* as the call returns, it is safe to start a new scheduled scan again.
@@ -774,6 +807,10 @@
* peer given by %NL80211_ATTR_MAC. Both peers must be on the base channel
* when this command completes.
*
+ * @NL80211_CMD_WIPHY_REG_CHANGE: Similar to %NL80211_CMD_REG_CHANGE, but used
+ * as an event to indicate changes for devices with wiphy-specific regdom
+ * management.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -958,6 +995,8 @@ enum nl80211_commands {
NL80211_CMD_TDLS_CHANNEL_SWITCH,
NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH,
+ NL80211_CMD_WIPHY_REG_CHANGE,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1655,6 +1694,13 @@ enum nl80211_commands {
* @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface
* creation then the new interface will be owned by the netlink socket
* that created it and will be destroyed when the socket is closed.
+ * If set during scheduled scan start then the new scan req will be
+ * owned by the netlink socket that created it and the scheduled scan will
+ * be stopped when the socket is closed.
+ * If set during configuration of regulatory indoor operation then the
+ * regulatory indoor configuration would be owned by the netlink socket
+ * that configured the indoor setting, and the indoor operation would be
+ * cleared when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1688,6 +1734,33 @@ enum nl80211_commands {
*
* @NL80211_ATTR_MAC_MASK: MAC address mask
*
+ * @NL80211_ATTR_WIPHY_SELF_MANAGED_REG: flag attribute indicating this device
+ * is self-managing its regulatory information and any regulatory domain
+ * obtained from it is coming from the device's wiphy and not the global
+ * cfg80211 regdomain.
+ *
+ * @NL80211_ATTR_EXT_FEATURES: extended feature flags contained in a byte
+ * array. The feature flags are identified by their bit index (see &enum
+ * nl80211_ext_feature_index). The bit index is ordered starting at the
+ * least-significant bit of the first byte in the array, i.e. bit index 0
+ * is located at bit 0 of byte 0; bit index 25 would be located at bit 1
+ * of byte 3 (u8 array).
+ *
+ * @NL80211_ATTR_SURVEY_RADIO_STATS: Request overall radio statistics to be
+ * returned along with other survey data. If set, @NL80211_CMD_GET_SURVEY
+ * may return a survey entry without a channel indicating global radio
+ * statistics (only some values are valid and make sense.)
+ * For devices that don't return such an entry even then, the information
+ * should be contained in the result as the sum of the respective counters
+ * over all channels.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_DELAY: delay before the first cycle of a
+ * scheduled scan (or a WoWLAN net-detect scan) is started, u32
+ * in seconds.
+ *
+ * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device
+ * is operating in an indoor environment.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
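The byte/bit layout described for %NL80211_ATTR_EXT_FEATURES above (bit index 0 at bit 0 of byte 0, bit index 25 at bit 1 of byte 3) reduces to simple index arithmetic; a hedged helper, not part of the UAPI:

#include <linux/nl80211.h>

/* Test one nl80211_ext_feature_index bit in the NL80211_ATTR_EXT_FEATURES
 * byte array as described above: byte = idx / 8, bit = idx % 8. */
static int ext_feature_isset(const unsigned char *ft, int ft_len,
                             enum nl80211_ext_feature_index idx)
{
        return idx / 8 < ft_len && (ft[idx / 8] & (1 << (idx % 8)));
}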
@@ -2045,6 +2118,18 @@ enum nl80211_attrs {
NL80211_ATTR_MAC_MASK,
+ NL80211_ATTR_WIPHY_SELF_MANAGED_REG,
+
+ NL80211_ATTR_EXT_FEATURES,
+
+ NL80211_ATTR_SURVEY_RADIO_STATS,
+
+ NL80211_ATTR_NETNS_FD,
+
+ NL80211_ATTR_SCHED_SCAN_DELAY,
+
+ NL80211_ATTR_REG_INDOOR,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2085,7 +2170,7 @@ enum nl80211_attrs {
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_HT_RATES 77
-#define NL80211_MAX_SUPP_REG_RULES 32
+#define NL80211_MAX_SUPP_REG_RULES 64
#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0
#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16
#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
@@ -2225,8 +2310,15 @@ struct nl80211_sta_flag_update {
* @NL80211_RATE_INFO_VHT_MCS: MCS index for VHT (u8)
* @NL80211_RATE_INFO_VHT_NSS: number of streams in VHT (u8)
* @NL80211_RATE_INFO_80_MHZ_WIDTH: 80 MHz VHT rate
- * @NL80211_RATE_INFO_80P80_MHZ_WIDTH: 80+80 MHz VHT rate
+ * @NL80211_RATE_INFO_80P80_MHZ_WIDTH: unused - 80+80 is treated the
+ * same as 160 for purposes of the bitrates
* @NL80211_RATE_INFO_160_MHZ_WIDTH: 160 MHz VHT rate
+ * @NL80211_RATE_INFO_10_MHZ_WIDTH: 10 MHz width - note that this is
+ * a legacy rate and will be reported as the actual bitrate, i.e.
+ * half the base (20 MHz) rate
+ * @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is
+ * a legacy rate and will be reported as the actual bitrate, i.e.
+ * a quarter of the base (20 MHz) rate
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -2241,6 +2333,8 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_80_MHZ_WIDTH,
NL80211_RATE_INFO_80P80_MHZ_WIDTH,
NL80211_RATE_INFO_160_MHZ_WIDTH,
+ NL80211_RATE_INFO_10_MHZ_WIDTH,
+ NL80211_RATE_INFO_5_MHZ_WIDTH,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -2285,18 +2379,24 @@ enum nl80211_sta_bss_param {
*
* @__NL80211_STA_INFO_INVALID: attribute number 0 is reserved
* @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs)
- * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station)
- * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
- * @NL80211_STA_INFO_RX_BYTES64: total received bytes (u64, from this station)
- * @NL80211_STA_INFO_TX_BYTES64: total transmitted bytes (u64, to this station)
+ * @NL80211_STA_INFO_RX_BYTES: total received bytes (MPDU length)
+ * (u32, from this station)
+ * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (MPDU length)
+ * (u32, to this station)
+ * @NL80211_STA_INFO_RX_BYTES64: total received bytes (MPDU length)
+ * (u64, from this station)
+ * @NL80211_STA_INFO_TX_BYTES64: total transmitted bytes (MPDU length)
+ * (u64, to this station)
* @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
* @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
* containing info as possible, see &enum nl80211_rate_info
- * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station)
- * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this
- * station)
- * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station)
- * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station)
+ * @NL80211_STA_INFO_RX_PACKETS: total received packet (MSDUs and MMPDUs)
+ * (u32, from this station)
+ * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (MSDUs and MMPDUs)
+ * (u32, to this station)
+ * @NL80211_STA_INFO_TX_RETRIES: total retries (MPDUs) (u32, to this station)
+ * @NL80211_STA_INFO_TX_FAILED: total failed packets (MPDUs)
+ * (u32, to this station)
* @NL80211_STA_INFO_SIGNAL_AVG: signal strength average (u8, dBm)
* @NL80211_STA_INFO_LLID: the station's mesh LLID
* @NL80211_STA_INFO_PLID: the station's mesh PLID
@@ -2320,6 +2420,16 @@ enum nl80211_sta_bss_param {
* Same format as NL80211_STA_INFO_CHAIN_SIGNAL.
* @NL80211_STA_EXPECTED_THROUGHPUT: expected throughput considering also the
* 802.11 header (u32, kbps)
+ * @NL80211_STA_INFO_RX_DROP_MISC: RX packets dropped for unspecified reasons
+ * (u64)
+ * @NL80211_STA_INFO_BEACON_RX: number of beacons received from this peer (u64)
+ * @NL80211_STA_INFO_BEACON_SIGNAL_AVG: signal strength average
+ * for beacons only (u8, dBm)
+ * @NL80211_STA_INFO_TID_STATS: per-TID statistics (see &enum nl80211_tid_stats)
+ * This is a nested attribute where each inner attribute number is the
+ * TID+1 and the special TID 16 (i.e. value 17) is used for non-QoS frames;
+ * each one of those is again nested with &enum nl80211_tid_stats
+ * attributes carrying the actual values.
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -2352,6 +2462,10 @@ enum nl80211_sta_info {
NL80211_STA_INFO_CHAIN_SIGNAL,
NL80211_STA_INFO_CHAIN_SIGNAL_AVG,
NL80211_STA_INFO_EXPECTED_THROUGHPUT,
+ NL80211_STA_INFO_RX_DROP_MISC,
+ NL80211_STA_INFO_BEACON_RX,
+ NL80211_STA_INFO_BEACON_SIGNAL_AVG,
+ NL80211_STA_INFO_TID_STATS,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -2359,6 +2473,31 @@ enum nl80211_sta_info {
};
/**
+ * enum nl80211_tid_stats - per TID statistics attributes
+ * @__NL80211_TID_STATS_INVALID: attribute number 0 is reserved
+ * @NL80211_TID_STATS_RX_MSDU: number of MSDUs received (u64)
+ * @NL80211_TID_STATS_TX_MSDU: number of MSDUs transmitted (or
+ * attempted to transmit; u64)
+ * @NL80211_TID_STATS_TX_MSDU_RETRIES: number of retries for
+ * transmitted MSDUs (not counting the first attempt; u64)
+ * @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted
+ * MSDUs (u64)
+ * @NUM_NL80211_TID_STATS: number of attributes here
+ * @NL80211_TID_STATS_MAX: highest numbered attribute here
+ */
+enum nl80211_tid_stats {
+ __NL80211_TID_STATS_INVALID,
+ NL80211_TID_STATS_RX_MSDU,
+ NL80211_TID_STATS_TX_MSDU,
+ NL80211_TID_STATS_TX_MSDU_RETRIES,
+ NL80211_TID_STATS_TX_MSDU_FAILED,
+
+ /* keep last */
+ NUM_NL80211_TID_STATS,
+ NL80211_TID_STATS_MAX = NUM_NL80211_TID_STATS - 1
+};
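For NL80211_STA_INFO_TID_STATS the nested container is indexed by TID+1, with 17 reserved for non-QoS traffic, so recovering the TID from an inner attribute number is plain arithmetic (the helper below is illustrative only):

/* Map a nested NL80211_STA_INFO_TID_STATS attribute number back to a TID;
 * returns 0..15 for QoS TIDs, 16 for the non-QoS bucket, -1 otherwise. */
static int tid_from_attr(int attr)
{
        return (attr >= 1 && attr <= 17) ? attr - 1 : -1;
}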
+
+/**
* enum nl80211_mpath_flags - nl80211 mesh path flags
*
* @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active
@@ -2481,16 +2620,17 @@ enum nl80211_band_attr {
* an indoor surroundings, i.e., it is connected to AC power (and not
* through portable DC inverters) or is under the control of a master
* that is acting as an AP and is connected to AC power.
- * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
+ * @NL80211_FREQUENCY_ATTR_IR_CONCURRENT: IR operation is allowed on this
* channel if it's connected concurrently to a BSS on the same channel on
* the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
- * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
- * channel that has the GO_CONCURRENT attribute set can be done when there
- * is a clear assessment that the device is operating under the guidance of
- * an authorized master, i.e., setting up a GO while the device is also
- * connected to an AP with DFS and radar detection on the UNII band (it is
- * up to user-space, i.e., wpa_supplicant to perform the required
- * verifications)
+ * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO or TDLS
+ * off-channel on a channel that has the IR_CONCURRENT attribute set can be
+ * done when there is a clear assessment that the device is operating under
+ * the guidance of an authorized master, i.e., setting up a GO or TDLS
+ * off-channel while the device is also connected to an AP with DFS and
+ * radar detection on the UNII band (it is up to user-space, i.e.,
+ * wpa_supplicant to perform the required verifications). Using this
+ * attribute for IR is disallowed for master interfaces (IBSS, AP).
* @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
* on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
@@ -2502,7 +2642,7 @@ enum nl80211_band_attr {
* See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
* for more information on the FCC description of the relaxations allowed
* by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
- * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
+ * NL80211_FREQUENCY_ATTR_IR_CONCURRENT.
*/
enum nl80211_frequency_attr {
__NL80211_FREQUENCY_ATTR_INVALID,
@@ -2520,7 +2660,7 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_160MHZ,
NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
- NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
+ NL80211_FREQUENCY_ATTR_IR_CONCURRENT,
NL80211_FREQUENCY_ATTR_NO_20MHZ,
NL80211_FREQUENCY_ATTR_NO_10MHZ,
@@ -2533,6 +2673,8 @@ enum nl80211_frequency_attr {
#define NL80211_FREQUENCY_ATTR_PASSIVE_SCAN NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_NO_IBSS NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR
+#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
+ NL80211_FREQUENCY_ATTR_IR_CONCURRENT
/**
* enum nl80211_bitrate_attr - bitrate attributes
@@ -2691,7 +2833,7 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
* base on contiguous rules and wider channels will be allowed to cross
* multiple contiguous/overlapping frequency ranges.
- * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT
* @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
* @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
* @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -2708,7 +2850,7 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_IR = 1<<7,
__NL80211_RRF_NO_IBSS = 1<<8,
NL80211_RRF_AUTO_BW = 1<<11,
- NL80211_RRF_GO_CONCURRENT = 1<<12,
+ NL80211_RRF_IR_CONCURRENT = 1<<12,
NL80211_RRF_NO_HT40MINUS = 1<<13,
NL80211_RRF_NO_HT40PLUS = 1<<14,
NL80211_RRF_NO_80MHZ = 1<<15,
@@ -2720,6 +2862,7 @@ enum nl80211_reg_rule_flags {
#define NL80211_RRF_NO_IR NL80211_RRF_NO_IR
#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
NL80211_RRF_NO_HT40PLUS)
+#define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT
/* For backport compatibility with older userspace */
#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
@@ -2772,16 +2915,18 @@ enum nl80211_user_reg_hint_type {
* @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel
* @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm)
* @NL80211_SURVEY_INFO_IN_USE: channel is currently being used
- * @NL80211_SURVEY_INFO_CHANNEL_TIME: amount of time (in ms) that the radio
- * spent on this channel
- * @NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY: amount of the time the primary
+ * @NL80211_SURVEY_INFO_TIME: amount of time (in ms) that the radio
+ * was turned on (on channel or globally)
+ * @NL80211_SURVEY_INFO_TIME_BUSY: amount of the time the primary
* channel was sensed busy (either due to activity or energy detect)
- * @NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: amount of time the extension
+ * @NL80211_SURVEY_INFO_TIME_EXT_BUSY: amount of time the extension
* channel was sensed busy
- * @NL80211_SURVEY_INFO_CHANNEL_TIME_RX: amount of time the radio spent
- * receiving data
- * @NL80211_SURVEY_INFO_CHANNEL_TIME_TX: amount of time the radio spent
- * transmitting data
+ * @NL80211_SURVEY_INFO_TIME_RX: amount of time the radio spent
+ * receiving data (on channel or globally)
+ * @NL80211_SURVEY_INFO_TIME_TX: amount of time the radio spent
+ * transmitting data (on channel or globally)
+ * @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan
+ * (on this channel or globally)
* @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
* currently defined
* @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
@@ -2791,17 +2936,25 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_FREQUENCY,
NL80211_SURVEY_INFO_NOISE,
NL80211_SURVEY_INFO_IN_USE,
- NL80211_SURVEY_INFO_CHANNEL_TIME,
- NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
- NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
- NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
- NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
+ NL80211_SURVEY_INFO_TIME,
+ NL80211_SURVEY_INFO_TIME_BUSY,
+ NL80211_SURVEY_INFO_TIME_EXT_BUSY,
+ NL80211_SURVEY_INFO_TIME_RX,
+ NL80211_SURVEY_INFO_TIME_TX,
+ NL80211_SURVEY_INFO_TIME_SCAN,
/* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST,
NL80211_SURVEY_INFO_MAX = __NL80211_SURVEY_INFO_AFTER_LAST - 1
};
+/* keep old names for compatibility */
+#define NL80211_SURVEY_INFO_CHANNEL_TIME NL80211_SURVEY_INFO_TIME
+#define NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY NL80211_SURVEY_INFO_TIME_BUSY
+#define NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY NL80211_SURVEY_INFO_TIME_EXT_BUSY
+#define NL80211_SURVEY_INFO_CHANNEL_TIME_RX NL80211_SURVEY_INFO_TIME_RX
+#define NL80211_SURVEY_INFO_CHANNEL_TIME_TX NL80211_SURVEY_INFO_TIME_TX
+
/**
* enum nl80211_mntr_flags - monitor configuration flags
*
@@ -2966,7 +3119,8 @@ enum nl80211_mesh_power_mode {
*
* @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've
* established peering with for longer than this time (in seconds), then
- * remove it from the STA's list of peers. Default is 30 minutes.
+ * remove it from the STA's list of peers. You may set this to 0 to disable
+ * the removal of the STA. Default is 30 minutes.
*
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
@@ -3238,6 +3392,9 @@ enum nl80211_bss {
/**
* enum nl80211_bss_status - BSS "status"
* @NL80211_BSS_STATUS_AUTHENTICATED: Authenticated with this BSS.
+ * Note that this is no longer used since cfg80211 no longer
+ * keeps track of whether or not authentication was done with
+ * a given BSS.
* @NL80211_BSS_STATUS_ASSOCIATED: Associated with this BSS.
* @NL80211_BSS_STATUS_IBSS_JOINED: Joined to this IBSS.
*
@@ -3565,6 +3722,8 @@ struct nl80211_pattern_support {
* @NL80211_WOWLAN_TRIG_ANY: wake up on any activity, do not really put
* the chip into a special state -- works best with chips that have
* support for low-power operation already (flag)
+ * Note that this mode is incompatible with all of the others, if
+ * any others are even supported by the device.
* @NL80211_WOWLAN_TRIG_DISCONNECT: wake up on disconnect, the way disconnect
* is detected is implementation-specific (flag)
* @NL80211_WOWLAN_TRIG_MAGIC_PKT: wake up on magic packet (6x 0xff, followed
@@ -3621,9 +3780,12 @@ struct nl80211_pattern_support {
* @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network
* is detected. This is a nested attribute that contains the
* same attributes used with @NL80211_CMD_START_SCHED_SCAN. It
- * specifies how the scan is performed (e.g. the interval and the
- * channels to scan) as well as the scan results that will
- * trigger a wake (i.e. the matchsets).
+ * specifies how the scan is performed (e.g. the interval, the
+ * channels to scan and the initial delay) as well as the scan
+ * results that will trigger a wake (i.e. the matchsets). This
+ * attribute is also sent in a response to
+ * @NL80211_CMD_GET_WIPHY, indicating the number of match sets
+ * supported by the driver (u32).
* @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute
* containing an array with information about what triggered the
* wake up. If no elements are present in the array, it means
@@ -4194,6 +4356,21 @@ enum nl80211_feature_flags {
};
/**
+ * enum nl80211_ext_feature_index - bit index of extended features.
+ * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
+ *
+ * @NUM_NL80211_EXT_FEATURES: number of extended features.
+ * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
+ */
+enum nl80211_ext_feature_index {
+ NL80211_EXT_FEATURE_VHT_IBSS,
+
+ /* add new features before the definition below */
+ NUM_NL80211_EXT_FEATURES,
+ MAX_NL80211_EXT_FEATURES = NUM_NL80211_EXT_FEATURES - 1
+};
+
+/**
* enum nl80211_probe_resp_offload_support_attr - optional supported
* protocols for probe-response offloading by the driver/FW.
* To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute.
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 26386cf..732b32e 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -115,7 +115,13 @@ struct nvme_id_ns {
__le16 nawun;
__le16 nawupf;
__le16 nacwu;
- __u8 rsvd40[80];
+ __le16 nabsn;
+ __le16 nabo;
+ __le16 nabspf;
+ __u16 rsvd46;
+ __le64 nvmcap[2];
+ __u8 rsvd64[40];
+ __u8 nguid[16];
__u8 eui64[8];
struct nvme_lbaf lbaf[16];
__u8 rsvd192[192];
@@ -124,10 +130,22 @@ struct nvme_id_ns {
enum {
NVME_NS_FEAT_THIN = 1 << 0,
+ NVME_NS_FLBAS_LBA_MASK = 0xf,
+ NVME_NS_FLBAS_META_EXT = 0x10,
NVME_LBAF_RP_BEST = 0,
NVME_LBAF_RP_BETTER = 1,
NVME_LBAF_RP_GOOD = 2,
NVME_LBAF_RP_DEGRADED = 3,
+ NVME_NS_DPC_PI_LAST = 1 << 4,
+ NVME_NS_DPC_PI_FIRST = 1 << 3,
+ NVME_NS_DPC_PI_TYPE3 = 1 << 2,
+ NVME_NS_DPC_PI_TYPE2 = 1 << 1,
+ NVME_NS_DPC_PI_TYPE1 = 1 << 0,
+ NVME_NS_DPS_PI_FIRST = 1 << 3,
+ NVME_NS_DPS_PI_MASK = 0x7,
+ NVME_NS_DPS_PI_TYPE1 = 1,
+ NVME_NS_DPS_PI_TYPE2 = 2,
+ NVME_NS_DPS_PI_TYPE3 = 3,
};
struct nvme_smart_log {
@@ -161,6 +179,10 @@ enum {
NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
};
+enum {
+ NVME_AER_NOTICE_NS_CHANGED = 0x0002,
+};
+
struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
@@ -261,6 +283,10 @@ enum {
NVME_RW_DSM_LATENCY_LOW = 3 << 4,
NVME_RW_DSM_SEQ_REQ = 1 << 6,
NVME_RW_DSM_COMPRESSED = 1 << 7,
+ NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
+ NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
+ NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
+ NVME_RW_PRINFO_PRACT = 1 << 13,
};
struct nvme_dsm_cmd {
@@ -549,11 +575,14 @@ struct nvme_passthru_cmd {
__u32 result;
};
+#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+
#define nvme_admin_cmd nvme_passthru_cmd
#define NVME_IOCTL_ID _IO('N', 0x40)
#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
+#define NVME_IOCTL_RESET _IO('N', 0x44)
#endif /* _UAPI_LINUX_NVME_H */
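The NVME_VS() macro above packs a version in the same layout as the controller's VS register (major in bits 31:16, minor in bits 15:8), so NVME_VS(1, 2) evaluates to 0x10200. A sketch of how a tool might compare against it, assuming headers with this addition are installed:

#include <stdio.h>
#include <linux/nvme.h>

/* Report whether a controller version (as read from the VS register)
 * is at least NVMe 1.2; purely illustrative. */
static int supports_nvme_1_2(unsigned int vs)
{
        return vs >= NVME_VS(1, 2);     /* NVME_VS(1, 2) == 0x10200 */
}

int main(void)
{
        printf("NVME_VS(1, 2) = 0x%x\n", NVME_VS(1, 2));
        return 0;
}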
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index f714e86..1dab776 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -153,6 +153,8 @@ enum ovs_packet_cmd {
* flow key against the kernel's.
* @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used
* for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes.
+ * Also used in upcall when %OVS_ACTION_ATTR_USERSPACE has optional
+ * %OVS_USERSPACE_ATTR_ACTIONS attribute.
* @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
* notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
* %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
@@ -252,11 +254,21 @@ enum ovs_vport_attr {
#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+enum {
+ OVS_VXLAN_EXT_UNSPEC,
+ OVS_VXLAN_EXT_GBP, /* Flag or __u32 */
+ __OVS_VXLAN_EXT_MAX,
+};
+
+#define OVS_VXLAN_EXT_MAX (__OVS_VXLAN_EXT_MAX - 1)
+
+
/* OVS_VPORT_ATTR_OPTIONS attributes for tunnels.
*/
enum {
OVS_TUNNEL_ATTR_UNSPEC,
OVS_TUNNEL_ATTR_DST_PORT, /* 16-bit UDP port, used by L4 tunnels. */
+ OVS_TUNNEL_ATTR_EXTENSION,
__OVS_TUNNEL_ATTR_MAX
};
@@ -328,6 +340,7 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */
OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */
OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */
+ OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested OVS_VXLAN_EXT_* */
__OVS_TUNNEL_KEY_ATTR_MAX
};
@@ -448,6 +461,14 @@ struct ovs_key_nd {
* a wildcarded match. Omitting attribute is treated as wildcarding all
* corresponding fields. Optional for all requests. If not present,
* all flow key bits are exact match bits.
+ * @OVS_FLOW_ATTR_UFID: A value between 1-16 octets specifying a unique
+ * identifier for the flow. Causes the flow to be indexed by this value rather
+ * than the value of the %OVS_FLOW_ATTR_KEY attribute. Optional for all
+ * requests. Present in notifications if the flow was created with this
+ * attribute.
+ * @OVS_FLOW_ATTR_UFID_FLAGS: A 32-bit value of OR'd %OVS_UFID_F_*
+ * flags that provide alternative semantics for flow installation and
+ * retrieval. Optional for all requests.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_FLOW_* commands.
@@ -463,12 +484,24 @@ enum ovs_flow_attr {
OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */
OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error
* logging should be suppressed. */
+ OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */
+ OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
__OVS_FLOW_ATTR_MAX
};
#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
/**
+ * Omit attributes for notifications.
+ *
+ * If a datapath request contains an %OVS_UFID_F_OMIT_* flag, then the datapath
+ * may omit the corresponding %OVS_FLOW_ATTR_* from the response.
+ */
+#define OVS_UFID_F_OMIT_KEY (1 << 0)
+#define OVS_UFID_F_OMIT_MASK (1 << 1)
+#define OVS_UFID_F_OMIT_ACTIONS (1 << 2)
+
+/**
* enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action.
* @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with
* @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of
@@ -497,6 +530,7 @@ enum ovs_sample_attr {
* copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
* @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get
* tunnel info.
+ * @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall.
*/
enum ovs_userspace_attr {
OVS_USERSPACE_ATTR_UNSPEC,
@@ -504,6 +538,7 @@ enum ovs_userspace_attr {
OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */
OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port
* to get tunnel info. */
+ OVS_USERSPACE_ATTR_ACTIONS, /* Optional flag to get actions. */
__OVS_USERSPACE_ATTR_MAX
};
@@ -568,6 +603,12 @@ struct ovs_action_hash {
* @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The
* single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its
* value.
+ * @OVS_ACTION_ATTR_SET_MASKED: Replaces the contents of an existing header. A
+ * nested %OVS_KEY_ATTR_* attribute specifies a header to modify, its value,
+ * and a mask. For every bit set in the mask, the corresponding bit value
+ * is copied from the value to the packet header field, rest of the bits are
+ * left unchanged. The non-masked value bits must be passed in as zeroes.
+ * Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute.
* @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
* packet.
* @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
@@ -586,6 +627,9 @@ struct ovs_action_hash {
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
* type may not be changed.
+ *
+ * @OVS_ACTION_ATTR_SET_TO_MASKED: Kernel internal masked set action translated
+ * from the @OVS_ACTION_ATTR_SET.
*/
enum ovs_action_attr {
@@ -600,8 +644,19 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */
OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */
OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */
+ OVS_ACTION_ATTR_SET_MASKED, /* One nested OVS_KEY_ATTR_* including
+ * data immediately followed by a mask.
+ * The data must be zero for the unmasked
+ * bits. */
- __OVS_ACTION_ATTR_MAX
+ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
+ * from userspace. */
+
+#ifdef __KERNEL__
+ OVS_ACTION_ATTR_SET_TO_MASKED, /* Kernel module internal masked
+ * set action converted from
+ * OVS_ACTION_ATTR_SET. */
+#endif
};
#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1)
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4a1d0cc..efe3443 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -451,6 +451,10 @@
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
+#define PCI_EXP_DEVCTL_READRQ_128B 0x0000 /* 128 Bytes */
+#define PCI_EXP_DEVCTL_READRQ_256B 0x1000 /* 256 Bytes */
+#define PCI_EXP_DEVCTL_READRQ_512B 0x2000 /* 512 Bytes */
+#define PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
#define PCI_EXP_DEVSTA 10 /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
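The new PCI_EXP_DEVCTL_READRQ_* values above are encodings of the Max_Read_Request_Size field (bits 14:12 of Device Control), where the size in bytes is 128 shifted left by the field value. A small decoding sketch under that assumption:

#include <linux/pci_regs.h>

/* Decode Max_Read_Request_Size from a Device Control register value:
 * the 3-bit field at bits 14:12 encodes 128 << field bytes. */
static unsigned int pcie_readrq_bytes(unsigned short devctl)
{
        return 128u << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}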
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9b79abb..d97f84c 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -152,21 +152,44 @@ enum perf_event_sample_format {
* The branch types can be combined, however BRANCH_ANY covers all types
* of branches and therefore it supersedes all the other types.
*/
+enum perf_branch_sample_type_shift {
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
+
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
+ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
+
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
+ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
+
+ PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
+};
+
enum perf_branch_sample_type {
- PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */
- PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */
- PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */
-
- PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */
- PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */
- PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */
- PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */
- PERF_SAMPLE_BRANCH_ABORT_TX = 1U << 7, /* transaction aborts */
- PERF_SAMPLE_BRANCH_IN_TX = 1U << 8, /* in transaction */
- PERF_SAMPLE_BRANCH_NO_TX = 1U << 9, /* not in transaction */
- PERF_SAMPLE_BRANCH_COND = 1U << 10, /* conditional branches */
-
- PERF_SAMPLE_BRANCH_MAX = 1U << 11, /* non-ABI */
+ PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+ PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+ PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
+
+ PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
+
+ PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
+
+ PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
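The rewrite above only re-expresses each PERF_SAMPLE_BRANCH_* flag as 1U shifted by its _SHIFT counterpart, so the ABI values are unchanged; a compile-time sketch of that invariant plus a typical attribute setup (helper name is illustrative):

#include <linux/perf_event.h>

/* The flag values are unchanged: each one is 1U << its shift. */
_Static_assert(PERF_SAMPLE_BRANCH_COND ==
               1U << PERF_SAMPLE_BRANCH_COND_SHIFT, "branch flag layout");

/* Example: request user-space call branches in branch sampling. */
static void setup_branch_sampling(struct perf_event_attr *attr)
{
        attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
        attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
                                   PERF_SAMPLE_BRANCH_ANY_CALL;
}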
#define PERF_SAMPLE_BRANCH_PLM_ALL \
@@ -240,6 +263,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
+#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -305,7 +329,8 @@ struct perf_event_attr {
exclude_callchain_user : 1, /* exclude user callchains */
mmap2 : 1, /* include mmap with inode data */
comm_exec : 1, /* flag comm events that are due to an exec */
- __reserved_1 : 39;
+ use_clockid : 1, /* use @clockid for time fields */
+ __reserved_1 : 38;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -334,8 +359,7 @@ struct perf_event_attr {
*/
__u32 sample_stack_user;
- /* Align to u64. */
- __u32 __reserved_2;
+ __s32 clockid;
/*
* Defines set of regs to dump for each sample
* state captured on:
@@ -345,6 +369,12 @@ struct perf_event_attr {
* See asm/perf_regs.h for details.
*/
__u64 sample_regs_intr;
+
+ /*
+ * Wakeup watermark for AUX area
+ */
+ __u32 aux_watermark;
+ __u32 __reserved_2; /* align to __u64 */
};
#define perf_flags(attr) (*(&(attr)->read_format + 1))
@@ -360,6 +390,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -500,9 +531,30 @@ struct perf_event_mmap_page {
* In this case the kernel will not over-write unread data.
*
* See perf_output_put_handle() for the data ordering.
+ *
+ * data_{offset,size} indicate the location and size of the perf record
+ * buffer within the mmapped area.
*/
__u64 data_head; /* head in the data section */
__u64 data_tail; /* user-space written tail */
+ __u64 data_offset; /* where the buffer starts */
+ __u64 data_size; /* data buffer size */
+
+ /*
+ * AUX area is defined by aux_{offset,size} fields that should be set
+ * by the userspace, so that
+ *
+ * aux_offset >= data_offset + data_size
+ *
+ * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
+ *
+ * Ring buffer pointers aux_{head,tail} have the same semantics as
+ * data_{head,tail} and same ordering rules apply.
+ */
+ __u64 aux_head;
+ __u64 aux_tail;
+ __u64 aux_offset;
+ __u64 aux_size;
};
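The aux_* fields above describe how userspace is expected to carve out the AUX area: write aux_offset and aux_size into the already-mapped control page (respecting aux_offset >= data_offset + data_size) and then mmap() that range of the same perf fd. A hedged sketch of that sequence, assuming a 4 KiB page size and omitting error handling:

#include <sys/mman.h>
#include <unistd.h>
#include <linux/perf_event.h>

/* 'base' is the control page returned by the first mmap() of the perf fd,
 * 'data_pages' the number of data pages mapped along with it. */
static void *map_aux_area(int perf_fd, struct perf_event_mmap_page *base,
                          size_t data_pages, size_t aux_pages)
{
        long psz = sysconf(_SC_PAGESIZE);

        base->aux_offset = (data_pages + 1) * psz;  /* past page + data */
        base->aux_size   = aux_pages * psz;

        return mmap(NULL, base->aux_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, perf_fd, base->aux_offset);
}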
#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
@@ -514,6 +566,10 @@ struct perf_event_mmap_page {
#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
/*
+ * Indicates that /proc/PID/maps parsing was truncated by a timeout.
+ */
+#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
+/*
* PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
* different events so can reuse the same bit position.
*/
@@ -725,6 +781,43 @@ enum perf_event_type {
*/
PERF_RECORD_MMAP2 = 10,
+ /*
+ * Records that new data landed in the AUX buffer part.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 aux_offset;
+ * u64 aux_size;
+ * u64 flags;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX = 11,
+
+ /*
+ * Indicates that instruction trace has started
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * };
+ */
+ PERF_RECORD_ITRACE_START = 12,
+
+ /*
+ * Records the dropped/lost sample number.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_LOST_SAMPLES = 13,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -742,6 +835,12 @@ enum perf_callchain_context {
PERF_CONTEXT_MAX = (__u64)-4095,
};
+/**
+ * PERF_RECORD_AUX::flags bits
+ */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 25731df..4f0d1bc 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/pkt_sched.h>
+#ifdef __KERNEL__
/* I think i could have done better macros ; for now this is stolen from
* some arch/mips code - jhs
*/
@@ -35,20 +36,6 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
*
* */
-#define TC_MUNGED _TC_MAKEMASK1(0)
-#define SET_TC_MUNGED(v) ( TC_MUNGED | (v & ~TC_MUNGED))
-#define CLR_TC_MUNGED(v) ( v & ~TC_MUNGED)
-
-#define TC_OK2MUNGE _TC_MAKEMASK1(1)
-#define SET_TC_OK2MUNGE(v) ( TC_OK2MUNGE | (v & ~TC_OK2MUNGE))
-#define CLR_TC_OK2MUNGE(v) ( v & ~TC_OK2MUNGE)
-
-#define S_TC_VERD _TC_MAKE32(2)
-#define M_TC_VERD _TC_MAKEMASK(4,S_TC_VERD)
-#define G_TC_VERD(x) _TC_GETVALUE(x,S_TC_VERD,M_TC_VERD)
-#define V_TC_VERD(x) _TC_MAKEVALUE(x,S_TC_VERD)
-#define SET_TC_VERD(v,n) ((V_TC_VERD(n)) | (v & ~M_TC_VERD))
-
#define S_TC_FROM _TC_MAKE32(6)
#define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM)
#define G_TC_FROM(x) _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM)
@@ -62,18 +49,16 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
#define SET_TC_NCLS(v) ( TC_NCLS | (v & ~TC_NCLS))
#define CLR_TC_NCLS(v) ( v & ~TC_NCLS)
-#define S_TC_RTTL _TC_MAKE32(9)
-#define M_TC_RTTL _TC_MAKEMASK(3,S_TC_RTTL)
-#define G_TC_RTTL(x) _TC_GETVALUE(x,S_TC_RTTL,M_TC_RTTL)
-#define V_TC_RTTL(x) _TC_MAKEVALUE(x,S_TC_RTTL)
-#define SET_TC_RTTL(v,n) ((V_TC_RTTL(n)) | (v & ~M_TC_RTTL))
-
#define S_TC_AT _TC_MAKE32(12)
#define M_TC_AT _TC_MAKEMASK(2,S_TC_AT)
#define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT)
#define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT)
#define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT))
+#define MAX_REC_LOOP 4
+#define MAX_RED_LOOP 4
+#endif
+
/* Action attributes */
enum {
TCA_ACT_UNSPEC,
@@ -93,8 +78,6 @@ enum {
#define TCA_ACT_NOUNBIND 0
#define TCA_ACT_REPLACE 1
#define TCA_ACT_NOREPLACE 0
-#define MAX_REC_LOOP 4
-#define MAX_RED_LOOP 4
#define TC_ACT_UNSPEC (-1)
#define TC_ACT_OK 0
@@ -397,11 +380,43 @@ enum {
TCA_BPF_CLASSID,
TCA_BPF_OPS_LEN,
TCA_BPF_OPS,
+ TCA_BPF_FD,
+ TCA_BPF_NAME,
__TCA_BPF_MAX,
};
#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
+/* Flower classifier */
+
+enum {
+ TCA_FLOWER_UNSPEC,
+ TCA_FLOWER_CLASSID,
+ TCA_FLOWER_INDEV,
+ TCA_FLOWER_ACT,
+ TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
+ TCA_FLOWER_KEY_IP_PROTO, /* u8 */
+ TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_TCP_SRC, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST, /* be16 */
+ __TCA_FLOWER_MAX,
+};
+
+#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+
/* Extended Matches */
struct tcf_ematch_tree_hdr {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index d62316b..8d2530d 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -268,7 +268,8 @@ enum {
TCA_GRED_STAB,
TCA_GRED_DPS,
TCA_GRED_MAX_P,
- __TCA_GRED_MAX,
+ TCA_GRED_LIMIT,
+ __TCA_GRED_MAX,
};
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
@@ -679,6 +680,7 @@ enum {
TCA_CODEL_LIMIT,
TCA_CODEL_INTERVAL,
TCA_CODEL_ECN,
+ TCA_CODEL_CE_THRESHOLD,
__TCA_CODEL_MAX
};
@@ -695,6 +697,7 @@ struct tc_codel_xstats {
__u32 drop_overlimit; /* number of time max qdisc packet limit was hit */
__u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
__u32 dropping; /* are we in dropping state ? */
+ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
};
/* FQ_CODEL */
@@ -707,6 +710,7 @@ enum {
TCA_FQ_CODEL_ECN,
TCA_FQ_CODEL_FLOWS,
TCA_FQ_CODEL_QUANTUM,
+ TCA_FQ_CODEL_CE_THRESHOLD,
__TCA_FQ_CODEL_MAX
};
@@ -730,6 +734,7 @@ struct tc_fq_codel_qd_stats {
*/
__u32 new_flows_len; /* count of flows in new list */
__u32 old_flows_len; /* count of flows in old list */
+ __u32 ce_mark; /* packets above ce_threshold */
};
struct tc_fq_codel_cl_stats {
@@ -774,6 +779,8 @@ enum {
TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
+ TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+
__TCA_FQ_MAX
};
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 89f6350..31891d9 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -185,4 +185,9 @@ struct prctl_mm_map {
#define PR_MPX_ENABLE_MANAGEMENT 43
#define PR_MPX_DISABLE_MANAGEMENT 44
+#define PR_SET_FP_MODE 45
+#define PR_GET_FP_MODE 46
+# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
+# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
+
#endif /* _LINUX_PRCTL_H */
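PR_SET_FP_MODE / PR_GET_FP_MODE follow the usual prctl() calling pattern; at the time of this patch they are consumed by the MIPS FPU-mode handling. A sketch, assuming a kernel that implements these operations:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

        if (mode < 0) {
                perror("PR_GET_FP_MODE");
                return 1;
        }
        printf("FR=%d FRE=%d\n",
               !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

        /* Request 64-bit FP register mode (may fail with EOPNOTSUPP). */
        if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) < 0)
                perror("PR_SET_FP_MODE");
        return 0;
}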
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 3b6cfbe..9c95b2c 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -36,11 +36,12 @@
#include <linux/errno.h>
#include <linux/types.h>
-#define __DQUOT_VERSION__ "dquot_6.5.2"
+#define __DQUOT_VERSION__ "dquot_6.6.0"
-#define MAXQUOTAS 2
+#define MAXQUOTAS 3
#define USRQUOTA 0 /* element used for user quotas */
#define GRPQUOTA 1 /* element used for group quotas */
+#define PRJQUOTA 2 /* element used for project quotas */
/*
* Definitions for the default names of the quotas files.
@@ -48,6 +49,7 @@
#define INITQFNAMES { \
"user", /* USRQUOTA */ \
"group", /* GRPQUOTA */ \
+ "project", /* PRJQUOTA */ \
"undefined", \
};
@@ -126,10 +128,22 @@ struct if_dqblk {
#define IIF_FLAGS 4
#define IIF_ALL (IIF_BGRACE | IIF_IGRACE | IIF_FLAGS)
+enum {
+ DQF_ROOT_SQUASH_B = 0,
+ DQF_SYS_FILE_B = 16,
+ /* Kernel internal flags invisible to userspace */
+ DQF_PRIVATE
+};
+
+/* Root squash enabled (for v1 quota format) */
+#define DQF_ROOT_SQUASH (1 << DQF_ROOT_SQUASH_B)
+/* Quota stored in a system file */
+#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B)
+
struct if_dqinfo {
__u64 dqi_bgrace;
__u64 dqi_igrace;
- __u32 dqi_flags;
+ __u32 dqi_flags; /* DFQ_* */
__u32 dqi_valid;
};
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 49f4210..2ae6131 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -78,6 +78,12 @@
#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */
#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */
#define MD_DISK_REMOVED 3 /* disk is in sync with the raid set */
+#define MD_DISK_CLUSTER_ADD 4 /* Initiate a disk add across the cluster
+ * For clustered environments only.
+ */
+#define MD_DISK_CANDIDATE 5 /* disk is added as spare (local) until confirmed
+ * For clustered environments only.
+ */
#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" is RAID1 config.
* read requests will only be sent here in
@@ -101,6 +107,7 @@ typedef struct mdp_device_descriptor_s {
#define MD_SB_CLEAN 0
#define MD_SB_ERRORS 1
+#define MD_SB_CLUSTERED 5 /* MD is clustered */
#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */
/*
diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
index 74e7c60..1cb8aa6 100644
--- a/include/uapi/linux/raid/md_u.h
+++ b/include/uapi/linux/raid/md_u.h
@@ -62,6 +62,7 @@
#define STOP_ARRAY _IO (MD_MAJOR, 0x32)
#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
+#define CLUSTERED_DISK_NACK _IO (MD_MAJOR, 0x35)
/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 9195095..0f9265c 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -38,6 +38,8 @@
#define RDS_IB_ABI_VERSION 0x301
+#define SOL_RDS 276
+
/*
* setsockopt/getsockopt for SOL_RDS
*/
@@ -48,6 +50,14 @@
#define RDS_RECVERR 5
#define RDS_CONG_MONITOR 6
#define RDS_GET_MR_FOR_DEST 7
+#define SO_RDS_TRANSPORT 8
+
+/* supported values for SO_RDS_TRANSPORT */
+#define RDS_TRANS_IB 0
+#define RDS_TRANS_IWARP 1
+#define RDS_TRANS_TCP 2
+#define RDS_TRANS_COUNT 3
+#define RDS_TRANS_NONE (~0)
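SO_RDS_TRANSPORT is a SOL_RDS-level socket option that pins an RDS socket to one of the RDS_TRANS_* transports before it is bound; a hedged userspace sketch:

#include <sys/socket.h>
#include <linux/rds.h>

/* Bind the RDS socket to the TCP transport; must be done before bind(). */
static int rds_use_tcp(int fd)
{
        int trans = RDS_TRANS_TCP;

        return setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT,
                          &trans, sizeof(trans));
}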
/*
* Control message types for SOL_RDS.
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 9c9b8b4..fdd8f07 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -132,6 +132,13 @@ enum {
RTM_GETMDB = 86,
#define RTM_GETMDB RTM_GETMDB
+ RTM_NEWNSID = 88,
+#define RTM_NEWNSID RTM_NEWNSID
+ RTM_DELNSID = 89,
+#define RTM_DELNSID RTM_DELNSID
+ RTM_GETNSID = 90,
+#define RTM_GETNSID RTM_GETNSID
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -298,6 +305,9 @@ enum rtattr_type_t {
RTA_TABLE,
RTA_MARK,
RTA_MFC_STATS,
+ RTA_VIA,
+ RTA_NEWDST,
+ RTA_PREF,
__RTA_MAX
};
@@ -327,6 +337,10 @@ struct rtnexthop {
#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
+#define RTNH_F_OFFLOAD 8 /* offloaded route */
+#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
+
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
/* Macros to handle hexthops */
@@ -339,6 +353,12 @@ struct rtnexthop {
#define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len))
#define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0)))
+/* RTA_VIA */
+struct rtvia {
+ __kernel_sa_family_t rtvia_family;
+ __u8 rtvia_addr[0];
+};
+
/* RTM_CACHEINFO */
struct rta_cacheinfo {
@@ -389,6 +409,8 @@ enum {
#define RTAX_INITRWND RTAX_INITRWND
RTAX_QUICKACK,
#define RTAX_QUICKACK RTAX_QUICKACK
+ RTAX_CC_ALGO,
+#define RTAX_CC_ALGO RTAX_CC_ALGO
__RTAX_MAX
};
@@ -616,6 +638,10 @@ enum rtnetlink_groups {
#define RTNLGRP_IPV6_NETCONF RTNLGRP_IPV6_NETCONF
RTNLGRP_MDB,
#define RTNLGRP_MDB RTNLGRP_MDB
+ RTNLGRP_MPLS_ROUTE,
+#define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE
+ RTNLGRP_NSID,
+#define RTNLGRP_NSID RTNLGRP_NSID
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
@@ -634,6 +660,7 @@ struct tcamsg {
/* New extended info filters for IFLA_EXT_MASK */
#define RTEXT_FILTER_VF (1 << 0)
#define RTEXT_FILTER_BRVLAN (1 << 1)
+#define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
/* End of information exported to user level */
diff --git a/include/uapi/linux/scif_ioctl.h b/include/uapi/linux/scif_ioctl.h
new file mode 100644
index 0000000..4a94d91
--- /dev/null
+++ b/include/uapi/linux/scif_ioctl.h
@@ -0,0 +1,130 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel SCIF driver.
+ *
+ */
+/*
+ * -----------------------------------------
+ * SCIF IOCTL interface information
+ * -----------------------------------------
+ */
+#ifndef SCIF_IOCTL_H
+#define SCIF_IOCTL_H
+
+#include <linux/types.h>
+
+/**
+ * struct scif_port_id - SCIF port information
+ * @node: node on which port resides
+ * @port: local port number
+ */
+struct scif_port_id {
+ __u16 node;
+ __u16 port;
+};
+
+/**
+ * struct scifioctl_connect - used for SCIF_CONNECT IOCTL
+ * @self: used to read back the assigned port_id
+ * @peer: destination node and port to connect to
+ */
+struct scifioctl_connect {
+ struct scif_port_id self;
+ struct scif_port_id peer;
+};
+
+/**
+ * struct scifioctl_accept - used for SCIF_ACCEPTREQ IOCTL
+ * @flags: flags
+ * @peer: global id of peer endpoint
+ * @endpt: new connected endpoint descriptor
+ */
+struct scifioctl_accept {
+ __s32 flags;
+ struct scif_port_id peer;
+ __u64 endpt;
+};
+
+/**
+ * struct scifioctl_msg - used for SCIF_SEND/SCIF_RECV IOCTL
+ * @msg: message buffer address
+ * @len: message length
+ * @flags: flags
+ * @out_len: number of bytes sent/received
+ */
+struct scifioctl_msg {
+ __u64 msg;
+ __s32 len;
+ __s32 flags;
+ __s32 out_len;
+};
+
+/**
+ * struct scifioctl_node_ids - used for SCIF_GET_NODEIDS IOCTL
+ * @nodes: pointer to an array of node_ids
+ * @self: ID of the current node
+ * @len: length of array
+ */
+struct scifioctl_node_ids {
+ __u64 nodes;
+ __u64 self;
+ __s32 len;
+};
+
+#define SCIF_BIND _IOWR('s', 1, __u64)
+#define SCIF_LISTEN _IOW('s', 2, __s32)
+#define SCIF_CONNECT _IOWR('s', 3, struct scifioctl_connect)
+#define SCIF_ACCEPTREQ _IOWR('s', 4, struct scifioctl_accept)
+#define SCIF_ACCEPTREG _IOWR('s', 5, __u64)
+#define SCIF_SEND _IOWR('s', 6, struct scifioctl_msg)
+#define SCIF_RECV _IOWR('s', 7, struct scifioctl_msg)
+#define SCIF_GET_NODEIDS _IOWR('s', 14, struct scifioctl_node_ids)
+
+#endif /* SCIF_IOCTL_H */
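
A hedged userspace sketch of driving this ioctl interface; it assumes the SCIF driver exposes a /dev/scif character device, that binding port 0 asks the driver to pick a port, and that node 1 has a listener on port 2000 (all illustrative assumptions, not stated in the header):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/scif_ioctl.h>

/* Illustrative only: bind a local SCIF endpoint and connect to a peer. */
int main(void)
{
	struct scifioctl_connect conn = {
		.peer = { .node = 1, .port = 2000 },	/* assumed listener */
	};
	__u64 local_port = 0;			/* 0: let the driver choose (assumed) */
	int fd = open("/dev/scif", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, SCIF_BIND, &local_port) < 0 ||
	    ioctl(fd, SCIF_CONNECT, &conn) < 0) {
		close(fd);
		return 1;
	}
	printf("connected, local port %u\n", conn.self.port);
	close(fd);
	return 0;
}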
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 5e0d0ed..25331f9 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -65,6 +65,10 @@ struct serial_struct {
#define SERIAL_IO_PORT 0
#define SERIAL_IO_HUB6 1
#define SERIAL_IO_MEM 2
+#define SERIAL_IO_MEM32 3
+#define SERIAL_IO_AU 4
+#define SERIAL_IO_TSI 5
+#define SERIAL_IO_MEM32BE 6
#define UART_CLEAR_FIFO 0x01
#define UART_USE_FIFO 0x02
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index c172180..93ba148 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -55,7 +55,8 @@
#define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */
#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
#define PORT_RT2880 29 /* Ralink RT2880 internal UART */
-#define PORT_MAX_8250 29 /* max port ID */
+#define PORT_16550A_FSL64 30 /* Freescale 16550 UART with 64 FIFOs */
+#define PORT_MAX_8250 30 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -248,4 +249,16 @@
/* MESON */
#define PORT_MESON 109
+/* Conexant Digicolor */
+#define PORT_DIGICOLOR 110
+
+/* SPRD SERIAL */
+#define PORT_SPRD 111
+
+/* Cris v10 / v32 SoC */
+#define PORT_CRIS 112
+
+/* STM32 USART */
+#define PORT_STM32 113
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 53af3b7..1e5ac4e7 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -86,7 +86,8 @@
#define UART_FCR6_T_TRIGGER_8 0x10 /* Mask for transmit trigger set at 8 */
#define UART_FCR6_T_TRIGGER_24 0x20 /* Mask for transmit trigger set at 24 */
#define UART_FCR6_T_TRIGGER_30 0x30 /* Mask for transmit trigger set at 30 */
-#define UART_FCR7_64BYTE 0x20 /* Go into 64 byte mode (TI16C750) */
+#define UART_FCR7_64BYTE 0x20 /* Go into 64 byte mode (TI16C750 and
+ some Freescale UARTs) */
#define UART_FCR_R_TRIG_SHIFT 6
#define UART_FCR_R_TRIG_BITS(x) \
@@ -241,25 +242,6 @@
#define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */
/*
- * Intel MID on-chip HSU (High Speed UART) defined bits
- */
-#define UART_FCR_HSU_64_1B 0x00 /* receive FIFO treshold = 1 */
-#define UART_FCR_HSU_64_16B 0x40 /* receive FIFO treshold = 16 */
-#define UART_FCR_HSU_64_32B 0x80 /* receive FIFO treshold = 32 */
-#define UART_FCR_HSU_64_56B 0xc0 /* receive FIFO treshold = 56 */
-
-#define UART_FCR_HSU_16_1B 0x00 /* receive FIFO treshold = 1 */
-#define UART_FCR_HSU_16_4B 0x40 /* receive FIFO treshold = 4 */
-#define UART_FCR_HSU_16_8B 0x80 /* receive FIFO treshold = 8 */
-#define UART_FCR_HSU_16_14B 0xc0 /* receive FIFO treshold = 14 */
-
-#define UART_FCR_HSU_64B_FIFO 0x20 /* chose 64 bytes FIFO */
-#define UART_FCR_HSU_16B_FIFO 0x00 /* chose 16 bytes FIFO */
-
-#define UART_FCR_HALF_EMPT_TXI 0x00 /* trigger TX_EMPT IRQ for half empty */
-#define UART_FCR_FULL_EMPT_TXI 0x08 /* trigger TX_EMPT IRQ for full empty */
-
-/*
* These register definitions are for the 16C950
*/
#define UART_ASR 0x01 /* Additional Status Register */
@@ -349,6 +331,9 @@
* Extra serial register definitions for the internal UARTs
* in TI OMAP processors.
*/
+#define OMAP1_UART1_BASE 0xfffb0000
+#define OMAP1_UART2_BASE 0xfffb0800
+#define OMAP1_UART3_BASE 0xfffb9800
#define UART_OMAP_MDR1 0x08 /* Mode definition register */
#define UART_OMAP_MDR2 0x09 /* Mode definition register 2 */
#define UART_OMAP_SCR 0x10 /* Supplementary control register */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index b222241..eee8968 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -270,6 +270,14 @@ enum
LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */
LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */
LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */
+ LINUX_MIB_TCPACKSKIPPEDSYNRECV, /* TCPACKSkippedSynRecv */
+ LINUX_MIB_TCPACKSKIPPEDPAWS, /* TCPACKSkippedPAWS */
+ LINUX_MIB_TCPACKSKIPPEDSEQ, /* TCPACKSkippedSeq */
+ LINUX_MIB_TCPACKSKIPPEDFINWAIT2, /* TCPACKSkippedFinWait2 */
+ LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, /* TCPACKSkippedTimeWait */
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
+ LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */
+ LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index b00e29e..49230d3 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -23,4 +23,14 @@ enum {
SK_MEMINFO_VARS,
};
+enum sknetlink_groups {
+ SKNLGRP_NONE,
+ SKNLGRP_INET_TCP_DESTROY,
+ SKNLGRP_INET_UDP_DESTROY,
+ SKNLGRP_INET6_TCP_DESTROY,
+ SKNLGRP_INET6_UDP_DESTROY,
+ __SKNLGRP_MAX,
+};
+#define SKNLGRP_MAX (__SKNLGRP_MAX - 1)
+
#endif /* _UAPI__SOCK_DIAG_H__ */
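
A small sketch (illustrative, not from the patch) of subscribing to one of the new groups on a NETLINK_SOCK_DIAG socket; SOL_NETLINK is defined locally in case the libc headers do not provide it:

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270		/* not always exported by libc headers */
#endif

static int open_tcp_destroy_listener(void)
{
	int grp = SKNLGRP_INET_TCP_DESTROY;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* recvmsg() now delivers TCP destroy notifications */
}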
diff --git a/include/uapi/linux/som.h b/include/uapi/linux/som.h
deleted file mode 100644
index 166594e..0000000
--- a/include/uapi/linux/som.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifndef _LINUX_SOM_H
-#define _LINUX_SOM_H
-
-/* File format definition for SOM executables / shared libraries */
-
-/* we need struct timespec */
-#include <linux/time.h>
-
-#define SOM_PAGESIZE 4096
-
-/* this is the SOM header */
-struct som_hdr {
- short system_id; /* magic number - system */
- short a_magic; /* magic number - file type */
- unsigned int version_id; /* versiod ID: YYMMDDHH */
- struct timespec file_time; /* system clock */
- unsigned int entry_space; /* space for entry point */
- unsigned int entry_subspace; /* subspace for entry point */
- unsigned int entry_offset; /* offset of entry point */
- unsigned int aux_header_location; /* auxiliary header location */
- unsigned int aux_header_size; /* auxiliary header size */
- unsigned int som_length; /* length of entire SOM */
- unsigned int presumed_dp; /* compiler's DP value */
- unsigned int space_location; /* space dictionary location */
- unsigned int space_total; /* number of space entries */
- unsigned int subspace_location; /* subspace entries location */
- unsigned int subspace_total; /* number of subspace entries */
- unsigned int loader_fixup_location; /* MPE/iX loader fixup */
- unsigned int loader_fixup_total; /* number of fixup records */
- unsigned int space_strings_location; /* (sub)space names */
- unsigned int space_strings_size; /* size of strings area */
- unsigned int init_array_location; /* reserved */
- unsigned int init_array_total; /* reserved */
- unsigned int compiler_location; /* module dictionary */
- unsigned int compiler_total; /* number of modules */
- unsigned int symbol_location; /* symbol dictionary */
- unsigned int symbol_total; /* number of symbols */
- unsigned int fixup_request_location; /* fixup requests */
- unsigned int fixup_request_total; /* number of fixup requests */
- unsigned int symbol_strings_location;/* module & symbol names area */
- unsigned int symbol_strings_size; /* size of strings area */
- unsigned int unloadable_sp_location; /* unloadable spaces location */
- unsigned int unloadable_sp_size; /* size of data */
- unsigned int checksum;
-};
-
-/* values for system_id */
-
-#define SOM_SID_PARISC_1_0 0x020b
-#define SOM_SID_PARISC_1_1 0x0210
-#define SOM_SID_PARISC_2_0 0x0214
-
-/* values for a_magic */
-
-#define SOM_LIB_EXEC 0x0104
-#define SOM_RELOCATABLE 0x0106
-#define SOM_EXEC_NONSHARE 0x0107
-#define SOM_EXEC_SHARE 0x0108
-#define SOM_EXEC_DEMAND 0x010B
-#define SOM_LIB_DYN 0x010D
-#define SOM_LIB_SHARE 0x010E
-#define SOM_LIB_RELOC 0x0619
-
-/* values for version_id. Decimal not hex, yes. Grr. */
-
-#define SOM_ID_OLD 85082112
-#define SOM_ID_NEW 87102412
-
-struct aux_id {
- unsigned int mandatory :1; /* the linker must understand this */
- unsigned int copy :1; /* Must be copied by the linker */
- unsigned int append :1; /* Must be merged by the linker */
- unsigned int ignore :1; /* Discard section if unknown */
- unsigned int reserved :12;
- unsigned int type :16; /* Header type */
- unsigned int length; /* length of _following_ data */
-};
-
-/* The Exec Auxiliary Header. Called The HP-UX Header within HP apparently. */
-struct som_exec_auxhdr {
- struct aux_id som_auxhdr;
- int exec_tsize; /* Text size in bytes */
- int exec_tmem; /* Address to load text at */
- int exec_tfile; /* Location of text in file */
- int exec_dsize; /* Data size in bytes */
- int exec_dmem; /* Address to load data at */
- int exec_dfile; /* Location of data in file */
- int exec_bsize; /* Uninitialised data (bss) */
- int exec_entry; /* Address to start executing */
- int exec_flags; /* loader flags */
- int exec_bfill; /* initialisation value for bss */
-};
-
-/* Oh, the things people do to avoid casts. Shame it'll break with gcc's
- * new aliasing rules really.
- */
-union name_pt {
- char * n_name;
- unsigned int n_strx;
-};
-
-/* The Space Dictionary */
-struct space_dictionary_record {
- union name_pt name; /* index to subspace name */
- unsigned int is_loadable :1; /* loadable */
- unsigned int is_defined :1; /* defined within file */
- unsigned int is_private :1; /* not sharable */
- unsigned int has_intermediate_code :1; /* contains intermediate code */
- unsigned int is_tspecific :1; /* thread specific */
- unsigned int reserved :11; /* for future expansion */
- unsigned int sort_key :8; /* for linker */
- unsigned int reserved2 :8; /* for future expansion */
-
- int space_number; /* index */
- int subspace_index; /* index into subspace dict */
- unsigned int subspace_quantity; /* number of subspaces */
- int loader_fix_index; /* for loader */
- unsigned int loader_fix_quantity; /* for loader */
- int init_pointer_index; /* data pointer array index */
- unsigned int init_pointer_quantity; /* number of data pointers */
-};
-
-/* The Subspace Dictionary */
-struct subspace_dictionary_record {
- int space_index;
- unsigned int access_control_bits :7;
- unsigned int memory_resident :1;
- unsigned int dup_common :1;
- unsigned int is_common :1;
- unsigned int quadrant :2;
- unsigned int initially_frozen :1;
- unsigned int is_first :1;
- unsigned int code_only :1;
- unsigned int sort_key :8;
- unsigned int replicate_init :1;
- unsigned int continuation :1;
- unsigned int is_tspecific :1;
- unsigned int is_comdat :1;
- unsigned int reserved :4;
-
- int file_loc_init_value;
- unsigned int initialization_length;
- unsigned int subspace_start;
- unsigned int subspace_length;
-
- unsigned int reserved2 :5;
- unsigned int alignment :27;
-
- union name_pt name;
- int fixup_request_index;
- unsigned int fixup_request_quantity;
-};
-
-#endif /* _LINUX_SOM_H */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b483d19..b67f99d 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
#include <linux/uio.h>
-#define TCMU_VERSION "1.0"
+#define TCMU_VERSION "2.0"
/*
* Ring Design
@@ -39,9 +39,13 @@
* should process the next packet the same way, and so on.
*/
-#define TCMU_MAILBOX_VERSION 1
+#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
+/* See https://gcc.gnu.org/onlinedocs/cpp/Stringification.html */
+#define xstr(s) str(s)
+#define str(s) #s
+
struct tcmu_mailbox {
__u16 version;
__u16 flags;
@@ -64,31 +68,36 @@ enum tcmu_opcode {
* Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
*/
struct tcmu_cmd_entry_hdr {
- __u32 len_op;
+ __u32 len_op;
+ __u16 cmd_id;
+ __u8 kflags;
+#define TCMU_UFLAG_UNKNOWN_OP 0x1
+ __u8 uflags;
+
} __packed;
#define TCMU_OP_MASK 0x7
-static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr)
+static inline enum tcmu_opcode tcmu_hdr_get_op(__u32 len_op)
{
- return hdr->len_op & TCMU_OP_MASK;
+ return len_op & TCMU_OP_MASK;
}
-static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op)
+static inline void tcmu_hdr_set_op(__u32 *len_op, enum tcmu_opcode op)
{
- hdr->len_op &= ~TCMU_OP_MASK;
- hdr->len_op |= (op & TCMU_OP_MASK);
+ *len_op &= ~TCMU_OP_MASK;
+ *len_op |= (op & TCMU_OP_MASK);
}
-static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr)
+static inline __u32 tcmu_hdr_get_len(__u32 len_op)
{
- return hdr->len_op & ~TCMU_OP_MASK;
+ return len_op & ~TCMU_OP_MASK;
}
-static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
+static inline void tcmu_hdr_set_len(__u32 *len_op, __u32 len)
{
- hdr->len_op &= TCMU_OP_MASK;
- hdr->len_op |= len;
+ *len_op &= TCMU_OP_MASK;
+ *len_op |= len;
}
/* Currently the same as SCSI_SENSE_BUFFERSIZE */
@@ -97,13 +106,14 @@ static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
struct tcmu_cmd_entry {
struct tcmu_cmd_entry_hdr hdr;
- uint16_t cmd_id;
- uint16_t __pad1;
-
union {
struct {
+ uint32_t iov_cnt;
+ uint32_t iov_bidi_cnt;
+ uint32_t iov_dif_cnt;
uint64_t cdb_off;
- uint64_t iov_cnt;
+ uint64_t __pad1;
+ uint64_t __pad2;
struct iovec iov[0];
} req;
struct {
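
A hedged sketch of how the reworked accessors are meant to be used now that they operate on the __u32 len_op value rather than the header pointer (TCMU_OP_CMD is the command opcode from enum tcmu_opcode earlier in this header):

#include <linux/target_core_user.h>

/* Illustrative only: build a command entry header with the new accessors. */
static void fill_cmd_hdr(struct tcmu_cmd_entry_hdr *hdr, __u16 cmd_id, __u32 len)
{
	__u32 len_op = 0;

	tcmu_hdr_set_op(&len_op, TCMU_OP_CMD);	/* opcode lives in the low bits */
	tcmu_hdr_set_len(&len_op, len);		/* len is 8-byte aligned */
	hdr->len_op = len_op;
	hdr->cmd_id = cmd_id;
	hdr->kflags = 0;
	hdr->uflags = 0;
}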
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index b057da2..242cf0c 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -8,3 +8,5 @@ header-y += tc_nat.h
header-y += tc_pedit.h
header-y += tc_skbedit.h
header-y += tc_vlan.h
+header-y += tc_bpf.h
+header-y += tc_connmark.h
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
new file mode 100644
index 0000000..07f17cc
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_BPF_H
+#define __LINUX_TC_BPF_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_BPF 13
+
+struct tc_act_bpf {
+ tc_gen;
+};
+
+enum {
+ TCA_ACT_BPF_UNSPEC,
+ TCA_ACT_BPF_TM,
+ TCA_ACT_BPF_PARMS,
+ TCA_ACT_BPF_OPS_LEN,
+ TCA_ACT_BPF_OPS,
+ TCA_ACT_BPF_FD,
+ TCA_ACT_BPF_NAME,
+ __TCA_ACT_BPF_MAX,
+};
+#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_connmark.h b/include/uapi/linux/tc_act/tc_connmark.h
new file mode 100644
index 0000000..994b097
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_connmark.h
@@ -0,0 +1,22 @@
+#ifndef __UAPI_TC_CONNMARK_H
+#define __UAPI_TC_CONNMARK_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_CONNMARK 14
+
+struct tc_connmark {
+ tc_gen;
+ __u16 zone;
+};
+
+enum {
+ TCA_CONNMARK_UNSPEC,
+ TCA_CONNMARK_PARMS,
+ TCA_CONNMARK_TM,
+ __TCA_CONNMARK_MAX
+};
+#define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 3b97183..65a77b0 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -112,6 +112,9 @@ enum {
#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
#define TCP_TIMESTAMP 24
#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
+#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */
+#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */
+#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */
struct tcp_repair_opt {
__u32 opt_code;
@@ -189,6 +192,10 @@ struct tcp_info {
__u64 tcpi_pacing_rate;
__u64 tcpi_max_pacing_rate;
+ __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
+ __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
+ __u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */
+ __u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */
};
/* for TCP_MD5SIG socket option */
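
A short sketch of the new TCP_SAVE_SYN/TCP_SAVED_SYN pair in action: the first is set on the listening socket, the second is read once per accepted connection. The fallback defines simply mirror the values added above in case older libc headers lack them:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_SAVE_SYN
#define TCP_SAVE_SYN	27
#define TCP_SAVED_SYN	28
#endif

static void dump_saved_syn(int listen_fd)
{
	int one = 1;
	unsigned char syn[512];
	socklen_t len = sizeof(syn);
	int conn_fd;

	/* Must be enabled on the listener before the connection arrives. */
	setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));

	conn_fd = accept(listen_fd, NULL, NULL);
	if (conn_fd < 0)
		return;
	/* Returns the saved IP + TCP headers; the kernel frees them afterwards. */
	if (getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) == 0)
		printf("saved %u header bytes\n", (unsigned int)len);
	close(conn_fd);
}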
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 876d0a1..087b0ef 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -272,6 +272,26 @@ static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
(ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
}
+static inline int TLV_GET_LEN(struct tlv_desc *tlv)
+{
+ return ntohs(tlv->tlv_len);
+}
+
+static inline void TLV_SET_LEN(struct tlv_desc *tlv, __u16 len)
+{
+ tlv->tlv_len = htons(len);
+}
+
+static inline int TLV_CHECK_TYPE(struct tlv_desc *tlv, __u16 type)
+{
+ return (ntohs(tlv->tlv_type) == type);
+}
+
+static inline void TLV_SET_TYPE(struct tlv_desc *tlv, __u16 type)
+{
+ tlv->tlv_type = htons(type);
+}
+
static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
{
struct tlv_desc *tlv_ptr;
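
A minimal sketch (illustrative) of the new TLV helpers; it assumes the caller already points tlv at a properly sized descriptor:

#include <linux/tipc_config.h>

/* Re-stamp an empty TLV descriptor with a new type and a header-only length. */
static int relabel_empty_tlv(struct tlv_desc *tlv, __u16 new_type)
{
	if (!TLV_CHECK_TYPE(tlv, TIPC_TLV_NONE))
		return -1;
	TLV_SET_TYPE(tlv, new_type);
	TLV_SET_LEN(tlv, TLV_LENGTH(0));	/* no value payload */
	return TLV_GET_LEN(tlv);
}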
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 8d72382..d4c8f14 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -83,11 +83,20 @@ enum {
TIPC_NLA_BEARER_NAME, /* string */
TIPC_NLA_BEARER_PROP, /* nest */
TIPC_NLA_BEARER_DOMAIN, /* u32 */
+ TIPC_NLA_BEARER_UDP_OPTS, /* nest */
__TIPC_NLA_BEARER_MAX,
TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1
};
+enum {
+ TIPC_NLA_UDP_UNSPEC,
+ TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */
+ TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */
+
+ __TIPC_NLA_UDP_MAX,
+ TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
+};
/* Socket info */
enum {
TIPC_NLA_SOCK_UNSPEC,
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index dac199a..01c4410 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -34,5 +34,6 @@
#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */
#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */
+#define N_NCI 25 /* NFC NCI UART */
#endif /* _UAPI_LINUX_TTY_H */
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index fae4864..072e41e 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -15,7 +15,7 @@
#define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */
#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */
#define ASYNCB_SPLIT_TERMIOS 3 /* [x] Separate termios for dialin/callout */
-#define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */
+#define ASYNCB_SPD_HI 4 /* Use 57600 instead of 38400 bps */
#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */
#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */
#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 295ba29..108dd79 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -20,6 +20,7 @@ enum functionfs_flags {
FUNCTIONFS_HAS_SS_DESC = 4,
FUNCTIONFS_HAS_MS_OS_DESC = 8,
FUNCTIONFS_VIRTUAL_ADDR = 16,
+ FUNCTIONFS_EVENTFD = 32,
};
/* Descriptor of a non-audio endpoint */
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index abe5f4b..019ba1e 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -128,11 +128,12 @@ struct usbdevfs_hub_portinfo {
char port [127]; /* e.g. port 3 connects to device 27 */
};
-/* Device capability flags */
+/* System and bus capability flags */
#define USBDEVFS_CAP_ZERO_PACKET 0x01
#define USBDEVFS_CAP_BULK_CONTINUATION 0x02
#define USBDEVFS_CAP_NO_PACKET_SIZE_LIM 0x04
#define USBDEVFS_CAP_BULK_SCATTER_GATHER 0x08
+#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10
/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 661f119..9f6e108 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -170,6 +170,10 @@ enum v4l2_colorfx {
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_SAA7134_BASE (V4L2_CID_USER_BASE + 0x1060)
+/* The base for the adv7180 driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 6c8f159..c039f1d 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -48,14 +48,15 @@
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 480, 1, 0, \
13500000, 19, 62, 57, 4, 3, 15, 4, 3, 16, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_720X480P59_94 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 480, 0, 0, \
27000000, 16, 62, 60, 9, 6, 30, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
/* Note: these are the nominal timings, for HDMI links this format is typically
@@ -64,14 +65,15 @@
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 576, 1, 0, \
13500000, 12, 63, 69, 2, 3, 19, 2, 3, 20, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_720X576P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(720, 576, 0, 0, \
27000000, 12, 64, 68, 5, 5, 39, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P24 { \
@@ -88,7 +90,7 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 2420, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P30 { \
@@ -96,7 +98,8 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P50 { \
@@ -104,7 +107,7 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 440, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1280X720P60 { \
@@ -112,7 +115,8 @@
V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 110, 40, 220, 5, 5, 20, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P24 { \
@@ -120,7 +124,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 638, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P25 { \
@@ -128,7 +133,7 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P30 { \
@@ -136,7 +141,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080I50 { \
@@ -144,7 +150,8 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 1, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 528, 44, 148, 2, 5, 15, 2, 5, 16, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P50 { \
@@ -152,7 +159,7 @@
V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
148500000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080I60 { \
@@ -161,7 +168,8 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
74250000, 88, 44, 148, 2, 5, 15, 2, 5, 16, \
V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_HALF_LINE) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | \
+ V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_1920X1080P60 { \
@@ -170,77 +178,83 @@
V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
148500000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
- V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, 0) \
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
- V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_CAN_REDUCE_FPS) \
+ V4L2_DV_BT_STD_CEA861, \
+ V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
}
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 26db206..9cac632 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -24,6 +24,7 @@
* @colorspace: colorspace of the data (from enum v4l2_colorspace)
* @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding)
* @quantization: quantization of the data (from enum v4l2_quantization)
+ * @xfer_func: transfer function of the data (from enum v4l2_xfer_func)
*/
struct v4l2_mbus_framefmt {
__u32 width;
@@ -33,7 +34,8 @@ struct v4l2_mbus_framefmt {
__u32 colorspace;
__u16 ycbcr_enc;
__u16 quantization;
- __u32 reserved[6];
+ __u16 xfer_func;
+ __u16 reserved[11];
};
#ifndef __KERNEL__
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index e0a7e3d..dbce2b554 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -69,12 +69,14 @@ struct v4l2_subdev_crop {
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_mbus_code_enum {
__u32 pad;
__u32 index;
__u32 code;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
/**
@@ -82,6 +84,7 @@ struct v4l2_subdev_mbus_code_enum {
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_frame_size_enum {
__u32 index;
@@ -91,7 +94,8 @@ struct v4l2_subdev_frame_size_enum {
__u32 max_width;
__u32 min_height;
__u32 max_height;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
/**
@@ -113,6 +117,7 @@ struct v4l2_subdev_frame_interval {
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
+ * @which: format type (from enum v4l2_subdev_format_whence)
*/
struct v4l2_subdev_frame_interval_enum {
__u32 index;
@@ -121,7 +126,8 @@ struct v4l2_subdev_frame_interval_enum {
__u32 width;
__u32 height;
struct v4l2_fract interval;
- __u32 reserved[9];
+ __u32 which;
+ __u32 reserved[8];
};
/**
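
A small sketch of the new which field in use when enumerating media bus codes on a sub-device pad (the file descriptor and pad number are assumed to come from the caller):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

static void list_active_codes(int subdev_fd, __u32 pad)
{
	struct v4l2_subdev_mbus_code_enum mbus = {
		.pad = pad,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,	/* new field */
	};

	for (mbus.index = 0;
	     ioctl(subdev_fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, &mbus) == 0;
	     mbus.index++)
		printf("pad %u: media bus code 0x%04x\n", pad, mbus.code);
}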
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 29715d2..9fd7b5d 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -36,6 +36,8 @@
/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+#define VFIO_SPAPR_TCE_v2_IOMMU 7
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
@@ -160,6 +162,8 @@ struct vfio_device_info {
__u32 flags;
#define VFIO_DEVICE_FLAGS_RESET (1 << 0) /* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
+#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
+#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -333,6 +337,7 @@ enum {
VFIO_PCI_MSI_IRQ_INDEX,
VFIO_PCI_MSIX_IRQ_INDEX,
VFIO_PCI_ERR_IRQ_INDEX,
+ VFIO_PCI_REQ_IRQ_INDEX,
VFIO_PCI_NUM_IRQS
};
@@ -440,6 +445,23 @@ struct vfio_iommu_type1_dma_unmap {
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
+ * The SPAPR TCE DDW info struct provides the information about
+ * the details of Dynamic DMA window capability.
+ *
+ * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
+ * @max_dynamic_windows_supported tells the maximum number of windows
+ * which the platform can create.
+ * @levels tells the maximum number of levels in multi-level IOMMU tables;
+ * this allows splitting a table into smaller chunks which reduces
+ * the amount of physically contiguous memory required for the table.
+ */
+struct vfio_iommu_spapr_tce_ddw_info {
+ __u64 pgsizes; /* Bitmap of supported page sizes */
+ __u32 max_dynamic_windows_supported;
+ __u32 levels;
+};
+
+/*
* The SPAPR TCE info struct provides the information about the PCI bus
* address ranges available for DMA, these values are programmed into
* the hardware so the guest has to know that information.
@@ -449,14 +471,17 @@ struct vfio_iommu_type1_dma_unmap {
* addresses too so the window works as a filter rather than an offset
* for IOVA addresses.
*
- * A flag will need to be added if other page sizes are supported,
- * so as defined here, it is always 4k.
+ * Flags supported:
+ * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
+ * (DDW) support is present. @ddw is only supported when DDW is present.
*/
struct vfio_iommu_spapr_tce_info {
__u32 argsz;
- __u32 flags; /* reserved for future use */
+ __u32 flags;
+#define VFIO_IOMMU_SPAPR_INFO_DDW (1 << 0) /* DDW supported */
__u32 dma32_window_start; /* 32 bit window start (bytes) */
__u32 dma32_window_size; /* 32 bit window size (bytes) */
+ struct vfio_iommu_spapr_tce_ddw_info ddw;
};
#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
@@ -467,12 +492,23 @@ struct vfio_iommu_spapr_tce_info {
* - unfreeze IO/DMA for frozen PE;
* - read PE state;
* - reset PE;
- * - configure PE.
+ * - configure PE;
+ * - inject EEH error.
*/
+struct vfio_eeh_pe_err {
+ __u32 type;
+ __u32 func;
+ __u64 addr;
+ __u64 mask;
+};
+
struct vfio_eeh_pe_op {
__u32 argsz;
__u32 flags;
__u32 op;
+ union {
+ struct vfio_eeh_pe_err err;
+ };
};
#define VFIO_EEH_PE_DISABLE 0 /* Disable EEH functionality */
@@ -489,9 +525,70 @@ struct vfio_eeh_pe_op {
#define VFIO_EEH_PE_RESET_HOT 6 /* Assert hot reset */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL 7 /* Assert fundamental reset */
#define VFIO_EEH_PE_CONFIGURE 8 /* PE configuration */
+#define VFIO_EEH_PE_INJECT_ERR 9 /* Inject EEH error */
#define VFIO_EEH_PE_OP _IO(VFIO_TYPE, VFIO_BASE + 21)
+/**
+ * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
+ *
+ * Registers user space memory where DMA is allowed. It pins
+ * user pages and does the locked memory accounting so
+ * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
+ * get faster.
+ */
+struct vfio_iommu_spapr_register_memory {
+ __u32 argsz;
+ __u32 flags;
+ __u64 vaddr; /* Process virtual address */
+ __u64 size; /* Size of mapping (bytes) */
+};
+#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
+
+/**
+ * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
+ *
+ * Unregisters user space memory registered with
+ * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
+ * Uses vfio_iommu_spapr_register_memory for parameters.
+ */
+#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 18)
+
+/**
+ * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
+ *
+ * Creates an additional TCE table and programs it (sets a new DMA window)
+ * to every IOMMU group in the container. It receives page shift, window
+ * size and number of levels in the TCE table being created.
+ *
+ * It allocates and returns an offset on a PCI bus of the new DMA window.
+ */
+struct vfio_iommu_spapr_tce_create {
+ __u32 argsz;
+ __u32 flags;
+ /* in */
+ __u32 page_shift;
+ __u64 window_size;
+ __u32 levels;
+ /* out */
+ __u64 start_addr;
+};
+#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
+
+/**
+ * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
+ *
+ * Unprograms a TCE table from all groups in the container and destroys it.
+ * It receives a PCI bus offset as a window id.
+ */
+struct vfio_iommu_spapr_tce_remove {
+ __u32 argsz;
+ __u32 flags;
+ /* in */
+ __u64 start_addr;
+};
+#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
+
/* ***************************************************************** */
#endif /* _UAPIVFIO_H */
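
A hedged sketch of the new SPAPR calls from a userspace driver: pre-register the DMA memory, then create a second window. The page shift, window size and level count are illustrative values, not requirements:

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int setup_second_window(int container_fd, void *mem, unsigned long size)
{
	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.vaddr = (__u64)(unsigned long)mem,
		.size = size,
	};
	struct vfio_iommu_spapr_tce_create create = {
		.argsz = sizeof(create),
		.page_shift = 16,		/* 64K IOMMU pages (example) */
		.window_size = 1ULL << 30,	/* 1 GiB window (example) */
		.levels = 1,
	};

	if (ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg) < 0)
		return -1;
	if (ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create) < 0)
		return -1;
	/* create.start_addr now holds the bus offset of the new window. */
	return 0;
}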
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index bb6a5b4..ab373191 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -103,6 +103,20 @@ struct vhost_memory {
/* Get accessor: reads index, writes value in num */
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+/* Set the vring byte order in num. Valid values are VHOST_VRING_LITTLE_ENDIAN
+ * or VHOST_VRING_BIG_ENDIAN (other values return -EINVAL).
+ * The byte order cannot be changed while the device is active: trying to do so
+ * returns -EBUSY.
+ * This is a legacy only API that is simply ignored when VIRTIO_F_VERSION_1 is
+ * set.
+ * Not all kernel configurations support this ioctl, but all configurations that
+ * support SET also support GET.
+ */
+#define VHOST_VRING_LITTLE_ENDIAN 0
+#define VHOST_VRING_BIG_ENDIAN 1
+#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
+#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
+
/* The following ioctls use eventfd file descriptors to signal and poll
* for events. */
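
A one-call sketch of the new endian ioctl; it must be issued before the device is started, otherwise -EBUSY is returned as documented above:

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_vring_little_endian(int vhost_fd, unsigned int ring)
{
	struct vhost_vring_state state = {
		.index = ring,
		.num = VHOST_VRING_LITTLE_ENDIAN,
	};

	return ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &state);
}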
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index d279c1b..3228fbe 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -178,6 +178,12 @@ enum v4l2_memory {
/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
enum v4l2_colorspace {
+ /*
+ * Default colorspace, i.e. let the driver figure it out.
+ * Can only be used with video capture.
+ */
+ V4L2_COLORSPACE_DEFAULT = 0,
+
/* SMPTE 170M: used for broadcast NTSC/PAL SDTV */
V4L2_COLORSPACE_SMPTE170M = 1,
@@ -220,8 +226,56 @@ enum v4l2_colorspace {
/* BT.2020 colorspace, used for UHDTV. */
V4L2_COLORSPACE_BT2020 = 10,
+
+ /* Raw colorspace: for RAW unprocessed images */
+ V4L2_COLORSPACE_RAW = 11,
+};
+
+/*
+ * Determine how COLORSPACE_DEFAULT should map to a proper colorspace.
+ * This depends on whether this is a SDTV image (use SMPTE 170M), an
+ * HDTV image (use Rec. 709), or something else (use sRGB).
+ */
+#define V4L2_MAP_COLORSPACE_DEFAULT(is_sdtv, is_hdtv) \
+ ((is_sdtv) ? V4L2_COLORSPACE_SMPTE170M : \
+ ((is_hdtv) ? V4L2_COLORSPACE_REC709 : V4L2_COLORSPACE_SRGB))
+
+enum v4l2_xfer_func {
+ /*
+ * Mapping of V4L2_XFER_FUNC_DEFAULT to actual transfer functions
+ * for the various colorspaces:
+ *
+ * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
+ * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_REC709 and
+ * V4L2_COLORSPACE_BT2020: V4L2_XFER_FUNC_709
+ *
+ * V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB
+ *
+ * V4L2_COLORSPACE_ADOBERGB: V4L2_XFER_FUNC_ADOBERGB
+ *
+ * V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
+ *
+ * V4L2_COLORSPACE_RAW: V4L2_XFER_FUNC_NONE
+ */
+ V4L2_XFER_FUNC_DEFAULT = 0,
+ V4L2_XFER_FUNC_709 = 1,
+ V4L2_XFER_FUNC_SRGB = 2,
+ V4L2_XFER_FUNC_ADOBERGB = 3,
+ V4L2_XFER_FUNC_SMPTE240M = 4,
+ V4L2_XFER_FUNC_NONE = 5,
};
+/*
+ * Determine how XFER_FUNC_DEFAULT should map to a proper transfer function.
+ * This depends on the colorspace.
+ */
+#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
+ ((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \
+ ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
+ ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
+ ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \
+ V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709))))
+
enum v4l2_ycbcr_encoding {
/*
* Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the
@@ -266,17 +320,39 @@ enum v4l2_ycbcr_encoding {
V4L2_YCBCR_ENC_SMPTE240M = 8,
};
+/*
+ * Determine how YCBCR_ENC_DEFAULT should map to a proper Y'CbCr encoding.
+ * This depends on the colorspace.
+ */
+#define V4L2_MAP_YCBCR_ENC_DEFAULT(colsp) \
+ ((colsp) == V4L2_COLORSPACE_REC709 ? V4L2_YCBCR_ENC_709 : \
+ ((colsp) == V4L2_COLORSPACE_BT2020 ? V4L2_YCBCR_ENC_BT2020 : \
+ ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_YCBCR_ENC_SMPTE240M : \
+ V4L2_YCBCR_ENC_601)))
+
enum v4l2_quantization {
/*
- * The default for R'G'B' quantization is always full range. For
- * Y'CbCr the quantization is always limited range, except for
- * SYCC, XV601, XV709 or JPEG: those are full range.
+ * The default for R'G'B' quantization is always full range, except
+ * for the BT2020 colorspace. For Y'CbCr the quantization is always
+ * limited range, except for COLORSPACE_JPEG, SYCC, XV601 or XV709:
+ * those are full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
V4L2_QUANTIZATION_LIM_RANGE = 2,
};
+/*
+ * Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
+ * This depends on whether the image is RGB or not, the colorspace and the
+ * Y'CbCr encoding.
+ */
+#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, ycbcr_enc) \
+ (((is_rgb) && (colsp) == V4L2_COLORSPACE_BT2020) ? V4L2_QUANTIZATION_LIM_RANGE : \
+ (((is_rgb) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
+ (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+ V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
+
enum v4l2_priority {
V4L2_PRIORITY_UNSET = 0, /* not initialized */
V4L2_PRIORITY_BACKGROUND = 1,
@@ -369,6 +445,7 @@ struct v4l2_pix_format {
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
__u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */
__u32 quantization; /* enum v4l2_quantization */
+ __u32 xfer_func; /* enum v4l2_xfer_func */
};
/* Pixel format FOURCC depth Description */
@@ -403,6 +480,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
+#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
@@ -463,10 +541,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
-#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12 BGBG.. GRGR.. */
-#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
-#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
-#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
+ /* 10bit raw bayer packed, 5 bytes for every 4 pixels */
+#define V4L2_PIX_FMT_SBGGR10P v4l2_fourcc('p', 'B', 'A', 'A')
+#define V4L2_PIX_FMT_SGBRG10P v4l2_fourcc('p', 'G', 'A', 'A')
+#define V4L2_PIX_FMT_SGRBG10P v4l2_fourcc('p', 'g', 'A', 'A')
+#define V4L2_PIX_FMT_SRGGB10P v4l2_fourcc('p', 'R', 'A', 'A')
/* 10bit raw bayer a-law compressed to 8 bits */
#define V4L2_PIX_FMT_SBGGR10ALAW8 v4l2_fourcc('a', 'B', 'A', '8')
#define V4L2_PIX_FMT_SGBRG10ALAW8 v4l2_fourcc('a', 'G', 'A', '8')
@@ -477,10 +556,10 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG10DPCM8 v4l2_fourcc('b', 'G', 'A', '8')
#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
#define V4L2_PIX_FMT_SRGGB10DPCM8 v4l2_fourcc('b', 'R', 'A', '8')
- /*
- * 10bit raw bayer, expanded to 16 bits
- * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
- */
+#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
/* compressed formats */
@@ -808,6 +887,8 @@ struct v4l2_buffer {
#define V4L2_BUF_FLAG_TSTAMP_SRC_MASK 0x00070000
#define V4L2_BUF_FLAG_TSTAMP_SRC_EOF 0x00000000
#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000
+/* mem2mem encoder/decoder */
+#define V4L2_BUF_FLAG_LAST 0x00100000
/**
* struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
@@ -1186,6 +1267,12 @@ struct v4l2_bt_timings {
exactly the same number of half-lines. Whether half-lines can be detected
or used depends on the hardware. */
#define V4L2_DV_FL_HALF_LINE (1 << 3)
+/* If set, then this is a Consumer Electronics (CE) video format. Such formats
+ * differ from other formats (commonly called IT formats) in that if RGB
+ * encoding is used then by default the RGB values use limited range (i.e.
+ * use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
+ * except for the 640x480 format are CE formats. */
+#define V4L2_DV_FL_IS_CE_VIDEO (1 << 4)
/* A few useful defines to calculate the total blanking and frame sizes */
#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -1455,6 +1542,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
+#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -1840,8 +1928,8 @@ struct v4l2_mpeg_vbi_fmt_ivtv {
*/
struct v4l2_plane_pix_format {
__u32 sizeimage;
- __u16 bytesperline;
- __u16 reserved[7];
+ __u32 bytesperline;
+ __u16 reserved[6];
} __attribute__ ((packed));
/**
@@ -1856,6 +1944,7 @@ struct v4l2_plane_pix_format {
* @flags: format flags (V4L2_PIX_FMT_FLAG_*)
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
* @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
*/
struct v4l2_pix_format_mplane {
__u32 width;
@@ -1869,7 +1958,8 @@ struct v4l2_pix_format_mplane {
__u8 flags;
__u8 ycbcr_enc;
__u8 quantization;
- __u8 reserved[8];
+ __u8 xfer_func;
+ __u8 reserved[7];
} __attribute__ ((packed));
/**
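
A sketch (illustrative) of how the *_DEFAULT values and the V4L2_MAP_* macros above are intended to compose when filling in a pixel format; the is_rgb/is_sdtv/is_hdtv inputs are assumed to be known to the caller:

#include <stdbool.h>
#include <linux/videodev2.h>

static void resolve_format_defaults(struct v4l2_pix_format *pix, bool is_rgb,
				    bool is_sdtv, bool is_hdtv)
{
	if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
		pix->colorspace = V4L2_MAP_COLORSPACE_DEFAULT(is_sdtv, is_hdtv);
	if (pix->xfer_func == V4L2_XFER_FUNC_DEFAULT)
		pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
	if (pix->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
		pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
	if (pix->quantization == V4L2_QUANTIZATION_DEFAULT)
		pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb,
					pix->colorspace, pix->ycbcr_enc);
}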
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index be40f70..d7f1cbc 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -25,6 +25,8 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
+#include <linux/types.h>
+#include <linux/virtio_types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -36,12 +38,11 @@
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
-struct virtio_balloon_config
-{
+struct virtio_balloon_config {
/* Number of pages host wants Guest to give up. */
- __le32 num_pages;
+ __u32 num_pages;
/* Number of pages we've actually got in balloon. */
- __le32 actual;
+ __u32 actual;
};
#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
@@ -52,9 +53,32 @@ struct virtio_balloon_config
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
#define VIRTIO_BALLOON_S_NR 6
+/*
+ * Memory statistics structure.
+ * Driver fills an array of these structures and passes to device.
+ *
+ * NOTE: fields are laid out in a way that would make compiler add padding
+ * between and after fields, so we have to use compiler-specific attributes to
+ * pack it, to disable this padding. This also often causes compiler to
+ * generate suboptimal code.
+ *
+ * We maintain this statistics structure format for backwards compatibility,
+ * but don't follow this example.
+ *
+ * If implementing a similar structure, do something like the below instead:
+ * struct virtio_balloon_stat {
+ * __virtio16 tag;
+ * __u8 reserved[6];
+ * __virtio64 val;
+ * };
+ *
+ * In other words, add explicit reserved fields to align field and
+ * structure boundaries at field size, avoiding compiler padding
+ * without the packed attribute.
+ */
struct virtio_balloon_stat {
- __u16 tag;
- __u64 val;
+ __virtio16 tag;
+ __virtio64 val;
} __attribute__((packed));
#endif /* _LINUX_VIRTIO_BALLOON_H */
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 247c8ba..19c66fc 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -31,22 +31,25 @@
#include <linux/virtio_types.h>
/* Feature bits */
-#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */
#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */
#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
-#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
-#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
-#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
+/* Legacy feature bits */
+#ifndef VIRTIO_BLK_NO_LEGACY
+#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
+#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
+#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
+#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
#ifndef __KERNEL__
/* Old (deprecated) name for VIRTIO_BLK_F_WCE. */
#define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE
#endif
+#endif /* !VIRTIO_BLK_NO_LEGACY */
#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */
@@ -57,7 +60,7 @@ struct virtio_blk_config {
__u32 size_max;
/* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
__u32 seg_max;
- /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */
+ /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
struct virtio_blk_geometry {
__u16 cylinders;
__u8 heads;
@@ -100,8 +103,10 @@ struct virtio_blk_config {
#define VIRTIO_BLK_T_IN 0
#define VIRTIO_BLK_T_OUT 1
+#ifndef VIRTIO_BLK_NO_LEGACY
/* This bit says it's a scsi command, not an actual read or write. */
#define VIRTIO_BLK_T_SCSI_CMD 2
+#endif /* VIRTIO_BLK_NO_LEGACY */
/* Cache flush command */
#define VIRTIO_BLK_T_FLUSH 4
@@ -109,10 +114,16 @@ struct virtio_blk_config {
/* Get device ID command */
#define VIRTIO_BLK_T_GET_ID 8
+#ifndef VIRTIO_BLK_NO_LEGACY
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
+#endif /* !VIRTIO_BLK_NO_LEGACY */
-/* This is the first element of the read scatter-gather list. */
+/*
+ * This comes first in the read scatter-gather list.
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated,
+ * this is the first element of the read scatter-gather list.
+ */
struct virtio_blk_outhdr {
/* VIRTIO_BLK_T* */
__virtio32 type;
@@ -122,12 +133,14 @@ struct virtio_blk_outhdr {
__virtio64 sector;
};
+#ifndef VIRTIO_BLK_NO_LEGACY
struct virtio_scsi_inhdr {
__virtio32 errors;
__virtio32 data_len;
__virtio32 sense_len;
__virtio32 residual;
};
+#endif /* !VIRTIO_BLK_NO_LEGACY */
/* And this is the final byte of the write scatter-gather list. */
#define VIRTIO_BLK_S_OK 0
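
A brief sketch of the intent behind VIRTIO_BLK_NO_LEGACY: a virtio-1.0-only consumer defines it before including the header so the legacy feature bits and SCSI passthrough definitions simply do not exist:

#define VIRTIO_BLK_NO_LEGACY	/* hide F_BARRIER, F_SCSI, F_WCE, F_CONFIG_WCE */
#include <linux/virtio_blk.h>

static int device_offers_mq(__u64 features)
{
	/* Referencing VIRTIO_BLK_F_SCSI here would now be a compile error. */
	return !!(features & (1ULL << VIRTIO_BLK_F_MQ));
}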
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index a6d0cde..c18264d 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -49,12 +49,14 @@
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 33
+#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
* suppressed them? */
#define VIRTIO_F_NOTIFY_ON_EMPTY 24
/* Can the device handle any descriptor layout? */
#define VIRTIO_F_ANY_LAYOUT 27
+#endif /* VIRTIO_CONFIG_NO_LEGACY */
/* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 32
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
new file mode 100644
index 0000000..478be52
--- /dev/null
+++ b/include/uapi/linux/virtio_gpu.h
@@ -0,0 +1,206 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef VIRTIO_GPU_HW_H
+#define VIRTIO_GPU_HW_H
+
+#include <linux/types.h>
+
+enum virtio_gpu_ctrl_type {
+ VIRTIO_GPU_UNDEFINED = 0,
+
+ /* 2d commands */
+ VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
+ VIRTIO_GPU_CMD_RESOURCE_UNREF,
+ VIRTIO_GPU_CMD_SET_SCANOUT,
+ VIRTIO_GPU_CMD_RESOURCE_FLUSH,
+ VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
+ VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
+ VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
+
+ /* cursor commands */
+ VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
+ VIRTIO_GPU_CMD_MOVE_CURSOR,
+
+ /* success responses */
+ VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
+ VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
+
+ /* error responses */
+ VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
+ VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
+ VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
+ VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
+};
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+
+struct virtio_gpu_ctrl_hdr {
+ __le32 type;
+ __le32 flags;
+ __le64 fence_id;
+ __le32 ctx_id;
+ __le32 padding;
+};
+
+/* data passed in the cursor vq */
+
+struct virtio_gpu_cursor_pos {
+ __le32 scanout_id;
+ __le32 x;
+ __le32 y;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
+struct virtio_gpu_update_cursor {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_cursor_pos pos; /* update & move */
+ __le32 resource_id; /* update only */
+ __le32 hot_x; /* update only */
+ __le32 hot_y; /* update only */
+ __le32 padding;
+};
+
+/* data passed in the control vq, 2d related */
+
+struct virtio_gpu_rect {
+ __le32 x;
+ __le32 y;
+ __le32 width;
+ __le32 height;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_UNREF */
+struct virtio_gpu_resource_unref {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */
+struct virtio_gpu_resource_create_2d {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 format;
+ __le32 width;
+ __le32 height;
+};
+
+/* VIRTIO_GPU_CMD_SET_SCANOUT */
+struct virtio_gpu_set_scanout {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le32 scanout_id;
+ __le32 resource_id;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */
+struct virtio_gpu_resource_flush {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */
+struct virtio_gpu_transfer_to_host_2d {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le64 offset;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+struct virtio_gpu_mem_entry {
+ __le64 addr;
+ __le32 length;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */
+struct virtio_gpu_resource_attach_backing {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 nr_entries;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */
+struct virtio_gpu_resource_detach_backing {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */
+#define VIRTIO_GPU_MAX_SCANOUTS 16
+struct virtio_gpu_resp_display_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_display_one {
+ struct virtio_gpu_rect r;
+ __le32 enabled;
+ __le32 flags;
+ } pmodes[VIRTIO_GPU_MAX_SCANOUTS];
+};
+
+#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
+
+struct virtio_gpu_config {
+ __u32 events_read;
+ __u32 events_clear;
+ __u32 num_scanouts;
+ __u32 reserved;
+};
+
+/* simple formats for fbcon/X use */
+enum virtio_gpu_formats {
+ VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
+ VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2,
+ VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3,
+ VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4,
+
+ VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
+ VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68,
+
+ VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
+ VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
+};
+
+#endif
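To show how the 2D command structures above fit together, here is a hedged
sketch that builds a RESOURCE_CREATE_2D request and scans it out. Only the
struct layouts and command codes come from the header; the submit_ctrl()
helper and the use of htole32() for the __le32 fields are assumptions about
the surrounding driver.

	#include <endian.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <linux/virtio_gpu.h>

	/* Hypothetical helper: how a request reaches the control virtqueue is
	 * transport-specific and not defined by this header. */
	extern void submit_ctrl(const void *req, size_t len);

	static void create_scanout_resource(uint32_t res_id, uint32_t w, uint32_t h)
	{
		struct virtio_gpu_resource_create_2d create = {
			.hdr.type    = htole32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
			.resource_id = htole32(res_id),
			.format      = htole32(VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM),
			.width       = htole32(w),
			.height      = htole32(h),
		};
		struct virtio_gpu_set_scanout scanout = {
			.hdr.type    = htole32(VIRTIO_GPU_CMD_SET_SCANOUT),
			.r           = { .width = htole32(w), .height = htole32(h) },
			.scanout_id  = htole32(0),
			.resource_id = htole32(res_id),
		};

		submit_ctrl(&create, sizeof(create));	/* expect RESP_OK_NODATA */
		submit_ctrl(&scanout, sizeof(scanout));
	}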
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 284fc3a..77925f5 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -39,5 +39,7 @@
#define VIRTIO_ID_9P 9 /* 9p virtio console */
#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
#define VIRTIO_ID_CAIF 12 /* Virtio caif */
+#define VIRTIO_ID_GPU 16 /* virtio GPU */
+#define VIRTIO_ID_INPUT 18 /* virtio input */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_input.h b/include/uapi/linux/virtio_input.h
new file mode 100644
index 0000000..a7fe5c8
--- /dev/null
+++ b/include/uapi/linux/virtio_input.h
@@ -0,0 +1,76 @@
+#ifndef _LINUX_VIRTIO_INPUT_H
+#define _LINUX_VIRTIO_INPUT_H
+/* This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE. */
+
+#include <linux/types.h>
+
+enum virtio_input_config_select {
+ VIRTIO_INPUT_CFG_UNSET = 0x00,
+ VIRTIO_INPUT_CFG_ID_NAME = 0x01,
+ VIRTIO_INPUT_CFG_ID_SERIAL = 0x02,
+ VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03,
+ VIRTIO_INPUT_CFG_PROP_BITS = 0x10,
+ VIRTIO_INPUT_CFG_EV_BITS = 0x11,
+ VIRTIO_INPUT_CFG_ABS_INFO = 0x12,
+};
+
+struct virtio_input_absinfo {
+ __u32 min;
+ __u32 max;
+ __u32 fuzz;
+ __u32 flat;
+ __u32 res;
+};
+
+struct virtio_input_devids {
+ __u16 bustype;
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+};
+
+struct virtio_input_config {
+ __u8 select;
+ __u8 subsel;
+ __u8 size;
+ __u8 reserved[5];
+ union {
+ char string[128];
+ __u8 bitmap[128];
+ struct virtio_input_absinfo abs;
+ struct virtio_input_devids ids;
+ } u;
+};
+
+struct virtio_input_event {
+ __le16 type;
+ __le16 code;
+ __le32 value;
+};
+
+#endif /* _LINUX_VIRTIO_INPUT_H */
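A hedged sketch of how the config selector is meant to be driven: write
select/subsel, then read back size bytes of the union. The cfg_read()/
cfg_write() accessors stand in for the transport's config-space operations
and are assumptions, not part of this header.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <linux/virtio_input.h>

	/* Hypothetical config-space accessors (offset, buffer, length). */
	extern void cfg_write(uint32_t off, const void *buf, uint32_t len);
	extern void cfg_read(uint32_t off, void *buf, uint32_t len);

	static void print_device_name(void)
	{
		struct virtio_input_config cfg;
		uint8_t sel = VIRTIO_INPUT_CFG_ID_NAME, subsel = 0;
		char name[sizeof(cfg.u.string) + 1];

		cfg_write(offsetof(struct virtio_input_config, select), &sel, 1);
		cfg_write(offsetof(struct virtio_input_config, subsel), &subsel, 1);
		cfg_read(0, &cfg, sizeof(cfg));	/* cfg.size says how much is valid */

		memcpy(name, cfg.u.string, cfg.size);
		name[cfg.size] = '\0';
		printf("virtio-input device: %s\n", name);
	}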
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index b5f1677..7bbee79 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -35,7 +35,6 @@
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
-#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
@@ -56,6 +55,10 @@
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+#ifndef VIRTIO_NET_NO_LEGACY
+#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
+#endif /* VIRTIO_NET_NO_LEGACY */
+
#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
@@ -71,19 +74,39 @@ struct virtio_net_config {
__u16 max_virtqueue_pairs;
} __attribute__((packed));
+/*
+ * This header comes first in the scatter-gather list. If you don't
+ * specify GSO or CSUM features, you can simply ignore the header.
+ *
+ * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf,
+ * only flattened.
+ */
+struct virtio_net_hdr_v1 {
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
+#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
+ __u8 flags;
+#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */
+#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
+#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
+#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
+ __u8 gso_type;
+ __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
+ __virtio16 gso_size; /* Bytes to append to hdr_len per frame */
+ __virtio16 csum_start; /* Position to start checksumming from */
+ __virtio16 csum_offset; /* Offset after that to place checksum */
+ __virtio16 num_buffers; /* Number of merged rx buffers */
+};
+
+#ifndef VIRTIO_NET_NO_LEGACY
/* This header comes first in the scatter-gather list.
- * If VIRTIO_F_ANY_LAYOUT is not negotiated, it must
+ * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
* be the first element of the scatter-gather list. If you don't
* specify GSO or CSUM features, you can simply ignore the header. */
struct virtio_net_hdr {
-#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
-#define VIRTIO_NET_HDR_F_DATA_VALID 2 // Csum is valid
+ /* See VIRTIO_NET_HDR_F_* */
__u8 flags;
-#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame
-#define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO)
-#define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO)
-#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP
-#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set
+ /* See VIRTIO_NET_HDR_GSO_* */
__u8 gso_type;
__virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
__virtio16 gso_size; /* Bytes to append to hdr_len per frame */
@@ -97,6 +120,7 @@ struct virtio_net_hdr_mrg_rxbuf {
struct virtio_net_hdr hdr;
__virtio16 num_buffers; /* Number of merged rx buffers */
};
+#endif /* ...VIRTIO_NET_NO_LEGACY */
/*
* Control virtqueue data structures
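A short sketch of filling the new flattened header for a transmit frame that
still needs its L4 checksum finished by the device. Treating the __virtio16
fields as little-endian (hence htole16) assumes VIRTIO_F_VERSION_1 was
negotiated; for legacy devices they are guest-native instead.

	#include <endian.h>
	#include <stdint.h>
	#include <string.h>
	#include <linux/virtio_net.h>

	/* l4_offset: start of the TCP/UDP header within the frame.
	 * csum_off:  offset of the checksum field inside that header
	 *            (16 for TCP, 6 for UDP). */
	static void fill_tx_hdr(struct virtio_net_hdr_v1 *h,
				uint16_t l4_offset, uint16_t csum_off)
	{
		memset(h, 0, sizeof(*h));
		h->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		h->gso_type    = VIRTIO_NET_HDR_GSO_NONE;	/* no segmentation */
		h->csum_start  = htole16(l4_offset);
		h->csum_offset = htole16(csum_off);
		/* hdr_len/gso_size are GSO hints and stay 0 for a non-GSO frame;
		 * num_buffers is only meaningful on receive. */
	}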
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 35b552c..7530146 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -39,7 +39,7 @@
#ifndef _LINUX_VIRTIO_PCI_H
#define _LINUX_VIRTIO_PCI_H
-#include <linux/virtio_config.h>
+#include <linux/types.h>
#ifndef VIRTIO_PCI_NO_LEGACY
@@ -99,4 +99,95 @@
/* Vector value used to disable MSI for queue */
#define VIRTIO_MSI_NO_VECTOR 0xffff
+#ifndef VIRTIO_PCI_NO_MODERN
+
+/* IDs for different capabilities. Must all exist. */
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR access */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ __u8 cap_next; /* Generic PCI field: next ptr. */
+ __u8 cap_len; /* Generic PCI field: capability length */
+ __u8 cfg_type; /* Identifies the structure. */
+ __u8 bar; /* Where to find it. */
+ __u8 padding[3]; /* Pad to full dword. */
+ __le32 offset; /* Offset within bar. */
+ __le32 length; /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device. */
+ __le32 device_feature_select; /* read-write */
+ __le32 device_feature; /* read-only */
+ __le32 guest_feature_select; /* read-write */
+ __le32 guest_feature; /* read-write */
+ __le16 msix_config; /* read-write */
+ __le16 num_queues; /* read-only */
+ __u8 device_status; /* read-write */
+ __u8 config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ __le16 queue_select; /* read-write */
+ __le16 queue_size; /* read-write, power of 2. */
+ __le16 queue_msix_vector; /* read-write */
+ __le16 queue_enable; /* read-write */
+ __le16 queue_notify_off; /* read-only */
+ __le32 queue_desc_lo; /* read-write */
+ __le32 queue_desc_hi; /* read-write */
+ __le32 queue_avail_lo; /* read-write */
+ __le32 queue_avail_hi; /* read-write */
+ __le32 queue_used_lo; /* read-write */
+ __le32 queue_used_hi; /* read-write */
+};
+
+/* Macro versions of offsets for the Old Timers! */
+#define VIRTIO_PCI_CAP_VNDR 0
+#define VIRTIO_PCI_CAP_NEXT 1
+#define VIRTIO_PCI_CAP_LEN 2
+#define VIRTIO_PCI_CAP_CFG_TYPE 3
+#define VIRTIO_PCI_CAP_BAR 4
+#define VIRTIO_PCI_CAP_OFFSET 8
+#define VIRTIO_PCI_CAP_LENGTH 12
+
+#define VIRTIO_PCI_NOTIFY_CAP_MULT 16
+
+#define VIRTIO_PCI_COMMON_DFSELECT 0
+#define VIRTIO_PCI_COMMON_DF 4
+#define VIRTIO_PCI_COMMON_GFSELECT 8
+#define VIRTIO_PCI_COMMON_GF 12
+#define VIRTIO_PCI_COMMON_MSIX 16
+#define VIRTIO_PCI_COMMON_NUMQ 18
+#define VIRTIO_PCI_COMMON_STATUS 20
+#define VIRTIO_PCI_COMMON_CFGGENERATION 21
+#define VIRTIO_PCI_COMMON_Q_SELECT 22
+#define VIRTIO_PCI_COMMON_Q_SIZE 24
+#define VIRTIO_PCI_COMMON_Q_MSIX 26
+#define VIRTIO_PCI_COMMON_Q_ENABLE 28
+#define VIRTIO_PCI_COMMON_Q_NOFF 30
+#define VIRTIO_PCI_COMMON_Q_DESCLO 32
+#define VIRTIO_PCI_COMMON_Q_DESCHI 36
+#define VIRTIO_PCI_COMMON_Q_AVAILLO 40
+#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
+#define VIRTIO_PCI_COMMON_Q_USEDLO 48
+#define VIRTIO_PCI_COMMON_Q_USEDHI 52
+
+#endif /* VIRTIO_PCI_NO_MODERN */
+
#endif
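A hedged sketch of consuming the new capability layout: select a queue in the
common config, then derive its notify address from queue_notify_off and the
notify capability's multiplier. The map_bar_range() helper is an assumption;
everything else follows the structures and offsets above.

	#include <endian.h>
	#include <stdint.h>
	#include <linux/virtio_pci.h>

	/* Hypothetical: returns a CPU pointer into the mapped BAR region. */
	extern void *map_bar_range(uint8_t bar, uint32_t offset, uint32_t length);

	static void *queue_notify_addr(const struct virtio_pci_notify_cap *ncap,
				       volatile struct virtio_pci_common_cfg *common,
				       uint16_t qindex)
	{
		uint32_t mult = le32toh(ncap->notify_off_multiplier);
		uint8_t *base = map_bar_range(ncap->cap.bar,
					      le32toh(ncap->cap.offset),
					      le32toh(ncap->cap.length));

		common->queue_select = htole16(qindex);
		return base + le16toh(common->queue_notify_off) * mult;
	}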
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index a3318f3..915980a 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
-/* Assuming a given event_idx value from the other size, if
+/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
* should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
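For context, the check that comment describes is a wrap-safe window test;
roughly (a sketch mirroring the in-tree helper):

	#include <stdint.h>

	/* Fire an event iff event_idx lies in the half-open window (old, new_idx],
	 * computed with unsigned 16-bit wrap-around arithmetic. */
	static inline int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
	{
		return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
	}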
diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h
index 42b9370..cc18ef8 100644
--- a/include/uapi/linux/virtio_scsi.h
+++ b/include/uapi/linux/virtio_scsi.h
@@ -29,8 +29,16 @@
#include <linux/virtio_types.h>
-#define VIRTIO_SCSI_CDB_SIZE 32
-#define VIRTIO_SCSI_SENSE_SIZE 96
+/* Default values of the CDB and sense data size configuration fields */
+#define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32
+#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96
+
+#ifndef VIRTIO_SCSI_CDB_SIZE
+#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE
+#endif
+#ifndef VIRTIO_SCSI_SENSE_SIZE
+#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE
+#endif
/* SCSI command request, followed by data-out */
struct virtio_scsi_cmd_req {
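With the defaults now guarded by #ifndef, a build that negotiates different
CDB or sense sizes can override them before pulling in the header; a minimal
sketch:

	/* Override the sizes before the header is included. */
	#define VIRTIO_SCSI_CDB_SIZE   16
	#define VIRTIO_SCSI_SENSE_SIZE 128
	#include <linux/virtio_scsi.h>

	#include <stdio.h>

	int main(void)
	{
		struct virtio_scsi_cmd_req req;

		/* The cdb array inside the request now uses the overridden size. */
		printf("cdb bytes: %zu\n", sizeof(req.cdb));
		return 0;
	}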
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 02d5125..2cd9e60 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_XFRM_H
#define _LINUX_XFRM_H
+#include <linux/in6.h>
#include <linux/types.h>
/* All of the structures in this file may not change size as they are
@@ -13,6 +14,7 @@
typedef union {
__be32 a4;
__be32 a6[4];
+ struct in6_addr in6;
} xfrm_address_t;
/* Ident of a specific xfrm_state. It is used on input to lookup
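The new in6 member is a typed view of the same 16 bytes that a6[4] already
exposes, so IPv6 users no longer need to cast. A small sketch:

	#include <stdio.h>
	#include <string.h>
	#include <linux/xfrm.h>

	int main(void)
	{
		xfrm_address_t addr;

		memset(&addr, 0, sizeof(addr));
		addr.in6.s6_addr[0] = 0x20;	/* 2001:db8::... */
		addr.in6.s6_addr[1] = 0x01;
		printf("same storage: a6[0]=0x%x s6_addr[0]=0x%x\n",
		       addr.a6[0], addr.in6.s6_addr[0]);
		return 0;
	}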
diff --git a/include/uapi/linux/xilinx-v4l2-controls.h b/include/uapi/linux/xilinx-v4l2-controls.h
new file mode 100644
index 0000000..fb495b9
--- /dev/null
+++ b/include/uapi/linux/xilinx-v4l2-controls.h
@@ -0,0 +1,73 @@
+/*
+ * Xilinx Controls Header
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_XILINX_V4L2_CONTROLS_H__
+#define __UAPI_XILINX_V4L2_CONTROLS_H__
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_XILINX_OFFSET 0xc000
+#define V4L2_CID_XILINX_BASE (V4L2_CID_USER_BASE + V4L2_CID_XILINX_OFFSET)
+
+/*
+ * Private Controls for Xilinx Video IPs
+ */
+
+/*
+ * Xilinx TPG Video IP
+ */
+
+#define V4L2_CID_XILINX_TPG (V4L2_CID_USER_BASE + 0xc000)
+
+/* Draw cross hairs */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIRS (V4L2_CID_XILINX_TPG + 1)
+/* Enable a moving box */
+#define V4L2_CID_XILINX_TPG_MOVING_BOX (V4L2_CID_XILINX_TPG + 2)
+/* Mask out a color component */
+#define V4L2_CID_XILINX_TPG_COLOR_MASK (V4L2_CID_XILINX_TPG + 3)
+/* Enable a stuck pixel feature */
+#define V4L2_CID_XILINX_TPG_STUCK_PIXEL (V4L2_CID_XILINX_TPG + 4)
+/* Enable a noisy output */
+#define V4L2_CID_XILINX_TPG_NOISE (V4L2_CID_XILINX_TPG + 5)
+/* Enable the motion feature */
+#define V4L2_CID_XILINX_TPG_MOTION (V4L2_CID_XILINX_TPG + 6)
+/* Configure the motion speed of moving patterns */
+#define V4L2_CID_XILINX_TPG_MOTION_SPEED (V4L2_CID_XILINX_TPG + 7)
+/* The row of horizontal cross hair location */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW (V4L2_CID_XILINX_TPG + 8)
+/* The column of vertical cross hair location */
+#define V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN (V4L2_CID_XILINX_TPG + 9)
+/* Set starting point of sine wave for horizontal component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_HOR_START (V4L2_CID_XILINX_TPG + 10)
+/* Set speed of the horizontal component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED (V4L2_CID_XILINX_TPG + 11)
+/* Set starting point of sine wave for vertical component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_VER_START (V4L2_CID_XILINX_TPG + 12)
+/* Set speed of the vertical component */
+#define V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED (V4L2_CID_XILINX_TPG + 13)
+/* Moving box size */
+#define V4L2_CID_XILINX_TPG_BOX_SIZE (V4L2_CID_XILINX_TPG + 14)
+/* Moving box color */
+#define V4L2_CID_XILINX_TPG_BOX_COLOR (V4L2_CID_XILINX_TPG + 15)
+/* Upper limit count of generated stuck pixels */
+#define V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH (V4L2_CID_XILINX_TPG + 16)
+/* Noise level */
+#define V4L2_CID_XILINX_TPG_NOISE_GAIN (V4L2_CID_XILINX_TPG + 17)
+
+#endif /* __UAPI_XILINX_V4L2_CONTROLS_H__ */
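These are ordinary V4L2 private controls, so they are driven through the
standard control ioctls. A sketch that enables the TPG moving box; the
subdevice node path is hypothetical and error handling is minimal:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>
	#include <linux/xilinx-v4l2-controls.h>

	int main(void)
	{
		struct v4l2_control ctrl = {
			.id    = V4L2_CID_XILINX_TPG_MOVING_BOX,
			.value = 1,				/* enable the box */
		};
		int fd = open("/dev/v4l-subdev0", O_RDWR);	/* hypothetical node */

		if (fd < 0 || ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
			perror("xilinx tpg control");
		return 0;
	}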
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index cd6d789..99a8ca1 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -32,10 +32,32 @@ struct cxl_ioctl_start_work {
#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
CXL_START_WORK_NUM_IRQS)
+
+/* Possible modes that an afu can be in */
+#define CXL_MODE_DEDICATED 0x1
+#define CXL_MODE_DIRECTED 0x2
+
+/* possible flags for the cxl_afu_id flags field */
+#define CXL_AFUID_FLAG_SLAVE 0x1 /* In directed-mode afu is in slave mode */
+
+struct cxl_afu_id {
+ __u64 flags; /* One of CXL_AFUID_FLAG_X */
+ __u32 card_id;
+ __u32 afu_offset;
+ __u32 afu_mode; /* one of the CXL_MODE_X */
+ __u32 reserved1;
+ __u64 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u64 reserved6;
+};
+
/* ioctl numbers */
#define CXL_MAGIC 0xCA
#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
+#define CXL_IOCTL_GET_AFU_ID _IOR(CXL_MAGIC, 0x02, struct cxl_afu_id)
#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
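A sketch of querying the AFU identity through the new ioctl; the device node
path is hypothetical:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <misc/cxl.h>

	int main(void)
	{
		struct cxl_afu_id id = { 0 };
		int fd = open("/dev/cxl/afu0.0m", O_RDWR);	/* hypothetical node */

		if (fd < 0 || ioctl(fd, CXL_IOCTL_GET_AFU_ID, &id) < 0) {
			perror("CXL_IOCTL_GET_AFU_ID");
			return 1;
		}
		printf("card %u, afu %u, %s mode\n", id.card_id, id.afu_offset,
		       id.afu_mode == CXL_MODE_DEDICATED ? "dedicated" : "directed");
		return 0;
	}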
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 4275b96..978841e 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -91,6 +91,7 @@ enum {
enum {
IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
+ IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
};
@@ -202,10 +203,6 @@ struct ib_uverbs_query_device_resp {
__u8 reserved[4];
};
-enum {
- IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0,
-};
-
struct ib_uverbs_ex_query_device {
__u32 comp_mask;
__u32 reserved;
@@ -224,8 +221,10 @@ struct ib_uverbs_odp_caps {
struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_query_device_resp base;
__u32 comp_mask;
- __u32 reserved;
+ __u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
+ __u64 timestamp_mask;
+ __u64 hca_core_clock; /* in KHZ */
};
struct ib_uverbs_query_port {
@@ -357,11 +356,27 @@ struct ib_uverbs_create_cq {
__u64 driver_data[0];
};
+struct ib_uverbs_ex_create_cq {
+ __u64 user_handle;
+ __u32 cqe;
+ __u32 comp_vector;
+ __s32 comp_channel;
+ __u32 comp_mask;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct ib_uverbs_create_cq_resp {
__u32 cq_handle;
__u32 cqe;
};
+struct ib_uverbs_ex_create_cq_resp {
+ struct ib_uverbs_create_cq_resp base;
+ __u32 comp_mask;
+ __u32 response_length;
+};
+
struct ib_uverbs_resize_cq {
__u64 response;
__u32 cq_handle;
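Turning the reserved field into response_length follows the usual extended-
verbs pattern: the kernel reports how many bytes of the (possibly grown)
response it filled, so userspace can probe for the new timestamp fields
before reading them. A sketch of that check:

	#include <stddef.h>
	#include <rdma/ib_user_verbs.h>

	/* Does this response carry the new hca_core_clock field? */
	static int has_core_clock(const struct ib_uverbs_ex_query_device_resp *resp)
	{
		size_t need = offsetof(struct ib_uverbs_ex_query_device_resp,
				       hca_core_clock) + sizeof(resp->hca_core_clock);

		return resp->response_length >= need;
	}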
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index de69170..6e4bb42 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -37,6 +37,7 @@ enum {
RDMA_NL_IWPM_ADD_MAPPING,
RDMA_NL_IWPM_QUERY_MAPPING,
RDMA_NL_IWPM_REMOVE_MAPPING,
+ RDMA_NL_IWPM_REMOTE_INFO,
RDMA_NL_IWPM_HANDLE_ERR,
RDMA_NL_IWPM_MAPINFO,
RDMA_NL_IWPM_MAPINFO_NUM,
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index 09c8a00..5a5fa49 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -22,6 +22,7 @@
#ifndef _UAPI__SOUND_ASEQUENCER_H
#define _UAPI__SOUND_ASEQUENCER_H
+#include <sound/asound.h>
/** version of the sequencer */
#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION (1, 0, 1)
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
new file mode 100644
index 0000000..1221520
--- /dev/null
+++ b/include/uapi/sound/asoc.h
@@ -0,0 +1,388 @@
+/*
+ * uapi/sound/asoc.h -- ALSA SoC Firmware Controls and DAPM
+ *
+ * Copyright (C) 2012 Texas Instruments Inc.
+ * Copyright (C) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
+ * algorithms, equalisers, DAIs, widgets etc.
+*/
+
+#ifndef __LINUX_UAPI_SND_ASOC_H
+#define __LINUX_UAPI_SND_ASOC_H
+
+#include <linux/types.h>
+#include <sound/asound.h>
+
+/*
+ * Maximum number of channels topology kcontrol can represent.
+ */
+#define SND_SOC_TPLG_MAX_CHAN 8
+
+/*
+ * Maximum number of PCM formats capability
+ */
+#define SND_SOC_TPLG_MAX_FORMATS 16
+
+/*
+ * Maximum number of PCM stream configs
+ */
+#define SND_SOC_TPLG_STREAM_CONFIG_MAX 8
+
+/* individual kcontrol info types - can be mixed with other types */
+#define SND_SOC_TPLG_CTL_VOLSW 1
+#define SND_SOC_TPLG_CTL_VOLSW_SX 2
+#define SND_SOC_TPLG_CTL_VOLSW_XR_SX 3
+#define SND_SOC_TPLG_CTL_ENUM 4
+#define SND_SOC_TPLG_CTL_BYTES 5
+#define SND_SOC_TPLG_CTL_ENUM_VALUE 6
+#define SND_SOC_TPLG_CTL_RANGE 7
+#define SND_SOC_TPLG_CTL_STROBE 8
+
+
+/* individual widget kcontrol info types - can be mixed with other types */
+#define SND_SOC_TPLG_DAPM_CTL_VOLSW 64
+#define SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE 65
+#define SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT 66
+#define SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE 67
+#define SND_SOC_TPLG_DAPM_CTL_PIN 68
+
+/* DAPM widget types - add new items to the end */
+#define SND_SOC_TPLG_DAPM_INPUT 0
+#define SND_SOC_TPLG_DAPM_OUTPUT 1
+#define SND_SOC_TPLG_DAPM_MUX 2
+#define SND_SOC_TPLG_DAPM_MIXER 3
+#define SND_SOC_TPLG_DAPM_PGA 4
+#define SND_SOC_TPLG_DAPM_OUT_DRV 5
+#define SND_SOC_TPLG_DAPM_ADC 6
+#define SND_SOC_TPLG_DAPM_DAC 7
+#define SND_SOC_TPLG_DAPM_SWITCH 8
+#define SND_SOC_TPLG_DAPM_PRE 9
+#define SND_SOC_TPLG_DAPM_POST 10
+#define SND_SOC_TPLG_DAPM_AIF_IN 11
+#define SND_SOC_TPLG_DAPM_AIF_OUT 12
+#define SND_SOC_TPLG_DAPM_DAI_IN 13
+#define SND_SOC_TPLG_DAPM_DAI_OUT 14
+#define SND_SOC_TPLG_DAPM_DAI_LINK 15
+#define SND_SOC_TPLG_DAPM_LAST SND_SOC_TPLG_DAPM_DAI_LINK
+
+/* Header magic number and string sizes */
+#define SND_SOC_TPLG_MAGIC 0x41536F43 /* ASoC */
+
+/* string sizes */
+#define SND_SOC_TPLG_NUM_TEXTS 16
+
+/* ABI version */
+#define SND_SOC_TPLG_ABI_VERSION 0x2
+
+/* Max size of TLV data */
+#define SND_SOC_TPLG_TLV_SIZE 32
+
+/*
+ * File and Block header data types.
+ * Add new generic and vendor types to end of list.
+ * Generic types are handled by the core whilst vendor types are passed
+ * to the component drivers for handling.
+ */
+#define SND_SOC_TPLG_TYPE_MIXER 1
+#define SND_SOC_TPLG_TYPE_BYTES 2
+#define SND_SOC_TPLG_TYPE_ENUM 3
+#define SND_SOC_TPLG_TYPE_DAPM_GRAPH 4
+#define SND_SOC_TPLG_TYPE_DAPM_WIDGET 5
+#define SND_SOC_TPLG_TYPE_DAI_LINK 6
+#define SND_SOC_TPLG_TYPE_PCM 7
+#define SND_SOC_TPLG_TYPE_MANIFEST 8
+#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
+#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK
+
+/* vendor block IDs - please add new vendor types to end */
+#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
+#define SND_SOC_TPLG_TYPE_VENDOR_CONFIG 1001
+#define SND_SOC_TPLG_TYPE_VENDOR_COEFF 1002
+#define SND_SOC_TPLG_TYPEVENDOR_CODEC 1003
+
+#define SND_SOC_TPLG_STREAM_PLAYBACK 0
+#define SND_SOC_TPLG_STREAM_CAPTURE 1
+
+/*
+ * Block Header.
+ * This header precedes all objects and object arrays below.
+ */
+struct snd_soc_tplg_hdr {
+ __le32 magic; /* magic number */
+ __le32 abi; /* ABI version */
+ __le32 version; /* optional vendor specific version details */
+ __le32 type; /* SND_SOC_TPLG_TYPE_ */
+ __le32 size; /* size of this structure */
+ __le32 vendor_type; /* optional vendor specific type info */
+ __le32 payload_size; /* data bytes, excluding this header */
+ __le32 index; /* identifier for block */
+ __le32 count; /* number of elements in block */
+} __attribute__((packed));
+
+/*
+ * Private data.
+ * All topology objects may have private data that can be used by the driver or
+ * firmware. Core will ignore this data.
+ */
+struct snd_soc_tplg_private {
+ __le32 size; /* in bytes of private data */
+ char data[0];
+} __attribute__((packed));
+
+/*
+ * Kcontrol TLV data.
+ */
+struct snd_soc_tplg_ctl_tlv {
+ __le32 size; /* in bytes aligned to 4 */
+ __le32 numid; /* control element numeric identification */
+ __le32 count; /* number of elem in data array */
+ __le32 data[SND_SOC_TPLG_TLV_SIZE];
+} __attribute__((packed));
+
+/*
+ * Kcontrol channel data
+ */
+struct snd_soc_tplg_channel {
+ __le32 size; /* in bytes of this structure */
+ __le32 reg;
+ __le32 shift;
+ __le32 id; /* ID maps to Left, Right, LFE etc */
+} __attribute__((packed));
+
+/*
+ * Kcontrol Operations IDs
+ */
+struct snd_soc_tplg_kcontrol_ops_id {
+ __le32 get;
+ __le32 put;
+ __le32 info;
+} __attribute__((packed));
+
+/*
+ * kcontrol header
+ */
+struct snd_soc_tplg_ctl_hdr {
+ __le32 size; /* in bytes of this structure */
+ __le32 type;
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le32 access;
+ struct snd_soc_tplg_kcontrol_ops_id ops;
+ __le32 tlv_size; /* non zero means control has TLV data */
+} __attribute__((packed));
+
+/*
+ * Stream Capabilities
+ */
+struct snd_soc_tplg_stream_caps {
+ __le32 size; /* in bytes of this structure */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le64 formats[SND_SOC_TPLG_MAX_FORMATS]; /* supported formats SNDRV_PCM_FMTBIT_* */
+ __le32 rates; /* supported rates SNDRV_PCM_RATE_* */
+ __le32 rate_min; /* min rate */
+ __le32 rate_max; /* max rate */
+ __le32 channels_min; /* min channels */
+ __le32 channels_max; /* max channels */
+ __le32 periods_min; /* min number of periods */
+ __le32 periods_max; /* max number of periods */
+ __le32 period_size_min; /* min period size bytes */
+ __le32 period_size_max; /* max period size bytes */
+ __le32 buffer_size_min; /* min buffer size bytes */
+ __le32 buffer_size_max; /* max buffer size bytes */
+} __attribute__((packed));
+
+/*
+ * FE or BE Stream configuration supported by SW/FW
+ */
+struct snd_soc_tplg_stream {
+ __le32 size; /* in bytes of this structure */
+ __le64 format; /* SNDRV_PCM_FMTBIT_* */
+ __le32 rate; /* SNDRV_PCM_RATE_* */
+ __le32 period_bytes; /* size of period in bytes */
+ __le32 buffer_bytes; /* size of buffer in bytes */
+ __le32 channels; /* channels */
+ __le32 tdm_slot; /* optional BE bitmask of supported TDM slots */
+ __le32 dai_fmt; /* SND_SOC_DAIFMT_ */
+} __attribute__((packed));
+
+/*
+ * Duplex stream configuration supported by SW/FW.
+ */
+struct snd_soc_tplg_stream_config {
+ __le32 size; /* in bytes of this structure */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ struct snd_soc_tplg_stream playback;
+ struct snd_soc_tplg_stream capture;
+} __attribute__((packed));
+
+/*
+ * Manifest. List totals for each payload type. Not used in parsing, but will
+ * be passed to the component driver before any other objects so that any
+ * global component resource allocations can be made.
+ *
+ * File block representation for manifest :-
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_manifest | 1 |
+ * +-----------------------------------+----+
+ */
+struct snd_soc_tplg_manifest {
+ __le32 size; /* in bytes of this structure */
+ __le32 control_elems; /* number of control elements */
+ __le32 widget_elems; /* number of widget elements */
+ __le32 graph_elems; /* number of graph elements */
+ __le32 dai_elems; /* number of DAI elements */
+ __le32 dai_link_elems; /* number of DAI link elements */
+} __attribute__((packed));
+
+/*
+ * Mixer kcontrol.
+ *
+ * File block representation for mixer kcontrol :-
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_mixer_control | N |
+ * +-----------------------------------+----+
+ */
+struct snd_soc_tplg_mixer_control {
+ struct snd_soc_tplg_ctl_hdr hdr;
+ __le32 size; /* in bytes of this structure */
+ __le32 min;
+ __le32 max;
+ __le32 platform_max;
+ __le32 invert;
+ __le32 num_channels;
+ struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
+ struct snd_soc_tplg_ctl_tlv tlv;
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
+
+/*
+ * Enumerated kcontrol
+ *
+ * File block representation for enum kcontrol :-
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_enum_control | N |
+ * +-----------------------------------+----+
+ */
+struct snd_soc_tplg_enum_control {
+ struct snd_soc_tplg_ctl_hdr hdr;
+ __le32 size; /* in bytes of this structure */
+ __le32 num_channels;
+ struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
+ __le32 items;
+ __le32 mask;
+ __le32 count;
+ char texts[SND_SOC_TPLG_NUM_TEXTS][SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le32 values[SND_SOC_TPLG_NUM_TEXTS * SNDRV_CTL_ELEM_ID_NAME_MAXLEN / 4];
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
+
+/*
+ * Bytes kcontrol
+ *
+ * File block representation for bytes kcontrol :-
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-----------------------------------+----+
+ * | struct snd_soc_tplg_bytes_control | N |
+ * +-----------------------------------+----+
+ */
+struct snd_soc_tplg_bytes_control {
+ struct snd_soc_tplg_ctl_hdr hdr;
+ __le32 size; /* in bytes of this structure */
+ __le32 max;
+ __le32 mask;
+ __le32 base;
+ __le32 num_regs;
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
+
+/*
+ * DAPM Graph Element
+ *
+ * File block representation for DAPM graph elements :-
+ * +-------------------------------------+----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-------------------------------------+----+
+ * | struct snd_soc_tplg_dapm_graph_elem | N |
+ * +-------------------------------------+----+
+ */
+struct snd_soc_tplg_dapm_graph_elem {
+ char sink[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char control[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char source[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+} __attribute__((packed));
+
+/*
+ * DAPM Widget.
+ *
+ * File block representation for DAPM widget :-
+ * +-------------------------------------+-----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-------------------------------------+-----+
+ * | struct snd_soc_tplg_dapm_widget | N |
+ * +-------------------------------------+-----+
+ * | struct snd_soc_tplg_enum_control | 0|1 |
+ * | struct snd_soc_tplg_mixer_control | 0|N |
+ * +-------------------------------------+-----+
+ *
+ * Optional enum or mixer control can be appended to the end of each widget
+ * in the block.
+ */
+struct snd_soc_tplg_dapm_widget {
+ __le32 size; /* in bytes of this structure */
+ __le32 id; /* SND_SOC_DAPM_CTL */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ char sname[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
+ __le32 reg; /* negative reg = no direct dapm */
+ __le32 shift; /* bits to shift */
+ __le32 mask; /* non-shifted mask */
+ __u32 invert; /* invert the power bit */
+ __u32 ignore_suspend; /* kept enabled over suspend */
+ __u16 event_flags;
+ __u16 event_type;
+ __u16 num_kcontrols;
+ struct snd_soc_tplg_private priv;
+ /*
+ * kcontrols that relate to this widget
+ * follow here after widget private data
+ */
+} __attribute__((packed));
+
+struct snd_soc_tplg_pcm_cfg_caps {
+ struct snd_soc_tplg_stream_caps caps;
+ struct snd_soc_tplg_stream_config configs[SND_SOC_TPLG_STREAM_CONFIG_MAX];
+ __le32 num_configs; /* number of configs */
+} __attribute__((packed));
+
+/*
+ * Describes SW/FW specific features of PCM or DAI link.
+ *
+ * File block representation for PCM/DAI-Link :-
+ * +-----------------------------------+-----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-----------------------------------+-----+
+ * | struct snd_soc_tplg_dapm_pcm_dai | N |
+ * +-----------------------------------+-----+
+ */
+struct snd_soc_tplg_pcm_dai {
+ __le32 size; /* in bytes of this structure */
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ __le32 id; /* unique ID - used to match */
+ __le32 playback; /* supports playback mode */
+ __le32 capture; /* supports capture mode */
+ __le32 compress; /* 1 = compressed; 0 = PCM */
+ struct snd_soc_tplg_pcm_cfg_caps capconf[2]; /* capabilities and configs */
+} __attribute__((packed));
+
+#endif
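A hedged sketch of how a topology block starts: every block leads with a
snd_soc_tplg_hdr whose payload_size excludes the header itself, followed by
count objects of the given type. The direct assignments assume a little-endian
build host; a big-endian one would need explicit cpu-to-le32 conversion for
the __le32 fields.

	#include <stdint.h>
	#include <string.h>
	#include <sound/asoc.h>

	/* Fill the header + manifest pair that opens a topology file. */
	static void fill_manifest_block(struct snd_soc_tplg_hdr *hdr,
					struct snd_soc_tplg_manifest *man,
					uint32_t num_controls)
	{
		memset(hdr, 0, sizeof(*hdr));
		hdr->magic        = SND_SOC_TPLG_MAGIC;
		hdr->abi          = SND_SOC_TPLG_ABI_VERSION;
		hdr->type         = SND_SOC_TPLG_TYPE_MANIFEST;
		hdr->size         = sizeof(*hdr);
		hdr->payload_size = sizeof(*man);	/* data after the header */
		hdr->count        = 1;			/* one manifest object */

		memset(man, 0, sizeof(*man));
		man->size          = sizeof(*man);
		man->control_elems = num_controls;
	}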
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 1f23cd6..a45be6b 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -25,6 +25,9 @@
#include <linux/types.h>
+#ifndef __KERNEL__
+#include <stdlib.h>
+#endif
/*
* protocol version
@@ -140,7 +143,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 12)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 13)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -267,9 +270,17 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */
#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */
#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP 0x00800000 /* period wakeup can be disabled */
-#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* has audio wall clock for audio/system time sync */
+#define SNDRV_PCM_INFO_HAS_WALL_CLOCK 0x01000000 /* (Deprecated) has audio wall clock for audio/system time sync */
+#define SNDRV_PCM_INFO_HAS_LINK_ATIME 0x01000000 /* report hardware link audio time, reset on startup */
+#define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */
+#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
+#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
+
+#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
+
+
typedef int __bitwise snd_pcm_state_t;
#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
#define SNDRV_PCM_STATE_SETUP ((__force snd_pcm_state_t) 1) /* stream has a setup */
@@ -407,6 +418,22 @@ struct snd_pcm_channel_info {
unsigned int step; /* samples distance in bits */
};
+enum {
+ /*
+ * first definition for backwards compatibility only,
+ * maps to wallclock/link time for HDAudio playback and DEFAULT/DMA time for everything else
+ */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_COMPAT = 0,
+
+ /* timestamp definitions */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT = 1, /* DMA time, reported as per hw_ptr */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK = 2, /* link time reported by sample or wallclock counter, reset on startup */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ABSOLUTE = 3, /* link time reported by sample or wallclock counter, not reset on startup */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_ESTIMATED = 4, /* link time estimated indirectly */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED = 5, /* link time synchronized with system time */
+ SNDRV_PCM_AUDIO_TSTAMP_TYPE_LAST = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED
+};
+
struct snd_pcm_status {
snd_pcm_state_t state; /* stream state */
struct timespec trigger_tstamp; /* time when stream was started/stopped/paused */
@@ -418,9 +445,11 @@ struct snd_pcm_status {
snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */
snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */
snd_pcm_state_t suspended_state; /* suspended stream state */
- __u32 reserved_alignment; /* must be filled with zero */
- struct timespec audio_tstamp; /* from sample counter or wall clock */
- unsigned char reserved[56-sizeof(struct timespec)]; /* must be filled with zero */
+ __u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */
+ struct timespec audio_tstamp; /* sample counter, wall clock, PHC or on-demand sync'ed */
+ struct timespec driver_tstamp; /* useful in case reference system tstamp is reported with delay */
+ __u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */
+ unsigned char reserved[52-2*sizeof(struct timespec)]; /* must be filled with zero */
};
struct snd_pcm_mmap_status {
@@ -533,6 +562,7 @@ enum {
#define SNDRV_PCM_IOCTL_DELAY _IOR('A', 0x21, snd_pcm_sframes_t)
#define SNDRV_PCM_IOCTL_HWSYNC _IO('A', 0x22)
#define SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct snd_pcm_sync_ptr)
+#define SNDRV_PCM_IOCTL_STATUS_EXT _IOWR('A', 0x24, struct snd_pcm_status)
#define SNDRV_PCM_IOCTL_CHANNEL_INFO _IOR('A', 0x32, struct snd_pcm_channel_info)
#define SNDRV_PCM_IOCTL_PREPARE _IO('A', 0x40)
#define SNDRV_PCM_IOCTL_RESET _IO('A', 0x41)
@@ -834,7 +864,7 @@ struct snd_ctl_elem_id {
snd_ctl_elem_iface_t iface; /* interface identifier */
unsigned int device; /* device/client number */
unsigned int subdevice; /* subdevice (substream) number */
- unsigned char name[44]; /* ASCII name of item */
+ unsigned char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* ASCII name of item */
unsigned int index; /* index of item */
};
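A sketch of the new STATUS_EXT ioctl; it reuses struct snd_pcm_status, and the
separate driver_tstamp lets callers estimate how stale the report is. The PCM
fd is assumed to be open already, and audio_tstamp_data is left zero here,
which per the comments above requests the compat behaviour.

	#include <stdio.h>
	#include <string.h>
	#include <time.h>
	#include <sys/ioctl.h>
	#include <sound/asound.h>

	static int dump_tstamps(int pcm_fd)
	{
		struct snd_pcm_status st;

		memset(&st, 0, sizeof(st));
		if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_STATUS_EXT, &st) < 0) {
			perror("SNDRV_PCM_IOCTL_STATUS_EXT");
			return -1;
		}
		printf("audio  %ld.%09ld\n",
		       (long)st.audio_tstamp.tv_sec, st.audio_tstamp.tv_nsec);
		printf("driver %ld.%09ld\n",
		       (long)st.driver_tstamp.tv_sec, st.driver_tstamp.tv_nsec);
		return 0;
	}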
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 22ed8cb..e00d8cb 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -75,7 +75,7 @@ struct snd_compr_tstamp {
/**
* struct snd_compr_avail - avail descriptor
* @avail: Number of bytes available in ring buffer for writing/reading
- * @tstamp: timestamp infomation
+ * @tstamp: timestamp information
*/
struct snd_compr_avail {
__u64 avail;
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
index d1bbaf7..ec1535b 100644
--- a/include/uapi/sound/emu10k1.h
+++ b/include/uapi/sound/emu10k1.h
@@ -23,8 +23,7 @@
#define _UAPI__SOUND_EMU10K1_H
#include <linux/types.h>
-
-
+#include <sound/asound.h>
/*
* ---- FX8010 ----
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index b357f1a..5737332 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -20,6 +20,12 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
/* Maximum channels is 64 even on 56Mode you have 64playbacks to matrix */
#define HDSPM_MAX_CHANNELS 64
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
new file mode 100644
index 0000000..ffc4f20
--- /dev/null
+++ b/include/uapi/sound/tlv.h
@@ -0,0 +1,31 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_SOUND_TLV_H
+#define __UAPI_SOUND_TLV_H
+
+#define SNDRV_CTL_TLVT_CONTAINER 0 /* one level down - group of TLVs */
+#define SNDRV_CTL_TLVT_DB_SCALE 1 /* dB scale */
+#define SNDRV_CTL_TLVT_DB_LINEAR 2 /* linear volume */
+#define SNDRV_CTL_TLVT_DB_RANGE 3 /* dB range container */
+#define SNDRV_CTL_TLVT_DB_MINMAX 4 /* dB scale with min/max */
+#define SNDRV_CTL_TLVT_DB_MINMAX_MUTE 5 /* dB scale with min/max with mute */
+
+/*
+ * channel-mapping TLV items
+ * TLV length must match with num_channels
+ */
+#define SNDRV_CTL_TLVT_CHMAP_FIXED 0x101 /* fixed channel position */
+#define SNDRV_CTL_TLVT_CHMAP_VAR 0x102 /* channels freely swappable */
+#define SNDRV_CTL_TLVT_CHMAP_PAIRED 0x103 /* pair-wise swappable */
+
+#endif
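TLV blobs are flat arrays of 32-bit words: type, payload length in bytes, then
the payload. A sketch of a dB-scale TLV the way a control would expose it; the
0.01 dB units and the 0x10000 mute bit in the step word follow the in-tree
DECLARE_TLV_DB_SCALE convention and are assumptions, not defined by this uapi
header.

	#include <stdint.h>
	#include <sound/tlv.h>

	/* -50.00 dB .. 0 dB in 1.00 dB steps, lowest value mutes. */
	static const uint32_t volume_tlv[] = {
		SNDRV_CTL_TLVT_DB_SCALE,	/* type */
		2 * sizeof(uint32_t),		/* payload: min + step words */
		(uint32_t)-5000,		/* min, in 0.01 dB units */
		100 | 0x10000,			/* step 1 dB, mute at minimum */
	};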
diff --git a/include/uapi/sound/usb_stream.h b/include/uapi/sound/usb_stream.h
new file mode 100644
index 0000000..cfe8fba
--- /dev/null
+++ b/include/uapi/sound/usb_stream.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _UAPI__SOUND_USB_STREAM_H
+#define _UAPI__SOUND_USB_STREAM_H
+
+#define USB_STREAM_INTERFACE_VERSION 2
+
+#define SNDRV_USB_STREAM_IOCTL_SET_PARAMS \
+ _IOW('H', 0x90, struct usb_stream_config)
+
+struct usb_stream_packet {
+ unsigned offset;
+ unsigned length;
+};
+
+
+struct usb_stream_config {
+ unsigned version;
+ unsigned sample_rate;
+ unsigned period_frames;
+ unsigned frame_size;
+};
+
+struct usb_stream {
+ struct usb_stream_config cfg;
+ unsigned read_size;
+ unsigned write_size;
+
+ int period_size;
+
+ unsigned state;
+
+ int idle_insize;
+ int idle_outsize;
+ int sync_packet;
+ unsigned insize_done;
+ unsigned periods_done;
+ unsigned periods_polled;
+
+ struct usb_stream_packet outpacket[2];
+ unsigned inpackets;
+ unsigned inpacket_head;
+ unsigned inpacket_split;
+ unsigned inpacket_split_at;
+ unsigned next_inpacket_split;
+ unsigned next_inpacket_split_at;
+ struct usb_stream_packet inpacket[0];
+};
+
+enum usb_stream_state {
+ usb_stream_invalid,
+ usb_stream_stopped,
+ usb_stream_sync0,
+ usb_stream_sync1,
+ usb_stream_ready,
+ usb_stream_running,
+ usb_stream_xrun,
+};
+
+#endif /* _UAPI__SOUND_USB_STREAM_H */
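A sketch of configuring the stream through the new ioctl on the card's hwdep
node; the device path and the chosen parameters are hypothetical:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sound/usb_stream.h>

	int main(void)
	{
		struct usb_stream_config cfg = {
			.version       = USB_STREAM_INTERFACE_VERSION,
			.sample_rate   = 48000,
			.period_frames = 256,
			.frame_size    = 6,		/* e.g. 24-bit stereo */
		};
		int fd = open("/dev/snd/hwC1D0", O_RDWR);	/* hypothetical node */

		if (fd < 0 || ioctl(fd, SNDRV_USB_STREAM_IOCTL_SET_PARAMS, &cfg) < 0)
			perror("usb_stream set_params");
		return 0;
	}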
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
new file mode 100644
index 0000000..3696575
--- /dev/null
+++ b/include/video/exynos5433_decon.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_DECON_H
+#define EXYNOS_REGS_DECON_H
+
+/* Exynos543X DECON */
+#define DECON_VIDCON0 0x0000
+#define DECON_VIDOUTCON0 0x0010
+#define DECON_WINCONx(n) (0x0020 + ((n) * 4))
+#define DECON_VIDOSDxH(n) (0x0080 + ((n) * 4))
+#define DECON_SHADOWCON 0x00A0
+#define DECON_VIDOSDxA(n) (0x00B0 + ((n) * 0x20))
+#define DECON_VIDOSDxB(n) (0x00B4 + ((n) * 0x20))
+#define DECON_VIDOSDxC(n) (0x00B8 + ((n) * 0x20))
+#define DECON_VIDOSDxD(n) (0x00BC + ((n) * 0x20))
+#define DECON_VIDOSDxE(n) (0x00C0 + ((n) * 0x20))
+#define DECON_VIDW0xADD0B0(n) (0x0150 + ((n) * 0x10))
+#define DECON_VIDW0xADD0B1(n) (0x0154 + ((n) * 0x10))
+#define DECON_VIDW0xADD0B2(n) (0x0158 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B0(n) (0x01A0 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B1(n) (0x01A4 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B2(n) (0x01A8 + ((n) * 0x10))
+#define DECON_VIDW0xADD2(n) (0x0200 + ((n) * 4))
+#define DECON_LOCALxSIZE(n) (0x0214 + ((n) * 4))
+#define DECON_VIDINTCON0 0x0220
+#define DECON_VIDINTCON1 0x0224
+#define DECON_WxKEYCON0(n) (0x0230 + ((n - 1) * 8))
+#define DECON_WxKEYCON1(n) (0x0234 + ((n - 1) * 8))
+#define DECON_WxKEYALPHA(n) (0x0250 + ((n - 1) * 4))
+#define DECON_WINxMAP(n) (0x0270 + ((n) * 4))
+#define DECON_QOSLUT07_00 0x02C0
+#define DECON_QOSLUT15_08 0x02C4
+#define DECON_QOSCTRL 0x02C8
+#define DECON_BLENDERQx(n) (0x0300 + ((n - 1) * 4))
+#define DECON_BLENDCON 0x0310
+#define DECON_OPE_VIDW0xADD0(n) (0x0400 + ((n) * 4))
+#define DECON_OPE_VIDW0xADD1(n) (0x0414 + ((n) * 4))
+#define DECON_FRAMEFIFO_REG7 0x051C
+#define DECON_FRAMEFIFO_REG8 0x0520
+#define DECON_FRAMEFIFO_STATUS 0x0524
+#define DECON_CMU 0x1404
+#define DECON_UPDATE 0x1410
+#define DECON_UPDATE_SCHEME 0x1438
+#define DECON_VIDCON1 0x2000
+#define DECON_VIDCON2 0x2004
+#define DECON_VIDCON3 0x2008
+#define DECON_VIDCON4 0x200C
+#define DECON_VIDTCON2 0x2028
+#define DECON_FRAME_SIZE 0x2038
+#define DECON_LINECNT_OP_THRESHOLD 0x203C
+#define DECON_TRIGCON 0x2040
+#define DECON_TRIGSKIP 0x2050
+#define DECON_CRCRDATA 0x20B0
+#define DECON_CRCCTRL 0x20B4
+
+/* Exynos5430 DECON */
+#define DECON_VIDTCON0 0x2020
+#define DECON_VIDTCON1 0x2024
+
+/* Exynos5433 DECON */
+#define DECON_VIDTCON00 0x2010
+#define DECON_VIDTCON01 0x2014
+#define DECON_VIDTCON10 0x2018
+#define DECON_VIDTCON11 0x201C
+
+/* Exynos543X DECON Internal */
+#define DECON_W013DSTREOCON 0x0320
+#define DECON_W233DSTREOCON 0x0324
+#define DECON_FRAMEFIFO_REG0 0x0500
+#define DECON_ENHANCER_CTRL 0x2100
+
+/* Exynos543X DECON TV */
+#define DECON_VCLKCON0 0x0014
+#define DECON_VIDINTCON2 0x0228
+#define DECON_VIDINTCON3 0x022C
+
+/* VIDCON0 */
+#define VIDCON0_SWRESET (1 << 28)
+#define VIDCON0_STOP_STATUS (1 << 2)
+#define VIDCON0_ENVID (1 << 1)
+#define VIDCON0_ENVID_F (1 << 0)
+
+/* VIDOUTCON0 */
+#define VIDOUT_LCD_ON (1 << 24)
+#define VIDOUT_IF_F_MASK (0x3 << 20)
+#define VIDOUT_RGB_IF (0x0 << 20)
+#define VIDOUT_COMMAND_IF (0x2 << 20)
+
+/* WINCONx */
+#define WINCONx_HAWSWP_F (1 << 16)
+#define WINCONx_WSWP_F (1 << 15)
+#define WINCONx_BURSTLEN_MASK (0x3 << 10)
+#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
+#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
+#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
+#define WINCONx_BLD_PIX_F (1 << 6)
+#define WINCONx_BPPMODE_MASK (0xf << 2)
+#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
+#define WINCONx_BPPMODE_16BPP_A1555 (0x6 << 2)
+#define WINCONx_BPPMODE_16BPP_I1555 (0x7 << 2)
+#define WINCONx_BPPMODE_24BPP_888 (0xb << 2)
+#define WINCONx_BPPMODE_24BPP_A1887 (0xc << 2)
+#define WINCONx_BPPMODE_25BPP_A1888 (0xd << 2)
+#define WINCONx_BPPMODE_32BPP_A8888 (0xd << 2)
+#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
+#define WINCONx_ALPHA_SEL_F (1 << 1)
+#define WINCONx_ENWIN_F (1 << 0)
+
+/* SHADOWCON */
+#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
+
+/* VIDOSDxD */
+#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
+#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
+#define VIDOSD_Wx_ALPHA_B_F(n) (((n) & 0xff) << 0)
+
+/* VIDINTCON0 */
+#define VIDINTCON0_FRAMEDONE (1 << 17)
+#define VIDINTCON0_INTFRMEN (1 << 12)
+#define VIDINTCON0_INTEN (1 << 0)
+
+/* VIDINTCON1 */
+#define VIDINTCON1_INTFRMDONEPEND (1 << 2)
+#define VIDINTCON1_INTFRMPEND (1 << 1)
+#define VIDINTCON1_INTFIFOPEND (1 << 0)
+
+/* DECON_CMU */
+#define CMU_CLKGAGE_MODE_SFR_F (1 << 1)
+#define CMU_CLKGAGE_MODE_MEM_F (1 << 0)
+
+/* DECON_UPDATE */
+#define STANDALONE_UPDATE_F (1 << 0)
+
+/* DECON_VIDTCON00 */
+#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16)
+#define VIDTCON00_VFPD_F(x) ((x) & 0xfff)
+
+/* DECON_VIDTCON01 */
+#define VIDTCON01_VSPW_F(x) (((x) & 0xfff) << 16)
+
+/* DECON_VIDTCON10 */
+#define VIDTCON10_HBPD_F(x) (((x) & 0xfff) << 16)
+#define VIDTCON10_HFPD_F(x) ((x) & 0xfff)
+
+/* DECON_VIDTCON11 */
+#define VIDTCON11_HSPW_F(x) (((x) & 0xfff) << 16)
+
+/* DECON_VIDTCON2 */
+#define VIDTCON2_LINEVAL(x) (((x) & 0xfff) << 16)
+#define VIDTCON2_HOZVAL(x) ((x) & 0xfff)
+
+/* TRIGCON */
+#define TRIGCON_TRIGEN_PER_F (1 << 31)
+#define TRIGCON_TRIGEN_F (1 << 30)
+#define TRIGCON_TE_AUTO_MASK (1 << 29)
+#define TRIGCON_SWTRIGCMD (1 << 1)
+#define TRIGCON_SWTRIGEN (1 << 0)
+
+#endif /* EXYNOS_REGS_DECON_H */
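A driver-context sketch of composing a window-control value from these masks
and latching it through the shadow/update registers; the decon_write() MMIO
helper is an assumption (a real driver would use writel() on an ioremapped
base), and exynos5433_decon.h is an in-kernel header, not uapi.

	#include <stdint.h>
	#include <video/exynos5433_decon.h>

	/* Hypothetical MMIO accessor: write val to the register at offset reg. */
	extern void decon_write(uint32_t reg, uint32_t val);

	static void decon_enable_win0_xrgb8888(void)
	{
		uint32_t val = WINCONx_WSWP_F | WINCONx_BURSTLEN_16WORD |
			       WINCONx_BPPMODE_24BPP_888 | WINCONx_ENWIN_F;

		decon_write(DECON_SHADOWCON, SHADOWCON_Wx_PROTECT(0));	/* freeze win 0 */
		decon_write(DECON_WINCONx(0), val);
		decon_write(DECON_SHADOWCON, 0);			/* unprotect */
		decon_write(DECON_UPDATE, STANDALONE_UPDATE_F);		/* latch */
	}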
diff --git a/include/video/exynos7_decon.h b/include/video/exynos7_decon.h
new file mode 100644
index 0000000..a62b11b
--- /dev/null
+++ b/include/video/exynos7_decon.h
@@ -0,0 +1,349 @@
+/* include/video/exynos7_decon.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Ajay Kumar <ajaykumar.rs@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/* VIDCON0 */
+#define VIDCON0 0x00
+
+#define VIDCON0_SWRESET (1 << 28)
+#define VIDCON0_DECON_STOP_STATUS (1 << 2)
+#define VIDCON0_ENVID (1 << 1)
+#define VIDCON0_ENVID_F (1 << 0)
+
+/* VIDOUTCON0 */
+#define VIDOUTCON0 0x4
+
+#define VIDOUTCON0_DUAL_MASK (0x3 << 24)
+#define VIDOUTCON0_DUAL_ON (0x3 << 24)
+#define VIDOUTCON0_DISP_IF_1_ON (0x2 << 24)
+#define VIDOUTCON0_DISP_IF_0_ON (0x1 << 24)
+#define VIDOUTCON0_DUAL_OFF (0x0 << 24)
+#define VIDOUTCON0_IF_SHIFT 23
+#define VIDOUTCON0_IF_MASK (0x1 << 23)
+#define VIDOUTCON0_RGBIF (0x0 << 23)
+#define VIDOUTCON0_I80IF (0x1 << 23)
+
+/* VIDCON3 */
+#define VIDCON3 0x8
+
+/* VIDCON4 */
+#define VIDCON4 0xC
+#define VIDCON4_FIFOCNT_START_EN (1 << 0)
+
+/* VCLKCON0 */
+#define VCLKCON0 0x10
+#define VCLKCON0_CLKVALUP (1 << 8)
+#define VCLKCON0_VCLKFREE (1 << 0)
+
+/* VCLKCON */
+#define VCLKCON1 0x14
+#define VCLKCON1_CLKVAL_NUM_VCLK(val) (((val) & 0xff) << 0)
+#define VCLKCON2 0x18
+
+/* SHADOWCON */
+#define SHADOWCON 0x30
+
+#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win)))
+
+/* WINCONx */
+#define WINCON(_win) (0x50 + ((_win) * 4))
+
+#define WINCONx_BUFSTATUS (0x3 << 30)
+#define WINCONx_BUFSEL_MASK (0x3 << 28)
+#define WINCONx_BUFSEL_SHIFT 28
+#define WINCONx_TRIPLE_BUF_MODE (0x1 << 18)
+#define WINCONx_DOUBLE_BUF_MODE (0x0 << 18)
+#define WINCONx_BURSTLEN_16WORD (0x0 << 11)
+#define WINCONx_BURSTLEN_8WORD (0x1 << 11)
+#define WINCONx_BURSTLEN_MASK (0x1 << 11)
+#define WINCONx_BURSTLEN_SHIFT 11
+#define WINCONx_BLD_PLANE (0 << 8)
+#define WINCONx_BLD_PIX (1 << 8)
+#define WINCONx_ALPHA_MUL (1 << 7)
+
+#define WINCONx_BPPMODE_MASK (0xf << 2)
+#define WINCONx_BPPMODE_SHIFT 2
+#define WINCONx_BPPMODE_16BPP_565 (0x8 << 2)
+#define WINCONx_BPPMODE_24BPP_BGRx (0x7 << 2)
+#define WINCONx_BPPMODE_24BPP_RGBx (0x6 << 2)
+#define WINCONx_BPPMODE_24BPP_xBGR (0x5 << 2)
+#define WINCONx_BPPMODE_24BPP_xRGB (0x4 << 2)
+#define WINCONx_BPPMODE_32BPP_BGRA (0x3 << 2)
+#define WINCONx_BPPMODE_32BPP_RGBA (0x2 << 2)
+#define WINCONx_BPPMODE_32BPP_ABGR (0x1 << 2)
+#define WINCONx_BPPMODE_32BPP_ARGB (0x0 << 2)
+#define WINCONx_ALPHA_SEL (1 << 1)
+#define WINCONx_ENWIN (1 << 0)
+
+#define WINCON1_ALPHA_MUL_F (1 << 7)
+#define WINCON2_ALPHA_MUL_F (1 << 7)
+#define WINCON3_ALPHA_MUL_F (1 << 7)
+#define WINCON4_ALPHA_MUL_F (1 << 7)
+
+/* VIDOSDxH: The height for the OSD image (READ ONLY) */
+#define VIDOSD_H(_x) (0x80 + ((_x) * 4))
+
+/* Frame buffer start addresses: VIDWxxADD0n */
+#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10))
+#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10))
+#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10))
+
+#define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8))
+#define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8))
+#define VIDW_OFFSET_X(_win) (0x0170 + ((_win) * 8))
+#define VIDW_OFFSET_Y(_win) (0x0174 + ((_win) * 8))
+#define VIDW_BLKOFFSET(_win) (0x01B0 + ((_win) * 4))
+#define VIDW_BLKSIZE(_win) (0x0200 + ((_win) * 4))
+
+/* Interrupt controls register */
+#define VIDINTCON2 0x228
+
+#define VIDINTCON1_INTEXTRA1_EN (1 << 1)
+#define VIDINTCON1_INTEXTRA0_EN (1 << 0)
+
+/* Interrupt controls and status register */
+#define VIDINTCON3 0x22C
+
+#define VIDINTCON1_INTEXTRA1_PEND (1 << 1)
+#define VIDINTCON1_INTEXTRA0_PEND (1 << 0)
+
+/* VIDOSDxA ~ VIDOSDxE */
+#define VIDOSD_BASE 0x230
+
+#define OSD_STRIDE 0x20
+
+#define VIDOSD_A(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x00)
+#define VIDOSD_B(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x04)
+#define VIDOSD_C(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x08)
+#define VIDOSD_D(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x0C)
+#define VIDOSD_E(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x10)
+
+#define VIDOSDxA_TOPLEFT_X_MASK (0x1fff << 13)
+#define VIDOSDxA_TOPLEFT_X_SHIFT 13
+#define VIDOSDxA_TOPLEFT_X_LIMIT 0x1fff
+#define VIDOSDxA_TOPLEFT_X(_x) (((_x) & 0x1fff) << 13)
+
+#define VIDOSDxA_TOPLEFT_Y_MASK (0x1fff << 0)
+#define VIDOSDxA_TOPLEFT_Y_SHIFT 0
+#define VIDOSDxA_TOPLEFT_Y_LIMIT 0x1fff
+#define VIDOSDxA_TOPLEFT_Y(_x) (((_x) & 0x1fff) << 0)
+
+#define VIDOSDxB_BOTRIGHT_X_MASK (0x1fff << 13)
+#define VIDOSDxB_BOTRIGHT_X_SHIFT 13
+#define VIDOSDxB_BOTRIGHT_X_LIMIT 0x1fff
+#define VIDOSDxB_BOTRIGHT_X(_x) (((_x) & 0x1fff) << 13)
+
+#define VIDOSDxB_BOTRIGHT_Y_MASK (0x1fff << 0)
+#define VIDOSDxB_BOTRIGHT_Y_SHIFT 0
+#define VIDOSDxB_BOTRIGHT_Y_LIMIT 0x1fff
+#define VIDOSDxB_BOTRIGHT_Y(_x) (((_x) & 0x1fff) << 0)
+
+#define VIDOSDxC_ALPHA0_R_F(_x) (((_x) & 0xFF) << 16)
+#define VIDOSDxC_ALPHA0_G_F(_x) (((_x) & 0xFF) << 8)
+#define VIDOSDxC_ALPHA0_B_F(_x) (((_x) & 0xFF) << 0)
+
+#define VIDOSDxD_ALPHA1_R_F(_x) (((_x) & 0xFF) << 16)
+#define VIDOSDxD_ALPHA1_G_F(_x) (((_x) & 0xFF) << 8)
+#define VIDOSDxD_ALPHA1_B_F(_x) (((_x) & 0xFF) >> 0)
+
+/* Window MAP (Color map) */
+#define WINxMAP(_win) (0x340 + ((_win) * 4))
+
+#define WINxMAP_MAP (1 << 24)
+#define WINxMAP_MAP_COLOUR_MASK (0xffffff << 0)
+#define WINxMAP_MAP_COLOUR_SHIFT 0
+#define WINxMAP_MAP_COLOUR_LIMIT 0xffffff
+#define WINxMAP_MAP_COLOUR(_x) ((_x) << 0)
+
+/* Window colour-key control registers */
+#define WKEYCON 0x370
+
+#define WKEYCON0 0x00
+#define WKEYCON1 0x04
+#define WxKEYCON0_KEYBL_EN (1 << 26)
+#define WxKEYCON0_KEYEN_F (1 << 25)
+#define WxKEYCON0_DIRCON (1 << 24)
+#define WxKEYCON0_COMPKEY_MASK (0xffffff << 0)
+#define WxKEYCON0_COMPKEY_SHIFT 0
+#define WxKEYCON0_COMPKEY_LIMIT 0xffffff
+#define WxKEYCON0_COMPKEY(_x) ((_x) << 0)
+#define WxKEYCON1_COLVAL_MASK (0xffffff << 0)
+#define WxKEYCON1_COLVAL_SHIFT 0
+#define WxKEYCON1_COLVAL_LIMIT 0xffffff
+#define WxKEYCON1_COLVAL(_x) ((_x) << 0)
+
+/* color key control register for hardware window 1 ~ 4. */
+#define WKEYCON0_BASE(x) ((WKEYCON + WKEYCON0) + ((x - 1) * 8))
+/* color key value register for hardware window 1 ~ 4. */
+#define WKEYCON1_BASE(x) ((WKEYCON + WKEYCON1) + ((x - 1) * 8))
+
+/* Window KEY Alpha value */
+#define WxKEYALPHA(_win) (0x3A0 + (((_win) - 1) * 0x4))
+
+#define Wx_KEYALPHA_R_F_SHIFT 16
+#define Wx_KEYALPHA_G_F_SHIFT 8
+#define Wx_KEYALPHA_B_F_SHIFT 0
+
+/* Blending equation */
+#define BLENDE(_win) (0x03C0 + ((_win) * 4))
+#define BLENDE_COEF_ZERO 0x0
+#define BLENDE_COEF_ONE 0x1
+#define BLENDE_COEF_ALPHA_A 0x2
+#define BLENDE_COEF_ONE_MINUS_ALPHA_A 0x3
+#define BLENDE_COEF_ALPHA_B 0x4
+#define BLENDE_COEF_ONE_MINUS_ALPHA_B 0x5
+#define BLENDE_COEF_ALPHA0 0x6
+#define BLENDE_COEF_A 0xA
+#define BLENDE_COEF_ONE_MINUS_A 0xB
+#define BLENDE_COEF_B 0xC
+#define BLENDE_COEF_ONE_MINUS_B 0xD
+#define BLENDE_Q_FUNC(_v) ((_v) << 18)
+#define BLENDE_P_FUNC(_v) ((_v) << 12)
+#define BLENDE_B_FUNC(_v) ((_v) << 6)
+#define BLENDE_A_FUNC(_v) ((_v) << 0)
+
+/* Blending equation control */
+#define BLENDCON 0x3D8
+#define BLENDCON_NEW_MASK (1 << 0)
+#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
+#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+
+/* Interrupt control register */
+#define VIDINTCON0 0x500
+
+#define VIDINTCON0_WAKEUP_MASK (0x3f << 26)
+#define VIDINTCON0_INTEXTRAEN (1 << 21)
+
+#define VIDINTCON0_FRAMESEL0_SHIFT 15
+#define VIDINTCON0_FRAMESEL0_MASK (0x3 << 15)
+#define VIDINTCON0_FRAMESEL0_BACKPORCH (0x0 << 15)
+#define VIDINTCON0_FRAMESEL0_VSYNC (0x1 << 15)
+#define VIDINTCON0_FRAMESEL0_ACTIVE (0x2 << 15)
+#define VIDINTCON0_FRAMESEL0_FRONTPORCH (0x3 << 15)
+
+#define VIDINTCON0_INT_FRAME (1 << 11)
+
+#define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 3)
+#define VIDINTCON0_FIFOLEVEL_SHIFT 3
+#define VIDINTCON0_FIFOLEVEL_EMPTY (0x0 << 3)
+#define VIDINTCON0_FIFOLEVEL_TO25PC (0x1 << 3)
+#define VIDINTCON0_FIFOLEVEL_TO50PC (0x2 << 3)
+#define VIDINTCON0_FIFOLEVEL_FULL (0x4 << 3)
+
+#define VIDINTCON0_FIFOSEL_MAIN_EN (1 << 1)
+#define VIDINTCON0_INT_FIFO (1 << 1)
+
+#define VIDINTCON0_INT_ENABLE (1 << 0)
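+
+/*
+ * Illustrative enable value (not taken from any driver): a per-frame
+ * interrupt raised at VSYNC combines
+ *	VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_FRAME | VIDINTCON0_FRAMESEL0_VSYNC
+ */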
+
+/* Interrupt controls and status register */
+#define VIDINTCON1 0x504
+
+#define VIDINTCON1_INT_EXTRA (1 << 3)
+#define VIDINTCON1_INT_I80 (1 << 2)
+#define VIDINTCON1_INT_FRAME (1 << 1)
+#define VIDINTCON1_INT_FIFO (1 << 0)
+
+/* VIDCON1 */
+#define VIDCON1(_x) (0x0600 + ((_x) * 0x50))
+#define VIDCON1_LINECNT_GET(_v) (((_v) >> 17) & 0x1fff)
+#define VIDCON1_VCLK_MASK (0x3 << 9)
+#define VIDCON1_VCLK_HOLD (0x0 << 9)
+#define VIDCON1_VCLK_RUN (0x1 << 9)
+#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
+#define VIDCON1_RGB_ORDER_O_MASK (0x7 << 4)
+#define VIDCON1_RGB_ORDER_O_RGB (0x0 << 4)
+#define VIDCON1_RGB_ORDER_O_GBR (0x1 << 4)
+#define VIDCON1_RGB_ORDER_O_BRG (0x2 << 4)
+#define VIDCON1_RGB_ORDER_O_BGR (0x4 << 4)
+#define VIDCON1_RGB_ORDER_O_RBG (0x5 << 4)
+#define VIDCON1_RGB_ORDER_O_GRB (0x6 << 4)
+
+/* VIDTCON0 */
+#define VIDTCON0 0x610
+
+#define VIDTCON0_VBPD_MASK (0xffff << 16)
+#define VIDTCON0_VBPD_SHIFT 16
+#define VIDTCON0_VBPD_LIMIT 0xffff
+#define VIDTCON0_VBPD(_x) ((_x) << 16)
+
+#define VIDTCON0_VFPD_MASK (0xffff << 0)
+#define VIDTCON0_VFPD_SHIFT 0
+#define VIDTCON0_VFPD_LIMIT 0xffff
+#define VIDTCON0_VFPD(_x) ((_x) << 0)
+
+/* VIDTCON1 */
+#define VIDTCON1 0x614
+
+#define VIDTCON1_VSPW_MASK (0xffff << 16)
+#define VIDTCON1_VSPW_SHIFT 16
+#define VIDTCON1_VSPW_LIMIT 0xffff
+#define VIDTCON1_VSPW(_x) ((_x) << 16)
+
+/* VIDTCON2 */
+#define VIDTCON2 0x618
+
+#define VIDTCON2_HBPD_MASK (0xffff << 16)
+#define VIDTCON2_HBPD_SHIFT 16
+#define VIDTCON2_HBPD_LIMIT 0xffff
+#define VIDTCON2_HBPD(_x) ((_x) << 16)
+
+#define VIDTCON2_HFPD_MASK (0xffff << 0)
+#define VIDTCON2_HFPD_SHIFT 0
+#define VIDTCON2_HFPD_LIMIT 0xffff
+#define VIDTCON2_HFPD(_x) ((_x) << 0)
+
+/* VIDTCON3 */
+#define VIDTCON3 0x61C
+
+#define VIDTCON3_HSPW_MASK (0xffff << 16)
+#define VIDTCON3_HSPW_SHIFT 16
+#define VIDTCON3_HSPW_LIMIT 0xffff
+#define VIDTCON3_HSPW(_x) ((_x) << 16)
+
+/* VIDTCON4 */
+#define VIDTCON4 0x620
+
+#define VIDTCON4_LINEVAL_MASK (0xfff << 16)
+#define VIDTCON4_LINEVAL_SHIFT 16
+#define VIDTCON4_LINEVAL_LIMIT 0xfff
+#define VIDTCON4_LINEVAL(_x) (((_x) & 0xfff) << 16)
+
+#define VIDTCON4_HOZVAL_MASK (0xfff << 0)
+#define VIDTCON4_HOZVAL_SHIFT 0
+#define VIDTCON4_HOZVAL_LIMIT 0xfff
+#define VIDTCON4_HOZVAL(_x) (((_x) & 0xfff) << 0)
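+
+/*
+ * Illustrative only: the active display size is packed as
+ *	VIDTCON4_LINEVAL(lines) | VIDTCON4_HOZVAL(pixels)
+ * (drivers for this IP typically program the size minus one; consult the
+ * DECON driver rather than this header for the exact convention).
+ */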
+
+/* LINECNT OP THRESHOLD */
+#define LINECNT_OP_THRESHOLD 0x630
+
+/* CRCCTRL */
+#define CRCCTRL 0x6C8
+#define CRCCTRL_CRCCLKEN (0x1 << 2)
+#define CRCCTRL_CRCSTART_F (0x1 << 1)
+#define CRCCTRL_CRCEN (0x1 << 0)
+
+/* DECON_CMU */
+#define DECON_CMU 0x704
+
+#define DECON_CMU_ALL_CLKGATE_ENABLE 0x3
+#define DECON_CMU_SE_CLKGATE_ENABLE (0x1 << 2)
+#define DECON_CMU_SFR_CLKGATE_ENABLE (0x1 << 1)
+#define DECON_CMU_MEM_CLKGATE_ENABLE (0x1 << 0)
+
+/* DECON_UPDATE */
+#define DECON_UPDATE 0x710
+
+#define DECON_UPDATE_SLAVE_SYNC (1 << 4)
+#define DECON_UPDATE_STANDALONE_F (1 << 0)
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index c74bf4a..85dedca 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/fb.h>
#include <media/v4l2-mediabus.h>
+#include <video/videomode.h>
struct ipu_soc;
@@ -32,28 +33,15 @@ enum ipuv3_type {
* Bitfield of Display Interface signal polarities.
*/
struct ipu_di_signal_cfg {
- unsigned datamask_en:1;
- unsigned interlaced:1;
- unsigned odd_field_first:1;
- unsigned clksel_en:1;
- unsigned clkidle_en:1;
unsigned data_pol:1; /* true = inverted */
unsigned clk_pol:1; /* true = rising edge */
unsigned enable_pol:1;
- unsigned Hsync_pol:1; /* true = active high */
- unsigned Vsync_pol:1;
-
- u16 width;
- u16 height;
- u32 pixel_fmt;
- u16 h_start_width;
- u16 h_sync_width;
- u16 h_end_width;
- u16 v_start_width;
- u16 v_sync_width;
- u16 v_end_width;
+
+ struct videomode mode;
+
+ u32 bus_format;
u32 v_to_h_sync;
- unsigned long pixelclock;
+
#define IPU_DI_CLKMODE_SYNC (1 << 0)
#define IPU_DI_CLKMODE_EXT (1 << 1)
unsigned long clkflags;
@@ -236,6 +224,7 @@ void ipu_di_put(struct ipu_di *);
int ipu_di_disable(struct ipu_di *);
int ipu_di_enable(struct ipu_di *);
int ipu_di_get_num(struct ipu_di *);
+int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode);
int ipu_di_init_sync_panel(struct ipu_di *, struct ipu_di_signal_cfg *sig);
/*
diff --git a/include/video/neomagic.h b/include/video/neomagic.h
index bc5013e..91e225a 100644
--- a/include/video/neomagic.h
+++ b/include/video/neomagic.h
@@ -159,10 +159,7 @@ struct neofb_par {
unsigned char VCLK3NumeratorHigh;
unsigned char VCLK3Denominator;
unsigned char VerticalExt;
-
-#ifdef CONFIG_MTRR
- int mtrr;
-#endif
+ int wc_cookie;
u8 __iomem *mmio_vbase;
u8 cursorOff;
u8 *cursorPad; /* Must die !! */
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 6a84498..f001a35 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -129,14 +129,13 @@ enum omap_rfbi_te_mode {
};
enum omap_dss_signal_level {
- OMAPDSS_SIG_ACTIVE_HIGH = 0,
- OMAPDSS_SIG_ACTIVE_LOW = 1,
+ OMAPDSS_SIG_ACTIVE_LOW,
+ OMAPDSS_SIG_ACTIVE_HIGH,
};
enum omap_dss_signal_edge {
- OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- OMAPDSS_DRIVE_SIG_RISING_EDGE,
OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ OMAPDSS_DRIVE_SIG_RISING_EDGE,
};
enum omap_dss_venc_type {
@@ -314,6 +313,7 @@ enum omapdss_version {
OMAPDSS_VER_OMAP4, /* All other OMAP4s */
OMAPDSS_VER_OMAP5,
OMAPDSS_VER_AM43xx,
+ OMAPDSS_VER_DRA7xx,
};
/* Board specific data */
@@ -688,6 +688,7 @@ struct omapdss_dsi_ops {
};
struct omap_dss_device {
+ struct kobject kobj;
struct device *dev;
struct module *owner;
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index a20e4a3..0530e5a 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -289,6 +289,11 @@
#define VIDISD14C_ALPHA1_B_LIMIT 0xf
#define VIDISD14C_ALPHA1_B(_x) ((_x) << 0)
+#define VIDW_ALPHA 0x021c
+#define VIDW_ALPHA_R(_x) ((_x) << 16)
+#define VIDW_ALPHA_G(_x) ((_x) << 8)
+#define VIDW_ALPHA_B(_x) ((_x) << 0)
+
/* Video buffer addresses */
#define VIDW_BUF_START(_buff) (0xA0 + ((_buff) * 8))
#define VIDW_BUF_START1(_buff) (0xA4 + ((_buff) * 8))
@@ -436,6 +441,12 @@
#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+/* Display port clock control */
+#define DP_MIE_CLKCON 0x27c
+#define DP_MIE_CLK_DISABLE 0x0
+#define DP_MIE_CLK_DP_ENABLE 0x2
+#define DP_MIE_CLK_MIE_ENABLE 0x3
+
/* Notes on per-window bpp settings
*
* Value Win0 Win1 Win2 Win3 Win 4
diff --git a/include/video/tdfx.h b/include/video/tdfx.h
index befbaf0..69674b9 100644
--- a/include/video/tdfx.h
+++ b/include/video/tdfx.h
@@ -196,7 +196,7 @@ struct tdfx_par {
u32 palette[16];
void __iomem *regbase_virt;
unsigned long iobase;
- int mtrr_handle;
+ int wc_cookie;
#ifdef CONFIG_FB_3DFX_I2C
struct tdfxfb_i2c_chan chan[2];
#endif
diff --git a/include/xen/events.h b/include/xen/events.h
index 5321cd9..7d95fdf 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -17,7 +17,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
void *dev_id);
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 3387465..4478f4b 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -45,6 +45,8 @@
#include <asm/xen/hypervisor.h>
#include <xen/features.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
#define GNTTAB_RESERVED_XENSTORE 1
@@ -58,6 +60,22 @@ struct gnttab_free_callback {
u16 count;
};
+struct gntab_unmap_queue_data;
+
+typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);
+
+struct gntab_unmap_queue_data
+{
+ struct delayed_work gnttab_work;
+ void *data;
+ gnttab_unmap_refs_done done;
+ struct gnttab_unmap_grant_ref *unmap_ops;
+ struct gnttab_unmap_grant_ref *kunmap_ops;
+ struct page **pages;
+ unsigned int count;
+ unsigned int age;
+};
+
int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);
@@ -163,12 +181,18 @@ void gnttab_free_auto_xlat_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
+int gnttab_alloc_pages(int nr_pages, struct page **pages);
+void gnttab_free_pages(int nr_pages, struct page **pages);
+
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kunmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
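+
+/*
+ * Illustrative usage (hypothetical names, not part of this interface): fill a
+ * struct gntab_unmap_queue_data with the unmap ops, pages and count, set
+ * ->done to a completion callback, then submit it:
+ *
+ *	static void my_unmap_done(int result, struct gntab_unmap_queue_data *d)
+ *	{
+ *		complete(d->data);	// e.g. signal a struct completion
+ *	}
+ *
+ *	unmap_data.unmap_ops = unmap_ops;
+ *	unmap_data.kunmap_ops = NULL;
+ *	unmap_data.pages = pages;
+ *	unmap_data.count = count;
+ *	unmap_data.done = my_unmap_done;
+ *	unmap_data.data = &done_completion;
+ *	gnttab_unmap_refs_async(&unmap_data);
+ *
+ * or call gnttab_unmap_refs_sync() to block until the unmap has completed.
+ */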
+
/* Perform a batch of grant map/copy operations. Retry every batch slot
* for which the hypervisor returns GNTST_eagain. This is typically due
@@ -182,4 +206,22 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
+
+struct xen_page_foreign {
+ domid_t domid;
+ grant_ref_t gref;
+};
+
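+/*
+ * On 64-bit builds struct xen_page_foreign (domid + gref) is stored directly
+ * in page->private; on 32-bit builds page->private holds a pointer to it.
+ */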
+static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
+{
+ if (!PageForeign(page))
+ return NULL;
+#if BITS_PER_LONG < 64
+ return (struct xen_page_foreign *)page->private;
+#else
+ BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
+ return (struct xen_page_foreign *)&page->private;
+#endif
+}
+
#endif /* __ASM_GNTTAB_H__ */
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6cc..6ad3d11 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -41,6 +41,12 @@
/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
#define XENFEAT_mmu_pt_update_preserve_ad 5
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits 7
+
/* x86: Does this Xen host support the HVM callback vector type? */
#define XENFEAT_hvm_callback_vector 8
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index bcce564..56806bc 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -526,6 +526,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0 (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
+/*
* Values for error status returns. All errors are -ve.
*/
#define GNTST_okay (0) /* Normal return. */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index f68719f..a483789 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -67,7 +67,7 @@
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
-#define __HYPERVISOR_acm_op 27
+#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
@@ -75,7 +75,11 @@
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
+#define __HYPERVISOR_sysctl 35
+#define __HYPERVISOR_domctl 36
+#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
+#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
diff --git a/include/xen/page.h b/include/xen/page.h
index 12765b6..c5ed20b 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,6 +3,11 @@
#include <asm/xen/page.h>
+static inline unsigned long page_to_mfn(struct page *page)
+{
+ return pfn_to_mfn(page_to_pfn(page));
+}
+
struct xen_memory_region {
phys_addr_t start;
phys_addr_t size;
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 7491ee5..0ce4f32 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled);
void xen_timer_resume(void);
void xen_arch_resume(void);
+void xen_arch_suspend(void);
void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
@@ -27,13 +28,58 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct;
+
+/*
+ * xen_remap_domain_mfn_array() - map an array of foreign frames
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @gfn: Array of GFNs to map
+ * @nr: Number of entries in the GFN array
+ * @err_ptr: Returns per-GFN error status.
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * @gfn and @err_ptr may point to the same buffer, the GFNs will be
+ * overwritten by the error codes after they are mapped.
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages);
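+
+/*
+ * Illustrative call (hypothetical variables): a caller with @nr foreign GFNs
+ * passes a per-frame error buffer (which, as noted above, may alias the GFN
+ * array) and checks how many frames were actually mapped:
+ *
+ *	rc = xen_remap_domain_mfn_array(vma, vma->vm_start, gfns, nr,
+ *					err_codes, vma->vm_page_prot,
+ *					fdomid, pages);
+ *	if (rc < 0)
+ *		return rc;		// nothing was mapped
+ *	if (rc < nr)
+ *		...			// inspect err_codes[] for failed frames
+ */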
+
+/* xen_remap_domain_mfn_range() - map a range of foreign frames
+ * @vma: VMA to map the pages into
+ * @addr: Address at which to map the pages
+ * @gfn: First GFN to map.
+ * @nr: Number of frames to map
+ * @prot: page protection mask
+ * @domid: Domain owning the pages
+ * @pages: Array of pages if this domain has an auto-translated physmap
+ *
+ * Returns the number of successfully mapped frames, or a -ve error
+ * code.
+ */
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t mfn, int nr,
+ xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages);
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages);
+int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages);
+int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages);
bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
@@ -46,4 +92,30 @@ static inline efi_system_table_t __init *xen_efi_probe(void)
}
#endif
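+
+/*
+ * On a fully preemptible kernel the task issuing a long-running (privcmd)
+ * hypercall can already be preempted, so these helpers are no-ops; the
+ * per-CPU flag below is only needed so that non-preemptible kernels can
+ * explicitly allow rescheduling while such a hypercall is in flight.
+ */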
+#ifdef CONFIG_PREEMPT
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+}
+
+#else
+
+DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
+
+static inline void xen_preemptible_hcall_begin(void)
+{
+ __this_cpu_write(xen_in_preemptible_hcall, true);
+}
+
+static inline void xen_preemptible_hcall_end(void)
+{
+ __this_cpu_write(xen_in_preemptible_hcall, false);
+}
+
+#endif /* CONFIG_PREEMPT */
+
#endif /* INCLUDE_XEN_OPS_H */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b78f21c..289c0b5 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -46,6 +46,10 @@
#include <xen/interface/io/xenbus.h>
#include <xen/interface/io/xs_wire.h>
+#define XENBUS_MAX_RING_PAGE_ORDER 4
+#define XENBUS_MAX_RING_PAGES (1U << XENBUS_MAX_RING_PAGE_ORDER)
+#define INVALID_GRANT_HANDLE (~0U)
+
/* Register callback to watch this node. */
struct xenbus_watch
{
@@ -114,9 +118,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
const char *mod_name);
#define xenbus_register_frontend(drv) \
- __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+ __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME)
#define xenbus_register_backend(drv) \
- __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
+ __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME)
void xenbus_unregister_driver(struct xenbus_driver *drv);
@@ -199,15 +203,19 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
const char *pathfmt, ...);
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
-int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
-int xenbus_map_ring_valloc(struct xenbus_device *dev,
- int gnt_ref, void **vaddr);
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
- grant_handle_t *handle, void *vaddr);
+int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+ unsigned int nr_pages, grant_ref_t *grefs);
+int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+ unsigned int nr_grefs, void **vaddr);
+int xenbus_map_ring(struct xenbus_device *dev,
+ grant_ref_t *gnt_refs, unsigned int nr_grefs,
+ grant_handle_t *handles, unsigned long *vaddrs,
+ bool *leaked);
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t handle, void *vaddr);
+ grant_handle_t *handles, unsigned int nr_handles,
+ unsigned long *vaddrs);
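+
+/*
+ * Illustrative only (hypothetical names): with the multi-page ring interface
+ * a frontend grants every page of its shared ring and advertises the returned
+ * grant references through xenstore:
+ *
+ *	grant_ref_t grefs[XENBUS_MAX_RING_PAGES];
+ *
+ *	err = xenbus_grant_ring(dev, ring, nr_pages, grefs);
+ *	if (err)
+ *		goto fail;
+ *	// write grefs[0..nr_pages - 1] and the ring order to xenstore
+ */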
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
int xenbus_free_evtchn(struct xenbus_device *dev, int port);