author     Zhang Rui <rui.zhang@intel.com>    2015-06-11 04:52:14 (GMT)
committer  Zhang Rui <rui.zhang@intel.com>    2015-06-11 04:52:14 (GMT)
commit     111b23cf895b5cbcdc1b2c6580be1bb78a577d05 (patch)
tree       89b840115ccd753216ba0b26e587e52699b99310
parent     6a6bcf08e5d1834447655a762dfaf552b675cc54 (diff)
parent     53daf9383f34d7bf61358a37449fb4d59fbdafc2 (diff)
download   linux-111b23cf895b5cbcdc1b2c6580be1bb78a577d05.tar.xz
Merge branches 'release' and 'thermal-soc' of .git into next
777 files changed, 12097 insertions, 6616 deletions
@@ -3709,6 +3709,13 @@ N: Dirk Verworner D: Co-author of German book ``Linux-Kernel-Programmierung'' D: Co-founder of Berlin Linux User Group +N: Andrew Victor +E: linux@maxim.org.za +W: http://maxim.org.za/at91_26.html +D: First maintainer of Atmel ARM-based SoC, aka AT91 +D: Introduced support for at91rm9200, the first chip of AT91 family +S: South Africa + N: Riku Voipio E: riku.voipio@iki.fi D: Author of PCA9532 LED and Fintek f75375s hwmon driver diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt index 653d5d7..31d1d65 100644 --- a/Documentation/IPMI.txt +++ b/Documentation/IPMI.txt @@ -505,7 +505,10 @@ at module load time (for a module) with: The addresses are normal I2C addresses. The adapter is the string name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name. -It is *NOT* i2c-<n> itself. +It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring +spaces, so if the name is "This is an I2C chip" you can say +adapter_name=ThisisanI2cchip. This is because it's hard to pass in +spaces in kernel parameters. The debug flags are bit flags for each BMC found, they are: IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8 diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt index 750401f..15dfce7 100644 --- a/Documentation/acpi/enumeration.txt +++ b/Documentation/acpi/enumeration.txt @@ -253,7 +253,7 @@ input driver: GPIO support ~~~~~~~~~~~~ ACPI 5 introduced two new resources to describe GPIO connections: GpioIo -and GpioInt. These resources are used be used to pass GPIO numbers used by +and GpioInt. These resources can be used to pass GPIO numbers used by the device to the driver. ACPI 5.1 extended this with _DSD (Device Specific Data) which made it possible to name the GPIOs among other things. diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt index ae36fcf..f35dad1 100644 --- a/Documentation/acpi/gpio-properties.txt +++ b/Documentation/acpi/gpio-properties.txt @@ -1,9 +1,9 @@ _DSD Device Properties Related to GPIO -------------------------------------- -With the release of ACPI 5.1 and the _DSD configuration objecte names -can finally be given to GPIOs (and other things as well) returned by -_CRS. Previously, we were only able to use an integer index to find +With the release of ACPI 5.1, the _DSD configuration object finally +allows names to be given to GPIOs (and other things as well) returned +by _CRS. Previously, we were only able to use an integer index to find the corresponding GPIO, which is pretty error prone (it depends on the _CRS output ordering, for example). diff --git a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt index 974624e..161448d 100644 --- a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt +++ b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt @@ -6,6 +6,7 @@ provided by Arteris. Required properties: - compatible : Should be "ti,omap3-l3-smx" for OMAP3 family Should be "ti,omap4-l3-noc" for OMAP4 family + Should be "ti,omap5-l3-noc" for OMAP5 family Should be "ti,dra7-l3-noc" for DRA7 family Should be "ti,am4372-l3-noc" for AM43 family - reg: Contains L3 register address range for each noc domain. 
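A consumer-side illustration of the gpio-properties.txt change above: once _DSD gives a GpioIo resource a name, a driver can look the line up by that name instead of by its _CRS index. This is only a minimal sketch and not part of this series; the "reset" connection ID and the probe function are assumed for illustration.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int example_probe(struct device *dev)
    {
            struct gpio_desc *reset;

            /* Resolved via the _DSD "reset-gpios" property rather than a
             * fragile _CRS index. */
            reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            gpiod_set_value_cansleep(reset, 1);
            return 0;
    }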
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt index a4873e5..e30e184 100644 --- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt @@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 { 80 81 68 69 70 71 72 73 74 75 76 77>; - interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty", + interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty", "saif0", "saif1", "i2c0", "i2c1", "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; diff --git a/Documentation/devicetree/bindings/mtd/m25p80.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt index f20b111..2bee681 100644 --- a/Documentation/devicetree/bindings/mtd/m25p80.txt +++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt @@ -8,8 +8,8 @@ Required properties: is not Linux-only, but in case of Linux, see the "m25p_ids" table in drivers/mtd/devices/m25p80.c for the list of supported chips. - Must also include "nor-jedec" for any SPI NOR flash that can be - identified by the JEDEC READ ID opcode (0x9F). + Must also include "jedec,spi-nor" for any SPI NOR flash that can + be identified by the JEDEC READ ID opcode (0x9F). - reg : Chip-Select number - spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at @@ -25,7 +25,7 @@ Example: flash: m25p80@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "spansion,m25p80", "nor-jedec"; + compatible = "spansion,m25p80", "jedec,spi-nor"; reg = <0>; spi-max-frequency = <40000000>; m25p,fast-read; diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt new file mode 100644 index 0000000..be78968 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt @@ -0,0 +1,30 @@ +Abracon ABX80X I2C ultra low power RTC/Alarm chip + +The Abracon ABX80X family consist of the ab0801, ab0803, ab0804, ab0805, ab1801, +ab1803, ab1804 and ab1805. The ab0805 is the superset of ab080x and the ab1805 +is the superset of ab180x. + +Required properties: + + - "compatible": should one of: + "abracon,abx80x" + "abracon,ab0801" + "abracon,ab0803" + "abracon,ab0804" + "abracon,ab0805" + "abracon,ab1801" + "abracon,ab1803" + "abracon,ab1804" + "abracon,ab1805" + Using "abracon,abx80x" will enable chip autodetection. + - "reg": I2C bus address of the device + +Optional properties: + +The abx804 and abx805 have a trickle charger that is able to charge the +connected battery or supercap. Both the following properties have to be defined +and valid to enable charging: + + - "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V) + - "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output + resistor, the other values are in ohm. diff --git a/Documentation/devicetree/bindings/thermal/hisilicon-thermal.txt b/Documentation/devicetree/bindings/thermal/hisilicon-thermal.txt new file mode 100644 index 0000000..d48fc52 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/hisilicon-thermal.txt @@ -0,0 +1,23 @@ +* Temperature Sensor on hisilicon SoCs + +** Required properties : + +- compatible: "hisilicon,tsensor". +- reg: physical base address of thermal sensor and length of memory mapped + region. +- interrupt: The interrupt number to the cpu. Defines the interrupt used + by /SOCTHERM/tsensor. +- clock-names: Input clock name, should be 'thermal_clk'. 
+- clocks: phandles for clock specified in "clock-names" property. +- #thermal-sensor-cells: Should be 1. See ./thermal.txt for a description. + +Example : + + tsensor: tsensor@0,f7030700 { + compatible = "hisilicon,tsensor"; + reg = <0x0 0xf7030700 0x0 0x1000>; + interrupts = <0 7 0x4>; + clocks = <&sys_ctrl HI6220_TSENSOR_CLK>; + clock-names = "thermal_clk"; + #thermal-sensor-cells = <1>; + } diff --git a/Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt b/Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt new file mode 100644 index 0000000..290ec06 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt @@ -0,0 +1,57 @@ +Qualcomm QPNP PMIC Temperature Alarm + +QPNP temperature alarm peripherals are found inside of Qualcomm PMIC chips +that utilize the Qualcomm SPMI implementation. These peripherals provide an +interrupt signal and status register to identify high PMIC die temperature. + +Required properties: +- compatible: Should contain "qcom,spmi-temp-alarm". +- reg: Specifies the SPMI address and length of the controller's + registers. +- interrupts: PMIC temperature alarm interrupt. +- #thermal-sensor-cells: Should be 0. See thermal.txt for a description. + +Optional properties: +- io-channels: Should contain IIO channel specifier for the ADC channel, + which report chip die temperature. +- io-channel-names: Should contain "thermal". + +Example: + + pm8941_temp: thermal-alarm@2400 { + compatible = "qcom,spmi-temp-alarm"; + reg = <0x2400 0x100>; + interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>; + #thermal-sensor-cells = <0>; + + io-channels = <&pm8941_vadc VADC_DIE_TEMP>; + io-channel-names = "thermal"; + }; + + thermal-zones { + pm8941 { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&pm8941_temp>; + + trips { + passive { + temperature = <1050000>; + hysteresis = <2000>; + type = "passive"; + }; + alert { + temperature = <125000>; + hysteresis = <2000>; + type = "hot"; + }; + crit { + temperature = <145000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + }; + }; + diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt index 29fe0bf..8a49362 100644 --- a/Documentation/devicetree/bindings/thermal/thermal.txt +++ b/Documentation/devicetree/bindings/thermal/thermal.txt @@ -167,6 +167,13 @@ Optional property: by means of sensor ID. Additional coefficients are interpreted as constant offset. +- sustainable-power: An estimate of the sustainable power (in mW) that the + Type: unsigned thermal zone can dissipate at the desired + Size: one cell control temperature. For reference, the + sustainable power of a 4'' phone is typically + 2000mW, while on a 10'' tablet is around + 4500mW. + Note: The delay properties are bound to the maximum dT/dt (temperature derivative over time) in two situations for a thermal zone: (i) - when passive cooling is activated (polling-delay-passive); and @@ -546,6 +553,8 @@ thermal-zones { */ coefficients = <1200 -345 890>; + sustainable-power = <2500>; + trips { /* Trips are based on resulting linear equation */ cpu_trip: cpu-trip { diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt index 092fc10..4692241 100644 --- a/Documentation/kasan.txt +++ b/Documentation/kasan.txt @@ -9,7 +9,9 @@ a fast and comprehensive solution for finding use-after-free and out-of-bounds bugs. 
KASan uses compile-time instrumentation for checking every memory access, -therefore you will need a certain version of GCC > 4.9.2 +therefore you will need a gcc version of 4.9.2 or later. KASan could detect out +of bounds accesses to stack or global variables, but only if gcc 5.0 or later was +used to built the kernel. Currently KASan is supported only for x86_64 architecture and requires that the kernel be built with the SLUB allocator. @@ -23,8 +25,8 @@ To enable KASAN configure kernel with: and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline/inline is compiler instrumentation types. The former produces smaller binary the -latter is 1.1 - 2 times faster. Inline instrumentation requires GCC 5.0 or -latter. +latter is 1.1 - 2 times faster. Inline instrumentation requires a gcc version +of 5.0 or later. Currently KASAN works only with the SLUB memory allocator. For better bug detection and nicer report, enable CONFIG_STACKTRACE and put diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index f6befa9..61ab162 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -3787,6 +3787,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. READ_CAPACITY_16 command); f = NO_REPORT_OPCODES (don't use report opcodes command, uas only); + g = MAX_SECTORS_240 (don't transfer more than + 240 sectors at a time, uas only); h = CAPACITY_HEURISTICS (decrease the reported device capacity by one sector if the number is odd); diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt index 09c2382..c72702e 100644 --- a/Documentation/module-signing.txt +++ b/Documentation/module-signing.txt @@ -119,9 +119,9 @@ Most notably, in the x509.genkey file, the req_distinguished_name section should be altered from the default: [ req_distinguished_name ] - O = Magrathea - CN = Glacier signing key - emailAddress = slartibartfast@magrathea.h2g2 + #O = Unspecified company + CN = Build time autogenerated kernel key + #emailAddress = unspecified.user@unspecified.company The generated RSA key size can also be set with: diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt index 639ddf0..9ed15f8 100644 --- a/Documentation/networking/mpls-sysctl.txt +++ b/Documentation/networking/mpls-sysctl.txt @@ -18,3 +18,12 @@ platform_labels - INTEGER Possible values: 0 - 1048575 Default: 0 + +conf/<interface>/input - BOOL + Control whether packets can be input on this interface. + + If disabled, packets will be discarded without further + processing. 
+ + 0 - disabled (default) + not 0 - enabled diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index cbfac09..59f4db2 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt @@ -282,7 +282,7 @@ following is true: - The current CPU's queue head counter >= the recorded tail counter value in rps_dev_flow[i] -- The current CPU is unset (equal to RPS_NO_CPU) +- The current CPU is unset (>= nr_cpu_ids) - The current CPU is offline After this check, the packet is sent to the (possibly updated) current diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt index ba0a2a4..ded6979 100644 --- a/Documentation/powerpc/transactional_memory.txt +++ b/Documentation/powerpc/transactional_memory.txt @@ -74,23 +74,22 @@ Causes of transaction aborts Syscalls ======== -Syscalls made from within an active transaction will not be performed and the -transaction will be doomed by the kernel with the failure code TM_CAUSE_SYSCALL -| TM_CAUSE_PERSISTENT. +Performing syscalls from within transaction is not recommended, and can lead +to unpredictable results. -Syscalls made from within a suspended transaction are performed as normal and -the transaction is not explicitly doomed by the kernel. However, what the -kernel does to perform the syscall may result in the transaction being doomed -by the hardware. The syscall is performed in suspended mode so any side -effects will be persistent, independent of transaction success or failure. No -guarantees are provided by the kernel about which syscalls will affect -transaction success. +Syscalls do not by design abort transactions, but beware: The kernel code will +not be running in transactional state. The effect of syscalls will always +remain visible, but depending on the call they may abort your transaction as a +side-effect, read soon-to-be-aborted transactional data that should not remain +invisible, etc. If you constantly retry a transaction that constantly aborts +itself by calling a syscall, you'll have a livelock & make no progress. -Care must be taken when relying on syscalls to abort during active transactions -if the calls are made via a library. Libraries may cache values (which may -give the appearance of success) or perform operations that cause transaction -failure before entering the kernel (which may produce different failure codes). -Examples are glibc's getpid() and lazy symbol resolution. +Simple syscalls (e.g. sigprocmask()) "could" be OK. Even things like write() +from, say, printf() should be OK as long as the kernel does not access any +memory that was accessed transactionally. + +Consider any syscalls that happen to work as debug-only -- not recommended for +production use. Best to queue them up till after the transaction is over. Signals @@ -177,7 +176,8 @@ kernel aborted a transaction: TM_CAUSE_RESCHED Thread was rescheduled. TM_CAUSE_TLBI Software TLB invalid. TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. - TM_CAUSE_SYSCALL Syscall from active transaction. + TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort + transactions for consistency will use this. TM_CAUSE_SIGNAL Signal delivered. TM_CAUSE_MISC Currently unused. TM_CAUSE_ALIGNMENT Alignment fault. 
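The transactional_memory.txt guidance above boils down to: do the memory work transactionally, and defer anything that enters the kernel until after tend. A user-space sketch of that pattern, assuming a POWER8-class CPU and gcc with -mhtm (the counter and the logging are placeholders, not from this patch):

    #include <stdio.h>

    static int counter;

    static void bump_counter(void)
    {
            int committed = 0;

            if (__builtin_tbegin(0)) {
                    /* Transactional path: memory updates only, no syscalls. */
                    counter++;
                    committed = 1;
                    __builtin_tend(0);
            } else {
                    /* Fallback after an abort (TM_CAUSE_RESCHED etc.). */
                    counter++;
            }

            /* The write() behind printf() runs outside the transaction,
             * as recommended above. */
            if (committed)
                    printf("counter=%d\n", counter);
    }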
diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt index 1e52d67..dbe6623 100644 --- a/Documentation/serial/tty.txt +++ b/Documentation/serial/tty.txt @@ -198,6 +198,9 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write TTY_OTHER_CLOSED Device is a pty and the other side has closed. +TTY_OTHER_DONE Device is a pty and the other side has closed and + all pending input processing has been completed. + TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into smaller chunks. diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt index 753e47c..7165358 100644 --- a/Documentation/thermal/cpu-cooling-api.txt +++ b/Documentation/thermal/cpu-cooling-api.txt @@ -36,8 +36,162 @@ the user. The registration APIs returns the cooling device pointer. np: pointer to the cooling device device tree node clip_cpus: cpumask of cpus where the frequency constraints will happen. -1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) +1.1.3 struct thermal_cooling_device *cpufreq_power_cooling_register( + const struct cpumask *clip_cpus, u32 capacitance, + get_static_t plat_static_func) + +Similar to cpufreq_cooling_register, this function registers a cpufreq +cooling device. Using this function, the cooling device will +implement the power extensions by using a simple cpu power model. The +cpus must have registered their OPPs using the OPP library. + +The additional parameters are needed for the power model (See 2. Power +models). "capacitance" is the dynamic power coefficient (See 2.1 +Dynamic power). "plat_static_func" is a function to calculate the +static power consumed by these cpus (See 2.2 Static power). + +1.1.4 struct thermal_cooling_device *of_cpufreq_power_cooling_register( + struct device_node *np, const struct cpumask *clip_cpus, u32 capacitance, + get_static_t plat_static_func) + +Similar to cpufreq_power_cooling_register, this function register a +cpufreq cooling device with power extensions using the device tree +information supplied by the np parameter. + +1.1.5 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) This interface function unregisters the "thermal-cpufreq-%x" cooling device. cdev: Cooling device pointer which has to be unregistered. + +2. Power models + +The power API registration functions provide a simple power model for +CPUs. The current power is calculated as dynamic + (optionally) +static power. This power model requires that the operating-points of +the CPUs are registered using the kernel's opp library and the +`cpufreq_frequency_table` is assigned to the `struct device` of the +cpu. If you are using CONFIG_CPUFREQ_DT then the +`cpufreq_frequency_table` should already be assigned to the cpu +device. + +The `plat_static_func` parameter of `cpufreq_power_cooling_register()` +and `of_cpufreq_power_cooling_register()` is optional. If you don't +provide it, only dynamic power will be considered. + +2.1 Dynamic power + +The dynamic power consumption of a processor depends on many factors. +For a given processor implementation the primary factors are: + +- The time the processor spends running, consuming dynamic power, as + compared to the time in idle states where dynamic consumption is + negligible. Herein we refer to this as 'utilisation'. +- The voltage and frequency levels as a result of DVFS. The DVFS + level is a dominant factor governing power consumption. 
+- In running time the 'execution' behaviour (instruction types, memory + access patterns and so forth) causes, in most cases, a second order + variation. In pathological cases this variation can be significant, + but typically it is of a much lesser impact than the factors above. + +A high level dynamic power consumption model may then be represented as: + +Pdyn = f(run) * Voltage^2 * Frequency * Utilisation + +f(run) here represents the described execution behaviour and its +result has a units of Watts/Hz/Volt^2 (this often expressed in +mW/MHz/uVolt^2) + +The detailed behaviour for f(run) could be modelled on-line. However, +in practice, such an on-line model has dependencies on a number of +implementation specific processor support and characterisation +factors. Therefore, in initial implementation that contribution is +represented as a constant coefficient. This is a simplification +consistent with the relative contribution to overall power variation. + +In this simplified representation our model becomes: + +Pdyn = Capacitance * Voltage^2 * Frequency * Utilisation + +Where `capacitance` is a constant that represents an indicative +running time dynamic power coefficient in fundamental units of +mW/MHz/uVolt^2. Typical values for mobile CPUs might lie in range +from 100 to 500. For reference, the approximate values for the SoC in +ARM's Juno Development Platform are 530 for the Cortex-A57 cluster and +140 for the Cortex-A53 cluster. + + +2.2 Static power + +Static leakage power consumption depends on a number of factors. For a +given circuit implementation the primary factors are: + +- Time the circuit spends in each 'power state' +- Temperature +- Operating voltage +- Process grade + +The time the circuit spends in each 'power state' for a given +evaluation period at first order means OFF or ON. However, +'retention' states can also be supported that reduce power during +inactive periods without loss of context. + +Note: The visibility of state entries to the OS can vary, according to +platform specifics, and this can then impact the accuracy of a model +based on OS state information alone. It might be possible in some +cases to extract more accurate information from system resources. + +The temperature, operating voltage and process 'grade' (slow to fast) +of the circuit are all significant factors in static leakage power +consumption. All of these have complex relationships to static power. + +Circuit implementation specific factors include the chosen silicon +process as well as the type, number and size of transistors in both +the logic gates and any RAM elements included. + +The static power consumption modelling must take into account the +power managed regions that are implemented. Taking the example of an +ARM processor cluster, the modelling would take into account whether +each CPU can be powered OFF separately or if only a single power +region is implemented for the complete cluster. + +In one view, there are others, a static power consumption model can +then start from a set of reference values for each power managed +region (e.g. CPU, Cluster/L2) in each state (e.g. ON, OFF) at an +arbitrary process grade, voltage and temperature point. These values +are then scaled for all of the following: the time in each state, the +process grade, the current temperature and the operating voltage. 
+However, since both implementation specific and complex relationships +dominate the estimate, the appropriate interface to the model from the +cpu cooling device is to provide a function callback that calculates +the static power in this platform. When registering the cpu cooling +device pass a function pointer that follows the `get_static_t` +prototype: + + int plat_get_static(cpumask_t *cpumask, int interval, + unsigned long voltage, u32 &power); + +`cpumask` is the cpumask of the cpus involved in the calculation. +`voltage` is the voltage at which they are operating. The function +should calculate the average static power for the last `interval` +milliseconds. It returns 0 on success, -E* on error. If it +succeeds, it should store the static power in `power`. Reading the +temperature of the cpus described by `cpumask` is left for +plat_get_static() to do as the platform knows best which thermal +sensor is closest to the cpu. + +If `plat_static_func` is NULL, static power is considered to be +negligible for this platform and only dynamic power is considered. + +The platform specific callback can then use any combination of tables +and/or equations to permute the estimated value. Process grade +information is not passed to the model since access to such data, from +on-chip measurement capability or manufacture time data, is platform +specific. + +Note: the significance of static power for CPUs in comparison to +dynamic power is highly dependent on implementation. Given the +potential complexity in implementation, the importance and accuracy of +its inclusion when using cpu cooling devices should be assessed on a +case by case basis. + diff --git a/Documentation/thermal/power_allocator.txt b/Documentation/thermal/power_allocator.txt new file mode 100644 index 0000000..c3797b5 --- /dev/null +++ b/Documentation/thermal/power_allocator.txt @@ -0,0 +1,247 @@ +Power allocator governor tunables +================================= + +Trip points +----------- + +The governor requires the following two passive trip points: + +1. "switch on" trip point: temperature above which the governor + control loop starts operating. This is the first passive trip + point of the thermal zone. + +2. "desired temperature" trip point: it should be higher than the + "switch on" trip point. This the target temperature the governor + is controlling for. This is the last passive trip point of the + thermal zone. + +PID Controller +-------------- + +The power allocator governor implements a +Proportional-Integral-Derivative controller (PID controller) with +temperature as the control input and power as the controlled output: + + P_max = k_p * e + k_i * err_integral + k_d * diff_err + sustainable_power + +where + e = desired_temperature - current_temperature + err_integral is the sum of previous errors + diff_err = e - previous_error + +It is similar to the one depicted below: + + k_d + | +current_temp | + | v + | +----------+ +---+ + | +----->| diff_err |-->| X |------+ + | | +----------+ +---+ | + | | | tdp actor + | | k_i | | get_requested_power() + | | | | | | | + | | | | | | | ... 
+ v | v v v v v + +---+ | +-------+ +---+ +---+ +---+ +----------+ + | S |-------+----->| sum e |----->| X |--->| S |-->| S |-->|power | + +---+ | +-------+ +---+ +---+ +---+ |allocation| + ^ | ^ +----------+ + | | | | | + | | +---+ | | | + | +------->| X |-------------------+ v v + | +---+ granted performance +desired_temperature ^ + | + | + k_po/k_pu + +Sustainable power +----------------- + +An estimate of the sustainable dissipatable power (in mW) should be +provided while registering the thermal zone. This estimates the +sustained power that can be dissipated at the desired control +temperature. This is the maximum sustained power for allocation at +the desired maximum temperature. The actual sustained power can vary +for a number of reasons. The closed loop controller will take care of +variations such as environmental conditions, and some factors related +to the speed-grade of the silicon. `sustainable_power` is therefore +simply an estimate, and may be tuned to affect the aggressiveness of +the thermal ramp. For reference, the sustainable power of a 4" phone +is typically 2000mW, while on a 10" tablet is around 4500mW (may vary +depending on screen size). + +If you are using device tree, do add it as a property of the +thermal-zone. For example: + + thermal-zones { + soc_thermal { + polling-delay = <1000>; + polling-delay-passive = <100>; + sustainable-power = <2500>; + ... + +Instead, if the thermal zone is registered from the platform code, pass a +`thermal_zone_params` that has a `sustainable_power`. If no +`thermal_zone_params` were being passed, then something like below +will suffice: + + static const struct thermal_zone_params tz_params = { + .sustainable_power = 3500, + }; + +and then pass `tz_params` as the 5th parameter to +`thermal_zone_device_register()` + +k_po and k_pu +------------- + +The implementation of the PID controller in the power allocator +thermal governor allows the configuration of two proportional term +constants: `k_po` and `k_pu`. `k_po` is the proportional term +constant during temperature overshoot periods (current temperature is +above "desired temperature" trip point). Conversely, `k_pu` is the +proportional term constant during temperature undershoot periods +(current temperature below "desired temperature" trip point). + +These controls are intended as the primary mechanism for configuring +the permitted thermal "ramp" of the system. For instance, a lower +`k_pu` value will provide a slower ramp, at the cost of capping +available capacity at a low temperature. On the other hand, a high +value of `k_pu` will result in the governor granting very high power +whilst temperature is low, and may lead to temperature overshooting. + +The default value for `k_pu` is: + + 2 * sustainable_power / (desired_temperature - switch_on_temp) + +This means that at `switch_on_temp` the output of the controller's +proportional term will be 2 * `sustainable_power`. The default value +for `k_po` is: + + sustainable_power / (desired_temperature - switch_on_temp) + +Focusing on the proportional and feed forward values of the PID +controller equation we have: + + P_max = k_p * e + sustainable_power + +The proportional term is proportional to the difference between the +desired temperature and the current one. When the current temperature +is the desired one, then the proportional component is zero and +`P_max` = `sustainable_power`. That is, the system should operate in +thermal equilibrium under constant load. 
`sustainable_power` is only +an estimate, which is the reason for closed-loop control such as this. + +Expanding `k_pu` we get: + P_max = 2 * sustainable_power * (T_set - T) / (T_set - T_on) + + sustainable_power + +where + T_set is the desired temperature + T is the current temperature + T_on is the switch on temperature + +When the current temperature is the switch_on temperature, the above +formula becomes: + + P_max = 2 * sustainable_power * (T_set - T_on) / (T_set - T_on) + + sustainable_power = 2 * sustainable_power + sustainable_power = + 3 * sustainable_power + +Therefore, the proportional term alone linearly decreases power from +3 * `sustainable_power` to `sustainable_power` as the temperature +rises from the switch on temperature to the desired temperature. + +k_i and integral_cutoff +----------------------- + +`k_i` configures the PID loop's integral term constant. This term +allows the PID controller to compensate for long term drift and for +the quantized nature of the output control: cooling devices can't set +the exact power that the governor requests. When the temperature +error is below `integral_cutoff`, errors are accumulated in the +integral term. This term is then multiplied by `k_i` and the result +added to the output of the controller. Typically `k_i` is set low (1 +or 2) and `integral_cutoff` is 0. + +k_d +--- + +`k_d` configures the PID loop's derivative term constant. It's +recommended to leave it as the default: 0. + +Cooling device power API +======================== + +Cooling devices controlled by this governor must supply the additional +"power" API in their `cooling_device_ops`. It consists on three ops: + +1. int get_requested_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, u32 *power); +@cdev: The `struct thermal_cooling_device` pointer +@tz: thermal zone in which we are currently operating +@power: pointer in which to store the calculated power + +`get_requested_power()` calculates the power requested by the device +in milliwatts and stores it in @power . It should return 0 on +success, -E* on failure. This is currently used by the power +allocator governor to calculate how much power to give to each cooling +device. + +2. int state2power(struct thermal_cooling_device *cdev, struct + thermal_zone_device *tz, unsigned long state, u32 *power); +@cdev: The `struct thermal_cooling_device` pointer +@tz: thermal zone in which we are currently operating +@state: A cooling device state +@power: pointer in which to store the equivalent power + +Convert cooling device state @state into power consumption in +milliwatts and store it in @power. It should return 0 on success, -E* +on failure. This is currently used by thermal core to calculate the +maximum power that an actor can consume. + +3. int power2state(struct thermal_cooling_device *cdev, u32 power, + unsigned long *state); +@cdev: The `struct thermal_cooling_device` pointer +@power: power in milliwatts +@state: pointer in which to store the resulting state + +Calculate a cooling device state that would make the device consume at +most @power mW and store it in @state. It should return 0 on success, +-E* on failure. This is currently used by the thermal core to convert +a given power set by the power allocator governor to a state that the +cooling device can set. It is a function because this conversion may +depend on external factors that may change so this function should the +best conversion given "current circumstances". 
+ +Cooling device weights +---------------------- + +Weights are a mechanism to bias the allocation among cooling +devices. They express the relative power efficiency of different +cooling devices. Higher weight can be used to express higher power +efficiency. Weighting is relative such that if each cooling device +has a weight of one they are considered equal. This is particularly +useful in heterogeneous systems where two cooling devices may perform +the same kind of compute, but with different efficiency. For example, +a system with two different types of processors. + +If the thermal zone is registered using +`thermal_zone_device_register()` (i.e., platform code), then weights +are passed as part of the thermal zone's `thermal_bind_parameters`. +If the platform is registered using device tree, then they are passed +as the `contribution` property of each map in the `cooling-maps` node. + +Limitations of the power allocator governor +=========================================== + +The power allocator governor's PID controller works best if there is a +periodic tick. If you have a driver that calls +`thermal_zone_device_update()` (or anything that ends up calling the +governor's `throttle()` function) repetitively, the governor response +won't be very good. Note that this is not particular to this +governor, step-wise will also misbehave if you call its throttle() +faster than the normal thermal framework tick (due to interrupts for +example) as it will overreact. diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt index 87519cb..c1f6864 100644 --- a/Documentation/thermal/sysfs-api.txt +++ b/Documentation/thermal/sysfs-api.txt @@ -95,7 +95,7 @@ temperature) and throttle appropriate devices. 1.3 interface for binding a thermal zone device with a thermal cooling device 1.3.1 int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower); + unsigned long upper, unsigned long lower, unsigned int weight); This interface function bind a thermal cooling device to the certain trip point of a thermal zone device. @@ -110,6 +110,8 @@ temperature) and throttle appropriate devices. lower:the Minimum cooling state can be used for this trip point. THERMAL_NO_LIMIT means no lower limit, and the cooling device can be in cooling state 0. + weight: the influence of this cooling device in this thermal + zone. See 1.4.1 below for more information. 1.3.2 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev); @@ -127,9 +129,15 @@ temperature) and throttle appropriate devices. This structure defines the following parameters that are used to bind a zone with a cooling device for a particular trip point. .cdev: The cooling device pointer - .weight: The 'influence' of a particular cooling device on this zone. - This is on a percentage scale. The sum of all these weights - (for a particular zone) cannot exceed 100. + .weight: The 'influence' of a particular cooling device on this + zone. This is relative to the rest of the cooling + devices. For example, if all cooling devices have a + weight of 1, then they all contribute the same. You can + use percentages if you want, but it's not mandatory. A + weight of 0 means that this cooling device doesn't + contribute to the cooling of this zone unless all cooling + devices have a weight of 0. If all weights are 0, then + they all contribute the same. 
.trip_mask:This is a bit mask that gives the binding relation between this thermal zone and cdev, for a particular trip point. If nth bit is set, then the cdev and thermal zone are bound @@ -176,6 +184,14 @@ Thermal zone device sys I/F, created once it's registered: |---trip_point_[0-*]_type: Trip point type |---trip_point_[0-*]_hyst: Hysteresis value for this trip point |---emul_temp: Emulated temperature set node + |---sustainable_power: Sustainable dissipatable power + |---k_po: Proportional term during temperature overshoot + |---k_pu: Proportional term during temperature undershoot + |---k_i: PID's integral term in the power allocator gov + |---k_d: PID's derivative term in the power allocator + |---integral_cutoff: Offset above which errors are accumulated + |---slope: Slope constant applied as linear extrapolation + |---offset: Offset constant applied as linear extrapolation Thermal cooling device sys I/F, created once it's registered: /sys/class/thermal/cooling_device[0-*]: @@ -192,6 +208,8 @@ thermal_zone_bind_cooling_device/thermal_zone_unbind_cooling_device. /sys/class/thermal/thermal_zone[0-*]: |---cdev[0-*]: [0-*]th cooling device in current thermal zone |---cdev[0-*]_trip_point: Trip point that cdev[0-*] is associated with + |---cdev[0-*]_weight: Influence of the cooling device in + this thermal zone Besides the thermal zone device sysfs I/F and cooling device sysfs I/F, the generic thermal driver also creates a hwmon sysfs I/F for each _type_ @@ -265,6 +283,14 @@ cdev[0-*]_trip_point point. RO, Optional +cdev[0-*]_weight + The influence of cdev[0-*] in this thermal zone. This value + is relative to the rest of cooling devices in the thermal + zone. For example, if a cooling device has a weight double + than that of other, it's twice as effective in cooling the + thermal zone. + RW, Optional + passive Attribute is only present for zones in which the passive cooling policy is not supported by native thermal driver. Default is zero @@ -289,6 +315,66 @@ emul_temp because userland can easily disable the thermal policy by simply flooding this sysfs node with low temperature values. +sustainable_power + An estimate of the sustained power that can be dissipated by + the thermal zone. Used by the power allocator governor. For + more information see Documentation/thermal/power_allocator.txt + Unit: milliwatts + RW, Optional + +k_po + The proportional term of the power allocator governor's PID + controller during temperature overshoot. Temperature overshoot + is when the current temperature is above the "desired + temperature" trip point. For more information see + Documentation/thermal/power_allocator.txt + RW, Optional + +k_pu + The proportional term of the power allocator governor's PID + controller during temperature undershoot. Temperature undershoot + is when the current temperature is below the "desired + temperature" trip point. For more information see + Documentation/thermal/power_allocator.txt + RW, Optional + +k_i + The integral term of the power allocator governor's PID + controller. This term allows the PID controller to compensate + for long term drift. For more information see + Documentation/thermal/power_allocator.txt + RW, Optional + +k_d + The derivative term of the power allocator governor's PID + controller. 
For more information see + Documentation/thermal/power_allocator.txt + RW, Optional + +integral_cutoff + Temperature offset from the desired temperature trip point + above which the integral term of the power allocator + governor's PID controller starts accumulating errors. For + example, if integral_cutoff is 0, then the integral term only + accumulates error when temperature is above the desired + temperature trip point. For more information see + Documentation/thermal/power_allocator.txt + RW, Optional + +slope + The slope constant used in a linear extrapolation model + to determine a hotspot temperature based off the sensor's + raw readings. It is up to the device driver to determine + the usage of these values. + RW, Optional + +offset + The offset constant used in a linear extrapolation model + to determine a hotspot temperature based off the sensor's + raw readings. It is up to the device driver to determine + the usage of these values. + RW, Optional + ***************************** * Cooling device attributes * ***************************** @@ -318,7 +404,8 @@ passive, active. If an ACPI thermal zone supports critical, passive, active[0] and active[1] at the same time, it may register itself as a thermal_zone_device (thermal_zone1) with 4 trip points in all. It has one processor and one fan, which are both registered as -thermal_cooling_device. +thermal_cooling_device. Both are considered to have the same +effectiveness in cooling the thermal zone. If the processor is listed in _PSL method, and the fan is listed in _AL0 method, the sys I/F structure will be built like this: @@ -340,8 +427,10 @@ method, the sys I/F structure will be built like this: |---trip_point_3_type: active1 |---cdev0: --->/sys/class/thermal/cooling_device0 |---cdev0_trip_point: 1 /* cdev0 can be used for passive */ + |---cdev0_weight: 1024 |---cdev1: --->/sys/class/thermal/cooling_device3 |---cdev1_trip_point: 2 /* cdev1 can be used for active[0]*/ + |---cdev1_weight: 1024 |cooling_device0: |---type: Processor diff --git a/MAINTAINERS b/MAINTAINERS index 2e5bbc0..f8e0afb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -892,11 +892,10 @@ S: Maintained F: arch/arm/mach-alpine/ ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES -M: Andrew Victor <linux@maxim.org.za> M: Nicolas Ferre <nicolas.ferre@atmel.com> +M: Alexandre Belloni <alexandre.belloni@free-electrons.com> M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://maxim.org.za/at91_26.html W: http://www.linux4sam.org S: Supported F: arch/arm/mach-at91/ @@ -975,7 +974,7 @@ S: Maintained ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE M: Hans Ulli Kroll <ulli.kroll@googlemail.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -T: git git://git.berlios.de/gemini-board +T: git git://github.com/ulli-kroll/linux.git S: Maintained F: arch/arm/mach-gemini/ @@ -990,6 +989,12 @@ F: drivers/clocksource/timer-prima2.c F: drivers/clocksource/timer-atlas7.c N: [^a-z]sirf +ARM/CONEXANT DIGICOLOR MACHINE SUPPORT +M: Baruch Siach <baruch@tkos.co.il> +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +N: digicolor + ARM/EBSA110 MACHINE SUPPORT M: Russell King <linux@arm.linux.org.uk> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -1188,7 +1193,7 @@ ARM/MAGICIAN MACHINE SUPPORT M: Philipp Zabel <philipp.zabel@gmail.com> S: Maintained -ARM/Marvell Armada 370 and Armada XP SOC support +ARM/Marvell Kirkwood 
and Armada 370, 375, 38x, XP SOC support M: Jason Cooper <jason@lakedaemon.net> M: Andrew Lunn <andrew@lunn.ch> M: Gregory Clement <gregory.clement@free-electrons.com> @@ -1197,12 +1202,17 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-mvebu/ F: drivers/rtc/rtc-armada38x.c +F: arch/arm/boot/dts/armada* +F: arch/arm/boot/dts/kirkwood* + ARM/Marvell Berlin SoC support M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-berlin/ +F: arch/arm/boot/dts/berlin* + ARM/Marvell Dove/MV78xx0/Orion SOC support M: Jason Cooper <jason@lakedaemon.net> @@ -1215,6 +1225,9 @@ F: arch/arm/mach-dove/ F: arch/arm/mach-mv78xx0/ F: arch/arm/mach-orion5x/ F: arch/arm/plat-orion/ +F: arch/arm/boot/dts/dove* +F: arch/arm/boot/dts/orion5x* + ARM/Orion SoC/Technologic Systems TS-78xx platform support M: Alexander Clouter <alex@digriz.org.uk> @@ -1366,6 +1379,7 @@ N: rockchip ARM/SAMSUNG EXYNOS ARM ARCHITECTURES M: Kukjin Kim <kgene@kernel.org> +M: Krzysztof Kozlowski <k.kozlowski@samsung.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained @@ -1439,9 +1453,10 @@ ARM/SOCFPGA ARCHITECTURE M: Dinh Nguyen <dinguyen@opensource.altera.com> S: Maintained F: arch/arm/mach-socfpga/ +F: arch/arm/boot/dts/socfpga* +F: arch/arm/configs/socfpga_defconfig W: http://www.rocketboards.org -T: git://git.rocketboards.org/linux-socfpga.git -T: git://git.rocketboards.org/linux-socfpga-next.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT M: Dinh Nguyen <dinguyen@opensource.altera.com> @@ -1929,7 +1944,7 @@ S: Maintained F: drivers/net/wireless/b43legacy/ BACKLIGHT CLASS/SUBSYSTEM -M: Jingoo Han <jg1.han@samsung.com> +M: Jingoo Han <jingoohan1@gmail.com> M: Lee Jones <lee.jones@linaro.org> S: Maintained F: drivers/video/backlight/ @@ -2116,8 +2131,9 @@ S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE -M: Christian Daudt <bcm@fixthebug.org> M: Florian Fainelli <f.fainelli@gmail.com> +M: Ray Jui <rjui@broadcom.com> +M: Scott Branden <sbranden@broadcom.com> L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/mach-bcm S: Maintained @@ -2168,7 +2184,6 @@ S: Maintained F: drivers/usb/gadget/udc/bcm63xx_udc.* BROADCOM BCM7XXX ARM ARCHITECTURE -M: Marc Carino <marc.ceeeee@gmail.com> M: Brian Norris <computersforpeace@gmail.com> M: Gregory Fong <gregory.0xf0@gmail.com> M: Florian Fainelli <f.fainelli@gmail.com> @@ -3413,6 +3428,13 @@ F: drivers/gpu/drm/rcar-du/ F: drivers/gpu/drm/shmobile/ F: include/linux/platform_data/shmob_drm.h +DRM DRIVERS FOR ROCKCHIP +M: Mark Yao <mark.yao@rock-chips.com> +L: dri-devel@lists.freedesktop.org +S: Maintained +F: drivers/gpu/drm/rockchip/ +F: Documentation/devicetree/bindings/video/rockchip* + DSBR100 USB FM RADIO DRIVER M: Alexey Klimov <klimov.linux@gmail.com> L: linux-media@vger.kernel.org @@ -3905,7 +3927,7 @@ F: drivers/extcon/ F: Documentation/extcon/ EXYNOS DP DRIVER -M: Jingoo Han <jg1.han@samsung.com> +M: Jingoo Han <jingoohan1@gmail.com> L: dri-devel@lists.freedesktop.org S: Maintained F: drivers/gpu/drm/exynos/exynos_dp* @@ -4364,11 +4386,10 @@ F: fs/gfs2/ F: include/uapi/linux/gfs2_ondisk.h GIGASET ISDN DRIVERS -M: Hansjoerg Lipp <hjlipp@web.de> -M: Tilman Schmidt <tilman@imap.cc> +M: 
Paul Bolle <pebolle@tiscali.nl> L: gigaset307x-common@lists.sourceforge.net W: http://gigaset307x.sourceforge.net/ -S: Maintained +S: Odd Fixes F: Documentation/isdn/README.gigaset F: drivers/isdn/gigaset/ F: include/uapi/linux/gigaset_dev.h @@ -5035,17 +5056,19 @@ S: Orphan F: drivers/video/fbdev/imsttfb.c INFINIBAND SUBSYSTEM -M: Roland Dreier <roland@kernel.org> +M: Doug Ledford <dledford@redhat.com> M: Sean Hefty <sean.hefty@intel.com> M: Hal Rosenstock <hal.rosenstock@gmail.com> L: linux-rdma@vger.kernel.org W: http://www.openfabrics.org/ Q: http://patchwork.kernel.org/project/linux-rdma/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git S: Supported F: Documentation/infiniband/ F: drivers/infiniband/ F: include/uapi/linux/if_infiniband.h +F: include/uapi/rdma/ +F: include/rdma/ INOTIFY M: John McCutchan <john@johnmccutchan.com> @@ -5798,6 +5821,7 @@ F: drivers/scsi/53c700* LED SUBSYSTEM M: Bryan Wu <cooloney@gmail.com> M: Richard Purdie <rpurdie@rpsys.net> +M: Jacek Anaszewski <j.anaszewski@samsung.com> L: linux-leds@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git S: Maintained @@ -6943,6 +6967,17 @@ T: git git://git.rocketboards.org/linux-socfpga-next.git S: Maintained F: arch/nios2/ +NOKIA N900 POWER SUPPLY DRIVERS +M: Pali Rohár <pali.rohar@gmail.com> +S: Maintained +F: include/linux/power/bq2415x_charger.h +F: include/linux/power/bq27x00_battery.h +F: include/linux/power/isp1704_charger.h +F: drivers/power/bq2415x_charger.c +F: drivers/power/bq27x00_battery.c +F: drivers/power/isp1704_charger.c +F: drivers/power/rx51_battery.c + NTB DRIVER M: Jon Mason <jdmason@kudzu.us> M: Dave Jiang <dave.jiang@intel.com> @@ -7531,7 +7566,7 @@ S: Maintained F: drivers/pci/host/*rcar* PCI DRIVER FOR SAMSUNG EXYNOS -M: Jingoo Han <jg1.han@samsung.com> +M: Jingoo Han <jingoohan1@gmail.com> L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) @@ -7539,7 +7574,7 @@ S: Maintained F: drivers/pci/host/pci-exynos.c PCI DRIVER FOR SYNOPSIS DESIGNWARE -M: Jingoo Han <jg1.han@samsung.com> +M: Jingoo Han <jingoohan1@gmail.com> L: linux-pci@vger.kernel.org S: Maintained F: drivers/pci/host/*designware* @@ -8495,7 +8530,7 @@ S: Supported F: sound/soc/samsung/ SAMSUNG FRAMEBUFFER DRIVER -M: Jingoo Han <jg1.han@samsung.com> +M: Jingoo Han <jingoohan1@gmail.com> L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/s3c-fb.c @@ -8800,10 +8835,11 @@ W: http://www.emulex.com S: Supported F: drivers/scsi/be2iscsi/ -SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER -M: Sathya Perla <sathya.perla@emulex.com> -M: Subbu Seetharaman <subbu.seetharaman@emulex.com> -M: Ajit Khaparde <ajit.khaparde@emulex.com> +Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER +M: Sathya Perla <sathya.perla@avagotech.com> +M: Ajit Khaparde <ajit.khaparde@avagotech.com> +M: Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com> +M: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com> L: netdev@vger.kernel.org W: http://www.emulex.com S: Supported @@ -10523,7 +10559,6 @@ F: include/linux/virtio_console.h F: include/uapi/linux/virtio_console.h VIRTIO CORE, NET AND BLOCK DRIVERS -M: Rusty Russell <rusty@rustcorp.com.au> M: "Michael S. 
Tsirkin" <mst@redhat.com> L: virtualization@lists.linux-foundation.org S: Maintained @@ -11031,6 +11066,7 @@ F: drivers/media/pci/zoran/ ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER M: Minchan Kim <minchan@kernel.org> M: Nitin Gupta <ngupta@vflare.org> +R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com> L: linux-kernel@vger.kernel.org S: Maintained F: drivers/block/zram/ @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc4 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug index a7fc0da..ff6a4b5 100644 --- a/arch/arc/Kconfig.debug +++ b/arch/arc/Kconfig.debug @@ -2,19 +2,6 @@ menu "Kernel hacking" source "lib/Kconfig.debug" -config EARLY_PRINTK - bool "Early printk" if EMBEDDED - default y - help - Write kernel log output directly into the VGA buffer or to a serial - port. - - This is useful for kernel debugging when your machine crashes very - early before the console code is initialized. For normal operation - it is not recommended because it looks ugly and doesn't cooperate - with klogd/syslogd or the X server. You should normally N here, - unless you want to debug such a crash. - config 16KSTACKS bool "Use 16Kb for kernel stacks instead of 8Kb" help diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 067551b..9917a45 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ atomic_ops_unlock(flags); \ } -#define ATOMIC_OP_RETURN(op, c_op) \ +#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned long flags; \ diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 8c3a3e0..12b2100 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -266,7 +266,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, * Machine specific helpers for Entire D-Cache or Per Line ops */ -static unsigned int __before_dc_op(const int op) +static inline unsigned int __before_dc_op(const int op) { unsigned int reg = reg; @@ -284,7 +284,7 @@ static unsigned int __before_dc_op(const int op) return reg; } -static void __after_dc_op(const int op, unsigned int reg) +static inline void __after_dc_op(const int op, unsigned int reg) { if (op & OP_FLUSH) /* flush / flush-n-inv both wait */ while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS); diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts index 8ae29c9..c17097d 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts @@ -49,7 +49,7 @@ pinctrl-0 = <&matrix_keypad_pins>; debounce-delay-ms = <5>; - col-scan-delay-us = <1500>; + col-scan-delay-us = <5>; row-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH /* Bank5, pin5 */ &gpio5 6 GPIO_ACTIVE_HIGH>; /* Bank5, pin6 */ @@ -473,7 +473,7 @@ interrupt-parent = <&gpio0>; interrupts = <31 0>; - wake-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>; touchscreen-size-x = <480>; touchscreen-size-y = <272>; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 15f198e..7128fad 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -18,6 +18,7 @@ aliases { rtc0 = &mcp_rtc; rtc1 = &tps659038_rtc; + rtc2 = &rtc; }; memory { @@ -83,7 +84,7 @@ gpio_fan: gpio_fan { /* Based on 5v 500mA AFB02505HHB 
*/ compatible = "gpio-fan"; - gpios = <&tps659038_gpio 1 GPIO_ACTIVE_HIGH>; + gpios = <&tps659038_gpio 2 GPIO_ACTIVE_HIGH>; gpio-fan,speed-map = <0 0>, <13000 1>; #cooling-cells = <2>; @@ -130,8 +131,8 @@ uart3_pins_default: uart3_pins_default { pinctrl-single,pins = < - 0x248 (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_rxd.rxd */ - 0x24c (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_txd.txd */ + 0x3f8 (PIN_INPUT_SLEW | MUX_MODE2) /* uart2_ctsn.uart3_rxd */ + 0x3fc (PIN_INPUT_SLEW | MUX_MODE1) /* uart2_rtsn.uart3_txd */ >; }; @@ -455,7 +456,7 @@ mcp_rtc: rtc@6f { compatible = "microchip,mcp7941x"; reg = <0x6f>; - interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_LOW>; /* IRQ_SYS_1N */ + interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */ pinctrl-names = "default"; pinctrl-0 = <&mcp79410_pins_default>; @@ -478,7 +479,7 @@ &uart3 { status = "okay"; interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, - <&dra7_pmx_core 0x248>; + <&dra7_pmx_core 0x3f8>; pinctrl-names = "default"; pinctrl-0 = <&uart3_pins_default>; diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi index c675257..f076ff8 100644 --- a/arch/arm/boot/dts/armada-375.dtsi +++ b/arch/arm/boot/dts/armada-375.dtsi @@ -69,7 +69,7 @@ mainpll: mainpll { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <2000000000>; + clock-frequency = <1000000000>; }; /* 25 MHz reference crystal */ refclk: oscillator { diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index ed2dd8b..218a2ac 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -585,7 +585,7 @@ mainpll: mainpll { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <2000000000>; + clock-frequency = <1000000000>; }; /* 25 MHz reference crystal */ diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi index 0e85fc1..ecd1318 100644 --- a/arch/arm/boot/dts/armada-39x.dtsi +++ b/arch/arm/boot/dts/armada-39x.dtsi @@ -502,7 +502,7 @@ mainpll: mainpll { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <2000000000>; + clock-frequency = <1000000000>; }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts index e3b08fb..990e8a2 100644 --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts @@ -105,6 +105,10 @@ }; internal-regs { + rtc@10300 { + /* No crystal connected to the internal RTC */ + status = "disabled"; + }; serial@12000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts index aae7efc..e6fa251 100644 --- a/arch/arm/boot/dts/dove-cubox.dts +++ b/arch/arm/boot/dts/dove-cubox.dts @@ -87,6 +87,7 @@ /* connect xtal input to 25MHz reference */ clocks = <&ref25>; + clock-names = "xtal"; /* connect xtal input as source of pll0 and pll1 */ silabs,pll-source = <0 0>, <1 0>; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 5332b57..f03a091 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -911,7 +911,7 @@ ti,clock-cycles = <16>; reg = <0x4ae07ddc 0x4>, <0x4ae07de0 0x4>, - <0x4ae06014 0x4>, <0x4a003b20 0x8>, + <0x4ae06014 0x4>, <0x4a003b20 0xc>, <0x4ae0c158 0x4>; reg-names = "setup-address", "control-address", "int-address", "efuse-address", @@ -944,7 +944,7 @@ ti,clock-cycles = <16>; reg = <0x4ae07e34 0x4>, <0x4ae07e24 0x4>, - <0x4ae06010 0x4>, <0x4a0025cc 0x8>, + <0x4ae06010 0x4>, <0x4a0025cc 
0xc>, <0x4a002470 0x4>; reg-names = "setup-address", "control-address", "int-address", "efuse-address", @@ -977,7 +977,7 @@ ti,clock-cycles = <16>; reg = <0x4ae07e30 0x4>, <0x4ae07e20 0x4>, - <0x4ae06010 0x4>, <0x4a0025e0 0x8>, + <0x4ae06010 0x4>, <0x4a0025e0 0xc>, <0x4a00246c 0x4>; reg-names = "setup-address", "control-address", "int-address", "efuse-address", @@ -1010,7 +1010,7 @@ ti,clock-cycles = <16>; reg = <0x4ae07de4 0x4>, <0x4ae07de8 0x4>, - <0x4ae06010 0x4>, <0x4a003b08 0x8>, + <0x4ae06010 0x4>, <0x4a003b08 0xc>, <0x4ae0c154 0x4>; reg-names = "setup-address", "control-address", "int-address", "efuse-address", @@ -1203,7 +1203,7 @@ status = "disabled"; }; - rtc@48838000 { + rtc: rtc@48838000 { compatible = "ti,am3352-rtc"; reg = <0x48838000 0x100>; interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index 8de12af7..d6b49e5 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi @@ -9,6 +9,7 @@ #include <dt-bindings/sound/samsung-i2s.h> #include <dt-bindings/input/input.h> +#include <dt-bindings/clock/maxim,max77686.h> #include "exynos4412.dtsi" / { @@ -105,6 +106,8 @@ rtc@10070000 { status = "okay"; + clocks = <&clock CLK_RTC>, <&max77686 MAX77686_CLK_AP>; + clock-names = "rtc", "rtc_src"; }; g2d@10800000 { diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts index 2657e84..1eca97e 100644 --- a/arch/arm/boot/dts/exynos5250-snow.dts +++ b/arch/arm/boot/dts/exynos5250-snow.dts @@ -567,6 +567,7 @@ num-slots = <1>; broken-cd; cap-sdio-irq; + keep-power-in-suspend; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; samsung,dw-mshc-sdr-timing = <2 3>; diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index 0788d08..146e711 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts @@ -711,6 +711,7 @@ num-slots = <1>; broken-cd; cap-sdio-irq; + keep-power-in-suspend; card-detect-delay = <200>; clock-frequency = <400000000>; samsung,dw-mshc-ciu-div = <1>; diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi index 5d31fc1..2180a01 100644 --- a/arch/arm/boot/dts/exynos5420-trip-points.dtsi +++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi @@ -28,7 +28,7 @@ trips { type = "active"; }; cpu-crit-0 { - temperature = <1200000>; /* millicelsius */ + temperature = <120000>; /* millicelsius */ hysteresis = <0>; /* millicelsius */ type = "critical"; }; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index f67b23f..4531753 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -536,6 +536,7 @@ clock-names = "dp"; phys = <&dp_phy>; phy-names = "dp"; + power-domains = <&disp_pd>; }; mipi_phy: video-phy@10040714 { diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi index 48adfa8..356e963 100644 --- a/arch/arm/boot/dts/exynos5440-trip-points.dtsi +++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi @@ -18,7 +18,7 @@ trips { type = "active"; }; cpu-crit-0 { - temperature = <1050000>; /* millicelsius */ + temperature = <105000>; /* millicelsius */ hysteresis = <0>; /* millicelsius */ type = "critical"; }; diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index 412f41d..02eb8b1 
100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -674,6 +674,7 @@ num-slots = <1>; broken-cd; cap-sdio-irq; + keep-power-in-suspend; card-detect-delay = <200>; clock-frequency = <400000000>; samsung,dw-mshc-ciu-div = <1>; diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts index 7e6eef2..8204539 100644 --- a/arch/arm/boot/dts/imx23-olinuxino.dts +++ b/arch/arm/boot/dts/imx23-olinuxino.dts @@ -12,6 +12,7 @@ */ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include "imx23.dtsi" / { @@ -93,6 +94,7 @@ ahb@80080000 { usb0: usb@80080000 { + dr_mode = "host"; vbus-supply = <®_usb0_vbus>; status = "okay"; }; @@ -122,7 +124,7 @@ user { label = "green"; - gpios = <&gpio2 1 1>; + gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; }; }; }; diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi index e4d3aec..677f81d 100644 --- a/arch/arm/boot/dts/imx25.dtsi +++ b/arch/arm/boot/dts/imx25.dtsi @@ -428,6 +428,7 @@ pwm4: pwm@53fc8000 { compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; + #pwm-cells = <2>; reg = <0x53fc8000 0x4000>; clocks = <&clks 108>, <&clks 52>; clock-names = "ipg", "per"; diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index 25e25f8..4e073e8 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -913,7 +913,7 @@ 80 81 68 69 70 71 72 73 74 75 76 77>; - interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty", + interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty", "saif0", "saif1", "i2c0", "i2c1", "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index 19cc269..1ce6133 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -31,6 +31,7 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; gpio = <&gpio4 15 0>; + enable-active-high; }; reg_usb_h1_vbus: regulator@1 { @@ -40,6 +41,7 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; gpio = <&gpio1 0 0>; + enable-active-high; }; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi index 46b2fed..3b24b126 100644 --- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi @@ -185,7 +185,6 @@ &i2c3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c3>; - pinctrl-assert-gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>; status = "okay"; max7310_a: gpio@30 { diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index a293158..5c16145 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -498,6 +498,8 @@ DRVDD-supply = <&vmmc2>; IOVDD-supply = <&vio>; DVDD-supply = <&vio>; + + ai3x-micbias-vg = <1>; }; tlv320aic3x_aux: tlv320aic3x@19 { @@ -509,6 +511,8 @@ DRVDD-supply = <&vmmc2>; IOVDD-supply = <&vio>; DVDD-supply = <&vio>; + + ai3x-micbias-vg = <2>; }; tsl2563: tsl2563@29 { diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index d18a90f..69a40cf 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -456,6 +456,7 @@ }; mmu_isp: mmu@480bd400 { + #iommu-cells = <0>; compatible = "ti,omap2-iommu"; reg = <0x480bd400 0x80>; interrupts = <24>; @@ -464,6 +465,7 @@ }; mmu_iva: mmu@5d000000 { + #iommu-cells = <0>; compatible = "ti,omap2-iommu"; reg = 
<0x5d000000 0x80>; interrupts = <28>; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index efe5f73..7d24ae0 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -128,7 +128,7 @@ * hierarchy. */ ocp { - compatible = "ti,omap4-l3-noc", "simple-bus"; + compatible = "ti,omap5-l3-noc", "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index 74c3212..824ddab 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts @@ -545,7 +545,7 @@ compatible = "adi,adv7511w"; reg = <0x39>; interrupt-parent = <&gpio3>; - interrupts = <29 IRQ_TYPE_EDGE_FALLING>; + interrupts = <29 IRQ_TYPE_LEVEL_LOW>; adi,input-depth = <8>; adi,input-colorspace = "rgb"; diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index bfd3f1c..2201cd5 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -1017,23 +1017,6 @@ status = "disabled"; }; - vmmci: regulator-gpio { - compatible = "regulator-gpio"; - - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <2900000>; - regulator-name = "mmci-reg"; - regulator-type = "voltage"; - - startup-delay-us = <100>; - enable-active-high; - - states = <1800000 0x1 - 2900000 0x0>; - - status = "disabled"; - }; - mcde@a0350000 { compatible = "stericsson,mcde"; reg = <0xa0350000 0x1000>, /* MCDE */ diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index bf8f0ed..744c1e3 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -111,6 +111,21 @@ pinctrl-1 = <&i2c3_sleep_mode>; }; + vmmci: regulator-gpio { + compatible = "regulator-gpio"; + + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2900000>; + regulator-name = "mmci-reg"; + regulator-type = "voltage"; + + startup-delay-us = <100>; + enable-active-high; + + states = <1800000 0x1 + 2900000 0x0>; + }; + // External Micro SD slot sdi0_per1@80126000 { arm,primecell-periphid = <0x10480180>; diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index 206826a..1bc84eb 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -146,8 +146,21 @@ }; vmmci: regulator-gpio { + compatible = "regulator-gpio"; + gpios = <&gpio7 4 0x4>; enable-gpio = <&gpio6 25 0x4>; + + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2900000>; + regulator-name = "mmci-reg"; + regulator-type = "voltage"; + + startup-delay-us = <100>; + enable-active-high; + + states = <1800000 0x1 + 2900000 0x0>; }; // External Micro SD slot diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi index cf01c81..13cc7ca 100644 --- a/arch/arm/boot/dts/tegra124.dtsi +++ b/arch/arm/boot/dts/tegra124.dtsi @@ -826,7 +826,7 @@ <&tegra_car TEGRA124_CLK_PLL_U>, <&tegra_car TEGRA124_CLK_USBD>; clock-names = "reg", "pll_u", "utmi-pads"; - resets = <&tegra_car 59>, <&tegra_car 22>; + resets = <&tegra_car 22>, <&tegra_car 22>; reset-names = "usb", "utmi-pads"; nvidia,hssync-start-delay = <0>; nvidia,idle-wait-delay = <17>; @@ -838,6 +838,7 @@ nvidia,hssquelch-level = <2>; nvidia,hsdiscon-level = <5>; nvidia,xcvr-hsslew = <12>; + nvidia,has-utmi-pad-registers; status = "disabled"; }; @@ -862,7 +863,7 @@ <&tegra_car TEGRA124_CLK_PLL_U>, <&tegra_car TEGRA124_CLK_USBD>; clock-names = "reg", "pll_u", "utmi-pads"; - resets = <&tegra_car 22>, <&tegra_car 22>; + 
resets = <&tegra_car 58>, <&tegra_car 22>; reset-names = "usb", "utmi-pads"; nvidia,hssync-start-delay = <0>; nvidia,idle-wait-delay = <17>; @@ -874,7 +875,6 @@ nvidia,hssquelch-level = <2>; nvidia,hsdiscon-level = <5>; nvidia,xcvr-hsslew = <12>; - nvidia,has-utmi-pad-registers; status = "disabled"; }; @@ -899,7 +899,7 @@ <&tegra_car TEGRA124_CLK_PLL_U>, <&tegra_car TEGRA124_CLK_USBD>; clock-names = "reg", "pll_u", "utmi-pads"; - resets = <&tegra_car 58>, <&tegra_car 22>; + resets = <&tegra_car 59>, <&tegra_car 22>; reset-names = "usb", "utmi-pads"; nvidia,hssync-start-delay = <0>; nvidia,idle-wait-delay = <17>; diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index 7a2aeac..107395c 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -191,6 +191,7 @@ compatible = "arm,cortex-a15-pmu"; interrupts = <0 68 4>, <0 69 4>; + interrupt-affinity = <&cpu0>, <&cpu1>; }; oscclk6a: oscclk6a { diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts index 23662b5..d949fac 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts @@ -33,28 +33,28 @@ #address-cells = <1>; #size-cells = <0>; - cpu@0 { + A9_0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0>; next-level-cache = <&L2>; }; - cpu@1 { + A9_1: cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <1>; next-level-cache = <&L2>; }; - cpu@2 { + A9_2: cpu@2 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <2>; next-level-cache = <&L2>; }; - cpu@3 { + A9_3: cpu@3 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <3>; @@ -170,6 +170,7 @@ compatible = "arm,pl310-cache"; reg = <0x1e00a000 0x1000>; interrupts = <0 43 4>; + cache-unified; cache-level = <2>; arm,data-latency = <1 1 1>; arm,tag-latency = <1 1 1>; @@ -181,6 +182,8 @@ <0 61 4>, <0 62 4>, <0 63 4>; + interrupt-affinity = <&A9_0>, <&A9_1>, <&A9_2>, <&A9_3>; + }; dcc { diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index ab86655..0ca4a3e 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -39,11 +39,14 @@ CONFIG_ARCH_HIP04=y CONFIG_ARCH_KEYSTONE=y CONFIG_ARCH_MESON=y CONFIG_ARCH_MXC=y +CONFIG_SOC_IMX50=y CONFIG_SOC_IMX51=y CONFIG_SOC_IMX53=y CONFIG_SOC_IMX6Q=y CONFIG_SOC_IMX6SL=y +CONFIG_SOC_IMX6SX=y CONFIG_SOC_VF610=y +CONFIG_SOC_LS1021A=y CONFIG_ARCH_OMAP3=y CONFIG_ARCH_OMAP4=y CONFIG_SOC_OMAP5=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 9ff7b54..3743ca2 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -393,7 +393,7 @@ CONFIG_TI_EDMA=y CONFIG_DMA_OMAP=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXTCON=m -CONFIG_EXTCON_GPIO=m +CONFIG_EXTCON_USB_GPIO=m CONFIG_EXTCON_PALMAS=m CONFIG_TI_EMIF=m CONFIG_PWM=y diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 8e3fcb9..2ef282f 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -25,7 +25,7 @@ struct dma_iommu_mapping { }; struct dma_iommu_mapping * -arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size); +arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size); void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 2f7e6ff..0b579b2 100644 
--- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -110,5 +110,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) bool xen_arch_need_swiotlb(struct device *dev, unsigned long pfn, unsigned long mfn); +unsigned long xen_get_swiotlb_free_pages(unsigned int order); #endif /* _ASM_ARM_XEN_PAGE_H */ diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 91c7ba1..213919b 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -303,12 +303,17 @@ static int probe_current_pmu(struct arm_pmu *pmu) static int of_pmu_irq_cfg(struct platform_device *pdev) { - int i; + int i, irq; int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); if (!irqs) return -ENOMEM; + /* Don't bother with PPIs; they're already affine */ + irq = platform_get_irq(pdev, 0); + if (irq >= 0 && irq_is_percpu(irq)) + return 0; + for (i = 0; i < pdev->num_resources; ++i) { struct device_node *dn; int cpu; @@ -317,7 +322,7 @@ static int of_pmu_irq_cfg(struct platform_device *pdev) i); if (!dn) { pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", - of_node_full_name(dn), i); + of_node_full_name(pdev->dev.of_node), i); break; } diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index acd5b56..5f5cd56 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h @@ -159,6 +159,8 @@ extern void exynos_enter_aftr(void); extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data; +extern void exynos_set_delayed_reset_assertion(bool enable); + extern void s5p_init_cpu(void __iomem *cpuid_addr); extern unsigned int samsung_rev(void); extern void __iomem *cpu_boot_reg_base(void); diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index bcde0dd..5917a30 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c @@ -167,6 +167,33 @@ static void __init exynos_init_io(void) } /* + * Set or clear the USE_DELAYED_RESET_ASSERTION option. Used by smp code + * and suspend. + * + * This is necessary only on Exynos4 SoCs. When system is running + * USE_DELAYED_RESET_ASSERTION should be set so the ARM CLK clock down + * feature could properly detect global idle state when secondary CPU is + * powered down. + * + * However this should not be set when such system is going into suspend. + */ +void exynos_set_delayed_reset_assertion(bool enable) +{ + if (of_machine_is_compatible("samsung,exynos4")) { + unsigned int tmp, core_id; + + for (core_id = 0; core_id < num_possible_cpus(); core_id++) { + tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id)); + if (enable) + tmp |= S5P_USE_DELAYED_RESET_ASSERTION; + else + tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION); + pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id)); + } + } +} + +/* * Apparently, these SoCs are not able to wake-up from suspend using * the PMU. Too bad. Should they suddenly become capable of such a * feat, the matches below should be moved to suspend.c. diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index ebd135b..a825bca 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -34,30 +34,6 @@ extern void exynos4_secondary_startup(void); -/* - * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs - * during hot-(un)plugging CPUx. - * - * The feature can be cleared safely during first boot of secondary CPU. 
- * - * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering - * down a CPU so the CPU idle clock down feature could properly detect global - * idle state when CPUx is off. - */ -static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable) -{ - if (soc_is_exynos4()) { - unsigned int tmp; - - tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id)); - if (enable) - tmp |= S5P_USE_DELAYED_RESET_ASSERTION; - else - tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION); - pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id)); - } -} - #ifdef CONFIG_HOTPLUG_CPU static inline void cpu_leave_lowpower(u32 core_id) { @@ -73,8 +49,6 @@ static inline void cpu_leave_lowpower(u32 core_id) : "=&r" (v) : "Ir" (CR_C), "Ir" (0x40) : "cc"); - - exynos_set_delayed_reset_assertion(core_id, false); } static inline void platform_do_lowpower(unsigned int cpu, int *spurious) @@ -87,14 +61,6 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious) /* Turn the CPU off on next WFI instruction. */ exynos_cpu_power_down(core_id); - /* - * Exynos4 SoCs require setting - * USE_DELAYED_RESET_ASSERTION so the CPU idle - * clock down feature could properly detect - * global idle state when CPUx is off. - */ - exynos_set_delayed_reset_assertion(core_id, true); - wfi(); if (pen_release == core_id) { @@ -371,9 +337,6 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) udelay(10); } - /* No harm if this is called during first boot of secondary CPU */ - exynos_set_delayed_reset_assertion(core_id, false); - /* * now the secondary core is starting up let it run its * calibrations, then wait for it to finish @@ -420,6 +383,8 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus) exynos_sysram_init(); + exynos_set_delayed_reset_assertion(true); + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) scu_enable(scu_base_addr()); diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index cbe56b3..a968653 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -188,7 +188,7 @@ no_clk: args.np = np; args.args_count = 0; child_domain = of_genpd_get_from_provider(&args); - if (!child_domain) + if (IS_ERR(child_domain)) continue; if (of_parse_phandle_with_args(np, "power-domains", @@ -196,7 +196,7 @@ no_clk: continue; parent_domain = of_genpd_get_from_provider(&args); - if (!parent_domain) + if (IS_ERR(parent_domain)) continue; if (pm_genpd_add_subdomain(parent_domain, child_domain)) diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 3e6aea7..c0b6dcc 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -342,6 +342,8 @@ static void exynos_pm_enter_sleep_mode(void) static void exynos_pm_prepare(void) { + exynos_set_delayed_reset_assertion(false); + /* Set wake-up mask registers */ exynos_pm_set_wakeup_mask(); @@ -482,6 +484,7 @@ early_wakeup: /* Clear SLEEP mode set in INFORM1 */ pmu_raw_writel(0x0, S5P_INFORM1); + exynos_set_delayed_reset_assertion(true); } static void exynos3250_pm_resume(void) @@ -723,8 +726,10 @@ void __init exynos_pm_init(void) return; } - if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) + if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) { pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); + return; + } pm_data = (const struct exynos_pm_data *) match->data; diff --git a/arch/arm/mach-gemini/common.h b/arch/arm/mach-gemini/common.h index 38a4526..dd88369 100644 --- 
a/arch/arm/mach-gemini/common.h +++ b/arch/arm/mach-gemini/common.h @@ -12,6 +12,8 @@ #ifndef __GEMINI_COMMON_H__ #define __GEMINI_COMMON_H__ +#include <linux/reboot.h> + struct mtd_partition; extern void gemini_map_io(void); @@ -26,6 +28,6 @@ extern int platform_register_pflash(unsigned int size, struct mtd_partition *parts, unsigned int nr_parts); -extern void gemini_restart(char mode, const char *cmd); +extern void gemini_restart(enum reboot_mode mode, const char *cmd); #endif /* __GEMINI_COMMON_H__ */ diff --git a/arch/arm/mach-gemini/reset.c b/arch/arm/mach-gemini/reset.c index b266597..21a6d6d 100644 --- a/arch/arm/mach-gemini/reset.c +++ b/arch/arm/mach-gemini/reset.c @@ -14,7 +14,9 @@ #include <mach/hardware.h> #include <mach/global_reg.h> -void gemini_restart(char mode, const char *cmd) +#include "common.h" + +void gemini_restart(enum reboot_mode mode, const char *cmd) { __raw_writel(RESET_GLOBAL | RESET_CPU1, IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_RESET); diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c index fb8d4a2..a5edd7d 100644 --- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c +++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 Pengutronix, Wolfram Sang <w.sang@pengutronix.de> + * Copyright (C) 2010 Pengutronix, Wolfram Sang <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 355b089..752969f 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -171,6 +171,12 @@ */ #define LINKS_PER_OCP_IF 2 +/* + * Address offset (in bytes) between the reset control and the reset + * status registers: 4 bytes on OMAP4 + */ +#define OMAP4_RST_CTRL_ST_OFFSET 4 + /** * struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations * @enable_module: function to enable a module (via MODULEMODE) @@ -3016,10 +3022,12 @@ static int _omap4_deassert_hardreset(struct omap_hwmod *oh, if (ohri->st_shift) pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n", oh->name, ohri->name); - return omap_prm_deassert_hardreset(ohri->rst_shift, 0, + return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->rst_shift, oh->clkdm->pwrdm.ptr->prcm_partition, oh->clkdm->pwrdm.ptr->prcm_offs, - oh->prcm.omap4.rstctrl_offs, 0); + oh->prcm.omap4.rstctrl_offs, + oh->prcm.omap4.rstctrl_offs + + OMAP4_RST_CTRL_ST_OFFSET); } /** @@ -3048,27 +3056,6 @@ static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh, } /** - * _am33xx_assert_hardreset - call AM33XX PRM hardreset fn with hwmod args - * @oh: struct omap_hwmod * to assert hardreset - * @ohri: hardreset line data - * - * Call am33xx_prminst_assert_hardreset() with parameters extracted - * from the hwmod @oh and the hardreset line data @ohri. Only - * intended for use as an soc_ops function pointer. Passes along the - * return value from am33xx_prminst_assert_hardreset(). XXX This - * function is scheduled for removal when the PRM code is moved into - * drivers/. 
- */ -static int _am33xx_assert_hardreset(struct omap_hwmod *oh, - struct omap_hwmod_rst_info *ohri) - -{ - return omap_prm_assert_hardreset(ohri->rst_shift, 0, - oh->clkdm->pwrdm.ptr->prcm_offs, - oh->prcm.omap4.rstctrl_offs); -} - -/** * _am33xx_deassert_hardreset - call AM33XX PRM hardreset fn with hwmod args * @oh: struct omap_hwmod * to deassert hardreset * @ohri: hardreset line data @@ -3083,32 +3070,13 @@ static int _am33xx_assert_hardreset(struct omap_hwmod *oh, static int _am33xx_deassert_hardreset(struct omap_hwmod *oh, struct omap_hwmod_rst_info *ohri) { - return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, 0, + return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, + oh->clkdm->pwrdm.ptr->prcm_partition, oh->clkdm->pwrdm.ptr->prcm_offs, oh->prcm.omap4.rstctrl_offs, oh->prcm.omap4.rstst_offs); } -/** - * _am33xx_is_hardreset_asserted - call AM33XX PRM hardreset fn with hwmod args - * @oh: struct omap_hwmod * to test hardreset - * @ohri: hardreset line data - * - * Call am33xx_prminst_is_hardreset_asserted() with parameters - * extracted from the hwmod @oh and the hardreset line data @ohri. - * Only intended for use as an soc_ops function pointer. Passes along - * the return value from am33xx_prminst_is_hardreset_asserted(). XXX - * This function is scheduled for removal when the PRM code is moved - * into drivers/. - */ -static int _am33xx_is_hardreset_asserted(struct omap_hwmod *oh, - struct omap_hwmod_rst_info *ohri) -{ - return omap_prm_is_hardreset_asserted(ohri->rst_shift, 0, - oh->clkdm->pwrdm.ptr->prcm_offs, - oh->prcm.omap4.rstctrl_offs); -} - /* Public functions */ u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs) @@ -3908,21 +3876,13 @@ void __init omap_hwmod_init(void) soc_ops.init_clkdm = _init_clkdm; soc_ops.update_context_lost = _omap4_update_context_lost; soc_ops.get_context_lost = _omap4_get_context_lost; - } else if (soc_is_am43xx()) { + } else if (cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) { soc_ops.enable_module = _omap4_enable_module; soc_ops.disable_module = _omap4_disable_module; soc_ops.wait_target_ready = _omap4_wait_target_ready; soc_ops.assert_hardreset = _omap4_assert_hardreset; - soc_ops.deassert_hardreset = _omap4_deassert_hardreset; - soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted; - soc_ops.init_clkdm = _init_clkdm; - } else if (cpu_is_ti816x() || soc_is_am33xx()) { - soc_ops.enable_module = _omap4_enable_module; - soc_ops.disable_module = _omap4_disable_module; - soc_ops.wait_target_ready = _omap4_wait_target_ready; - soc_ops.assert_hardreset = _am33xx_assert_hardreset; soc_ops.deassert_hardreset = _am33xx_deassert_hardreset; - soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted; + soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted; soc_ops.init_clkdm = _init_clkdm; } else { WARN(1, "omap_hwmod: unknown SoC type\n"); diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c index e222314..17e8004 100644 --- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c @@ -544,6 +544,44 @@ static struct omap_hwmod am43xx_hdq1w_hwmod = { }, }; +static struct omap_hwmod_class_sysconfig am43xx_vpfe_sysc = { + .rev_offs = 0x0, + .sysc_offs = 0x104, + .sysc_flags = SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE, + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + MSTANDBY_FORCE | MSTANDBY_SMART | MSTANDBY_NO), + .sysc_fields = &omap_hwmod_sysc_type2, +}; + +static struct omap_hwmod_class 
am43xx_vpfe_hwmod_class = { + .name = "vpfe", + .sysc = &am43xx_vpfe_sysc, +}; + +static struct omap_hwmod am43xx_vpfe0_hwmod = { + .name = "vpfe0", + .class = &am43xx_vpfe_hwmod_class, + .clkdm_name = "l3s_clkdm", + .prcm = { + .omap4 = { + .modulemode = MODULEMODE_SWCTRL, + .clkctrl_offs = AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET, + }, + }, +}; + +static struct omap_hwmod am43xx_vpfe1_hwmod = { + .name = "vpfe1", + .class = &am43xx_vpfe_hwmod_class, + .clkdm_name = "l3s_clkdm", + .prcm = { + .omap4 = { + .modulemode = MODULEMODE_SWCTRL, + .clkctrl_offs = AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET, + }, + }, +}; + /* Interfaces */ static struct omap_hwmod_ocp_if am43xx_l3_main__l4_hs = { .master = &am33xx_l3_main_hwmod, @@ -825,6 +863,34 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__hdq1w = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; +static struct omap_hwmod_ocp_if am43xx_l3__vpfe0 = { + .master = &am43xx_vpfe0_hwmod, + .slave = &am33xx_l3_main_hwmod, + .clk = "l3_gclk", + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +static struct omap_hwmod_ocp_if am43xx_l3__vpfe1 = { + .master = &am43xx_vpfe1_hwmod, + .slave = &am33xx_l3_main_hwmod, + .clk = "l3_gclk", + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe0 = { + .master = &am33xx_l4_ls_hwmod, + .slave = &am43xx_vpfe0_hwmod, + .clk = "l4ls_gclk", + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe1 = { + .master = &am33xx_l4_ls_hwmod, + .slave = &am43xx_vpfe1_hwmod, + .clk = "l4ls_gclk", + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = { &am33xx_l4_wkup__synctimer, &am43xx_l4_ls__timer8, @@ -925,6 +991,10 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = { &am43xx_l4_ls__dss_dispc, &am43xx_l4_ls__dss_rfbi, &am43xx_l4_ls__hdq1w, + &am43xx_l3__vpfe0, + &am43xx_l3__vpfe1, + &am43xx_l4_ls__vpfe0, + &am43xx_l4_ls__vpfe1, NULL, }; diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h index 48df3b5..d026199 100644 --- a/arch/arm/mach-omap2/prcm43xx.h +++ b/arch/arm/mach-omap2/prcm43xx.h @@ -144,5 +144,6 @@ #define AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET 0x05C0 #define AM43XX_CM_PER_DSS_CLKCTRL_OFFSET 0x0a20 #define AM43XX_CM_PER_HDQ1W_CLKCTRL_OFFSET 0x04a0 - +#define AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET 0x0068 +#define AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET 0x0070 #endif diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h index cbefbd7..661d753 100644 --- a/arch/arm/mach-omap2/prm-regbits-34xx.h +++ b/arch/arm/mach-omap2/prm-regbits-34xx.h @@ -112,6 +112,7 @@ #define OMAP3430_VC_CMD_ONLP_SHIFT 16 #define OMAP3430_VC_CMD_RET_SHIFT 8 #define OMAP3430_VC_CMD_OFF_SHIFT 0 +#define OMAP3430_SREN_MASK (1 << 4) #define OMAP3430_HSEN_MASK (1 << 3) #define OMAP3430_MCODE_MASK (0x7 << 0) #define OMAP3430_VALID_MASK (1 << 24) diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h index b1c7a33..e794828 100644 --- a/arch/arm/mach-omap2/prm-regbits-44xx.h +++ b/arch/arm/mach-omap2/prm-regbits-44xx.h @@ -35,6 +35,7 @@ #define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1 #define OMAP4430_GLOBAL_WUEN_MASK (1 << 16) #define OMAP4430_HSMCODE_MASK (0x7 << 0) +#define OMAP4430_SRMODEEN_MASK (1 << 4) #define OMAP4430_HSMODEEN_MASK (1 << 3) #define OMAP4430_HSSCLL_SHIFT 24 #define OMAP4430_ICEPICK_RST_SHIFT 9 diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c index c4859c4..d0b15db 
100644 --- a/arch/arm/mach-omap2/prminst44xx.c +++ b/arch/arm/mach-omap2/prminst44xx.c @@ -87,12 +87,6 @@ u32 omap4_prminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst, return v; } -/* - * Address offset (in bytes) between the reset control and the reset - * status registers: 4 bytes on OMAP4 - */ -#define OMAP4_RST_CTRL_ST_OFFSET 4 - /** * omap4_prminst_is_hardreset_asserted - read the HW reset line state of * submodules contained in the hwmod module @@ -141,11 +135,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst, * omap4_prminst_deassert_hardreset - deassert a submodule hardreset line and * wait * @shift: register bit shift corresponding to the reset line to deassert - * @st_shift: status bit offset, not used for OMAP4+ + * @st_shift: status bit offset corresponding to the reset line * @part: PRM partition * @inst: PRM instance offset * @rstctrl_offs: reset register offset - * @st_offs: reset status register offset, not used for OMAP4+ + * @rstst_offs: reset status register offset * * Some IPs like dsp, ipu or iva contain processors that require an HW * reset line to be asserted / deasserted in order to fully enable the @@ -157,11 +151,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst, * of reset, or -EBUSY if the submodule did not exit reset promptly. */ int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst, - u16 rstctrl_offs, u16 st_offs) + u16 rstctrl_offs, u16 rstst_offs) { int c; u32 mask = 1 << shift; - u16 rstst_offs = rstctrl_offs + OMAP4_RST_CTRL_ST_OFFSET; + u32 st_mask = 1 << st_shift; /* Check the current status to avoid de-asserting the line twice */ if (omap4_prminst_is_hardreset_asserted(shift, part, inst, @@ -169,13 +163,13 @@ int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst, return -EEXIST; /* Clear the reset status by writing 1 to the status bit */ - omap4_prminst_rmw_inst_reg_bits(0xffffffff, mask, part, inst, + omap4_prminst_rmw_inst_reg_bits(0xffffffff, st_mask, part, inst, rstst_offs); /* de-assert the reset control line */ omap4_prminst_rmw_inst_reg_bits(mask, 0, part, inst, rstctrl_offs); /* wait the status to be set */ - omap_test_timeout(omap4_prminst_is_hardreset_asserted(shift, part, inst, - rstst_offs), + omap_test_timeout(omap4_prminst_is_hardreset_asserted(st_shift, part, + inst, rstst_offs), MAX_MODULE_HARDRESET_WAIT, c); return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0; diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index cef67af..cac46d8 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -298,14 +298,11 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, if (IS_ERR(src)) return PTR_ERR(src); - if (clk_get_parent(timer->fclk) != src) { - r = clk_set_parent(timer->fclk, src); - if (r < 0) { - pr_warn("%s: %s cannot set source\n", __func__, - oh->name); - clk_put(src); - return r; - } + r = clk_set_parent(timer->fclk, src); + if (r < 0) { + pr_warn("%s: %s cannot set source\n", __func__, oh->name); + clk_put(src); + return r; } clk_put(src); diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index be9ef83..076fd20 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c @@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm) * idle. And we can also scale voltages to zero for off-idle. 
* Note that no actual voltage scaling during off-idle will * happen unless the board specific twl4030 PMIC scripts are - * loaded. + * loaded. See also omap_vc_i2c_init for comments regarding + * erratum i531. */ val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET); if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) { @@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) return; } + /* + * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around + * erratum i531 "Extra Power Consumed When Repeated Start Operation + * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)". + * Otherwise I2C4 eventually leads into about 23mW extra power being + * consumed even during off idle using VMODE. + */ i2c_high_speed = voltdm->pmic->i2c_high_speed; if (i2c_high_speed) - voltdm->rmw(vc->common->i2c_cfg_hsen_mask, + voltdm->rmw(vc->common->i2c_cfg_clear_mask, vc->common->i2c_cfg_hsen_mask, vc->common->i2c_cfg_reg); diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h index cdbdd78..89b83b7 100644 --- a/arch/arm/mach-omap2/vc.h +++ b/arch/arm/mach-omap2/vc.h @@ -34,6 +34,7 @@ struct voltagedomain; * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register * @i2c_cfg_reg: I2C configuration register offset + * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register * @i2c_mcode_mask: MCODE field mask for I2C config register * @@ -52,6 +53,7 @@ struct omap_vc_common { u8 cmd_ret_shift; u8 cmd_off_shift; u8 i2c_cfg_reg; + u8 i2c_cfg_clear_mask; u8 i2c_cfg_hsen_mask; u8 i2c_mcode_mask; }; diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c index 75bc4aa..71d74c9 100644 --- a/arch/arm/mach-omap2/vc3xxx_data.c +++ b/arch/arm/mach-omap2/vc3xxx_data.c @@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = { .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT, .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT, .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT, + .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK, .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK, .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET, .i2c_mcode_mask = OMAP3430_MCODE_MASK, diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c index 085e5d6..2abd5fa 100644 --- a/arch/arm/mach-omap2/vc44xx_data.c +++ b/arch/arm/mach-omap2/vc44xx_data.c @@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = { .cmd_ret_shift = OMAP4430_RET_SHIFT, .cmd_off_shift = OMAP4430_OFF_SHIFT, .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET, + .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK, .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK, .i2c_mcode_mask = OMAP4430_HSMCODE_MASK, }; diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 8896e71..f096836 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -691,4 +691,13 @@ config SHARPSL_PM_MAX1111 config PXA310_ULPI bool +config PXA_SYSTEMS_CPLDS + tristate "Motherboard cplds" + default ARCH_LUBBOCK || MACH_MAINSTONE + help + This driver supports the Lubbock and Mainstone multifunction chip + found on the pxa25x development platform system (Lubbock) and pxa27x + development platform system (Mainstone). This IO board supports the + interrupts handling, ethernet controller, flash chips, etc ... 
+ endif diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index eb0bf76..4087d33 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile @@ -90,4 +90,5 @@ obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o obj-$(CONFIG_MACH_ZIPIT2) += z2.o +obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o obj-$(CONFIG_TOSA_BT) += tosa-bt.o diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h index 958cd6af..1eecf79 100644 --- a/arch/arm/mach-pxa/include/mach/lubbock.h +++ b/arch/arm/mach-pxa/include/mach/lubbock.h @@ -37,7 +37,9 @@ #define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100) /* Board specific IRQs */ -#define LUBBOCK_IRQ(x) (IRQ_BOARD_START + (x)) +#define LUBBOCK_NR_IRQS IRQ_BOARD_START + +#define LUBBOCK_IRQ(x) (LUBBOCK_NR_IRQS + (x)) #define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0) #define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1) #define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */ @@ -47,8 +49,7 @@ #define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */ #define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6) -#define LUBBOCK_SA1111_IRQ_BASE (IRQ_BOARD_START + 16) -#define LUBBOCK_NR_IRQS (IRQ_BOARD_START + 16 + 55) +#define LUBBOCK_SA1111_IRQ_BASE (LUBBOCK_NR_IRQS + 32) #ifndef __ASSEMBLY__ extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set); diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h index 1bfc4e8..e82a7d3 100644 --- a/arch/arm/mach-pxa/include/mach/mainstone.h +++ b/arch/arm/mach-pxa/include/mach/mainstone.h @@ -120,7 +120,9 @@ #define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */ /* board specific IRQs */ -#define MAINSTONE_IRQ(x) (IRQ_BOARD_START + (x)) +#define MAINSTONE_NR_IRQS IRQ_BOARD_START + +#define MAINSTONE_IRQ(x) (MAINSTONE_NR_IRQS + (x)) #define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0) #define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1) #define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2) @@ -136,6 +138,4 @@ #define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14) #define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15) -#define MAINSTONE_NR_IRQS (IRQ_BOARD_START + 16) - #endif diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index d8a1be6..4ac9ab8 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c @@ -12,6 +12,7 @@ * published by the Free Software Foundation. 
*/ #include <linux/gpio.h> +#include <linux/gpio/machine.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -123,84 +124,6 @@ void lubbock_set_misc_wr(unsigned int mask, unsigned int set) } EXPORT_SYMBOL(lubbock_set_misc_wr); -static unsigned long lubbock_irq_enabled; - -static void lubbock_mask_irq(struct irq_data *d) -{ - int lubbock_irq = (d->irq - LUBBOCK_IRQ(0)); - LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq)); -} - -static void lubbock_unmask_irq(struct irq_data *d) -{ - int lubbock_irq = (d->irq - LUBBOCK_IRQ(0)); - /* the irq can be acknowledged only if deasserted, so it's done here */ - LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq); - LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq)); -} - -static struct irq_chip lubbock_irq_chip = { - .name = "FPGA", - .irq_ack = lubbock_mask_irq, - .irq_mask = lubbock_mask_irq, - .irq_unmask = lubbock_unmask_irq, -}; - -static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled; - do { - /* clear our parent irq */ - desc->irq_data.chip->irq_ack(&desc->irq_data); - if (likely(pending)) { - irq = LUBBOCK_IRQ(0) + __ffs(pending); - generic_handle_irq(irq); - } - pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled; - } while (pending); -} - -static void __init lubbock_init_irq(void) -{ - int irq; - - pxa25x_init_irq(); - - /* setup extra lubbock irqs */ - for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) { - irq_set_chip_and_handler(irq, &lubbock_irq_chip, - handle_level_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - } - - irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler); - irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING); -} - -#ifdef CONFIG_PM - -static void lubbock_irq_resume(void) -{ - LUB_IRQ_MASK_EN = lubbock_irq_enabled; -} - -static struct syscore_ops lubbock_irq_syscore_ops = { - .resume = lubbock_irq_resume, -}; - -static int __init lubbock_irq_device_init(void) -{ - if (machine_is_lubbock()) { - register_syscore_ops(&lubbock_irq_syscore_ops); - return 0; - } - return -ENODEV; -} - -device_initcall(lubbock_irq_device_init); - -#endif - static int lubbock_udc_is_connected(void) { return (LUB_MISC_RD & (1 << 9)) == 0; @@ -383,11 +306,38 @@ static struct platform_device lubbock_flash_device[2] = { }, }; +static struct resource lubbock_cplds_resources[] = { + [0] = { + .start = LUBBOCK_FPGA_PHYS + 0xc0, + .end = LUBBOCK_FPGA_PHYS + 0xe0 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = PXA_GPIO_TO_IRQ(0), + .end = PXA_GPIO_TO_IRQ(0), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, + }, + [2] = { + .start = LUBBOCK_IRQ(0), + .end = LUBBOCK_IRQ(6), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device lubbock_cplds_device = { + .name = "pxa_cplds_irqs", + .id = -1, + .resource = &lubbock_cplds_resources[0], + .num_resources = 3, +}; + + static struct platform_device *devices[] __initdata = { &sa1111_device, &smc91x_device, &lubbock_flash_device[0], &lubbock_flash_device[1], + &lubbock_cplds_device, }; static struct pxafb_mode_info sharp_lm8v31_mode = { @@ -648,7 +598,7 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)") /* Maintainer: MontaVista Software Inc. 
*/ .map_io = lubbock_map_io, .nr_irqs = LUBBOCK_NR_IRQS, - .init_irq = lubbock_init_irq, + .init_irq = pxa25x_init_irq, .handle_irq = pxa25x_handle_irq, .init_time = pxa_timer_init, .init_machine = lubbock_init, diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 78b84c0..2c0658c 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c @@ -13,6 +13,7 @@ * published by the Free Software Foundation. */ #include <linux/gpio.h> +#include <linux/gpio/machine.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/syscore_ops.h> @@ -122,92 +123,6 @@ static unsigned long mainstone_pin_config[] = { GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, }; -static unsigned long mainstone_irq_enabled; - -static void mainstone_mask_irq(struct irq_data *d) -{ - int mainstone_irq = (d->irq - MAINSTONE_IRQ(0)); - MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq)); -} - -static void mainstone_unmask_irq(struct irq_data *d) -{ - int mainstone_irq = (d->irq - MAINSTONE_IRQ(0)); - /* the irq can be acknowledged only if deasserted, so it's done here */ - MST_INTSETCLR &= ~(1 << mainstone_irq); - MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq)); -} - -static struct irq_chip mainstone_irq_chip = { - .name = "FPGA", - .irq_ack = mainstone_mask_irq, - .irq_mask = mainstone_mask_irq, - .irq_unmask = mainstone_unmask_irq, -}; - -static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled; - do { - /* clear useless edge notification */ - desc->irq_data.chip->irq_ack(&desc->irq_data); - if (likely(pending)) { - irq = MAINSTONE_IRQ(0) + __ffs(pending); - generic_handle_irq(irq); - } - pending = MST_INTSETCLR & mainstone_irq_enabled; - } while (pending); -} - -static void __init mainstone_init_irq(void) -{ - int irq; - - pxa27x_init_irq(); - - /* setup extra Mainstone irqs */ - for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { - irq_set_chip_and_handler(irq, &mainstone_irq_chip, - handle_level_irq); - if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); - else - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - } - set_irq_flags(MAINSTONE_IRQ(8), 0); - set_irq_flags(MAINSTONE_IRQ(12), 0); - - MST_INTMSKENA = 0; - MST_INTSETCLR = 0; - - irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler); - irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING); -} - -#ifdef CONFIG_PM - -static void mainstone_irq_resume(void) -{ - MST_INTMSKENA = mainstone_irq_enabled; -} - -static struct syscore_ops mainstone_irq_syscore_ops = { - .resume = mainstone_irq_resume, -}; - -static int __init mainstone_irq_device_init(void) -{ - if (machine_is_mainstone()) - register_syscore_ops(&mainstone_irq_syscore_ops); - - return 0; -} - -device_initcall(mainstone_irq_device_init); - -#endif - - static struct resource smc91x_resources[] = { [0] = { .start = (MST_ETH_PHYS + 0x300), @@ -487,11 +402,37 @@ static struct platform_device mst_gpio_keys_device = { }, }; +static struct resource mst_cplds_resources[] = { + [0] = { + .start = MST_FPGA_PHYS + 0xc0, + .end = MST_FPGA_PHYS + 0xe0 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = PXA_GPIO_TO_IRQ(0), + .end = PXA_GPIO_TO_IRQ(0), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, + }, + [2] = { + .start = MAINSTONE_IRQ(0), + .end = MAINSTONE_IRQ(15), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device mst_cplds_device = 
{ + .name = "pxa_cplds_irqs", + .id = -1, + .resource = &mst_cplds_resources[0], + .num_resources = 3, +}; + static struct platform_device *platform_devices[] __initdata = { &smc91x_device, &mst_flash_device[0], &mst_flash_device[1], &mst_gpio_keys_device, + &mst_cplds_device, }; static struct pxaohci_platform_data mainstone_ohci_platform_data = { @@ -718,7 +659,7 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)") .atag_offset = 0x100, /* BLOB boot parameter setting */ .map_io = mainstone_map_io, .nr_irqs = MAINSTONE_NR_IRQS, - .init_irq = mainstone_init_irq, + .init_irq = pxa27x_init_irq, .handle_irq = pxa27x_handle_irq, .init_time = pxa_timer_init, .init_machine = mainstone_init, diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c new file mode 100644 index 0000000..f1aeb54 --- /dev/null +++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c @@ -0,0 +1,200 @@ +/* + * Intel Reference Systems cplds + * + * Copyright (C) 2014 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Cplds motherboard driver, supporting lubbock and mainstone SoC board. + */ + +#include <linux/bitops.h> +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/core.h> +#include <linux/module.h> +#include <linux/of_platform.h> + +#define FPGA_IRQ_MASK_EN 0x0 +#define FPGA_IRQ_SET_CLR 0x10 + +#define CPLDS_NB_IRQ 32 + +struct cplds { + void __iomem *base; + int irq; + unsigned int irq_mask; + struct gpio_desc *gpio0; + struct irq_domain *irqdomain; +}; + +static irqreturn_t cplds_irq_handler(int in_irq, void *d) +{ + struct cplds *fpga = d; + unsigned long pending; + unsigned int bit; + + pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask; + for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) + generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit)); + + return IRQ_HANDLED; +} + +static void cplds_irq_mask_ack(struct irq_data *d) +{ + struct cplds *fpga = irq_data_get_irq_chip_data(d); + unsigned int cplds_irq = irqd_to_hwirq(d); + unsigned int set, bit = BIT(cplds_irq); + + fpga->irq_mask &= ~bit; + writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); + set = readl(fpga->base + FPGA_IRQ_SET_CLR); + writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR); +} + +static void cplds_irq_unmask(struct irq_data *d) +{ + struct cplds *fpga = irq_data_get_irq_chip_data(d); + unsigned int cplds_irq = irqd_to_hwirq(d); + unsigned int bit = BIT(cplds_irq); + + fpga->irq_mask |= bit; + writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); +} + +static struct irq_chip cplds_irq_chip = { + .name = "pxa_cplds", + .irq_mask_ack = cplds_irq_mask_ack, + .irq_unmask = cplds_irq_unmask, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, +}; + +static int cplds_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct cplds *fpga = d->host_data; + + irq_set_chip_and_handler(irq, &cplds_irq_chip, handle_level_irq); + irq_set_chip_data(irq, fpga); + + return 0; +} + +static const struct irq_domain_ops cplds_irq_domain_ops = { + .xlate = irq_domain_xlate_twocell, + .map = cplds_irq_domain_map, +}; + +static int cplds_resume(struct platform_device *pdev) +{ + struct cplds *fpga = 
platform_get_drvdata(pdev); + + writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); + + return 0; +} + +static int cplds_probe(struct platform_device *pdev) +{ + struct resource *res; + struct cplds *fpga; + int ret; + unsigned int base_irq = 0; + unsigned long irqflags = 0; + + fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL); + if (!fpga) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (res) { + fpga->irq = (unsigned int)res->start; + irqflags = res->flags; + } + if (!fpga->irq) + return -ENODEV; + + base_irq = platform_get_irq(pdev, 1); + if (base_irq < 0) + base_irq = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + fpga->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(fpga->base)) + return PTR_ERR(fpga->base); + + platform_set_drvdata(pdev, fpga); + + writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); + writel(0, fpga->base + FPGA_IRQ_SET_CLR); + + ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler, + irqflags, dev_name(&pdev->dev), fpga); + if (ret == -ENOSYS) + return -EPROBE_DEFER; + + if (ret) { + dev_err(&pdev->dev, "couldn't request main irq%d: %d\n", + fpga->irq, ret); + return ret; + } + + irq_set_irq_wake(fpga->irq, 1); + fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node, + CPLDS_NB_IRQ, + &cplds_irq_domain_ops, fpga); + if (!fpga->irqdomain) + return -ENODEV; + + if (base_irq) { + ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0, + CPLDS_NB_IRQ); + if (ret) { + dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n", + base_irq, base_irq + CPLDS_NB_IRQ); + return ret; + } + } + + return 0; +} + +static int cplds_remove(struct platform_device *pdev) +{ + struct cplds *fpga = platform_get_drvdata(pdev); + + irq_set_chip_and_handler(fpga->irq, NULL, NULL); + + return 0; +} + +static const struct of_device_id cplds_id_table[] = { + { .compatible = "intel,lubbock-cplds-irqs", }, + { .compatible = "intel,mainstone-cplds-irqs", }, + { } +}; +MODULE_DEVICE_TABLE(of, cplds_id_table); + +static struct platform_driver cplds_driver = { + .driver = { + .name = "pxa_cplds_irqs", + .of_match_table = of_match_ptr(cplds_id_table), + }, + .probe = cplds_probe, + .remove = cplds_remove, + .resume = cplds_resume, +}; + +module_platform_driver(cplds_driver); + +MODULE_DESCRIPTION("PXA Cplds interrupts driver"); +MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c index b07d886..b0dcbe2 100644 --- a/arch/arm/mach-rockchip/pm.c +++ b/arch/arm/mach-rockchip/pm.c @@ -83,6 +83,13 @@ static void rk3288_slp_mode_set(int level) SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN | SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE); + /* + * The dapswjdp can not auto reset before resume, that cause it may + * access some illegal address during resume. Let's disable it before + * suspend, and the MASKROM will enable it back. 
+ */ + regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0, SGRF_DAPDEVICEEN_WRITE); + /* booting address of resuming system is from this register value */ regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR, rk3288_bootram_phy); diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h index 03ff31d..3e8d39c 100644 --- a/arch/arm/mach-rockchip/pm.h +++ b/arch/arm/mach-rockchip/pm.h @@ -55,6 +55,10 @@ static inline void rockchip_suspend_init(void) #define SGRF_FAST_BOOT_EN BIT(8) #define SGRF_FAST_BOOT_EN_WRITE BIT(24) +#define RK3288_SGRF_CPU_CON0 (0x40) +#define SGRF_DAPDEVICEEN BIT(0) +#define SGRF_DAPDEVICEEN_WRITE BIT(16) + #define RK3288_CRU_MODE_CON 0x50 #define RK3288_CRU_SEL0_CON 0x60 #define RK3288_CRU_SEL1_CON 0x64 diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c index d360ec0..b6cf3b4 100644 --- a/arch/arm/mach-rockchip/rockchip.c +++ b/arch/arm/mach-rockchip/rockchip.c @@ -30,11 +30,30 @@ #include "pm.h" #define RK3288_GRF_SOC_CON0 0x244 +#define RK3288_TIMER6_7_PHYS 0xff810000 static void __init rockchip_timer_init(void) { if (of_machine_is_compatible("rockchip,rk3288")) { struct regmap *grf; + void __iomem *reg_base; + + /* + * Most/all uboot versions for rk3288 don't enable timer7 + * which is needed for the architected timer to work. + * So make sure it is running during early boot. + */ + reg_base = ioremap(RK3288_TIMER6_7_PHYS, SZ_16K); + if (reg_base) { + writel(0, reg_base + 0x30); + writel(0xffffffff, reg_base + 0x20); + writel(0xffffffff, reg_base + 0x24); + writel(1, reg_base + 0x30); + dsb(); + iounmap(reg_base); + } else { + pr_err("rockchip: could not map timer7 registers\n"); + } /* * Disable auto jtag/sdmmc switching that causes issues diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 09c5fe3..7e7583d 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1878,7 +1878,7 @@ struct dma_map_ops iommu_coherent_ops = { * arm_iommu_attach_device function. */ struct dma_iommu_mapping * -arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size) +arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) { unsigned int bits = size >> PAGE_SHIFT; unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); @@ -1886,6 +1886,10 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size) int extensions = 1; int err = -ENOMEM; + /* currently only 32-bit DMA address space is supported */ + if (size > DMA_BIT_MASK(32) + 1) + return ERR_PTR(-ERANGE); + if (!bitmap_size) return ERR_PTR(-EINVAL); @@ -2057,13 +2061,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, if (!iommu) return false; - /* - * currently arm_iommu_create_mapping() takes a max of size_t - * for size param. So check this limit for now. - */ - if (size > SIZE_MAX) - return false; - mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); if (IS_ERR(mapping)) { pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index aa0519e..774ef13 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -22,8 +22,6 @@ * * These are the low level assembler for performing cache and TLB * functions on the arm1020. 
- * - * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt */ #include <linux/linkage.h> #include <linux/init.h> diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index bff4c7f..ae3c27b 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -22,8 +22,6 @@ * * These are the low level assembler for performing cache and TLB * functions on the arm1020e. - * - * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt */ #include <linux/linkage.h> #include <linux/init.h> diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index ede8c54..32a47cc 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -441,9 +441,6 @@ ENTRY(cpu_arm925_set_pte_ext) .type __arm925_setup, #function __arm925_setup: mov r0, #0 -#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE) - orr r0,r0,#1 << 7 -#endif /* Transparent on, D-cache clean & flush mode. See NOTE2 above */ orr r0,r0,#1 << 1 @ transparent mode on diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index e494d6d..92e08bf 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -602,7 +602,6 @@ __\name\()_proc_info: PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ initfn __feroceon_setup, __\name\()_proc_info - .long __feroceon_setup .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index e1268f9..e0e2358 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -54,6 +54,7 @@ #define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) #define FLAG_NEED_X_RESET (1 << 0) +#define FLAG_IMM_OVERFLOW (1 << 1) struct jit_ctx { const struct bpf_prog *skf; @@ -293,6 +294,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx) /* PC in ARM mode == address of the instruction + 8 */ imm = offset - (8 + ctx->idx * 4); + if (imm & ~0xfff) { + /* + * literal pool is too far, signal it into flags. we + * can only detect it on the second pass unfortunately. + */ + ctx->flags |= FLAG_IMM_OVERFLOW; + return 0; + } + return imm; } @@ -449,10 +459,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx) return; } #endif - if (rm != ARM_R0) - emit(ARM_MOV_R(ARM_R0, rm), ctx); + + /* + * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4 + * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into + * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm + * before using it as a source for ARM_R1. + * + * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is + * ARM_R5 (r_X) so there is no particular register overlap + * issues. + */ if (rn != ARM_R1) emit(ARM_MOV_R(ARM_R1, rn), ctx); + if (rm != ARM_R0) + emit(ARM_MOV_R(ARM_R0, rm), ctx); ctx->seen |= SEEN_CALL; emit_mov_i(ARM_R3, (u32)jit_udiv, ctx); @@ -855,6 +876,14 @@ b_epilogue: default: return -1; } + + if (ctx->flags & FLAG_IMM_OVERFLOW) + /* + * this instruction generated an overflow when + * trying to access the literal pool, so + * delegate this filter to the kernel interpreter. 
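The imm_offset() check above flags literal-pool entries that an ARM-mode load cannot reach with its 12-bit immediate; build_body() then aborts the JIT and the filter is left to the kernel interpreter. A stand-alone sketch of the offset computation and the overflow condition, with illustrative names that are not taken from the JIT itself:

#include <stdio.h>

#define FLAG_IMM_OVERFLOW (1 << 1)

/* Distance from the load instruction to its literal-pool slot, and the
 * check that it still fits an ARM-mode 12-bit immediate field. */
static int pc_relative_imm(int pool_offset, int insn_idx, unsigned int *flags)
{
        /* PC in ARM mode == address of the instruction + 8 */
        int imm = pool_offset - (8 + insn_idx * 4);

        if (imm & ~0xfff) {               /* out of the 0..4095 range */
                *flags |= FLAG_IMM_OVERFLOW;
                return 0;
        }
        return imm;
}

int main(void)
{
        unsigned int flags = 0;

        printf("near pool: imm=%d flags=%#x\n",
               pc_relative_imm(1024, 2, &flags), flags);    /* fits */
        printf("far pool:  imm=%d flags=%#x\n",
               pc_relative_imm(16384, 2, &flags), flags);   /* overflow */
        return 0;
}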
+ */ + return -1; } /* compute offsets only during the first pass */ @@ -917,7 +946,14 @@ void bpf_jit_compile(struct bpf_prog *fp) ctx.idx = 0; build_prologue(&ctx); - build_body(&ctx); + if (build_body(&ctx) < 0) { +#if __LINUX_ARM_ARCH__ < 7 + if (ctx.imm_count) + kfree(ctx.imms); +#endif + bpf_jit_binary_free(header); + goto out; + } build_epilogue(&ctx); flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index 793551d..4983250 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -4,6 +4,7 @@ #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/export.h> +#include <linux/memblock.h> #include <linux/of_address.h> #include <linux/slab.h> #include <linux/types.h> @@ -21,6 +22,20 @@ #include <asm/xen/hypercall.h> #include <asm/xen/interface.h> +unsigned long xen_get_swiotlb_free_pages(unsigned int order) +{ + struct memblock_region *reg; + gfp_t flags = __GFP_NOWARN; + + for_each_memblock(memory, reg) { + if (reg->base < (phys_addr_t)0xffffffff) { + flags |= __GFP_DMA; + break; + } + } + return __get_free_pages(flags, order); +} + enum dma_cache_op { DMA_UNMAP, DMA_MAP, diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4269dba..7796af4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -31,6 +31,7 @@ config ARM64 select GENERIC_EARLY_IOREMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW + select GENERIC_IRQ_SHOW_LEVEL select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi index c138b95..351c95b 100644 --- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi @@ -21,6 +21,20 @@ clock-output-names = "juno_mb:clk25mhz"; }; + v2m_refclk1mhz: refclk1mhz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <1000000>; + clock-output-names = "juno_mb:refclk1mhz"; + }; + + v2m_refclk32khz: refclk32khz { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + clock-output-names = "juno_mb:refclk32khz"; + }; + motherboard { compatible = "arm,vexpress,v2p-p1", "simple-bus"; #address-cells = <2>; /* SMB chipselect number and offset */ @@ -66,6 +80,15 @@ #size-cells = <1>; ranges = <0 3 0 0x200000>; + v2m_sysctl: sysctl@020000 { + compatible = "arm,sp810", "arm,primecell"; + reg = <0x020000 0x1000>; + clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&mb_clk24mhz>; + clock-names = "refclk", "timclk", "apb_pclk"; + #clock-cells = <1>; + clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3"; + }; + mmci@050000 { compatible = "arm,pl180", "arm,primecell"; reg = <0x050000 0x1000>; @@ -106,16 +129,16 @@ compatible = "arm,sp804", "arm,primecell"; reg = <0x110000 0x10000>; interrupts = <9>; - clocks = <&mb_clk24mhz>, <&soc_smc50mhz>; - clock-names = "timclken1", "apb_pclk"; + clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&mb_clk24mhz>; + clock-names = "timclken1", "timclken2", "apb_pclk"; }; v2m_timer23: timer@120000 { compatible = "arm,sp804", "arm,primecell"; reg = <0x120000 0x10000>; interrupts = <9>; - clocks = <&mb_clk24mhz>, <&soc_smc50mhz>; - clock-names = "timclken1", "apb_pclk"; + clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&mb_clk24mhz>; + clock-names = "timclken1", "timclken2", "apb_pclk"; }; rtc@170000 { diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c index 9499199..6a37c3c 100644 --- a/arch/arm64/crypto/crc32-arm64.c 
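The xen_get_swiotlb_free_pages() helper added above walks the memblock regions and requests __GFP_DMA pages whenever some RAM lies below 4 GiB, so the swiotlb bounce buffer stays addressable by 32-bit DMA masters. A stand-alone sketch of that decision, using a made-up region table:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct region {
        uint64_t base;
        uint64_t size;
};

/* True if any memory region starts below the 4 GiB boundary, mirroring the
 * reg->base < 0xffffffff test in the patch. */
static bool need_dma_zone(const struct region *regs, int n)
{
        for (int i = 0; i < n; i++)
                if (regs[i].base < 0xffffffffULL)
                        return true;
        return false;
}

int main(void)
{
        const struct region mixed[] = {
                { 0x80000000ULL,  0x40000000ULL },   /* below 4 GiB */
                { 0x880000000ULL, 0x80000000ULL },   /* above 4 GiB */
        };
        const struct region high_only[] = {
                { 0x880000000ULL, 0x80000000ULL },   /* above 4 GiB */
        };

        printf("mixed layout  -> __GFP_DMA: %d\n", need_dma_zone(mixed, 2));
        printf("high-only RAM -> __GFP_DMA: %d\n", need_dma_zone(high_only, 1));
        return 0;
}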
+++ b/arch/arm64/crypto/crc32-arm64.c @@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + put_unaligned_le32(ctx->crc, out); + return 0; +} + +static int chksumc_final(struct shash_desc *desc, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + put_unaligned_le32(~ctx->crc, out); return 0; } static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out) { - put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out); + put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out); return 0; } @@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm) { struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + mctx->key = 0; + return 0; +} + +static int crc32c_cra_init(struct crypto_tfm *tfm) +{ + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + mctx->key = ~0; return 0; } @@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = { .setkey = chksum_setkey, .init = chksum_init, .update = chksumc_update, - .final = chksum_final, + .final = chksumc_final, .finup = chksumc_finup, .digest = chksumc_digest, .descsize = sizeof(struct chksum_desc_ctx), @@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = { .cra_alignmask = 0, .cra_ctxsize = sizeof(struct chksum_ctx), .cra_module = THIS_MODULE, - .cra_init = crc32_cra_init, + .cra_init = crc32c_cra_init, } }; diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index 114e7cc..aefda98 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -74,6 +74,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, static int sha1_ce_final(struct shash_desc *desc, u8 *out) { + struct sha1_ce_state *sctx = shash_desc_ctx(desc); + + sctx->finalize = 0; kernel_neon_begin_partial(16); sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); kernel_neon_end(); diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 1340e44c..7cd5875 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -75,6 +75,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, static int sha256_ce_final(struct shash_desc *desc, u8 *out) { + struct sha256_ce_state *sctx = shash_desc_ctx(desc); + + sctx->finalize = 0; kernel_neon_begin_partial(28); sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); kernel_neon_end(); diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index a5abb00..71f19c4 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -65,6 +65,14 @@ do { \ do { \ compiletime_assert_atomic_type(*p); \ switch (sizeof(*p)) { \ + case 1: \ + asm volatile ("stlrb %w1, %0" \ + : "=Q" (*p) : "r" (v) : "memory"); \ + break; \ + case 2: \ + asm volatile ("stlrh %w1, %0" \ + : "=Q" (*p) : "r" (v) : "memory"); \ + break; \ case 4: \ asm volatile ("stlr %w1, %0" \ : "=Q" (*p) : "r" (v) : "memory"); \ @@ -81,6 +89,14 @@ do { \ typeof(*p) ___p1; \ compiletime_assert_atomic_type(*p); \ switch (sizeof(*p)) { \ + case 1: \ + asm volatile ("ldarb %w0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + break; \ + case 2: \ + asm volatile ("ldarh %w0, %1" \ + : "=r" (___p1) : "Q" (*p) : "memory"); \ + break; \ case 4: \ asm volatile ("ldar %w0, %1" \ : "=r" (___p1) : "Q" (*p) : "memory"); \ diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 21033bb..28f8365 100644 --- a/arch/arm64/kernel/alternative.c +++ 
b/arch/arm64/kernel/alternative.c @@ -24,7 +24,6 @@ #include <asm/cacheflush.h> #include <asm/alternative.h> #include <asm/cpufeature.h> -#include <asm/insn.h> #include <linux/stop_machine.h> extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; @@ -34,48 +33,6 @@ struct alt_region { struct alt_instr *end; }; -/* - * Decode the imm field of a b/bl instruction, and return the byte - * offset as a signed value (so it can be used when computing a new - * branch target). - */ -static s32 get_branch_offset(u32 insn) -{ - s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); - - /* sign-extend the immediate before turning it into a byte offset */ - return (imm << 6) >> 4; -} - -static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr) -{ - u32 insn; - - aarch64_insn_read(altinsnptr, &insn); - - /* Stop the world on instructions we don't support... */ - BUG_ON(aarch64_insn_is_cbz(insn)); - BUG_ON(aarch64_insn_is_cbnz(insn)); - BUG_ON(aarch64_insn_is_bcond(insn)); - /* ... and there is probably more. */ - - if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) { - enum aarch64_insn_branch_type type; - unsigned long target; - - if (aarch64_insn_is_b(insn)) - type = AARCH64_INSN_BRANCH_NOLINK; - else - type = AARCH64_INSN_BRANCH_LINK; - - target = (unsigned long)altinsnptr + get_branch_offset(insn); - insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr, - target, type); - } - - return insn; -} - static int __apply_alternatives(void *alt_region) { struct alt_instr *alt; @@ -83,9 +40,6 @@ static int __apply_alternatives(void *alt_region) u8 *origptr, *replptr; for (alt = region->begin; alt < region->end; alt++) { - u32 insn; - int i; - if (!cpus_have_cap(alt->cpufeature)) continue; @@ -95,12 +49,7 @@ static int __apply_alternatives(void *alt_region) origptr = (u8 *)&alt->orig_offset + alt->orig_offset; replptr = (u8 *)&alt->alt_offset + alt->alt_offset; - - for (i = 0; i < alt->alt_len; i += sizeof(insn)) { - insn = get_alt_insn(origptr + i, replptr + i); - aarch64_insn_write(origptr + i, insn); - } - + memcpy(origptr, replptr, alt->alt_len); flush_icache_range((uintptr_t)origptr, (uintptr_t)(origptr + alt->alt_len)); } diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 195991d..cce18c8 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1310,11 +1310,16 @@ static const struct of_device_id armpmu_of_device_ids[] = { static int armpmu_device_probe(struct platform_device *pdev) { - int i, *irqs; + int i, irq, *irqs; if (!cpu_pmu) return -ENODEV; + /* Don't bother with PPIs; they're already affine */ + irq = platform_get_irq(pdev, 0); + if (irq >= 0 && irq_is_percpu(irq)) + return 0; + irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); if (!irqs) return -ENOMEM; @@ -1327,7 +1332,7 @@ static int armpmu_device_probe(struct platform_device *pdev) i); if (!dn) { pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", - of_node_full_name(dn), i); + of_node_full_name(pdev->dev.of_node), i); break; } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index ef7d112..b0bd4e5 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) *ret_page = phys_to_page(phys); ptr = (void *)val; - if (flags & __GFP_ZERO) - memset(ptr, 0, size); + memset(ptr, 0, size); } return ptr; @@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, struct page 
*page; void *addr; - size = PAGE_ALIGN(size); page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, get_order(size)); if (!page) @@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, *dma_handle = phys_to_dma(dev, page_to_phys(page)); addr = page_address(page); - if (flags & __GFP_ZERO) - memset(addr, 0, size); + memset(addr, 0, size); return addr; } else { return swiotlb_alloc_coherent(dev, size, dma_handle, flags); @@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size, { void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); + size = PAGE_ALIGN(size); + if (!is_device_dma_coherent(dev)) { if (__free_from_pool(vaddr, size)) return; diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 74c2567..f3d6221 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -328,10 +328,12 @@ static int ptdump_init(void) for (j = 0; j < pg_level[i].num; j++) pg_level[i].mask |= pg_level[i].bits[j].mask; +#ifdef CONFIG_SPARSEMEM_VMEMMAP address_markers[VMEMMAP_START_NR].start_address = (unsigned long)virt_to_page(PAGE_OFFSET); address_markers[VMEMMAP_END_NR].start_address = (unsigned long)virt_to_page(high_memory); +#endif pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index edba042..dc6a484 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -487,7 +487,7 @@ emit_cond_jmp: return -EINVAL; } - imm64 = (u64)insn1.imm << 32 | imm; + imm64 = (u64)insn1.imm << 32 | (u32)imm; emit_a64_mov_i64(dst, imm64, ctx); return 1; diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index ce7aea3..c18ddc7 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -45,7 +45,7 @@ static volatile unsigned long flushcache_cpumask = 0; /* * For flush_tlb_others() */ -static volatile cpumask_t flush_cpumask; +static cpumask_t flush_cpumask; static struct mm_struct *flush_mm; static struct vm_area_struct *flush_vma; static volatile unsigned long flush_va; @@ -415,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, */ send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); - while (!cpumask_empty((cpumask_t*)&flush_cpumask)) { + while (!cpumask_empty(&flush_cpumask)) { /* nothing. 
lockup detection does not belong here */ mb(); } @@ -468,7 +468,7 @@ void smp_invalidate_interrupt(void) __flush_tlb_page(va); } } - cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask); + cpumask_clear_cpu(cpu_id, &flush_cpumask); } /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 5200f64..ae2dd59 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -277,7 +277,7 @@ LDFLAGS += -m $(ld-emul) ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ - sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/") + sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g') ifdef CONFIG_64BIT CHECKFLAGS += -m64 endif diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index a594d8e..f19e890 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -304,7 +304,7 @@ do { \ \ current->thread.abi = &mips_abi; \ \ - current->thread.fpu.fcr31 = current_cpu_data.fpu_csr31; \ + current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31; \ } while (0) #endif /* CONFIG_32BIT */ @@ -366,7 +366,7 @@ do { \ else \ current->thread.abi = &mips_abi; \ \ - current->thread.fpu.fcr31 = current_cpu_data.fpu_csr31; \ + current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31; \ \ p = personality(current->personality); \ if (p != PER_LINUX32 && p != PER_LINUX) \ diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index bb02fac..2b25d1b 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -45,7 +45,7 @@ extern int __cpu_logical_map[NR_CPUS]; #define SMP_DUMP 0x8 #define SMP_ASK_C0COUNT 0x10 -extern volatile cpumask_t cpu_callin_map; +extern cpumask_t cpu_callin_map; /* Mask of CPUs which are currently definitely operating coherently */ extern cpumask_t cpu_coherent_mask; diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index be4899f..4a4d9e0 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, /* Lets see if this is an O32 ELF */ if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) { - /* FR = 1 for N32 */ - if (ehdr32->e_flags & EF_MIPS_ABI2) - state->overall_fp_mode = FP_FR1; - else - /* Set a good default FPU mode for O32 */ - state->overall_fp_mode = cpu_has_mips_r6 ? - FP_FRE : FP_FR0; - if (ehdr32->e_flags & EF_MIPS_FP64) { /* * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it @@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, (char *)&abiflags, sizeof(abiflags)); } else { - /* FR=1 is really the only option for 64-bit */ - state->overall_fp_mode = FP_FR1; - if (phdr64->p_type != PT_MIPS_ABIFLAGS) return 0; if (phdr64->p_filesz < sizeof(abiflags)) @@ -137,6 +126,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, struct elf32_hdr *ehdr = _ehdr; struct mode_req prog_req, interp_req; int fp_abi, interp_fp_abi, abi0, abi1, max_abi; + bool is_mips64; if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) return 0; @@ -152,10 +142,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, abi0 = abi1 = fp_abi; } - /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */ - max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) && - (!(ehdr->e_flags & EF_MIPS_ABI2))) ? 
- MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT; + is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) || + (ehdr->e_flags & EF_MIPS_ABI2); + + if (is_mips64) { + /* MIPS64 code always uses FR=1, thus the default is easy */ + state->overall_fp_mode = FP_FR1; + + /* Disallow access to the various FPXX & FP64 ABIs */ + max_abi = MIPS_ABI_FP_SOFT; + } else { + /* Default to a mode capable of running code expecting FR=0 */ + state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0; + + /* Allow all ABIs we know about */ + max_abi = MIPS_ABI_FP_64A; + } if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index d544e77..e933a30 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -176,7 +176,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) __get_user(value, data + 64); fcr31 = child->thread.fpu.fcr31; - mask = current_cpu_data.fpu_msk31; + mask = boot_cpu_data.fpu_msk31; child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); /* FIR may not be written. */ diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 7e011f9..4251d39 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -92,7 +92,7 @@ static void __init cps_smp_setup(void) #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) - cpu_set(0, mt_fpu_cpumask); + cpumask_set_cpu(0, &mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 193ace7..faa46eb 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -43,7 +43,7 @@ #include <asm/time.h> #include <asm/setup.h> -volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ +cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ EXPORT_SYMBOL(__cpu_number_map); @@ -218,8 +218,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) /* * Trust is futile. We should really have timeouts ... 
*/ - while (!cpumask_test_cpu(cpu, &cpu_callin_map)) + while (!cpumask_test_cpu(cpu, &cpu_callin_map)) { udelay(100); + schedule(); + } synchronise_count_master(cpu); return 0; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index ba32e48..d2d1c19 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -269,7 +269,6 @@ static void __show_regs(const struct pt_regs *regs) */ printk("epc : %0*lx %pS\n", field, regs->cp0_epc, (void *) regs->cp0_epc); - printk(" %s\n", print_tainted()); printk("ra : %0*lx %pS\n", field, regs->regs[31], (void *) regs->regs[31]); diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 6230f37..4b50c57 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -2389,7 +2389,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, { unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; enum emulation_result er = EMULATE_DONE; - unsigned long curr_pc; if (run->mmio.len > sizeof(*gpr)) { kvm_err("Bad MMIO length: %d", run->mmio.len); @@ -2397,11 +2396,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, goto done; } - /* - * Update PC and hold onto current PC in case there is - * an error and we want to rollback the PC - */ - curr_pc = vcpu->arch.pc; er = update_pc(vcpu, vcpu->arch.pending_load_cause); if (er == EMULATE_FAIL) return er; diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index d31c537..22b9b2c 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -889,7 +889,7 @@ static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; case FPCREG_RID: - value = current_cpu_data.fpu_id; + value = boot_cpu_data.fpu_id; break; default: @@ -921,7 +921,7 @@ static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx, (void *)xcp->cp0_epc, MIPSInst_RT(ir), value); /* Preserve read-only bits. */ - mask = current_cpu_data.fpu_msk31; + mask = boot_cpu_data.fpu_msk31; fcr31 = (value & ~mask) | (fcr31 & mask); break; diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index a27a088..08318ec 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -495,7 +495,7 @@ static void r4k_tlb_configure(void) if (cpu_has_rixi) { /* - * Enable the no read, no exec bits, and enable large virtual + * Enable the no read, no exec bits, and enable large physical * address. */ #ifdef CONFIG_64BIT diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c index 0134db2..5a2a821 100644 --- a/arch/mips/sgi-ip32/ip32-platform.c +++ b/arch/mips/sgi-ip32/ip32-platform.c @@ -130,9 +130,9 @@ struct platform_device ip32_rtc_device = { .resource = ip32_rtc_resources, }; -+static int __init sgio2_rtc_devinit(void) +static __init int sgio2_rtc_devinit(void) { return platform_device_register(&ip32_rtc_device); } -device_initcall(sgio2_cmos_devinit); +device_initcall(sgio2_rtc_devinit); diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 3391d06..78c9fd3 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration... */ #define ELF_HWCAP 0 +#define STACK_RND_MASK (is_32bit_task() ? 
\ + 0x7ff >> (PAGE_SHIFT - 12) : \ + 0x3ffff >> (PAGE_SHIFT - 12)) + struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *); #define arch_randomize_brk arch_randomize_brk diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 8a488c2..809905a 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -181,9 +181,12 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) return 1; } +/* + * Copy architecture-specific thread state + */ int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p) + unsigned long kthread_arg, struct task_struct *p) { struct pt_regs *cregs = &(p->thread.regs); void *stack = task_stack_page(p); @@ -195,11 +198,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp, extern void * const child_return; if (unlikely(p->flags & PF_KTHREAD)) { + /* kernel thread */ memset(cregs, 0, sizeof(struct pt_regs)); if (!usp) /* idle thread */ return 0; - - /* kernel thread */ /* Must exit via ret_from_kernel_thread in order * to call schedule_tail() */ @@ -215,7 +217,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, #else cregs->gr[26] = usp; #endif - cregs->gr[25] = arg; + cregs->gr[25] = kthread_arg; } else { /* user thread */ /* usp must be word aligned. This also prevents users from diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index e1ffea2..5aba01a 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void) if (stack_base > STACK_SIZE_MAX) stack_base = STACK_SIZE_MAX; + /* Add space for stack randomization. */ + stack_base += (STACK_RND_MASK << PAGE_SHIFT); + return PAGE_ALIGN(STACK_TOP - stack_base); } diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h index 5047659..5d836b7 100644 --- a/arch/powerpc/include/uapi/asm/tm.h +++ b/arch/powerpc/include/uapi/asm/tm.h @@ -11,7 +11,7 @@ #define TM_CAUSE_RESCHED 0xde #define TM_CAUSE_TLBI 0xdc #define TM_CAUSE_FAC_UNAV 0xda -#define TM_CAUSE_SYSCALL 0xd8 +#define TM_CAUSE_SYSCALL 0xd8 /* future use */ #define TM_CAUSE_MISC 0xd6 /* future use */ #define TM_CAUSE_SIGNAL 0xd4 #define TM_CAUSE_ALIGNMENT 0xd2 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 44b480e..9ee61d1 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -749,21 +749,24 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat eeh_unfreeze_pe(pe, false); eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev); + eeh_pe_state_clear(pe, EEH_PE_ISOLATED); break; case pcie_hot_reset: + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); eeh_ops->reset(pe, EEH_RESET_HOT); break; case pcie_warm_reset: + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); break; default: - eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); + eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED); return -EINVAL; }; @@ -1058,6 +1061,9 @@ void eeh_add_device_early(struct pci_dn *pdn) if (!edev || !eeh_enabled()) return; + if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) + return; + /* 
USB Bus children of PCI devices will not have BUID's */ phb = edev->phb; if (NULL == phb || @@ -1112,6 +1118,9 @@ void eeh_add_device_late(struct pci_dev *dev) return; } + if (eeh_has_flag(EEH_PROBE_MODE_DEV)) + eeh_ops->probe(pdn, NULL); + /* * The EEH cache might not be removed correctly because of * unbalanced kref to the device during unplug time, which diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 8ca9434..afbc200 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -34,7 +34,6 @@ #include <asm/ftrace.h> #include <asm/hw_irq.h> #include <asm/context_tracking.h> -#include <asm/tm.h> /* * System calls. @@ -146,24 +145,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) andi. r11,r10,_TIF_SYSCALL_DOTRACE bne syscall_dotrace .Lsyscall_dotrace_cont: -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM -BEGIN_FTR_SECTION - b 1f -END_FTR_SECTION_IFCLR(CPU_FTR_TM) - extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ - beq+ 1f - - /* Doom the transaction and don't perform the syscall: */ - mfmsr r11 - li r12, 1 - rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG - mtmsrd r11, 0 - li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT) - TABORT(R11) - - b .Lsyscall_exit -1: -#endif cmpldi 0,r0,NR_syscalls bge- syscall_enosys diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index eeaa0d5..ccde8f0 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -501,9 +501,11 @@ BEGIN_FTR_SECTION CHECK_HMI_INTERRUPT END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) ld r1,PACAR1(r13) + ld r6,_CCR(r1) ld r4,_MSR(r1) ld r5,_NIP(r1) addi r1,r1,INT_FRAME_SIZE + mtcr r6 mtspr SPRN_SRR1,r4 mtspr SPRN_SRR0,r5 rfid diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index 8f3e6cc..c6ca7db 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -12,6 +12,7 @@ #include <linux/err.h> #include <linux/gfp.h> #include <linux/anon_inodes.h> +#include <linux/spinlock.h> #include <asm/uaccess.h> #include <asm/kvm_book3s.h> @@ -20,7 +21,6 @@ #include <asm/xics.h> #include <asm/debug.h> #include <asm/time.h> -#include <asm/spinlock.h> #include <linux/debugfs.h> #include <linux/seq_file.h> diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 920c252..f8bc950 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2693,7 +2693,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, hose->last_busno = 0xff; } hose->private_data = phb; - hose->controller_ops = pnv_pci_controller_ops; phb->hub_id = hub_id; phb->opal_id = phb_id; phb->type = ioda_type; @@ -2812,6 +2811,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook; pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment; pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus; + hose->controller_ops = pnv_pci_controller_ops; #ifdef CONFIG_PCI_IOV ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index b4b1109..019d34a 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -412,6 +412,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) if (rc) return -EINVAL; + rc = dlpar_acquire_drc(drc_index); + if (rc) + return -EINVAL; + parent 
= of_find_node_by_path("/cpus"); if (!parent) return -ENODEV; @@ -422,12 +426,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) of_node_put(parent); - rc = dlpar_acquire_drc(drc_index); - if (rc) { - dlpar_free_cc_nodes(dn); - return -EINVAL; - } - rc = dlpar_attach_node(dn); if (rc) { dlpar_release_drc(drc_index); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8e58c61..b06dc38 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -115,7 +115,7 @@ config S390 select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z9_109_FEATURES + select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_DEBUG_KMEMLEAK diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h index ba3b2ae..d9c4c31 100644 --- a/arch/s390/crypto/crypt_s390.h +++ b/arch/s390/crypto/crypt_s390.h @@ -3,9 +3,10 @@ * * Support for s390 cryptographic instructions. * - * Copyright IBM Corp. 2003, 2007 + * Copyright IBM Corp. 2003, 2015 * Author(s): Thomas Spatzier * Jan Glauber (jan.glauber@de.ibm.com) + * Harald Freudenberger (freude@de.ibm.com) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -28,15 +29,17 @@ #define CRYPT_S390_MSA 0x1 #define CRYPT_S390_MSA3 0x2 #define CRYPT_S390_MSA4 0x4 +#define CRYPT_S390_MSA5 0x8 /* s390 cryptographic operations */ enum crypt_s390_operations { - CRYPT_S390_KM = 0x0100, - CRYPT_S390_KMC = 0x0200, - CRYPT_S390_KIMD = 0x0300, - CRYPT_S390_KLMD = 0x0400, - CRYPT_S390_KMAC = 0x0500, - CRYPT_S390_KMCTR = 0x0600 + CRYPT_S390_KM = 0x0100, + CRYPT_S390_KMC = 0x0200, + CRYPT_S390_KIMD = 0x0300, + CRYPT_S390_KLMD = 0x0400, + CRYPT_S390_KMAC = 0x0500, + CRYPT_S390_KMCTR = 0x0600, + CRYPT_S390_PPNO = 0x0700 }; /* @@ -138,6 +141,16 @@ enum crypt_s390_kmac_func { KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 }; +/* + * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER + * OPERATION) instruction + */ +enum crypt_s390_ppno_func { + PPNO_QUERY = CRYPT_S390_PPNO | 0, + PPNO_SHA512_DRNG_GEN = CRYPT_S390_PPNO | 3, + PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83 +}; + /** * crypt_s390_km: * @func: the function code passed to KM; see crypt_s390_km_func @@ -162,11 +175,11 @@ static inline int crypt_s390_km(long func, void *param, int ret; asm volatile( - "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ - "1: brc 1,0b \n" /* handle partial completion */ + "0: .insn rre,0xb92e0000,%3,%1\n" /* KM opcode */ + "1: brc 1,0b\n" /* handle partial completion */ " la %0,0\n" "2:\n" - EX_TABLE(0b,2b) EX_TABLE(1b,2b) + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); if (ret < 0) @@ -198,11 +211,11 @@ static inline int crypt_s390_kmc(long func, void *param, int ret; asm volatile( - "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ - "1: brc 1,0b \n" /* handle partial completion */ + "0: .insn rre,0xb92f0000,%3,%1\n" /* KMC opcode */ + "1: brc 1,0b\n" /* handle partial completion */ " la %0,0\n" "2:\n" - EX_TABLE(0b,2b) EX_TABLE(1b,2b) + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); if (ret < 0) @@ -233,11 +246,11 @@ static inline int crypt_s390_kimd(long func, void *param, int ret; asm volatile( - "0: .insn 
rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ - "1: brc 1,0b \n" /* handle partial completion */ + "0: .insn rre,0xb93e0000,%1,%1\n" /* KIMD opcode */ + "1: brc 1,0b\n" /* handle partial completion */ " la %0,0\n" "2:\n" - EX_TABLE(0b,2b) EX_TABLE(1b,2b) + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "=d" (ret), "+a" (__src), "+d" (__src_len) : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); if (ret < 0) @@ -267,11 +280,11 @@ static inline int crypt_s390_klmd(long func, void *param, int ret; asm volatile( - "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ - "1: brc 1,0b \n" /* handle partial completion */ + "0: .insn rre,0xb93f0000,%1,%1\n" /* KLMD opcode */ + "1: brc 1,0b\n" /* handle partial completion */ " la %0,0\n" "2:\n" - EX_TABLE(0b,2b) EX_TABLE(1b,2b) + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "=d" (ret), "+a" (__src), "+d" (__src_len) : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); if (ret < 0) @@ -302,11 +315,11 @@ static inline int crypt_s390_kmac(long func, void *param, int ret; asm volatile( - "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ - "1: brc 1,0b \n" /* handle partial completion */ + "0: .insn rre,0xb91e0000,%1,%1\n" /* KLAC opcode */ + "1: brc 1,0b\n" /* handle partial completion */ " la %0,0\n" "2:\n" - EX_TABLE(0b,2b) EX_TABLE(1b,2b) + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "=d" (ret), "+a" (__src), "+d" (__src_len) : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); if (ret < 0) @@ -340,11 +353,11 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest, int ret = -1; asm volatile( - "0: .insn rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */ - "1: brc 1,0b \n" /* handle partial completion */ + "0: .insn rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */ + "1: brc 1,0b\n" /* handle partial completion */ " la %0,0\n" "2:\n" - EX_TABLE(0b,2b) EX_TABLE(1b,2b) + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest), "+a" (__ctr) : "d" (__func), "a" (__param) : "cc", "memory"); @@ -354,6 +367,47 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest, } /** + * crypt_s390_ppno: + * @func: the function code passed to PPNO; see crypt_s390_ppno_func + * @param: address of parameter block; see POP for details on each func + * @dest: address of destination memory area + * @dest_len: size of destination memory area in bytes + * @seed: address of seed data + * @seed_len: size of seed data in bytes + * + * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION) + * operation of the CPU. 
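The PPNO function codes introduced in this header combine the CRYPT_S390_PPNO operation class with a per-function low byte, where 0x83 marks the seed variant of the SHA-512 DRNG (function 3 plus the 0x80 modifier bit). The sketch below assumes the header's usual 0xFF00/0x00FF split between CRYPT_S390_OP_MASK and CRYPT_S390_FUNC_MASK; those values are not shown in this hunk, only used by crypt_s390_ppno() and crypt_s390_func_available().

#include <stdio.h>

#define CRYPT_S390_PPNO        0x0700
#define CRYPT_S390_OP_MASK     0xFF00   /* assumed value, high byte  */
#define CRYPT_S390_FUNC_MASK   0x00FF   /* assumed value, low byte   */

enum {
        PPNO_QUERY            = CRYPT_S390_PPNO | 0,
        PPNO_SHA512_DRNG_GEN  = CRYPT_S390_PPNO | 3,
        PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83, /* 0x80 flags the seed variant */
};

int main(void)
{
        int func = PPNO_SHA512_DRNG_SEED;

        printf("operation class: 0x%04x\n",
               (unsigned int)(func & CRYPT_S390_OP_MASK));   /* 0x0700 */
        printf("function code:   0x%02x\n",
               (unsigned int)(func & CRYPT_S390_FUNC_MASK)); /* 0x83   */
        return 0;
}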
+ * + * Returns -1 for failure, 0 for the query func, number of random + * bytes stored in dest buffer for generate function + */ +static inline int crypt_s390_ppno(long func, void *param, + u8 *dest, long dest_len, + const u8 *seed, long seed_len) +{ + register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; + register void *__param asm("1") = param; /* param block (240 bytes) */ + register u8 *__dest asm("2") = dest; /* buf for recv random bytes */ + register long __dest_len asm("3") = dest_len; /* requested random bytes */ + register const u8 *__seed asm("4") = seed; /* buf with seed data */ + register long __seed_len asm("5") = seed_len; /* bytes in seed buf */ + int ret = -1; + + asm volatile ( + "0: .insn rre,0xb93c0000,%1,%5\n" /* PPNO opcode */ + "1: brc 1,0b\n" /* handle partial completion */ + " la %0,0\n" + "2:\n" + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) + : "+d" (ret), "+a"(__dest), "+d"(__dest_len) + : "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len) + : "cc", "memory"); + if (ret < 0) + return ret; + return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0; +} + +/** * crypt_s390_func_available: * @func: the function code of the specific function; 0 if op in general * @@ -373,6 +427,9 @@ static inline int crypt_s390_func_available(int func, return 0; if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77)) return 0; + if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57)) + return 0; + switch (func & CRYPT_S390_OP_MASK) { case CRYPT_S390_KM: ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); @@ -390,8 +447,12 @@ static inline int crypt_s390_func_available(int func, ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); break; case CRYPT_S390_KMCTR: - ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0, - NULL); + ret = crypt_s390_kmctr(KMCTR_QUERY, &status, + NULL, NULL, 0, NULL); + break; + case CRYPT_S390_PPNO: + ret = crypt_s390_ppno(PPNO_QUERY, &status, + NULL, 0, NULL, 0); break; default: return 0; @@ -419,15 +480,14 @@ static inline int crypt_s390_pcc(long func, void *param) int ret = -1; asm volatile( - "0: .insn rre,0xb92c0000,0,0 \n" /* PCC opcode */ - "1: brc 1,0b \n" /* handle partial completion */ + "0: .insn rre,0xb92c0000,0,0\n" /* PCC opcode */ + "1: brc 1,0b\n" /* handle partial completion */ " la %0,0\n" "2:\n" - EX_TABLE(0b,2b) EX_TABLE(1b,2b) + EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) : "+d" (ret) : "d" (__func), "a" (__param) : "cc", "memory"); return ret; } - #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */ diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index 94a35a4..1f374b3 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -1,106 +1,529 @@ /* - * Copyright IBM Corp. 2006, 2007 + * Copyright IBM Corp. 
2006, 2015 * Author(s): Jan Glauber <jan.glauber@de.ibm.com> + * Harald Freudenberger <freude@de.ibm.com> * Driver for the s390 pseudo random number generator */ + +#define KMSG_COMPONENT "prng" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include <linux/fs.h> +#include <linux/fips.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/device.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/mutex.h> #include <linux/random.h> #include <linux/slab.h> #include <asm/debug.h> #include <asm/uaccess.h> +#include <asm/timex.h> #include "crypt_s390.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>"); +MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 PRNG interface"); -static int prng_chunk_size = 256; -module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH); + +#define PRNG_MODE_AUTO 0 +#define PRNG_MODE_TDES 1 +#define PRNG_MODE_SHA512 2 + +static unsigned int prng_mode = PRNG_MODE_AUTO; +module_param_named(mode, prng_mode, int, 0); +MODULE_PARM_DESC(prng_mode, "PRNG mode: 0 - auto, 1 - TDES, 2 - SHA512"); + + +#define PRNG_CHUNKSIZE_TDES_MIN 8 +#define PRNG_CHUNKSIZE_TDES_MAX (64*1024) +#define PRNG_CHUNKSIZE_SHA512_MIN 64 +#define PRNG_CHUNKSIZE_SHA512_MAX (64*1024) + +static unsigned int prng_chunk_size = 256; +module_param_named(chunksize, prng_chunk_size, int, 0); MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes"); -static int prng_entropy_limit = 4096; -module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR); -MODULE_PARM_DESC(prng_entropy_limit, - "PRNG add entropy after that much bytes were produced"); + +#define PRNG_RESEED_LIMIT_TDES 4096 +#define PRNG_RESEED_LIMIT_TDES_LOWER 4096 +#define PRNG_RESEED_LIMIT_SHA512 100000 +#define PRNG_RESEED_LIMIT_SHA512_LOWER 10000 + +static unsigned int prng_reseed_limit; +module_param_named(reseed_limit, prng_reseed_limit, int, 0); +MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit"); + /* * Any one who considers arithmetical methods of producing random digits is, * of course, in a state of sin. 
-- John von Neumann */ -struct s390_prng_data { - unsigned long count; /* how many bytes were produced */ - char *buf; +static int prng_errorflag; + +#define PRNG_GEN_ENTROPY_FAILED 1 +#define PRNG_SELFTEST_FAILED 2 +#define PRNG_INSTANTIATE_FAILED 3 +#define PRNG_SEED_FAILED 4 +#define PRNG_RESEED_FAILED 5 +#define PRNG_GEN_FAILED 6 + +struct prng_ws_s { + u8 parm_block[32]; + u32 reseed_counter; + u64 byte_counter; }; -static struct s390_prng_data *p; +struct ppno_ws_s { + u32 res; + u32 reseed_counter; + u64 stream_bytes; + u8 V[112]; + u8 C[112]; +}; -/* copied from libica, use a non-zero initial parameter block */ -static unsigned char parm_block[32] = { -0x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4, -0x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0, +struct prng_data_s { + struct mutex mutex; + union { + struct prng_ws_s prngws; + struct ppno_ws_s ppnows; + }; + u8 *buf; + u32 rest; + u8 *prev; }; -static int prng_open(struct inode *inode, struct file *file) +static struct prng_data_s *prng_data; + +/* initial parameter block for tdes mode, copied from libica */ +static const u8 initial_parm_block[32] __initconst = { + 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52, + 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4, + 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF, + 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0 }; + + +/*** helper functions ***/ + +static int generate_entropy(u8 *ebuf, size_t nbytes) { - return nonseekable_open(inode, file); + int n, ret = 0; + u8 *pg, *h, hash[32]; + + pg = (u8 *) __get_free_page(GFP_KERNEL); + if (!pg) { + prng_errorflag = PRNG_GEN_ENTROPY_FAILED; + return -ENOMEM; + } + + while (nbytes) { + /* fill page with urandom bytes */ + get_random_bytes(pg, PAGE_SIZE); + /* exor page with stckf values */ + for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) { + u64 *p = ((u64 *)pg) + n; + *p ^= get_tod_clock_fast(); + } + n = (nbytes < sizeof(hash)) ? 
nbytes : sizeof(hash); + if (n < sizeof(hash)) + h = hash; + else + h = ebuf; + /* generate sha256 from this page */ + if (crypt_s390_kimd(KIMD_SHA_256, h, + pg, PAGE_SIZE) != PAGE_SIZE) { + prng_errorflag = PRNG_GEN_ENTROPY_FAILED; + ret = -EIO; + goto out; + } + if (n < sizeof(hash)) + memcpy(ebuf, hash, n); + ret += n; + ebuf += n; + nbytes -= n; + } + +out: + free_page((unsigned long)pg); + return ret; } -static void prng_add_entropy(void) + +/*** tdes functions ***/ + +static void prng_tdes_add_entropy(void) { __u64 entropy[4]; unsigned int i; int ret; for (i = 0; i < 16; i++) { - ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy, - (char *)entropy, sizeof(entropy)); + ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block, + (char *)entropy, (char *)entropy, + sizeof(entropy)); BUG_ON(ret < 0 || ret != sizeof(entropy)); - memcpy(parm_block, entropy, sizeof(entropy)); + memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy)); } } -static void prng_seed(int nbytes) + +static void prng_tdes_seed(int nbytes) { char buf[16]; int i = 0; - BUG_ON(nbytes > 16); + BUG_ON(nbytes > sizeof(buf)); + get_random_bytes(buf, nbytes); /* Add the entropy */ while (nbytes >= 8) { - *((__u64 *)parm_block) ^= *((__u64 *)(buf+i)); - prng_add_entropy(); + *((__u64 *)prng_data->prngws.parm_block) ^= *((__u64 *)(buf+i)); + prng_tdes_add_entropy(); i += 8; nbytes -= 8; } - prng_add_entropy(); + prng_tdes_add_entropy(); + prng_data->prngws.reseed_counter = 0; +} + + +static int __init prng_tdes_instantiate(void) +{ + int datalen; + + pr_debug("prng runs in TDES mode with " + "chunksize=%d and reseed_limit=%u\n", + prng_chunk_size, prng_reseed_limit); + + /* memory allocation, prng_data struct init, mutex init */ + datalen = sizeof(struct prng_data_s) + prng_chunk_size; + prng_data = kzalloc(datalen, GFP_KERNEL); + if (!prng_data) { + prng_errorflag = PRNG_INSTANTIATE_FAILED; + return -ENOMEM; + } + mutex_init(&prng_data->mutex); + prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s); + memcpy(prng_data->prngws.parm_block, initial_parm_block, 32); + + /* initialize the PRNG, add 128 bits of entropy */ + prng_tdes_seed(16); + + return 0; } -static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, - loff_t *ppos) + +static void prng_tdes_deinstantiate(void) +{ + pr_debug("The prng module stopped " + "after running in triple DES mode\n"); + kzfree(prng_data); +} + + +/*** sha512 functions ***/ + +static int __init prng_sha512_selftest(void) { - int chunk, n; + /* NIST DRBG testvector for Hash Drbg, Sha-512, Count #0 */ + static const u8 seed[] __initconst = { + 0x6b, 0x50, 0xa7, 0xd8, 0xf8, 0xa5, 0x5d, 0x7a, + 0x3d, 0xf8, 0xbb, 0x40, 0xbc, 0xc3, 0xb7, 0x22, + 0xd8, 0x70, 0x8d, 0xe6, 0x7f, 0xda, 0x01, 0x0b, + 0x03, 0xc4, 0xc8, 0x4d, 0x72, 0x09, 0x6f, 0x8c, + 0x3e, 0xc6, 0x49, 0xcc, 0x62, 0x56, 0xd9, 0xfa, + 0x31, 0xdb, 0x7a, 0x29, 0x04, 0xaa, 0xf0, 0x25 }; + static const u8 V0[] __initconst = { + 0x00, 0xad, 0xe3, 0x6f, 0x9a, 0x01, 0xc7, 0x76, + 0x61, 0x34, 0x35, 0xf5, 0x4e, 0x24, 0x74, 0x22, + 0x21, 0x9a, 0x29, 0x89, 0xc7, 0x93, 0x2e, 0x60, + 0x1e, 0xe8, 0x14, 0x24, 0x8d, 0xd5, 0x03, 0xf1, + 0x65, 0x5d, 0x08, 0x22, 0x72, 0xd5, 0xad, 0x95, + 0xe1, 0x23, 0x1e, 0x8a, 0xa7, 0x13, 0xd9, 0x2b, + 0x5e, 0xbc, 0xbb, 0x80, 0xab, 0x8d, 0xe5, 0x79, + 0xab, 0x5b, 0x47, 0x4e, 0xdd, 0xee, 0x6b, 0x03, + 0x8f, 0x0f, 0x5c, 0x5e, 0xa9, 0x1a, 0x83, 0xdd, + 0xd3, 0x88, 0xb2, 0x75, 0x4b, 0xce, 0x83, 0x36, + 0x57, 0x4b, 0xf1, 0x5c, 0xca, 0x7e, 0x09, 0xc0, + 0xd3, 0x89, 0xc6, 0xe0, 
0xda, 0xc4, 0x81, 0x7e, + 0x5b, 0xf9, 0xe1, 0x01, 0xc1, 0x92, 0x05, 0xea, + 0xf5, 0x2f, 0xc6, 0xc6, 0xc7, 0x8f, 0xbc, 0xf4 }; + static const u8 C0[] __initconst = { + 0x00, 0xf4, 0xa3, 0xe5, 0xa0, 0x72, 0x63, 0x95, + 0xc6, 0x4f, 0x48, 0xd0, 0x8b, 0x5b, 0x5f, 0x8e, + 0x6b, 0x96, 0x1f, 0x16, 0xed, 0xbc, 0x66, 0x94, + 0x45, 0x31, 0xd7, 0x47, 0x73, 0x22, 0xa5, 0x86, + 0xce, 0xc0, 0x4c, 0xac, 0x63, 0xb8, 0x39, 0x50, + 0xbf, 0xe6, 0x59, 0x6c, 0x38, 0x58, 0x99, 0x1f, + 0x27, 0xa7, 0x9d, 0x71, 0x2a, 0xb3, 0x7b, 0xf9, + 0xfb, 0x17, 0x86, 0xaa, 0x99, 0x81, 0xaa, 0x43, + 0xe4, 0x37, 0xd3, 0x1e, 0x6e, 0xe5, 0xe6, 0xee, + 0xc2, 0xed, 0x95, 0x4f, 0x53, 0x0e, 0x46, 0x8a, + 0xcc, 0x45, 0xa5, 0xdb, 0x69, 0x0d, 0x81, 0xc9, + 0x32, 0x92, 0xbc, 0x8f, 0x33, 0xe6, 0xf6, 0x09, + 0x7c, 0x8e, 0x05, 0x19, 0x0d, 0xf1, 0xb6, 0xcc, + 0xf3, 0x02, 0x21, 0x90, 0x25, 0xec, 0xed, 0x0e }; + static const u8 random[] __initconst = { + 0x95, 0xb7, 0xf1, 0x7e, 0x98, 0x02, 0xd3, 0x57, + 0x73, 0x92, 0xc6, 0xa9, 0xc0, 0x80, 0x83, 0xb6, + 0x7d, 0xd1, 0x29, 0x22, 0x65, 0xb5, 0xf4, 0x2d, + 0x23, 0x7f, 0x1c, 0x55, 0xbb, 0x9b, 0x10, 0xbf, + 0xcf, 0xd8, 0x2c, 0x77, 0xa3, 0x78, 0xb8, 0x26, + 0x6a, 0x00, 0x99, 0x14, 0x3b, 0x3c, 0x2d, 0x64, + 0x61, 0x1e, 0xee, 0xb6, 0x9a, 0xcd, 0xc0, 0x55, + 0x95, 0x7c, 0x13, 0x9e, 0x8b, 0x19, 0x0c, 0x7a, + 0x06, 0x95, 0x5f, 0x2c, 0x79, 0x7c, 0x27, 0x78, + 0xde, 0x94, 0x03, 0x96, 0xa5, 0x01, 0xf4, 0x0e, + 0x91, 0x39, 0x6a, 0xcf, 0x8d, 0x7e, 0x45, 0xeb, + 0xdb, 0xb5, 0x3b, 0xbf, 0x8c, 0x97, 0x52, 0x30, + 0xd2, 0xf0, 0xff, 0x91, 0x06, 0xc7, 0x61, 0x19, + 0xae, 0x49, 0x8e, 0x7f, 0xbc, 0x03, 0xd9, 0x0f, + 0x8e, 0x4c, 0x51, 0x62, 0x7a, 0xed, 0x5c, 0x8d, + 0x42, 0x63, 0xd5, 0xd2, 0xb9, 0x78, 0x87, 0x3a, + 0x0d, 0xe5, 0x96, 0xee, 0x6d, 0xc7, 0xf7, 0xc2, + 0x9e, 0x37, 0xee, 0xe8, 0xb3, 0x4c, 0x90, 0xdd, + 0x1c, 0xf6, 0xa9, 0xdd, 0xb2, 0x2b, 0x4c, 0xbd, + 0x08, 0x6b, 0x14, 0xb3, 0x5d, 0xe9, 0x3d, 0xa2, + 0xd5, 0xcb, 0x18, 0x06, 0x69, 0x8c, 0xbd, 0x7b, + 0xbb, 0x67, 0xbf, 0xe3, 0xd3, 0x1f, 0xd2, 0xd1, + 0xdb, 0xd2, 0xa1, 0xe0, 0x58, 0xa3, 0xeb, 0x99, + 0xd7, 0xe5, 0x1f, 0x1a, 0x93, 0x8e, 0xed, 0x5e, + 0x1c, 0x1d, 0xe2, 0x3a, 0x6b, 0x43, 0x45, 0xd3, + 0x19, 0x14, 0x09, 0xf9, 0x2f, 0x39, 0xb3, 0x67, + 0x0d, 0x8d, 0xbf, 0xb6, 0x35, 0xd8, 0xe6, 0xa3, + 0x69, 0x32, 0xd8, 0x10, 0x33, 0xd1, 0x44, 0x8d, + 0x63, 0xb4, 0x03, 0xdd, 0xf8, 0x8e, 0x12, 0x1b, + 0x6e, 0x81, 0x9a, 0xc3, 0x81, 0x22, 0x6c, 0x13, + 0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c, + 0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 }; + int ret = 0; - int tmp; + u8 buf[sizeof(random)]; + struct ppno_ws_s ws; + + memset(&ws, 0, sizeof(ws)); + + /* initial seed */ + ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, + &ws, NULL, 0, + seed, sizeof(seed)); + if (ret < 0) { + pr_err("The prng self test seed operation for the " + "SHA-512 mode failed with rc=%d\n", ret); + prng_errorflag = PRNG_SELFTEST_FAILED; + return -EIO; + } + + /* check working states V and C */ + if (memcmp(ws.V, V0, sizeof(V0)) != 0 + || memcmp(ws.C, C0, sizeof(C0)) != 0) { + pr_err("The prng self test state test " + "for the SHA-512 mode failed\n"); + prng_errorflag = PRNG_SELFTEST_FAILED; + return -EIO; + } + + /* generate random bytes */ + ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, + &ws, buf, sizeof(buf), + NULL, 0); + if (ret < 0) { + pr_err("The prng self test generate operation for " + "the SHA-512 mode failed with rc=%d\n", ret); + prng_errorflag = PRNG_SELFTEST_FAILED; + return -EIO; + } + ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, + &ws, buf, sizeof(buf), + NULL, 0); + if (ret < 0) 
{ + pr_err("The prng self test generate operation for " + "the SHA-512 mode failed with rc=%d\n", ret); + prng_errorflag = PRNG_SELFTEST_FAILED; + return -EIO; + } + + /* check against expected data */ + if (memcmp(buf, random, sizeof(random)) != 0) { + pr_err("The prng self test data test " + "for the SHA-512 mode failed\n"); + prng_errorflag = PRNG_SELFTEST_FAILED; + return -EIO; + } + + return 0; +} + + +static int __init prng_sha512_instantiate(void) +{ + int ret, datalen; + u8 seed[64]; + + pr_debug("prng runs in SHA-512 mode " + "with chunksize=%d and reseed_limit=%u\n", + prng_chunk_size, prng_reseed_limit); + + /* memory allocation, prng_data struct init, mutex init */ + datalen = sizeof(struct prng_data_s) + prng_chunk_size; + if (fips_enabled) + datalen += prng_chunk_size; + prng_data = kzalloc(datalen, GFP_KERNEL); + if (!prng_data) { + prng_errorflag = PRNG_INSTANTIATE_FAILED; + return -ENOMEM; + } + mutex_init(&prng_data->mutex); + prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s); + + /* selftest */ + ret = prng_sha512_selftest(); + if (ret) + goto outfree; + + /* generate initial seed bytestring, first 48 bytes of entropy */ + ret = generate_entropy(seed, 48); + if (ret != 48) + goto outfree; + /* followed by 16 bytes of unique nonce */ + get_tod_clock_ext(seed + 48); + + /* initial seed of the ppno drng */ + ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, + &prng_data->ppnows, NULL, 0, + seed, sizeof(seed)); + if (ret < 0) { + prng_errorflag = PRNG_SEED_FAILED; + ret = -EIO; + goto outfree; + } + + /* if fips mode is enabled, generate a first block of random + bytes for the FIPS 140-2 Conditional Self Test */ + if (fips_enabled) { + prng_data->prev = prng_data->buf + prng_chunk_size; + ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, + &prng_data->ppnows, + prng_data->prev, + prng_chunk_size, + NULL, 0); + if (ret < 0 || ret != prng_chunk_size) { + prng_errorflag = PRNG_GEN_FAILED; + ret = -EIO; + goto outfree; + } + } + + return 0; + +outfree: + kfree(prng_data); + return ret; +} + + +static void prng_sha512_deinstantiate(void) +{ + pr_debug("The prng module stopped after running in SHA-512 mode\n"); + kzfree(prng_data); +} + + +static int prng_sha512_reseed(void) +{ + int ret; + u8 seed[32]; + + /* generate 32 bytes of fresh entropy */ + ret = generate_entropy(seed, sizeof(seed)); + if (ret != sizeof(seed)) + return ret; + + /* do a reseed of the ppno drng with this bytestring */ + ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, + &prng_data->ppnows, NULL, 0, + seed, sizeof(seed)); + if (ret) { + prng_errorflag = PRNG_RESEED_FAILED; + return -EIO; + } + + return 0; +} + + +static int prng_sha512_generate(u8 *buf, size_t nbytes) +{ + int ret; + + /* reseed needed ? 
*/ + if (prng_data->ppnows.reseed_counter > prng_reseed_limit) { + ret = prng_sha512_reseed(); + if (ret) + return ret; + } + + /* PPNO generate */ + ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, + &prng_data->ppnows, buf, nbytes, + NULL, 0); + if (ret < 0 || ret != nbytes) { + prng_errorflag = PRNG_GEN_FAILED; + return -EIO; + } + + /* FIPS 140-2 Conditional Self Test */ + if (fips_enabled) { + if (!memcmp(prng_data->prev, buf, nbytes)) { + prng_errorflag = PRNG_GEN_FAILED; + return -EILSEQ; + } + memcpy(prng_data->prev, buf, nbytes); + } + + return ret; +} + + +/*** file io functions ***/ + +static int prng_open(struct inode *inode, struct file *file) +{ + return nonseekable_open(inode, file); +} + + +static ssize_t prng_tdes_read(struct file *file, char __user *ubuf, + size_t nbytes, loff_t *ppos) +{ + int chunk, n, tmp, ret = 0; + + /* lock prng_data struct */ + if (mutex_lock_interruptible(&prng_data->mutex)) + return -ERESTARTSYS; - /* nbytes can be arbitrary length, we split it into chunks */ while (nbytes) { - /* same as in extract_entropy_user in random.c */ if (need_resched()) { if (signal_pending(current)) { if (ret == 0) ret = -ERESTARTSYS; break; } + /* give mutex free before calling schedule() */ + mutex_unlock(&prng_data->mutex); schedule(); + /* occopy mutex again */ + if (mutex_lock_interruptible(&prng_data->mutex)) { + if (ret == 0) + ret = -ERESTARTSYS; + return ret; + } } /* @@ -112,12 +535,11 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, /* PRNG only likes multiples of 8 bytes */ n = (chunk + 7) & -8; - if (p->count > prng_entropy_limit) - prng_seed(8); + if (prng_data->prngws.reseed_counter > prng_reseed_limit) + prng_tdes_seed(8); /* if the CPU supports PRNG stckf is present too */ - asm volatile(".insn s,0xb27c0000,%0" - : "=m" (*((unsigned long long *)p->buf)) : : "cc"); + *((unsigned long long *)prng_data->buf) = get_tod_clock_fast(); /* * Beside the STCKF the input for the TDES-EDE is the output @@ -132,35 +554,259 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, * Note: you can still get strict X9.17 conformity by setting * prng_chunk_size to 8 bytes. 
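When fips_enabled is set, prng_sha512_generate() above keeps the previous output block and refuses to hand out a new block that matches it, implementing the FIPS 140-2 conditional (continuous) self test. A stand-alone model of that check, with a toy generator standing in for the PPNO instruction:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK 16

/* Dummy byte source, purely for illustration (not the PPNO DRNG). */
static uint8_t toy_byte(unsigned int *state)
{
        *state = *state * 1103515245u + 12345u;
        return (uint8_t)(*state >> 16);
}

static void generate_block(uint8_t *buf, size_t n, unsigned int *state)
{
        for (size_t i = 0; i < n; i++)
                buf[i] = toy_byte(state);
}

/* Continuous test: reject a block identical to the previous one.
 * The kernel code returns -EILSEQ and sets prng_errorflag here. */
static int fips_checked_generate(uint8_t *out, uint8_t *prev, unsigned int *state)
{
        generate_block(out, CHUNK, state);
        if (!memcmp(prev, out, CHUNK))
                return -1;
        memcpy(prev, out, CHUNK);
        return 0;
}

int main(void)
{
        uint8_t prev[CHUNK] = { 0 }, out[CHUNK];
        unsigned int state = 1;

        for (int i = 0; i < 3; i++)
                printf("block %d: %s\n", i,
                       fips_checked_generate(out, prev, &state) ? "rejected" : "ok");
        return 0;
}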
*/ - tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n); - BUG_ON((tmp < 0) || (tmp != n)); + tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block, + prng_data->buf, prng_data->buf, n); + if (tmp < 0 || tmp != n) { + ret = -EIO; + break; + } - p->count += n; + prng_data->prngws.byte_counter += n; + prng_data->prngws.reseed_counter += n; - if (copy_to_user(ubuf, p->buf, chunk)) + if (copy_to_user(ubuf, prng_data->buf, chunk)) return -EFAULT; nbytes -= chunk; ret += chunk; ubuf += chunk; } + + /* unlock prng_data struct */ + mutex_unlock(&prng_data->mutex); + return ret; } -static const struct file_operations prng_fops = { + +static ssize_t prng_sha512_read(struct file *file, char __user *ubuf, + size_t nbytes, loff_t *ppos) +{ + int n, ret = 0; + u8 *p; + + /* if errorflag is set do nothing and return 'broken pipe' */ + if (prng_errorflag) + return -EPIPE; + + /* lock prng_data struct */ + if (mutex_lock_interruptible(&prng_data->mutex)) + return -ERESTARTSYS; + + while (nbytes) { + if (need_resched()) { + if (signal_pending(current)) { + if (ret == 0) + ret = -ERESTARTSYS; + break; + } + /* give mutex free before calling schedule() */ + mutex_unlock(&prng_data->mutex); + schedule(); + /* occopy mutex again */ + if (mutex_lock_interruptible(&prng_data->mutex)) { + if (ret == 0) + ret = -ERESTARTSYS; + return ret; + } + } + if (prng_data->rest) { + /* push left over random bytes from the previous read */ + p = prng_data->buf + prng_chunk_size - prng_data->rest; + n = (nbytes < prng_data->rest) ? + nbytes : prng_data->rest; + prng_data->rest -= n; + } else { + /* generate one chunk of random bytes into read buf */ + p = prng_data->buf; + n = prng_sha512_generate(p, prng_chunk_size); + if (n < 0) { + ret = n; + break; + } + if (nbytes < prng_chunk_size) { + n = nbytes; + prng_data->rest = prng_chunk_size - n; + } else { + n = prng_chunk_size; + prng_data->rest = 0; + } + } + if (copy_to_user(ubuf, p, n)) { + ret = -EFAULT; + break; + } + ubuf += n; + nbytes -= n; + ret += n; + } + + /* unlock prng_data struct */ + mutex_unlock(&prng_data->mutex); + + return ret; +} + + +/*** sysfs stuff ***/ + +static const struct file_operations prng_sha512_fops = { + .owner = THIS_MODULE, + .open = &prng_open, + .release = NULL, + .read = &prng_sha512_read, + .llseek = noop_llseek, +}; +static const struct file_operations prng_tdes_fops = { .owner = THIS_MODULE, .open = &prng_open, .release = NULL, - .read = &prng_read, + .read = &prng_tdes_read, .llseek = noop_llseek, }; -static struct miscdevice prng_dev = { +static struct miscdevice prng_sha512_dev = { + .name = "prandom", + .minor = MISC_DYNAMIC_MINOR, + .fops = &prng_sha512_fops, +}; +static struct miscdevice prng_tdes_dev = { .name = "prandom", .minor = MISC_DYNAMIC_MINOR, - .fops = &prng_fops, + .fops = &prng_tdes_fops, }; + +/* chunksize attribute (ro) */ +static ssize_t prng_chunksize_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size); +} +static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL); + +/* counter attribute (ro) */ +static ssize_t prng_counter_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 counter; + + if (mutex_lock_interruptible(&prng_data->mutex)) + return -ERESTARTSYS; + if (prng_mode == PRNG_MODE_SHA512) + counter = prng_data->ppnows.stream_bytes; + else + counter = prng_data->prngws.byte_counter; + mutex_unlock(&prng_data->mutex); + + return snprintf(buf, PAGE_SIZE, "%llu\n", counter); +} 
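prng_sha512_read() above produces random data one prng_chunk_size block at a time and uses prng_data->rest to hand out leftover bytes from the previous block before generating a new one. A stand-alone sketch of that bookkeeping, with a dummy byte counter in place of the DRNG:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK 8

static uint8_t buf[CHUNK];
static size_t rest;              /* unread bytes left over in buf */
static uint8_t next_byte;        /* dummy randomness, for illustration only */

static void generate_chunk(void)
{
        for (size_t i = 0; i < CHUNK; i++)
                buf[i] = next_byte++;
}

static size_t prng_read(uint8_t *out, size_t nbytes)
{
        size_t done = 0;

        while (nbytes) {
                uint8_t *p;
                size_t n;

                if (rest) {
                        /* serve leftovers from the previous chunk first */
                        p = buf + CHUNK - rest;
                        n = nbytes < rest ? nbytes : rest;
                        rest -= n;
                } else {
                        /* generate a fresh chunk, keep what the caller
                         * does not consume for the next read */
                        generate_chunk();
                        p = buf;
                        n = nbytes < CHUNK ? nbytes : CHUNK;
                        rest = CHUNK - n;
                }
                memcpy(out + done, p, n);
                done += n;
                nbytes -= n;
        }
        return done;
}

int main(void)
{
        uint8_t out[10];

        prng_read(out, 5);
        prng_read(out + 5, 5);
        for (int i = 0; i < 10; i++)
                printf("%02x ", out[i]);   /* 00..09: no byte lost or repeated */
        printf("\n");
        return 0;
}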
+static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL); + +/* errorflag attribute (ro) */ +static ssize_t prng_errorflag_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag); +} +static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL); + +/* mode attribute (ro) */ +static ssize_t prng_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + if (prng_mode == PRNG_MODE_TDES) + return snprintf(buf, PAGE_SIZE, "TDES\n"); + else + return snprintf(buf, PAGE_SIZE, "SHA512\n"); +} +static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL); + +/* reseed attribute (w) */ +static ssize_t prng_reseed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + if (mutex_lock_interruptible(&prng_data->mutex)) + return -ERESTARTSYS; + prng_sha512_reseed(); + mutex_unlock(&prng_data->mutex); + + return count; +} +static DEVICE_ATTR(reseed, 0200, NULL, prng_reseed_store); + +/* reseed limit attribute (rw) */ +static ssize_t prng_reseed_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit); +} +static ssize_t prng_reseed_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned limit; + + if (sscanf(buf, "%u\n", &limit) != 1) + return -EINVAL; + + if (prng_mode == PRNG_MODE_SHA512) { + if (limit < PRNG_RESEED_LIMIT_SHA512_LOWER) + return -EINVAL; + } else { + if (limit < PRNG_RESEED_LIMIT_TDES_LOWER) + return -EINVAL; + } + + prng_reseed_limit = limit; + + return count; +} +static DEVICE_ATTR(reseed_limit, 0644, + prng_reseed_limit_show, prng_reseed_limit_store); + +/* strength attribute (ro) */ +static ssize_t prng_strength_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "256\n"); +} +static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL); + +static struct attribute *prng_sha512_dev_attrs[] = { + &dev_attr_errorflag.attr, + &dev_attr_chunksize.attr, + &dev_attr_byte_counter.attr, + &dev_attr_mode.attr, + &dev_attr_reseed.attr, + &dev_attr_reseed_limit.attr, + &dev_attr_strength.attr, + NULL +}; +static struct attribute *prng_tdes_dev_attrs[] = { + &dev_attr_chunksize.attr, + &dev_attr_byte_counter.attr, + &dev_attr_mode.attr, + NULL +}; + +static struct attribute_group prng_sha512_dev_attr_group = { + .attrs = prng_sha512_dev_attrs +}; +static struct attribute_group prng_tdes_dev_attr_group = { + .attrs = prng_tdes_dev_attrs +}; + + +/*** module init and exit ***/ + static int __init prng_init(void) { int ret; @@ -169,43 +815,105 @@ static int __init prng_init(void) if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA)) return -EOPNOTSUPP; - if (prng_chunk_size < 8) - return -EINVAL; + /* choose prng mode */ + if (prng_mode != PRNG_MODE_TDES) { + /* check for MSA5 support for PPNO operations */ + if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN, + CRYPT_S390_MSA5)) { + if (prng_mode == PRNG_MODE_SHA512) { + pr_err("The prng module cannot " + "start in SHA-512 mode\n"); + return -EOPNOTSUPP; + } + prng_mode = PRNG_MODE_TDES; + } else + prng_mode = PRNG_MODE_SHA512; + } - p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL); - if (!p) - return -ENOMEM; - p->count = 0; + if (prng_mode == PRNG_MODE_SHA512) { - p->buf = kmalloc(prng_chunk_size, GFP_KERNEL); - if (!p->buf) { - ret = -ENOMEM; - goto out_free; - } + /* SHA512 mode */ - /* initialize the 
PRNG, add 128 bits of entropy */ - prng_seed(16); + if (prng_chunk_size < PRNG_CHUNKSIZE_SHA512_MIN + || prng_chunk_size > PRNG_CHUNKSIZE_SHA512_MAX) + return -EINVAL; + prng_chunk_size = (prng_chunk_size + 0x3f) & ~0x3f; - ret = misc_register(&prng_dev); - if (ret) - goto out_buf; - return 0; + if (prng_reseed_limit == 0) + prng_reseed_limit = PRNG_RESEED_LIMIT_SHA512; + else if (prng_reseed_limit < PRNG_RESEED_LIMIT_SHA512_LOWER) + return -EINVAL; + + ret = prng_sha512_instantiate(); + if (ret) + goto out; + + ret = misc_register(&prng_sha512_dev); + if (ret) { + prng_sha512_deinstantiate(); + goto out; + } + ret = sysfs_create_group(&prng_sha512_dev.this_device->kobj, + &prng_sha512_dev_attr_group); + if (ret) { + misc_deregister(&prng_sha512_dev); + prng_sha512_deinstantiate(); + goto out; + } -out_buf: - kfree(p->buf); -out_free: - kfree(p); + } else { + + /* TDES mode */ + + if (prng_chunk_size < PRNG_CHUNKSIZE_TDES_MIN + || prng_chunk_size > PRNG_CHUNKSIZE_TDES_MAX) + return -EINVAL; + prng_chunk_size = (prng_chunk_size + 0x07) & ~0x07; + + if (prng_reseed_limit == 0) + prng_reseed_limit = PRNG_RESEED_LIMIT_TDES; + else if (prng_reseed_limit < PRNG_RESEED_LIMIT_TDES_LOWER) + return -EINVAL; + + ret = prng_tdes_instantiate(); + if (ret) + goto out; + + ret = misc_register(&prng_tdes_dev); + if (ret) { + prng_tdes_deinstantiate(); + goto out; + } + ret = sysfs_create_group(&prng_tdes_dev.this_device->kobj, + &prng_tdes_dev_attr_group); + if (ret) { + misc_deregister(&prng_tdes_dev); + prng_tdes_deinstantiate(); + goto out; + } + + } + +out: return ret; } + static void __exit prng_exit(void) { - /* wipe me */ - kzfree(p->buf); - kfree(p); - - misc_deregister(&prng_dev); + if (prng_mode == PRNG_MODE_SHA512) { + sysfs_remove_group(&prng_sha512_dev.this_device->kobj, + &prng_sha512_dev_attr_group); + misc_deregister(&prng_sha512_dev); + prng_sha512_deinstantiate(); + } else { + sysfs_remove_group(&prng_tdes_dev.this_device->kobj, + &prng_tdes_dev_attr_group); + misc_deregister(&prng_tdes_dev); + prng_tdes_deinstantiate(); + } } + module_init(prng_init); module_exit(prng_exit); diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 694bcd6..2f924bc 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -26,6 +26,9 @@ /* Not more than 2GB */ #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) +/* Allocate control page with GFP_DMA */ +#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA + /* Maximum address we can use for the crash control pages */ #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL) diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index a5e6562..d29ad95 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -14,7 +14,9 @@ typedef struct { unsigned long asce_bits; unsigned long asce_limit; unsigned long vdso_base; - /* The mmu context has extended page tables. */ + /* The mmu context allocates 4K page tables. */ + unsigned int alloc_pgste:1; + /* The mmu context uses extended page tables. */ unsigned int has_pgste:1; /* The mmu context uses storage keys. 
*/ unsigned int use_skey:1; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index d25d9ff..fb1b93e 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -20,8 +20,11 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.flush_mm = 0; mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; mm->context.asce_bits |= _ASCE_TYPE_REGION3; +#ifdef CONFIG_PGSTE + mm->context.alloc_pgste = page_table_allocate_pgste; mm->context.has_pgste = 0; mm->context.use_skey = 0; +#endif mm->context.asce_limit = STACK_TOP_MAX; crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); return 0; diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 51e7fb6..7b7858f 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -21,6 +21,7 @@ void crst_table_free(struct mm_struct *, unsigned long *); unsigned long *page_table_alloc(struct mm_struct *); void page_table_free(struct mm_struct *, unsigned long *); void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long); +extern int page_table_allocate_pgste; int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned long key, bool nq); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 989cfae..fc64239 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -12,12 +12,9 @@ #define _ASM_S390_PGTABLE_H /* - * The Linux memory management assumes a three-level page table setup. For - * s390 31 bit we "fold" the mid level into the top-level page table, so - * that we physically have the same two-level page table as the s390 mmu - * expects in 31 bit mode. For s390 64 bit we use three of the five levels - * the hardware provides (region first and region second tables are not - * used). + * The Linux memory management assumes a three-level page table setup. + * For s390 64 bit we use up to four of the five levels the hardware + * provides (region first tables are not used). * * The "pgd_xxx()" functions are trivial for a folded two-level * setup: the pgd is never bad, and a pmd always exists (as it's folded @@ -101,8 +98,8 @@ extern unsigned long zero_page_mask; #ifndef __ASSEMBLY__ /* - * The vmalloc and module area will always be on the topmost area of the kernel - * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules. + * The vmalloc and module area will always be on the topmost area of the + * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules. * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where * modules will reside. 
That makes sure that inter module branches always * happen without trampolines and in addition the placement within a 2GB frame @@ -131,38 +128,6 @@ static inline int is_module_addr(void *addr) } /* - * A 31 bit pagetable entry of S390 has following format: - * | PFRA | | OS | - * 0 0IP0 - * 00000000001111111111222222222233 - * 01234567890123456789012345678901 - * - * I Page-Invalid Bit: Page is not available for address-translation - * P Page-Protection Bit: Store access not possible for page - * - * A 31 bit segmenttable entry of S390 has following format: - * | P-table origin | |PTL - * 0 IC - * 00000000001111111111222222222233 - * 01234567890123456789012345678901 - * - * I Segment-Invalid Bit: Segment is not available for address-translation - * C Common-Segment Bit: Segment is not private (PoP 3-30) - * PTL Page-Table-Length: Page-table length (PTL+1*16 entries -> up to 256) - * - * The 31 bit segmenttable origin of S390 has following format: - * - * |S-table origin | | STL | - * X **GPS - * 00000000001111111111222222222233 - * 01234567890123456789012345678901 - * - * X Space-Switch event: - * G Segment-Invalid Bit: * - * P Private-Space Bit: Segment is not private (PoP 3-30) - * S Storage-Alteration: - * STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048) - * * A 64 bit pagetable entry of S390 has following format: * | PFRA |0IPC| OS | * 0000000000111111111122222222223333333333444444444455555555556666 @@ -220,7 +185,6 @@ static inline int is_module_addr(void *addr) /* Software bits in the page table entry */ #define _PAGE_PRESENT 0x001 /* SW pte present bit */ -#define _PAGE_TYPE 0x002 /* SW pte type bit */ #define _PAGE_YOUNG 0x004 /* SW pte young bit */ #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ #define _PAGE_READ 0x010 /* SW pte read bit */ @@ -240,31 +204,34 @@ static inline int is_module_addr(void *addr) * table lock held. * * The following table gives the different possible bit combinations for - * the pte hardware and software bits in the last 12 bits of a pte: + * the pte hardware and software bits in the last 12 bits of a pte + * (. 
unassigned bit, x don't care, t swap type): * * 842100000000 * 000084210000 * 000000008421 - * .IR...wrdytp - * empty .10...000000 - * swap .10...xxxx10 - * file .11...xxxxx0 - * prot-none, clean, old .11...000001 - * prot-none, clean, young .11...000101 - * prot-none, dirty, old .10...001001 - * prot-none, dirty, young .10...001101 - * read-only, clean, old .11...010001 - * read-only, clean, young .01...010101 - * read-only, dirty, old .11...011001 - * read-only, dirty, young .01...011101 - * read-write, clean, old .11...110001 - * read-write, clean, young .01...110101 - * read-write, dirty, old .10...111001 - * read-write, dirty, young .00...111101 + * .IR.uswrdy.p + * empty .10.00000000 + * swap .11..ttttt.0 + * prot-none, clean, old .11.xx0000.1 + * prot-none, clean, young .11.xx0001.1 + * prot-none, dirty, old .10.xx0010.1 + * prot-none, dirty, young .10.xx0011.1 + * read-only, clean, old .11.xx0100.1 + * read-only, clean, young .01.xx0101.1 + * read-only, dirty, old .11.xx0110.1 + * read-only, dirty, young .01.xx0111.1 + * read-write, clean, old .11.xx1100.1 + * read-write, clean, young .01.xx1101.1 + * read-write, dirty, old .10.xx1110.1 + * read-write, dirty, young .00.xx1111.1 + * HW-bits: R read-only, I invalid + * SW-bits: p present, y young, d dirty, r read, w write, s special, + * u unused, l large * - * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001 - * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400 - * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 + * pte_none is true for the bit pattern .10.00000000, pte == 0x400 + * pte_swap is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200 + * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001 */ /* Bits in the segment/region table address-space-control-element */ @@ -335,6 +302,8 @@ static inline int is_module_addr(void *addr) * read-write, dirty, young 11..0...0...11 * The segment table origin is used to distinguish empty (origin==0) from * read-write, old segment table entries (origin!=0) + * HW-bits: R read-only, I invalid + * SW-bits: y young, d dirty, r read, w write */ #define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */ @@ -423,6 +392,15 @@ static inline int mm_has_pgste(struct mm_struct *mm) return 0; } +static inline int mm_alloc_pgste(struct mm_struct *mm) +{ +#ifdef CONFIG_PGSTE + if (unlikely(mm->context.alloc_pgste)) + return 1; +#endif + return 0; +} + /* * In the case that a guest uses storage keys * faults should no longer be backed by zero pages @@ -582,10 +560,9 @@ static inline int pte_none(pte_t pte) static inline int pte_swap(pte_t pte) { - /* Bit pattern: (pte & 0x603) == 0x402 */ - return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | - _PAGE_TYPE | _PAGE_PRESENT)) - == (_PAGE_INVALID | _PAGE_TYPE); + /* Bit pattern: (pte & 0x201) == 0x200 */ + return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) + == _PAGE_PROTECT; } static inline int pte_special(pte_t pte) @@ -1586,51 +1563,51 @@ static inline int has_transparent_hugepage(void) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * 31 bit swap entry format: - * A page-table entry has some bits we have to treat in a special way. - * Bits 0, 20 and bit 23 have to be zero, otherwise an specification - * exception will occur instead of a page translation exception. The - * specifiation exception has the bad habit not to store necessary - * information in the lowcore. - * Bits 21, 22, 30 and 31 are used to indicate the page type. 
- * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 - * This leaves the bits 1-19 and bits 24-29 to store type and offset. - * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 - * plus 24 for the offset. - * 0| offset |0110|o|type |00| - * 0 0000000001111111111 2222 2 22222 33 - * 0 1234567890123456789 0123 4 56789 01 - * * 64 bit swap entry format: * A page-table entry has some bits we have to treat in a special way. * Bits 52 and bit 55 have to be zero, otherwise an specification * exception will occur instead of a page translation exception. The * specifiation exception has the bad habit not to store necessary * information in the lowcore. - * Bits 53, 54, 62 and 63 are used to indicate the page type. - * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 - * This leaves the bits 0-51 and bits 56-61 to store type and offset. - * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 - * plus 56 for the offset. - * | offset |0110|o|type |00| - * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 - * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 + * Bits 54 and 63 are used to indicate the page type. + * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200 + * This leaves the bits 0-51 and bits 56-62 to store type and offset. + * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51 + * for the offset. + * | offset |01100|type |00| + * |0000000000111111111122222222223333333333444444444455|55555|55566|66| + * |0123456789012345678901234567890123456789012345678901|23456|78901|23| */ -#define __SWP_OFFSET_MASK (~0UL >> 11) +#define __SWP_OFFSET_MASK ((1UL << 52) - 1) +#define __SWP_OFFSET_SHIFT 12 +#define __SWP_TYPE_MASK ((1UL << 5) - 1) +#define __SWP_TYPE_SHIFT 2 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) { pte_t pte; - offset &= __SWP_OFFSET_MASK; - pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) | - ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); + + pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT; + pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; + pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; return pte; } -#define __swp_type(entry) (((entry).val >> 2) & 0x1f) -#define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) -#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) +static inline unsigned long __swp_type(swp_entry_t entry) +{ + return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK; +} + +static inline unsigned long __swp_offset(swp_entry_t entry) +{ + return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK; +} + +static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) +{ + return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) }; +} #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 210ffed..e617e74 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -14,20 +14,23 @@ static inline pmd_t __pte_to_pmd(pte_t pte) /* * Convert encoding pte bits pmd bits - * .IR...wrdytp dy..R...I...wr - * empty .10...000000 -> 00..0...1...00 - * prot-none, clean, old .11...000001 -> 00..1...1...00 - * prot-none, clean, young .11...000101 -> 01..1...1...00 - * prot-none, dirty, old .10...001001 -> 10..1...1...00 - * prot-none, dirty, young 
.10...001101 -> 11..1...1...00 - * read-only, clean, old .11...010001 -> 00..1...1...01 - * read-only, clean, young .01...010101 -> 01..1...0...01 - * read-only, dirty, old .11...011001 -> 10..1...1...01 - * read-only, dirty, young .01...011101 -> 11..1...0...01 - * read-write, clean, old .11...110001 -> 00..0...1...11 - * read-write, clean, young .01...110101 -> 01..0...0...11 - * read-write, dirty, old .10...111001 -> 10..0...1...11 - * read-write, dirty, young .00...111101 -> 11..0...0...11 + * lIR.uswrdy.p dy..R...I...wr + * empty 010.000000.0 -> 00..0...1...00 + * prot-none, clean, old 111.000000.1 -> 00..1...1...00 + * prot-none, clean, young 111.000001.1 -> 01..1...1...00 + * prot-none, dirty, old 111.000010.1 -> 10..1...1...00 + * prot-none, dirty, young 111.000011.1 -> 11..1...1...00 + * read-only, clean, old 111.000100.1 -> 00..1...1...01 + * read-only, clean, young 101.000101.1 -> 01..1...0...01 + * read-only, dirty, old 111.000110.1 -> 10..1...1...01 + * read-only, dirty, young 101.000111.1 -> 11..1...0...01 + * read-write, clean, old 111.001100.1 -> 00..1...1...11 + * read-write, clean, young 101.001101.1 -> 01..1...0...11 + * read-write, dirty, old 110.001110.1 -> 10..0...1...11 + * read-write, dirty, young 100.001111.1 -> 11..0...0...11 + * HW-bits: R read-only, I invalid + * SW-bits: p present, y young, d dirty, r read, w write, s special, + * u unused, l large */ if (pte_present(pte)) { pmd_val(pmd) = pte_val(pte) & PAGE_MASK; @@ -48,20 +51,23 @@ static inline pte_t __pmd_to_pte(pmd_t pmd) /* * Convert encoding pmd bits pte bits - * dy..R...I...wr .IR...wrdytp - * empty 00..0...1...00 -> .10...001100 - * prot-none, clean, old 00..0...1...00 -> .10...000001 - * prot-none, clean, young 01..0...1...00 -> .10...000101 - * prot-none, dirty, old 10..0...1...00 -> .10...001001 - * prot-none, dirty, young 11..0...1...00 -> .10...001101 - * read-only, clean, old 00..1...1...01 -> .11...010001 - * read-only, clean, young 01..1...1...01 -> .11...010101 - * read-only, dirty, old 10..1...1...01 -> .11...011001 - * read-only, dirty, young 11..1...1...01 -> .11...011101 - * read-write, clean, old 00..0...1...11 -> .10...110001 - * read-write, clean, young 01..0...1...11 -> .10...110101 - * read-write, dirty, old 10..0...1...11 -> .10...111001 - * read-write, dirty, young 11..0...1...11 -> .10...111101 + * dy..R...I...wr lIR.uswrdy.p + * empty 00..0...1...00 -> 010.000000.0 + * prot-none, clean, old 00..1...1...00 -> 111.000000.1 + * prot-none, clean, young 01..1...1...00 -> 111.000001.1 + * prot-none, dirty, old 10..1...1...00 -> 111.000010.1 + * prot-none, dirty, young 11..1...1...00 -> 111.000011.1 + * read-only, clean, old 00..1...1...01 -> 111.000100.1 + * read-only, clean, young 01..1...0...01 -> 101.000101.1 + * read-only, dirty, old 10..1...1...01 -> 111.000110.1 + * read-only, dirty, young 11..1...0...01 -> 101.000111.1 + * read-write, clean, old 00..1...1...11 -> 111.001100.1 + * read-write, clean, young 01..1...0...11 -> 101.001101.1 + * read-write, dirty, old 10..0...1...11 -> 110.001110.1 + * read-write, dirty, young 11..0...0...11 -> 100.001111.1 + * HW-bits: R read-only, I invalid + * SW-bits: p present, y young, d dirty, r read, w write, s special, + * u unused, l large */ if (pmd_present(pmd)) { pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE; @@ -70,8 +76,8 @@ static inline pte_t __pmd_to_pte(pmd_t pmd) pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4; pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5; pte_val(pte) |= (pmd_val(pmd) & 
_SEGMENT_ENTRY_PROTECT); - pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10; - pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10; + pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10; + pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10; } else pte_val(pte) = _PAGE_INVALID; return pte; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 33f5894..b33f661 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -18,6 +18,7 @@ #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/swapops.h> +#include <linux/sysctl.h> #include <linux/ksm.h> #include <linux/mman.h> @@ -920,6 +921,40 @@ unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr) } EXPORT_SYMBOL(get_guest_storage_key); +static int page_table_allocate_pgste_min = 0; +static int page_table_allocate_pgste_max = 1; +int page_table_allocate_pgste = 0; +EXPORT_SYMBOL(page_table_allocate_pgste); + +static struct ctl_table page_table_sysctl[] = { + { + .procname = "allocate_pgste", + .data = &page_table_allocate_pgste, + .maxlen = sizeof(int), + .mode = S_IRUGO | S_IWUSR, + .proc_handler = proc_dointvec, + .extra1 = &page_table_allocate_pgste_min, + .extra2 = &page_table_allocate_pgste_max, + }, + { } +}; + +static struct ctl_table page_table_sysctl_dir[] = { + { + .procname = "vm", + .maxlen = 0, + .mode = 0555, + .child = page_table_sysctl, + }, + { } +}; + +static int __init page_table_register_sysctl(void) +{ + return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM; +} +__initcall(page_table_register_sysctl); + #else /* CONFIG_PGSTE */ static inline int page_table_with_pgste(struct page *page) @@ -963,7 +998,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) struct page *uninitialized_var(page); unsigned int mask, bit; - if (mm_has_pgste(mm)) + if (mm_alloc_pgste(mm)) return page_table_alloc_pgste(mm); /* Allocate fragments of a 4K page as 1K/2K page table */ spin_lock_bh(&mm->context.list_lock); @@ -1165,116 +1200,25 @@ static inline void thp_split_mm(struct mm_struct *mm) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb, - struct mm_struct *mm, pud_t *pud, - unsigned long addr, unsigned long end) -{ - unsigned long next, *table, *new; - struct page *page; - spinlock_t *ptl; - pmd_t *pmd; - - pmd = pmd_offset(pud, addr); - do { - next = pmd_addr_end(addr, end); -again: - if (pmd_none_or_clear_bad(pmd)) - continue; - table = (unsigned long *) pmd_deref(*pmd); - page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - if (page_table_with_pgste(page)) - continue; - /* Allocate new page table with pgstes */ - new = page_table_alloc_pgste(mm); - if (!new) - return -ENOMEM; - - ptl = pmd_lock(mm, pmd); - if (likely((unsigned long *) pmd_deref(*pmd) == table)) { - /* Nuke pmd entry pointing to the "short" page table */ - pmdp_flush_lazy(mm, addr, pmd); - pmd_clear(pmd); - /* Copy ptes from old table to new table */ - memcpy(new, table, PAGE_SIZE/2); - clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); - /* Establish new table */ - pmd_populate(mm, pmd, (pte_t *) new); - /* Free old table with rcu, there might be a walker! 
*/ - page_table_free_rcu(tlb, table, addr); - new = NULL; - } - spin_unlock(ptl); - if (new) { - page_table_free_pgste(new); - goto again; - } - } while (pmd++, addr = next, addr != end); - - return addr; -} - -static unsigned long page_table_realloc_pud(struct mmu_gather *tlb, - struct mm_struct *mm, pgd_t *pgd, - unsigned long addr, unsigned long end) -{ - unsigned long next; - pud_t *pud; - - pud = pud_offset(pgd, addr); - do { - next = pud_addr_end(addr, end); - if (pud_none_or_clear_bad(pud)) - continue; - next = page_table_realloc_pmd(tlb, mm, pud, addr, next); - if (unlikely(IS_ERR_VALUE(next))) - return next; - } while (pud++, addr = next, addr != end); - - return addr; -} - -static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long addr, unsigned long end) -{ - unsigned long next; - pgd_t *pgd; - - pgd = pgd_offset(mm, addr); - do { - next = pgd_addr_end(addr, end); - if (pgd_none_or_clear_bad(pgd)) - continue; - next = page_table_realloc_pud(tlb, mm, pgd, addr, next); - if (unlikely(IS_ERR_VALUE(next))) - return next; - } while (pgd++, addr = next, addr != end); - - return 0; -} - /* * switch on pgstes for its userspace process (for kvm) */ int s390_enable_sie(void) { - struct task_struct *tsk = current; - struct mm_struct *mm = tsk->mm; - struct mmu_gather tlb; + struct mm_struct *mm = current->mm; /* Do we have pgstes? if yes, we are done */ - if (mm_has_pgste(tsk->mm)) + if (mm_has_pgste(mm)) return 0; - + /* Fail if the page tables are 2K */ + if (!mm_alloc_pgste(mm)) + return -EINVAL; down_write(&mm->mmap_sem); + mm->context.has_pgste = 1; /* split thp mappings and disable thp for future mappings */ thp_split_mm(mm); - /* Reallocate the page tables with pgstes */ - tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE); - if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE)) - mm->context.has_pgste = 1; - tlb_finish_mmu(&tlb, 0, TASK_SIZE); up_write(&mm->mmap_sem); - return mm->context.has_pgste ? 0 : -ENOMEM; + return 0; } EXPORT_SYMBOL_GPL(s390_enable_sie); diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6873f00..d366675 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -774,7 +774,7 @@ static void __init zone_sizes_init(void) * though, there'll be no lowmem, so we just alloc_bootmem * the memmap. There will be no percpu memory either. 
*/ - if (i != 0 && cpumask_test_cpu(i, &isolnodes)) { + if (i != 0 && node_isset(i, isolnodes)) { node_memmap_pfn[i] = alloc_bootmem_pfn(0, memmap_size, 0); BUG_ON(node_percpu[i] != 0); diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index ef17683..48304b8 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1109,6 +1109,8 @@ struct boot_params *make_boot_params(struct efi_config *c) if (!cmdline_ptr) goto fail; hdr->cmd_line_ptr = (unsigned long)cmdline_ptr; + /* Fill in upper bits of command line address, NOP on 32 bit */ + boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32; hdr->ramdisk_image = 0; hdr->ramdisk_size = 0; diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index e42f758..055ea99 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper; /* Recognized hypervisors */ extern const struct hypervisor_x86 x86_hyper_vmware; extern const struct hypervisor_x86 x86_hyper_ms_hyperv; -extern const struct hypervisor_x86 x86_hyper_xen_hvm; +extern const struct hypervisor_x86 x86_hyper_xen; extern const struct hypervisor_x86 x86_hyper_kvm; extern void init_hypervisor(struct cpuinfo_x86 *c); diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index 25b1cc0..d6b078e 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -95,7 +95,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, struct pvclock_vsyscall_time_info { struct pvclock_vcpu_time_info pvti; - u32 migrate_count; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index cf87de3..64b6117 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock) struct __raw_tickets tmp = READ_ONCE(lock->tickets); tmp.head &= ~TICKET_SLOWPATH_FLAG; - return (tmp.tail - tmp.head) > TICKET_LOCK_INC; + return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; } #define arch_spin_is_contended arch_spin_is_contended diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 358dcd3..c44a5d5 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev, return false; } +static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order) +{ + return __get_free_pages(__GFP_NOWARN, order); +} + #endif /* _ASM_X86_XEN_PAGE_H */ diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 36ce402..d820d8e 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -27,8 +27,8 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = { -#ifdef CONFIG_XEN_PVHVM - &x86_hyper_xen_hvm, +#ifdef CONFIG_XEN + &x86_hyper_xen, #endif &x86_hyper_vmware, &x86_hyper_ms_hyperv, diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 219d3fb..3998131 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1134,7 +1134,7 @@ static __initconst const u64 slm_hw_cache_extra_regs [ C(LL ) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 
SLM_DMND_READ|SLM_LLC_ACCESS, - [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS, + [ C(RESULT_MISS) ] = 0, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS, @@ -1184,8 +1184,7 @@ static __initconst const u64 slm_hw_cache_event_ids [ C(OP_READ) ] = { /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0, }, [ C(OP_WRITE) ] = { /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ @@ -1217,7 +1216,7 @@ static __initconst const u64 slm_hw_cache_event_ids [ C(ITLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ - [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ + [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, @@ -2533,34 +2532,6 @@ ssize_t intel_event_sysfs_show(char *page, u64 config) return x86_event_sysfs_show(page, config, event); } -static __initconst const struct x86_pmu core_pmu = { - .name = "core", - .handle_irq = x86_pmu_handle_irq, - .disable_all = x86_pmu_disable_all, - .enable_all = core_pmu_enable_all, - .enable = core_pmu_enable_event, - .disable = x86_pmu_disable_event, - .hw_config = x86_pmu_hw_config, - .schedule_events = x86_schedule_events, - .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, - .perfctr = MSR_ARCH_PERFMON_PERFCTR0, - .event_map = intel_pmu_event_map, - .max_events = ARRAY_SIZE(intel_perfmon_event_map), - .apic = 1, - /* - * Intel PMCs cannot be accessed sanely above 32 bit width, - * so we install an artificial 1<<31 period regardless of - * the generic event period: - */ - .max_period = (1ULL << 31) - 1, - .get_event_constraints = intel_get_event_constraints, - .put_event_constraints = intel_put_event_constraints, - .event_constraints = intel_core_event_constraints, - .guest_get_msrs = core_guest_get_msrs, - .format_attrs = intel_arch_formats_attr, - .events_sysfs_show = intel_event_sysfs_show, -}; - struct intel_shared_regs *allocate_shared_regs(int cpu) { struct intel_shared_regs *regs; @@ -2743,6 +2714,44 @@ static struct attribute *intel_arch3_formats_attr[] = { NULL, }; +static __initconst const struct x86_pmu core_pmu = { + .name = "core", + .handle_irq = x86_pmu_handle_irq, + .disable_all = x86_pmu_disable_all, + .enable_all = core_pmu_enable_all, + .enable = core_pmu_enable_event, + .disable = x86_pmu_disable_event, + .hw_config = x86_pmu_hw_config, + .schedule_events = x86_schedule_events, + .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, + .perfctr = MSR_ARCH_PERFMON_PERFCTR0, + .event_map = intel_pmu_event_map, + .max_events = ARRAY_SIZE(intel_perfmon_event_map), + .apic = 1, + /* + * Intel PMCs cannot be accessed sanely above 32-bit width, + * so we install an artificial 1<<31 period regardless of + * the generic event period: + */ + .max_period = (1ULL<<31) - 1, + .get_event_constraints = intel_get_event_constraints, + .put_event_constraints = intel_put_event_constraints, + .event_constraints = intel_core_event_constraints, + .guest_get_msrs = core_guest_get_msrs, + .format_attrs = intel_arch_formats_attr, + .events_sysfs_show = intel_event_sysfs_show, + + /* + * Virtual (or funny metal) CPU can define x86_pmu.extra_regs + * together with PMU version 1 and thus be using core_pmu with + * shared_regs. We need following callbacks here to allocate + * it properly. 
+ */ + .cpu_prepare = intel_pmu_cpu_prepare, + .cpu_starting = intel_pmu_cpu_starting, + .cpu_dying = intel_pmu_cpu_dying, +}; + static __initconst const struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 999289b9..358c54a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -722,6 +722,7 @@ static int __init rapl_pmu_init(void) break; case 60: /* Haswell */ case 69: /* Haswell-Celeron */ + case 61: /* Broadwell */ rapl_cntr_mask = RAPL_IDX_HSW; rapl_pmu_events_group.attrs = rapl_events_hsw_attr; break; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c index 3001015..4562e9e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c @@ -1,6 +1,13 @@ /* Nehalem/SandBridge/Haswell uncore support */ #include "perf_event_intel_uncore.h" +/* Uncore IMC PCI IDs */ +#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 +#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 +#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150 +#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 +#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 + /* SNB event control */ #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 @@ -472,6 +479,10 @@ static const struct pci_device_id hsw_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, { /* end: all zeroes */ }, }; @@ -502,6 +513,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */ IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */ IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ + IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ { /* end marker */ } }; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 8213da6..6e338e3 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -57,7 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, #endif }; -EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss); +EXPORT_PER_CPU_SYMBOL(cpu_tss); #ifdef CONFIG_X86_64 static DEFINE_PER_CPU(unsigned char, is_idle); @@ -156,11 +156,13 @@ void flush_thread(void) /* FPU state will be reallocated lazily at the first use. */ drop_fpu(tsk); free_thread_xstate(tsk); - } else if (!used_math()) { - /* kthread execs. TODO: cleanup this horror. */ - if (WARN_ON(init_fpu(tsk))) - force_sig(SIGKILL, tsk); - user_fpu_begin(); + } else { + if (!tsk_used_math(tsk)) { + /* kthread execs. TODO: cleanup this horror. 
*/ + if (WARN_ON(init_fpu(tsk))) + force_sig(SIGKILL, tsk); + user_fpu_begin(); + } restore_init_xstate(); } } diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index e5ecd20..2f355d2 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -141,46 +141,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock, set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); } -static struct pvclock_vsyscall_time_info *pvclock_vdso_info; - -static struct pvclock_vsyscall_time_info * -pvclock_get_vsyscall_user_time_info(int cpu) -{ - if (!pvclock_vdso_info) { - BUG(); - return NULL; - } - - return &pvclock_vdso_info[cpu]; -} - -struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu) -{ - return &pvclock_get_vsyscall_user_time_info(cpu)->pvti; -} - #ifdef CONFIG_X86_64 -static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l, - void *v) -{ - struct task_migration_notifier *mn = v; - struct pvclock_vsyscall_time_info *pvti; - - pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu); - - /* this is NULL when pvclock vsyscall is not initialized */ - if (unlikely(pvti == NULL)) - return NOTIFY_DONE; - - pvti->migrate_count++; - - return NOTIFY_DONE; -} - -static struct notifier_block pvclock_migrate = { - .notifier_call = pvclock_task_migrate, -}; - /* * Initialize the generic pvclock vsyscall state. This will allocate * a/some page(s) for the per-vcpu pvclock information, set up a @@ -194,17 +155,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i, WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); - pvclock_vdso_info = i; - for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, __pa(i) + (idx*PAGE_SIZE), PAGE_KERNEL_VVAR); } - - register_task_migration_notifier(&pvclock_migrate); - return 0; } #endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ed31c31..c73efcd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1669,12 +1669,28 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) &guest_hv_clock, sizeof(guest_hv_clock)))) return 0; - /* - * The interface expects us to write an even number signaling that the - * update is finished. Since the guest won't see the intermediate - * state, we just increase by 2 at the end. + /* This VCPU is paused, but it's legal for a guest to read another + * VCPU's kvmclock, so we really have to follow the specification where + * it says that version is odd if data is being modified, and even after + * it is consistent. + * + * Version field updates must be kept separate. This is because + * kvm_write_guest_cached might use a "rep movs" instruction, and + * writes within a string instruction are weakly ordered. So there + * are three writes overall. + * + * As a small optimization, only write the version field in the first + * and third write. The vcpu->pv_time cache is still valid, because the + * version field is the first in the struct. 
*/ - vcpu->hv_clock.version = guest_hv_clock.version + 2; + BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); + + vcpu->hv_clock.version = guest_hv_clock.version + 1; + kvm_write_guest_cached(v->kvm, &vcpu->pv_time, + &vcpu->hv_clock, + sizeof(vcpu->hv_clock.version)); + + smp_wmb(); /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); @@ -1695,6 +1711,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); + + smp_wmb(); + + vcpu->hv_clock.version++; + kvm_write_guest_cached(v->kvm, &vcpu->pv_time, + &vcpu->hv_clock, + sizeof(vcpu->hv_clock.version)); return 0; } diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 5ead4d6..70e7444 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -351,18 +351,20 @@ int arch_ioremap_pmd_supported(void) */ void *xlate_dev_mem_ptr(phys_addr_t phys) { - void *addr; - unsigned long start = phys & PAGE_MASK; + unsigned long start = phys & PAGE_MASK; + unsigned long offset = phys & ~PAGE_MASK; + unsigned long vaddr; /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */ if (page_is_ram(start >> PAGE_SHIFT)) return __va(phys); - addr = (void __force *)ioremap_cache(start, PAGE_SIZE); - if (addr) - addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK)); + vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE); + /* Only add the offset on success and return NULL if the ioremap() failed: */ + if (vaddr) + vaddr += offset; - return addr; + return (void *)vaddr; } void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 9875143..99f7610 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, if (is_ereg(dst_reg)) EMIT1(0x41); EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); + + /* emit 'movzwl eax, ax' */ + if (is_ereg(dst_reg)) + EMIT3(0x45, 0x0F, 0xB7); + else + EMIT2(0x0F, 0xB7); + EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); break; case 32: /* emit 'bswap eax' to swap lower 4 bytes */ @@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, break; case BPF_ALU | BPF_END | BPF_FROM_LE: + switch (imm32) { + case 16: + /* emit 'movzwl eax, ax' to zero extend 16-bit + * into 64 bit + */ + if (is_ereg(dst_reg)) + EMIT3(0x45, 0x0F, 0xB7); + else + EMIT2(0x0F, 0xB7); + EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); + break; + case 32: + /* emit 'mov eax, eax' to clear upper 32-bits */ + if (is_ereg(dst_reg)) + EMIT1(0x45); + EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); + break; + case 64: + /* nop */ + break; + } break; /* ST: *(u8*)(dst_reg + off) = imm */ diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index e469598..d939633 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge) kfree(info); } +/* + * An IO port or MMIO resource assigned to a PCI host bridge may be + * consumed by the host bridge itself or available to its child + * bus/devices. The ACPI specification defines a bit (Producer/Consumer) + * to tell whether the resource is consumed by the host bridge itself, + * but firmware hasn't used that bit consistently, so we can't rely on it. 
+ * + * On x86 and IA64 platforms, all IO port and MMIO resources are assumed + * to be available to child bus/devices except one special case: + * IO port [0xCF8-0xCFF] is consumed by the host bridge itself + * to access PCI configuration space. + * + * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF]. + */ +static bool resource_is_pcicfg_ioport(struct resource *res) +{ + return (res->flags & IORESOURCE_IO) && + res->start == 0xCF8 && res->end == 0xCFF; +} + static void probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device, int busnum, int domain, @@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info, "no IO and memory resources present in _CRS\n"); else resource_list_for_each_entry_safe(entry, tmp, list) { - if ((entry->res->flags & IORESOURCE_WINDOW) == 0 || - (entry->res->flags & IORESOURCE_DISABLED)) + if ((entry->res->flags & IORESOURCE_DISABLED) || + resource_is_pcicfg_ioport(entry->res)) resource_list_destroy_entry(entry); else entry->res->name = info->name; diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 275a3a8..e970320 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile @@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE $(call if_changed,vdso) -HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi +HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi hostprogs-y += vdso2c quiet_cmd_vdso2c = VDSO2C $@ diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 40d2473..9793322 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -82,15 +82,18 @@ static notrace cycle_t vread_pvclock(int *mode) cycle_t ret; u64 last; u32 version; - u32 migrate_count; u8 flags; unsigned cpu, cpu1; /* - * When looping to get a consistent (time-info, tsc) pair, we - * also need to deal with the possibility we can switch vcpus, - * so make sure we always re-fetch time-info for the current vcpu. + * Note: hypervisor must guarantee that: + * 1. cpu ID number maps 1:1 to per-CPU pvclock time info. + * 2. that per-CPU pvclock time info is updated if the + * underlying CPU changes. + * 3. that version is increased whenever underlying CPU + * changes. + * */ do { cpu = __getcpu() & VGETCPU_CPU_MASK; @@ -99,27 +102,20 @@ static notrace cycle_t vread_pvclock(int *mode) * __getcpu() calls (Gleb). */ - /* Make sure migrate_count will change if we leave the VCPU. */ - do { - pvti = get_pvti(cpu); - migrate_count = pvti->migrate_count; - - cpu1 = cpu; - cpu = __getcpu() & VGETCPU_CPU_MASK; - } while (unlikely(cpu != cpu1)); + pvti = get_pvti(cpu); version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); /* * Test we're still on the cpu as well as the version. - * - We must read TSC of pvti's VCPU. - * - KVM doesn't follow the versioning protocol, so data could - * change before version if we left the VCPU. + * We could have been migrated just after the first + * vgetcpu but before fetching the version, so we + * wouldn't notice a version change. 
*/ - smp_rmb(); - } while (unlikely((pvti->pvti.version & 1) || - pvti->pvti.version != version || - pvti->migrate_count != migrate_count)); + cpu1 = __getcpu() & VGETCPU_CPU_MASK; + } while (unlikely(cpu != cpu1 || + (pvti->pvti.version & 1) || + pvti->pvti.version != version)); if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) *mode = VCLOCK_NONE; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 94578ef..46957ea 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1760,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = { static void __init xen_hvm_guest_init(void) { + if (xen_pv_domain()) + return; + init_hvm_pv_info(); xen_hvm_init_shared_info(); @@ -1775,6 +1778,7 @@ static void __init xen_hvm_guest_init(void) xen_hvm_init_time_ops(); xen_hvm_init_mmu_ops(); } +#endif static bool xen_nopv = false; static __init int xen_parse_nopv(char *arg) @@ -1784,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg) } early_param("xen_nopv", xen_parse_nopv); -static uint32_t __init xen_hvm_platform(void) +static uint32_t __init xen_platform(void) { if (xen_nopv) return 0; - if (xen_pv_domain()) - return 0; - return xen_cpuid_base(); } @@ -1809,11 +1810,19 @@ bool xen_hvm_need_lapic(void) } EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); -const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { - .name = "Xen HVM", - .detect = xen_hvm_platform, +static void xen_set_cpu_features(struct cpuinfo_x86 *c) +{ + if (xen_pv_domain()) + clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); +} + +const struct hypervisor_x86 x86_hyper_xen = { + .name = "Xen", + .detect = xen_platform, +#ifdef CONFIG_XEN_PVHVM .init_platform = xen_hvm_guest_init, +#endif .x2apic_available = xen_x2apic_para_available, + .set_cpu_features = xen_set_cpu_features, }; -EXPORT_SYMBOL(x86_hyper_xen_hvm); -#endif +EXPORT_SYMBOL(x86_hyper_xen); diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index d949769..53b4c08 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data) tick_resume_local(); } +static void xen_vcpu_notify_suspend(void *data) +{ + tick_suspend_local(); +} + void xen_arch_resume(void) { on_each_cpu(xen_vcpu_notify_restore, NULL, 1); } + +void xen_arch_suspend(void) +{ + on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); +} diff --git a/block/blk-core.c b/block/blk-core.c index fd154b9..7871603 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q) q->queue_lock = &q->__queue_lock; spin_unlock_irq(lock); + bdi_destroy(&q->backing_dev_info); + /* @q is and will stay empty, shutdown and put */ blk_put_queue(q); } diff --git a/block/blk-mq.c b/block/blk-mq.c index ade8a2d..e68b71b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv) data.next = blk_rq_timeout(round_jiffies_up(data.next)); mod_timer(&q->timeout, data.next); } else { - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_tag_idle(hctx); + queue_for_each_hw_ctx(q, hctx, i) { + /* the hctx may be unmapped, so check it here */ + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_idle(hctx); + } } } @@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) spin_lock(&hctx->lock); list_splice(&rq_list, &hctx->dispatch); spin_unlock(&hctx->lock); + /* + * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but + * it's possible the queue is stopped and restarted again + * before this. 
Queue restart will dispatch requests. And since + * requests in rq_list aren't added into hctx->dispatch yet, + * the requests in rq_list might get lost. + * + * blk_mq_run_hw_queue() already checks the STOPPED bit + **/ + blk_mq_run_hw_queue(hctx, true); } } @@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) return NOTIFY_OK; } -static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu) -{ - struct request_queue *q = hctx->queue; - struct blk_mq_tag_set *set = q->tag_set; - - if (set->tags[hctx->queue_num]) - return NOTIFY_OK; - - set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num); - if (!set->tags[hctx->queue_num]) - return NOTIFY_STOP; - - hctx->tags = set->tags[hctx->queue_num]; - return NOTIFY_OK; -} - static int blk_mq_hctx_notify(void *data, unsigned long action, unsigned int cpu) { @@ -1594,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action, if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) return blk_mq_hctx_cpu_offline(hctx, cpu); - else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) - return blk_mq_hctx_cpu_online(hctx, cpu); + + /* + * In case of CPU online, tags may be reallocated + * in blk_mq_map_swqueue() after mapping is updated. + */ return NOTIFY_OK; } @@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) unsigned int i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; + struct blk_mq_tag_set *set = q->tag_set; queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); @@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q) * disable it and free the request entries. */ if (!hctx->nr_ctx) { - struct blk_mq_tag_set *set = q->tag_set; - if (set->tags[i]) { blk_mq_free_rq_map(set, set->tags[i], i); set->tags[i] = NULL; - hctx->tags = NULL; } + hctx->tags = NULL; continue; } + /* unmapped hw queue can be remapped after CPU topo changed */ + if (!set->tags[i]) + set->tags[i] = blk_mq_init_rq_map(set, i); + hctx->tags = set->tags[i]; + WARN_ON(!hctx->tags); + /* * Set the map size to the number of mapped software queues. 
* This is more accurate and more efficient than looping @@ -2090,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, */ list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_freeze_queue_start(q); - list_for_each_entry(q, &all_q_list, all_q_node) + list_for_each_entry(q, &all_q_list, all_q_node) { blk_mq_freeze_queue_wait(q); + /* + * timeout handler can't touch hw queue during the + * reinitialization + */ + del_timer_sync(&q->timeout); + } + list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_queue_reinit(q); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index faaf36a..2b8fd30 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj) blk_trace_shutdown(q); - bdi_destroy(&q->backing_dev_info); - ida_simple_remove(&blk_queue_ida, q->id); call_rcu(&q->rcu_head, blk_free_queue_rcu); } diff --git a/block/bounce.c b/block/bounce.c index ab21ba2..ed9dd80 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -221,8 +221,8 @@ bounce: if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) continue; - inc_zone_page_state(to->bv_page, NR_BOUNCE); to->bv_page = mempool_alloc(pool, q->bounce_gfp); + inc_zone_page_state(to->bv_page, NR_BOUNCE); if (rw == WRITE) { char *vto, *vfrom; diff --git a/block/elevator.c b/block/elevator.c index 59794d0..8985038 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q, eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); if (unlikely(!eq)) - goto err; + return NULL; eq->type = e; kobject_init(&eq->kobj, &elv_ktype); @@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q, hash_init(eq->hash); return eq; -err: - kfree(eq); - elevator_put(e); - return NULL; } EXPORT_SYMBOL(elevator_alloc); diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index b193f84..ff6d8ad 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { {"PNPb006"}, /* cs423x-pnpbios */ {"CSC0100"}, + {"CSC0103"}, + {"CSC0110"}, {"CSC0000"}, {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */ /* es18xx-pnpbios */ diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index a72685c..5e8df91 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -102,19 +102,12 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = { {"_SB_", ACPI_TYPE_DEVICE, NULL}, {"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL}, {"_TZ_", ACPI_TYPE_DEVICE, NULL}, - /* - * March, 2015: - * The _REV object is in the process of being deprecated, because - * other ACPI implementations permanently return 2. Thus, it - * has little or no value. Return 2 for compatibility with - * other ACPI implementations. 
- */ - {"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)}, + {"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL}, {"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME}, - {"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)}, + {"_GL_", ACPI_TYPE_MUTEX, (char *)1}, #if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY) - {"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)}, + {"_OSI", ACPI_TYPE_METHOD, (char *)1}, #endif /* Table terminator */ diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 39748bb..7ccba39 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas, request_mem_region(addr, length, desc); } -static int __init acpi_reserve_resources(void) +static void __init acpi_reserve_resources(void) { acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, "ACPI PM1a_EVT_BLK"); @@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void) if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) acpi_request_region(&acpi_gbl_FADT.xgpe1_block, acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); - - return 0; } -device_initcall(acpi_reserve_resources); void acpi_os_printf(const char *fmt, ...) { @@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void) acpi_status __init acpi_os_initialize1(void) { + acpi_reserve_resources(); kacpid_wq = alloc_workqueue("kacpid", 0, 1); kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 5589a6e..8244f01 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources); * @ares: Input ACPI resource object. * @types: Valid resource types of IORESOURCE_XXX * - * This is a hepler function to support acpi_dev_get_resources(), which filters + * This is a helper function to support acpi_dev_get_resources(), which filters * ACPI resource objects according to resource types. 
*/ int acpi_dev_filter_resource_type(struct acpi_resource *ares, diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index cd82762..01504c8 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -684,7 +684,7 @@ static int acpi_sbs_add(struct acpi_device *device) if (!sbs_manager_broken) { result = acpi_manager_get_info(sbs); if (!result) { - sbs->manager_present = 0; + sbs->manager_present = 1; for (id = 0; id < MAX_SBS_BAT; ++id) if ((sbs->batteries_supported & (1 << id))) acpi_battery_add(sbs, id); diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 26e5b50..bf034f8 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -14,6 +14,7 @@ #include <linux/delay.h> #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/dmi.h> #include "sbshc.h" #define PREFIX "ACPI: " @@ -87,6 +88,8 @@ enum acpi_smb_offset { ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ }; +static bool macbook; + static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) { return ec_read(hc->offset + address, data); @@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, } mutex_lock(&hc->lock); + if (macbook) + udelay(5); if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) goto end; if (temp) { @@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, void *data); +static int macbook_dmi_match(const struct dmi_system_id *d) +{ + pr_debug("Detected MacBook, enabling workaround\n"); + macbook = true; + return 0; +} + +static struct dmi_system_id acpi_smbus_dmi_table[] = { + { macbook_dmi_match, "Apple MacBook", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, + }, + { }, +}; + static int acpi_smbus_hc_add(struct acpi_device *device) { int status; unsigned long long val; struct acpi_smb_hc *hc; + dmi_check_system(acpi_smbus_dmi_table); + if (!device) return -EINVAL; diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index d24fa19..6d4e44e 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -800,7 +800,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, result = thermal_zone_bind_cooling_device (thermal, trip, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); else result = thermal_zone_unbind_cooling_device @@ -824,7 +825,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, if (bind) result = thermal_zone_bind_cooling_device (thermal, trip, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); else result = thermal_zone_unbind_cooling_device (thermal, trip, cdev); @@ -841,7 +843,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal, result = thermal_zone_bind_cooling_device (thermal, THERMAL_TRIPS_NONE, cdev, THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); else result = thermal_zone_unbind_cooling_device (thermal, THERMAL_TRIPS_NONE, diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 5f60155..9dca4b9 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -270,6 +270,7 @@ config ATA_PIIX config SATA_DWC tristate "DesignWare Cores SATA support" depends on 460EX + select DW_DMAC help This option enables support for the on-chip SATA controller of the AppliedMicro processor 460EX. @@ -729,15 +730,6 @@ config PATA_SC1200 If unsure, say N. 
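[Editor's illustrative sketch, not part of the patch] The drivers/acpi/thermal.c hunks above update every thermal_zone_bind_cooling_device() call site for a new sixth "weight" argument that follows the upper/lower cooling-state limits. A minimal sketch of what an updated call site looks like, assuming a hypothetical driver bind callback named example_bind (THERMAL_WEIGHT_DEFAULT preserves the previous default weighting):

	#include <linux/thermal.h>

	/* Hypothetical bind callback; only the added weight argument matters here. */
	static int example_bind(struct thermal_zone_device *tz,
				struct thermal_cooling_device *cdev)
	{
		return thermal_zone_bind_cooling_device(tz, 0 /* trip */, cdev,
							THERMAL_NO_LIMIT,
							THERMAL_NO_LIMIT,
							THERMAL_WEIGHT_DEFAULT);
	}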
-config PATA_SCC - tristate "Toshiba's Cell Reference Set IDE support" - depends on PCI && PPC_CELLEB - help - This option enables support for the built-in IDE controller on - Toshiba Cell Reference Board. - - If unsure, say N. - config PATA_SCH tristate "Intel SCH PATA support" depends on PCI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index b67e995..40f7865 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -75,7 +75,6 @@ obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o obj-$(CONFIG_PATA_RDC) += pata_rdc.o obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o -obj-$(CONFIG_PATA_SCC) += pata_scc.o obj-$(CONFIG_PATA_SCH) += pata_sch.o obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o obj-$(CONFIG_PATA_SIL680) += pata_sil680.o diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index c7a92a7..65ee944 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -66,6 +66,7 @@ enum board_ids { board_ahci_yes_fbs, /* board IDs for specific chipsets in alphabetical order */ + board_ahci_avn, board_ahci_mcp65, board_ahci_mcp77, board_ahci_mcp89, @@ -84,6 +85,8 @@ enum board_ids { static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); +static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); static void ahci_mcp89_apple_enable(struct pci_dev *pdev); static bool is_mcp89_apple(struct pci_dev *pdev); static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, @@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = { .hardreset = ahci_p5wdh_hardreset, }; +static struct ata_port_operations ahci_avn_ops = { + .inherits = &ahci_ops, + .hardreset = ahci_avn_hardreset, +}; + static const struct ata_port_info ahci_port_info[] = { /* by features */ [board_ahci] = { @@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = { .port_ops = &ahci_ops, }, /* by chipsets */ + [board_ahci_avn] = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_avn_ops, + }, [board_ahci_mcp65] = { AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ), @@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */ - { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */ - { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */ + { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */ + { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */ + { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */ + 
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ @@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, return rc; } +/* + * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports. + * + * It has been observed with some SSDs that the timing of events in the + * link synchronization phase can leave the port in a state that can not + * be recovered by a SATA-hard-reset alone. The failing signature is + * SStatus.DET stuck at 1 ("Device presence detected but Phy + * communication not established"). It was found that unloading and + * reloading the driver when this problem occurs allows the drive + * connection to be recovered (DET advanced to 0x3). The critical + * component of reloading the driver is that the port state machines are + * reset by bouncing "port enable" in the AHCI PCS configuration + * register. So, reproduce that effect by bouncing a port whenever we + * see DET==1 after a reset. + */ +static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_host_priv *hpriv = ap->host->private_data; + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + unsigned long tmo = deadline - jiffies; + struct ata_taskfile tf; + bool online; + int rc, i; + + DPRINTK("ENTER\n"); + + ahci_stop_engine(ap); + + for (i = 0; i < 2; i++) { + u16 val; + u32 sstatus; + int port = ap->port_no; + struct ata_host *host = ap->host; + struct pci_dev *pdev = to_pci_dev(host->dev); + + /* clear D2H reception area to properly wait for D2H FIS */ + ata_tf_init(link->device, &tf); + tf.command = ATA_BUSY; + ata_tf_to_fis(&tf, 0, 0, d2h_fis); + + rc = sata_link_hardreset(link, timing, deadline, &online, + ahci_check_ready); + + if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 || + (sstatus & 0xf) != 1) + break; + + ata_link_printk(link, KERN_INFO, "avn bounce port%d\n", + port); + + pci_read_config_word(pdev, 0x92, &val); + val &= ~(1 << port); + pci_write_config_word(pdev, 0x92, val); + ata_msleep(ap, 1000); + val |= 1 << port; + pci_write_config_word(pdev, 0x92, val); + deadline += tmo; + } + + hpriv->start_engine(ap); + + if (online) + *class = ahci_dev_classify(ap); + + DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); + return rc; +} + + #ifdef CONFIG_PM static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c index ea0ff00..8ff428f 100644 --- a/drivers/ata/ahci_st.c +++ b/drivers/ata/ahci_st.c @@ -37,7 +37,6 @@ struct st_ahci_drv_data { struct reset_control *pwr; struct reset_control *sw_rst; struct reset_control *pwr_rst; - struct ahci_host_priv *hpriv; }; static void st_ahci_configure_oob(void __iomem *mmio) @@ -55,9 +54,10 @@ static void st_ahci_configure_oob(void __iomem *mmio) writel(new_val, mmio + ST_AHCI_OOBR); } -static int st_ahci_deassert_resets(struct device *dev) +static int st_ahci_deassert_resets(struct ahci_host_priv *hpriv, + struct device *dev) { - struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); + struct st_ahci_drv_data *drv_data = hpriv->plat_data; int err; if (drv_data->pwr) { @@ -90,8 +90,8 @@ static int st_ahci_deassert_resets(struct 
device *dev) static void st_ahci_host_stop(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; + struct st_ahci_drv_data *drv_data = hpriv->plat_data; struct device *dev = host->dev; - struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); int err; if (drv_data->pwr) { @@ -103,29 +103,30 @@ static void st_ahci_host_stop(struct ata_host *host) ahci_platform_disable_resources(hpriv); } -static int st_ahci_probe_resets(struct platform_device *pdev) +static int st_ahci_probe_resets(struct ahci_host_priv *hpriv, + struct device *dev) { - struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev); + struct st_ahci_drv_data *drv_data = hpriv->plat_data; - drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn"); + drv_data->pwr = devm_reset_control_get(dev, "pwr-dwn"); if (IS_ERR(drv_data->pwr)) { - dev_info(&pdev->dev, "power reset control not defined\n"); + dev_info(dev, "power reset control not defined\n"); drv_data->pwr = NULL; } - drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst"); + drv_data->sw_rst = devm_reset_control_get(dev, "sw-rst"); if (IS_ERR(drv_data->sw_rst)) { - dev_info(&pdev->dev, "soft reset control not defined\n"); + dev_info(dev, "soft reset control not defined\n"); drv_data->sw_rst = NULL; } - drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst"); + drv_data->pwr_rst = devm_reset_control_get(dev, "pwr-rst"); if (IS_ERR(drv_data->pwr_rst)) { - dev_dbg(&pdev->dev, "power soft reset control not defined\n"); + dev_dbg(dev, "power soft reset control not defined\n"); drv_data->pwr_rst = NULL; } - return st_ahci_deassert_resets(&pdev->dev); + return st_ahci_deassert_resets(hpriv, dev); } static struct ata_port_operations st_ahci_port_ops = { @@ -154,15 +155,12 @@ static int st_ahci_probe(struct platform_device *pdev) if (!drv_data) return -ENOMEM; - platform_set_drvdata(pdev, drv_data); - hpriv = ahci_platform_get_resources(pdev); if (IS_ERR(hpriv)) return PTR_ERR(hpriv); + hpriv->plat_data = drv_data; - drv_data->hpriv = hpriv; - - err = st_ahci_probe_resets(pdev); + err = st_ahci_probe_resets(hpriv, &pdev->dev); if (err) return err; @@ -170,7 +168,7 @@ static int st_ahci_probe(struct platform_device *pdev) if (err) return err; - st_ahci_configure_oob(drv_data->hpriv->mmio); + st_ahci_configure_oob(hpriv->mmio); err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, &ahci_platform_sht); @@ -185,8 +183,9 @@ static int st_ahci_probe(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int st_ahci_suspend(struct device *dev) { - struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); - struct ahci_host_priv *hpriv = drv_data->hpriv; + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + struct st_ahci_drv_data *drv_data = hpriv->plat_data; int err; err = ahci_platform_suspend_host(dev); @@ -208,21 +207,21 @@ static int st_ahci_suspend(struct device *dev) static int st_ahci_resume(struct device *dev) { - struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); - struct ahci_host_priv *hpriv = drv_data->hpriv; + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; int err; err = ahci_platform_enable_resources(hpriv); if (err) return err; - err = st_ahci_deassert_resets(dev); + err = st_ahci_deassert_resets(hpriv, dev); if (err) { ahci_platform_disable_resources(hpriv); return err; } - st_ahci_configure_oob(drv_data->hpriv->mmio); + st_ahci_configure_oob(hpriv->mmio); return ahci_platform_resume_host(dev); } diff 
--git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 61a9c07..287c4ba 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap, if (unlikely(resetting)) status &= ~PORT_IRQ_BAD_PMP; - /* if LPM is enabled, PHYRDY doesn't mean anything */ - if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) { + if (sata_lpm_ignore_phy_events(&ap->link)) { status &= ~PORT_IRQ_PHYRDY; ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG); } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index f6cb1f1..577849c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4235,7 +4235,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, /* @@ -6752,6 +6752,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, return tmp; } +/** + * sata_lpm_ignore_phy_events - test if PHY event should be ignored + * @link: Link receiving the event + * + * Test whether the received PHY event has to be ignored or not. + * + * LOCKING: + * None: + * + * RETURNS: + * True if the event has to be ignored. + */ +bool sata_lpm_ignore_phy_events(struct ata_link *link) +{ + unsigned long lpm_timeout = link->last_lpm_change + + msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); + + /* if LPM is enabled, PHYRDY doesn't mean anything */ + if (link->lpm_policy > ATA_LPM_MAX_POWER) + return true; + + /* ignore the first PHY event after the LPM policy changed + * as it is might be spurious + */ + if ((link->flags & ATA_LFLAG_CHANGED) && + time_before(jiffies, lpm_timeout)) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); + /* * Dummy port_ops */ diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 07f41be..cf0022e 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3597,6 +3597,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, } } + link->last_lpm_change = jiffies; + link->flags |= ATA_LFLAG_CHANGED; + return 0; fail: diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c deleted file mode 100644 index 5cd60d6..0000000 --- a/drivers/ata/pata_scc.c +++ /dev/null @@ -1,1110 +0,0 @@ -/* - * Support for IDE interfaces on Celleb platform - * - * (C) Copyright 2006 TOSHIBA CORPORATION - * - * This code is based on drivers/ata/ata_piix.c: - * Copyright 2003-2005 Red Hat Inc - * Copyright 2003-2005 Jeff Garzik - * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer - * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> - * Copyright (C) 2003 Red Hat Inc - * - * and drivers/ata/ahci.c: - * Copyright 2004-2005 Red Hat, Inc. - * - * and drivers/ata/libata-core.c: - * Copyright 2003-2004 Red Hat, Inc. All rights reserved. - * Copyright 2003-2004 Jeff Garzik - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/blkdev.h> -#include <linux/delay.h> -#include <linux/device.h> -#include <scsi/scsi_host.h> -#include <linux/libata.h> - -#define DRV_NAME "pata_scc" -#define DRV_VERSION "0.3" - -#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4 - -/* PCI BARs */ -#define SCC_CTRL_BAR 0 -#define SCC_BMID_BAR 1 - -/* offset of CTRL registers */ -#define SCC_CTL_PIOSHT 0x000 -#define SCC_CTL_PIOCT 0x004 -#define SCC_CTL_MDMACT 0x008 -#define SCC_CTL_MCRCST 0x00C -#define SCC_CTL_SDMACT 0x010 -#define SCC_CTL_SCRCST 0x014 -#define SCC_CTL_UDENVT 0x018 -#define SCC_CTL_TDVHSEL 0x020 -#define SCC_CTL_MODEREG 0x024 -#define SCC_CTL_ECMODE 0xF00 -#define SCC_CTL_MAEA0 0xF50 -#define SCC_CTL_MAEC0 0xF54 -#define SCC_CTL_CCKCTRL 0xFF0 - -/* offset of BMID registers */ -#define SCC_DMA_CMD 0x000 -#define SCC_DMA_STATUS 0x004 -#define SCC_DMA_TABLE_OFS 0x008 -#define SCC_DMA_INTMASK 0x010 -#define SCC_DMA_INTST 0x014 -#define SCC_DMA_PTERADD 0x018 -#define SCC_REG_CMD_ADDR 0x020 -#define SCC_REG_DATA 0x000 -#define SCC_REG_ERR 0x004 -#define SCC_REG_FEATURE 0x004 -#define SCC_REG_NSECT 0x008 -#define SCC_REG_LBAL 0x00C -#define SCC_REG_LBAM 0x010 -#define SCC_REG_LBAH 0x014 -#define SCC_REG_DEVICE 0x018 -#define SCC_REG_STATUS 0x01C -#define SCC_REG_CMD 0x01C -#define SCC_REG_ALTSTATUS 0x020 - -/* register value */ -#define TDVHSEL_MASTER 0x00000001 -#define TDVHSEL_SLAVE 0x00000004 - -#define MODE_JCUSFEN 0x00000080 - -#define ECMODE_VALUE 0x01 - -#define CCKCTRL_ATARESET 0x00040000 -#define CCKCTRL_BUFCNT 0x00020000 -#define CCKCTRL_CRST 0x00010000 -#define CCKCTRL_OCLKEN 0x00000100 -#define CCKCTRL_ATACLKOEN 0x00000002 -#define CCKCTRL_LCLKEN 0x00000001 - -#define QCHCD_IOS_SS 0x00000001 - -#define QCHSD_STPDIAG 0x00020000 - -#define INTMASK_MSK 0xD1000012 -#define INTSTS_SERROR 0x80000000 -#define INTSTS_PRERR 0x40000000 -#define INTSTS_RERR 0x10000000 -#define INTSTS_ICERR 0x01000000 -#define INTSTS_BMSINT 0x00000010 -#define INTSTS_BMHE 0x00000008 -#define INTSTS_IOIRQS 0x00000004 -#define INTSTS_INTRQ 0x00000002 -#define INTSTS_ACTEINT 0x00000001 - - -/* PIO transfer mode table */ -/* JCHST */ -static const unsigned long JCHSTtbl[2][7] = { - {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */ - {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */ -}; - -/* JCHHT */ -static const unsigned long JCHHTtbl[2][7] = { - {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */ - {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */ -}; - -/* JCHCT */ -static const unsigned long JCHCTtbl[2][7] = { - {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */ - {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */ -}; - -/* DMA transfer mode table */ -/* JCHDCTM/JCHDCTS */ -static const unsigned long JCHDCTxtbl[2][7] = { - {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */ - {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */ -}; - -/* JCSTWTM/JCSTWTS */ -static const unsigned long JCSTWTxtbl[2][7] = { - {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 
0x00}, /* 100MHz */ - {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */ -}; - -/* JCTSS */ -static const unsigned long JCTSStbl[2][7] = { - {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */ - {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */ -}; - -/* JCENVT */ -static const unsigned long JCENVTtbl[2][7] = { - {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */ - {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */ -}; - -/* JCACTSELS/JCACTSELM */ -static const unsigned long JCACTSELtbl[2][7] = { - {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */ - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */ -}; - -static const struct pci_device_id scc_pci_tbl[] = { - { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0}, - { } /* terminate list */ -}; - -/** - * scc_set_piomode - Initialize host controller PATA PIO timings - * @ap: Port whose timings we are configuring - * @adev: um - * - * Set PIO mode for device. - * - * LOCKING: - * None (inherited from caller). - */ - -static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev) -{ - unsigned int pio = adev->pio_mode - XFER_PIO_0; - void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; - void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; - void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT; - void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT; - unsigned long reg; - int offset; - - reg = in_be32(cckctrl_port); - if (reg & CCKCTRL_ATACLKOEN) - offset = 1; /* 133MHz */ - else - offset = 0; /* 100MHz */ - - reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio]; - out_be32(piosht_port, reg); - reg = JCHCTtbl[offset][pio]; - out_be32(pioct_port, reg); -} - -/** - * scc_set_dmamode - Initialize host controller PATA DMA timings - * @ap: Port whose timings we are configuring - * @adev: um - * - * Set UDMA mode for device. - * - * LOCKING: - * None (inherited from caller). 
- */ - -static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev) -{ - unsigned int udma = adev->dma_mode; - unsigned int is_slave = (adev->devno != 0); - u8 speed = udma; - void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; - void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; - void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT; - void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST; - void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT; - void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST; - void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT; - void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL; - int offset, idx; - - if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN) - offset = 1; /* 133MHz */ - else - offset = 0; /* 100MHz */ - - if (speed >= XFER_UDMA_0) - idx = speed - XFER_UDMA_0; - else - return; - - if (is_slave) { - out_be32(sdmact_port, JCHDCTxtbl[offset][idx]); - out_be32(scrcst_port, JCSTWTxtbl[offset][idx]); - out_be32(tdvhsel_port, - (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2)); - } else { - out_be32(mdmact_port, JCHDCTxtbl[offset][idx]); - out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]); - out_be32(tdvhsel_port, - (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]); - } - out_be32(udenvt_port, - JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]); -} - -unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask) -{ - /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */ - if (adev->class == ATA_DEV_ATAPI && - (mask & (0xE0 << ATA_SHIFT_UDMA))) { - printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); - mask &= ~(0xE0 << ATA_SHIFT_UDMA); - } - return mask; -} - -/** - * scc_tf_load - send taskfile registers to host controller - * @ap: Port to which output is sent - * @tf: ATA taskfile register set - * - * Note: Original code is ata_sff_tf_load(). - */ - -static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf) -{ - struct ata_ioports *ioaddr = &ap->ioaddr; - unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; - - if (tf->ctl != ap->last_ctl) { - out_be32(ioaddr->ctl_addr, tf->ctl); - ap->last_ctl = tf->ctl; - ata_wait_idle(ap); - } - - if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { - out_be32(ioaddr->feature_addr, tf->hob_feature); - out_be32(ioaddr->nsect_addr, tf->hob_nsect); - out_be32(ioaddr->lbal_addr, tf->hob_lbal); - out_be32(ioaddr->lbam_addr, tf->hob_lbam); - out_be32(ioaddr->lbah_addr, tf->hob_lbah); - VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", - tf->hob_feature, - tf->hob_nsect, - tf->hob_lbal, - tf->hob_lbam, - tf->hob_lbah); - } - - if (is_addr) { - out_be32(ioaddr->feature_addr, tf->feature); - out_be32(ioaddr->nsect_addr, tf->nsect); - out_be32(ioaddr->lbal_addr, tf->lbal); - out_be32(ioaddr->lbam_addr, tf->lbam); - out_be32(ioaddr->lbah_addr, tf->lbah); - VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", - tf->feature, - tf->nsect, - tf->lbal, - tf->lbam, - tf->lbah); - } - - if (tf->flags & ATA_TFLAG_DEVICE) { - out_be32(ioaddr->device_addr, tf->device); - VPRINTK("device 0x%X\n", tf->device); - } - - ata_wait_idle(ap); -} - -/** - * scc_check_status - Read device status reg & clear interrupt - * @ap: port where the device is - * - * Note: Original code is ata_check_status(). 
- */ - -static u8 scc_check_status (struct ata_port *ap) -{ - return in_be32(ap->ioaddr.status_addr); -} - -/** - * scc_tf_read - input device's ATA taskfile shadow registers - * @ap: Port from which input is read - * @tf: ATA taskfile register set for storing input - * - * Note: Original code is ata_sff_tf_read(). - */ - -static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) -{ - struct ata_ioports *ioaddr = &ap->ioaddr; - - tf->command = scc_check_status(ap); - tf->feature = in_be32(ioaddr->error_addr); - tf->nsect = in_be32(ioaddr->nsect_addr); - tf->lbal = in_be32(ioaddr->lbal_addr); - tf->lbam = in_be32(ioaddr->lbam_addr); - tf->lbah = in_be32(ioaddr->lbah_addr); - tf->device = in_be32(ioaddr->device_addr); - - if (tf->flags & ATA_TFLAG_LBA48) { - out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB); - tf->hob_feature = in_be32(ioaddr->error_addr); - tf->hob_nsect = in_be32(ioaddr->nsect_addr); - tf->hob_lbal = in_be32(ioaddr->lbal_addr); - tf->hob_lbam = in_be32(ioaddr->lbam_addr); - tf->hob_lbah = in_be32(ioaddr->lbah_addr); - out_be32(ioaddr->ctl_addr, tf->ctl); - ap->last_ctl = tf->ctl; - } -} - -/** - * scc_exec_command - issue ATA command to host controller - * @ap: port to which command is being issued - * @tf: ATA taskfile register set - * - * Note: Original code is ata_sff_exec_command(). - */ - -static void scc_exec_command (struct ata_port *ap, - const struct ata_taskfile *tf) -{ - DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); - - out_be32(ap->ioaddr.command_addr, tf->command); - ata_sff_pause(ap); -} - -/** - * scc_check_altstatus - Read device alternate status reg - * @ap: port where the device is - */ - -static u8 scc_check_altstatus (struct ata_port *ap) -{ - return in_be32(ap->ioaddr.altstatus_addr); -} - -/** - * scc_dev_select - Select device 0/1 on ATA bus - * @ap: ATA channel to manipulate - * @device: ATA device (numbered from zero) to select - * - * Note: Original code is ata_sff_dev_select(). - */ - -static void scc_dev_select (struct ata_port *ap, unsigned int device) -{ - u8 tmp; - - if (device == 0) - tmp = ATA_DEVICE_OBS; - else - tmp = ATA_DEVICE_OBS | ATA_DEV1; - - out_be32(ap->ioaddr.device_addr, tmp); - ata_sff_pause(ap); -} - -/** - * scc_set_devctl - Write device control reg - * @ap: port where the device is - * @ctl: value to write - */ - -static void scc_set_devctl(struct ata_port *ap, u8 ctl) -{ - out_be32(ap->ioaddr.ctl_addr, ctl); -} - -/** - * scc_bmdma_setup - Set up PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * Note: Original code is ata_bmdma_setup(). - */ - -static void scc_bmdma_setup (struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); - u8 dmactl; - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - /* load PRD table addr */ - out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma); - - /* specify data direction, triple-check start bit is clear */ - dmactl = in_be32(mmio + SCC_DMA_CMD); - dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); - if (!rw) - dmactl |= ATA_DMA_WR; - out_be32(mmio + SCC_DMA_CMD, dmactl); - - /* issue r/w command */ - ap->ops->sff_exec_command(ap, &qc->tf); -} - -/** - * scc_bmdma_start - Start a PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * Note: Original code is ata_bmdma_start(). 
- */ - -static void scc_bmdma_start (struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - u8 dmactl; - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - /* start host DMA transaction */ - dmactl = in_be32(mmio + SCC_DMA_CMD); - out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START); -} - -/** - * scc_devchk - PATA device presence detection - * @ap: ATA channel to examine - * @device: Device to examine (starting at zero) - * - * Note: Original code is ata_devchk(). - */ - -static unsigned int scc_devchk (struct ata_port *ap, - unsigned int device) -{ - struct ata_ioports *ioaddr = &ap->ioaddr; - u8 nsect, lbal; - - ap->ops->sff_dev_select(ap, device); - - out_be32(ioaddr->nsect_addr, 0x55); - out_be32(ioaddr->lbal_addr, 0xaa); - - out_be32(ioaddr->nsect_addr, 0xaa); - out_be32(ioaddr->lbal_addr, 0x55); - - out_be32(ioaddr->nsect_addr, 0x55); - out_be32(ioaddr->lbal_addr, 0xaa); - - nsect = in_be32(ioaddr->nsect_addr); - lbal = in_be32(ioaddr->lbal_addr); - - if ((nsect == 0x55) && (lbal == 0xaa)) - return 1; /* we found a device */ - - return 0; /* nothing found */ -} - -/** - * scc_wait_after_reset - wait for devices to become ready after reset - * - * Note: Original code is ata_sff_wait_after_reset - */ - -static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, - unsigned long deadline) -{ - struct ata_port *ap = link->ap; - struct ata_ioports *ioaddr = &ap->ioaddr; - unsigned int dev0 = devmask & (1 << 0); - unsigned int dev1 = devmask & (1 << 1); - int rc, ret = 0; - - /* Spec mandates ">= 2ms" before checking status. We wait - * 150ms, because that was the magic delay used for ATAPI - * devices in Hale Landis's ATADRVR, for the period of time - * between when the ATA command register is written, and then - * status is checked. Because waiting for "a while" before - * checking status is fine, post SRST, we perform this magic - * delay here as well. - * - * Old drivers/ide uses the 2mS rule and then waits for ready. - */ - ata_msleep(ap, 150); - - /* always check readiness of the master device */ - rc = ata_sff_wait_ready(link, deadline); - /* -ENODEV means the odd clown forgot the D7 pulldown resistor - * and TF status is 0xff, bail out on it too. - */ - if (rc) - return rc; - - /* if device 1 was found in ata_devchk, wait for register - * access briefly, then wait for BSY to clear. - */ - if (dev1) { - int i; - - ap->ops->sff_dev_select(ap, 1); - - /* Wait for register access. Some ATAPI devices fail - * to set nsect/lbal after reset, so don't waste too - * much time on it. We're gonna wait for !BSY anyway. - */ - for (i = 0; i < 2; i++) { - u8 nsect, lbal; - - nsect = in_be32(ioaddr->nsect_addr); - lbal = in_be32(ioaddr->lbal_addr); - if ((nsect == 1) && (lbal == 1)) - break; - ata_msleep(ap, 50); /* give drive a breather */ - } - - rc = ata_sff_wait_ready(link, deadline); - if (rc) { - if (rc != -ENODEV) - return rc; - ret = rc; - } - } - - /* is all this really necessary? */ - ap->ops->sff_dev_select(ap, 0); - if (dev1) - ap->ops->sff_dev_select(ap, 1); - if (dev0) - ap->ops->sff_dev_select(ap, 0); - - return ret; -} - -/** - * scc_bus_softreset - PATA device software reset - * - * Note: Original code is ata_bus_softreset(). - */ - -static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, - unsigned long deadline) -{ - struct ata_ioports *ioaddr = &ap->ioaddr; - - DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); - - /* software reset. 
causes dev0 to be selected */ - out_be32(ioaddr->ctl_addr, ap->ctl); - udelay(20); - out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST); - udelay(20); - out_be32(ioaddr->ctl_addr, ap->ctl); - - return scc_wait_after_reset(&ap->link, devmask, deadline); -} - -/** - * scc_softreset - reset host port via ATA SRST - * @ap: port to reset - * @classes: resulting classes of attached devices - * @deadline: deadline jiffies for the operation - * - * Note: Original code is ata_sff_softreset(). - */ - -static int scc_softreset(struct ata_link *link, unsigned int *classes, - unsigned long deadline) -{ - struct ata_port *ap = link->ap; - unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; - unsigned int devmask = 0; - int rc; - u8 err; - - DPRINTK("ENTER\n"); - - /* determine if device 0/1 are present */ - if (scc_devchk(ap, 0)) - devmask |= (1 << 0); - if (slave_possible && scc_devchk(ap, 1)) - devmask |= (1 << 1); - - /* select device 0 again */ - ap->ops->sff_dev_select(ap, 0); - - /* issue bus reset */ - DPRINTK("about to softreset, devmask=%x\n", devmask); - rc = scc_bus_softreset(ap, devmask, deadline); - if (rc) { - ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc); - return -EIO; - } - - /* determine by signature whether we have ATA or ATAPI devices */ - classes[0] = ata_sff_dev_classify(&ap->link.device[0], - devmask & (1 << 0), &err); - if (slave_possible && err != 0x81) - classes[1] = ata_sff_dev_classify(&ap->link.device[1], - devmask & (1 << 1), &err); - - DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); - return 0; -} - -/** - * scc_bmdma_stop - Stop PCI IDE BMDMA transfer - * @qc: Command we are ending DMA for - */ - -static void scc_bmdma_stop (struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; - void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR]; - u32 reg; - - while (1) { - reg = in_be32(bmid_base + SCC_DMA_INTST); - - if (reg & INTSTS_SERROR) { - printk(KERN_WARNING "%s: SERROR\n", DRV_NAME); - out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT); - out_be32(bmid_base + SCC_DMA_CMD, - in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); - continue; - } - - if (reg & INTSTS_PRERR) { - u32 maea0, maec0; - maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0); - maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0); - printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0); - out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT); - out_be32(bmid_base + SCC_DMA_CMD, - in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); - continue; - } - - if (reg & INTSTS_RERR) { - printk(KERN_WARNING "%s: Response Error\n", DRV_NAME); - out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT); - out_be32(bmid_base + SCC_DMA_CMD, - in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); - continue; - } - - if (reg & INTSTS_ICERR) { - out_be32(bmid_base + SCC_DMA_CMD, - in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); - printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME); - out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT); - continue; - } - - if (reg & INTSTS_BMSINT) { - unsigned int classes; - unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT); - printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); - out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); - /* TBD: SW reset */ - scc_softreset(&ap->link, &classes, deadline); - continue; - } - - if (reg & INTSTS_BMHE) { - out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE); - continue; - } - - if 
(reg & INTSTS_ACTEINT) { - out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT); - continue; - } - - if (reg & INTSTS_IOIRQS) { - out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS); - continue; - } - break; - } - - /* clear start/stop bit */ - out_be32(bmid_base + SCC_DMA_CMD, - in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); - - /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_sff_dma_pause(ap); /* dummy read */ -} - -/** - * scc_bmdma_status - Read PCI IDE BMDMA status - * @ap: Port associated with this ATA transaction. - */ - -static u8 scc_bmdma_status (struct ata_port *ap) -{ - void __iomem *mmio = ap->ioaddr.bmdma_addr; - u8 host_stat = in_be32(mmio + SCC_DMA_STATUS); - u32 int_status = in_be32(mmio + SCC_DMA_INTST); - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); - static int retry = 0; - - /* return if IOS_SS is cleared */ - if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START)) - return host_stat; - - /* errata A252,A308 workaround: Step4 */ - if ((scc_check_altstatus(ap) & ATA_ERR) - && (int_status & INTSTS_INTRQ)) - return (host_stat | ATA_DMA_INTR); - - /* errata A308 workaround Step5 */ - if (int_status & INTSTS_IOIRQS) { - host_stat |= ATA_DMA_INTR; - - /* We don't check ATAPI DMA because it is limited to UDMA4 */ - if ((qc->tf.protocol == ATA_PROT_DMA && - qc->dev->xfer_mode > XFER_UDMA_4)) { - if (!(int_status & INTSTS_ACTEINT)) { - printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n", - ap->print_id); - host_stat |= ATA_DMA_ERR; - if (retry++) - ap->udma_mask &= ~(1 << qc->dev->xfer_mode); - } else - retry = 0; - } - } - - return host_stat; -} - -/** - * scc_data_xfer - Transfer data by PIO - * @dev: device for this I/O - * @buf: data buffer - * @buflen: buffer length - * @rw: read/write - * - * Note: Original code is ata_sff_data_xfer(). - */ - -static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, - unsigned int buflen, int rw) -{ - struct ata_port *ap = dev->link->ap; - unsigned int words = buflen >> 1; - unsigned int i; - __le16 *buf16 = (__le16 *) buf; - void __iomem *mmio = ap->ioaddr.data_addr; - - /* Transfer multiple of 2 bytes */ - if (rw == READ) - for (i = 0; i < words; i++) - buf16[i] = cpu_to_le16(in_be32(mmio)); - else - for (i = 0; i < words; i++) - out_be32(mmio, le16_to_cpu(buf16[i])); - - /* Transfer trailing 1 byte, if any. */ - if (unlikely(buflen & 0x01)) { - __le16 align_buf[1] = { 0 }; - unsigned char *trailing_buf = buf + buflen - 1; - - if (rw == READ) { - align_buf[0] = cpu_to_le16(in_be32(mmio)); - memcpy(trailing_buf, align_buf, 1); - } else { - memcpy(align_buf, trailing_buf, 1); - out_be32(mmio, le16_to_cpu(align_buf[0])); - } - words++; - } - - return words << 1; -} - -/** - * scc_postreset - standard postreset callback - * @ap: the target ata_port - * @classes: classes of attached devices - * - * Note: Original code is ata_sff_postreset(). - */ - -static void scc_postreset(struct ata_link *link, unsigned int *classes) -{ - struct ata_port *ap = link->ap; - - DPRINTK("ENTER\n"); - - /* is double-select really necessary? 
*/ - if (classes[0] != ATA_DEV_NONE) - ap->ops->sff_dev_select(ap, 1); - if (classes[1] != ATA_DEV_NONE) - ap->ops->sff_dev_select(ap, 0); - - /* bail out if no device is present */ - if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { - DPRINTK("EXIT, no device\n"); - return; - } - - /* set up device control */ - out_be32(ap->ioaddr.ctl_addr, ap->ctl); - - DPRINTK("EXIT\n"); -} - -/** - * scc_irq_clear - Clear PCI IDE BMDMA interrupt. - * @ap: Port associated with this ATA transaction. - * - * Note: Original code is ata_bmdma_irq_clear(). - */ - -static void scc_irq_clear (struct ata_port *ap) -{ - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - if (!mmio) - return; - - out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS)); -} - -/** - * scc_port_start - Set port up for dma. - * @ap: Port to initialize - * - * Allocate space for PRD table using ata_bmdma_port_start(). - * Set PRD table address for PTERADD. (PRD Transfer End Read) - */ - -static int scc_port_start (struct ata_port *ap) -{ - void __iomem *mmio = ap->ioaddr.bmdma_addr; - int rc; - - rc = ata_bmdma_port_start(ap); - if (rc) - return rc; - - out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma); - return 0; -} - -/** - * scc_port_stop - Undo scc_port_start() - * @ap: Port to shut down - * - * Reset PTERADD. - */ - -static void scc_port_stop (struct ata_port *ap) -{ - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - out_be32(mmio + SCC_DMA_PTERADD, 0); -} - -static struct scsi_host_template scc_sht = { - ATA_BMDMA_SHT(DRV_NAME), -}; - -static struct ata_port_operations scc_pata_ops = { - .inherits = &ata_bmdma_port_ops, - - .set_piomode = scc_set_piomode, - .set_dmamode = scc_set_dmamode, - .mode_filter = scc_mode_filter, - - .sff_tf_load = scc_tf_load, - .sff_tf_read = scc_tf_read, - .sff_exec_command = scc_exec_command, - .sff_check_status = scc_check_status, - .sff_check_altstatus = scc_check_altstatus, - .sff_dev_select = scc_dev_select, - .sff_set_devctl = scc_set_devctl, - - .bmdma_setup = scc_bmdma_setup, - .bmdma_start = scc_bmdma_start, - .bmdma_stop = scc_bmdma_stop, - .bmdma_status = scc_bmdma_status, - .sff_data_xfer = scc_data_xfer, - - .cable_detect = ata_cable_80wire, - .softreset = scc_softreset, - .postreset = scc_postreset, - - .sff_irq_clear = scc_irq_clear, - - .port_start = scc_port_start, - .port_stop = scc_port_stop, -}; - -static struct ata_port_info scc_port_info[] = { - { - .flags = ATA_FLAG_SLAVE_POSS, - .pio_mask = ATA_PIO4, - /* No MWDMA */ - .udma_mask = ATA_UDMA6, - .port_ops = &scc_pata_ops, - }, -}; - -/** - * scc_reset_controller - initialize SCC PATA controller. 
- */ - -static int scc_reset_controller(struct ata_host *host) -{ - void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR]; - void __iomem *bmid_base = host->iomap[SCC_BMID_BAR]; - void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; - void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG; - void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE; - void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK; - void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS; - u32 reg = 0; - - out_be32(cckctrl_port, reg); - reg |= CCKCTRL_ATACLKOEN; - out_be32(cckctrl_port, reg); - reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN; - out_be32(cckctrl_port, reg); - reg |= CCKCTRL_CRST; - out_be32(cckctrl_port, reg); - - for (;;) { - reg = in_be32(cckctrl_port); - if (reg & CCKCTRL_CRST) - break; - udelay(5000); - } - - reg |= CCKCTRL_ATARESET; - out_be32(cckctrl_port, reg); - out_be32(ecmode_port, ECMODE_VALUE); - out_be32(mode_port, MODE_JCUSFEN); - out_be32(intmask_port, INTMASK_MSK); - - if (in_be32(dmastatus_port) & QCHSD_STPDIAG) { - printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME); - return -EIO; - } - - return 0; -} - -/** - * scc_setup_ports - initialize ioaddr with SCC PATA port offsets. - * @ioaddr: IO address structure to be initialized - * @base: base address of BMID region - */ - -static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base) -{ - ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR; - ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS; - ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS; - ioaddr->bmdma_addr = base; - ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA; - ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR; - ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE; - ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT; - ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL; - ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM; - ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH; - ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE; - ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS; - ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD; -} - -static int scc_host_init(struct ata_host *host) -{ - struct pci_dev *pdev = to_pci_dev(host->dev); - int rc; - - rc = scc_reset_controller(host); - if (rc) - return rc; - - rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); - if (rc) - return rc; - rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); - if (rc) - return rc; - - scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]); - - pci_set_master(pdev); - - return 0; -} - -/** - * scc_init_one - Register SCC PATA device with kernel services - * @pdev: PCI device to register - * @ent: Entry in scc_pci_tbl matching with @pdev - * - * LOCKING: - * Inherited from PCI layer (may sleep). - * - * RETURNS: - * Zero on success, or -ERRNO value. 
- */ - -static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) -{ - unsigned int board_idx = (unsigned int) ent->driver_data; - const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL }; - struct ata_host *host; - int rc; - - ata_print_version_once(&pdev->dev, DRV_VERSION); - - host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1); - if (!host) - return -ENOMEM; - - rc = pcim_enable_device(pdev); - if (rc) - return rc; - - rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME); - if (rc == -EBUSY) - pcim_pin_device(pdev); - if (rc) - return rc; - host->iomap = pcim_iomap_table(pdev); - - ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl"); - ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid"); - - rc = scc_host_init(host); - if (rc) - return rc; - - return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, - IRQF_SHARED, &scc_sht); -} - -static struct pci_driver scc_pci_driver = { - .name = DRV_NAME, - .id_table = scc_pci_tbl, - .probe = scc_init_one, - .remove = ata_pci_remove_one, -#ifdef CONFIG_PM_SLEEP - .suspend = ata_pci_device_suspend, - .resume = ata_pci_device_resume, -#endif -}; - -module_pci_driver(scc_pci_driver); - -MODULE_AUTHOR("Toshiba corp"); -MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller"); -MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(pci, scc_pci_tbl); -MODULE_VERSION(DRV_VERSION); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ae3fcb4..d7173cb 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1620,8 +1620,8 @@ out: static void loop_remove(struct loop_device *lo) { - del_gendisk(lo->lo_disk); blk_cleanup_queue(lo->lo_queue); + del_gendisk(lo->lo_disk); blk_mq_free_tag_set(&lo->tag_set); put_disk(lo->lo_disk); kfree(lo); diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index 6b736b0..88f13c5 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c @@ -944,7 +944,8 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len) { - __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue)); + __be32 max_sectors = cpu_to_be32( + nvme_block_nr(ns, queue_max_hw_sectors(ns->queue))); __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); __be32 discard_desc_count = cpu_to_be32(0x100); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8125233..ec6c5c6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) result, xferred); if (!img_request->result) img_request->result = result; + /* + * Need to end I/O on the entire obj_request worth of + * bytes in case of error. + */ + xferred = obj_request->length; } /* Image object requests don't own their page array */ diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index bd2b3bb..713fc9f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif, atomic_dec(&blkif->persistent_gnt_in_use); } -static void free_persistent_gnts_unmap_callback(int result, - struct gntab_unmap_queue_data *data) -{ - struct completion *c = data->data; - - /* BUG_ON used to reproduce existing behaviour, - but is this the best way to deal with this? 
*/ - BUG_ON(result); - complete(c); -} - static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, unsigned int num) { @@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, struct rb_node *n; int segs_to_unmap = 0; struct gntab_unmap_queue_data unmap_data; - struct completion unmap_completion; - init_completion(&unmap_completion); - - unmap_data.data = &unmap_completion; - unmap_data.done = &free_persistent_gnts_unmap_callback; unmap_data.pages = pages; unmap_data.unmap_ops = unmap; unmap_data.kunmap_ops = NULL; @@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, !rb_next(&persistent_gnt->node)) { unmap_data.count = segs_to_unmap; - gnttab_unmap_refs_async(&unmap_data); - wait_for_completion(&unmap_completion); + BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; @@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt; - int ret, segs_to_unmap = 0; + int segs_to_unmap = 0; struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); + struct gntab_unmap_queue_data unmap_data; + + unmap_data.pages = pages; + unmap_data.unmap_ops = unmap; + unmap_data.kunmap_ops = NULL; while(!list_empty(&blkif->persistent_purge_list)) { persistent_gnt = list_first_entry(&blkif->persistent_purge_list, @@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) pages[segs_to_unmap] = persistent_gnt->page; if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { - ret = gnttab_unmap_refs(unmap, NULL, pages, - segs_to_unmap); - BUG_ON(ret); + unmap_data.count = segs_to_unmap; + BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); put_free_pages(blkif, pages, segs_to_unmap); segs_to_unmap = 0; } kfree(persistent_gnt); } if (segs_to_unmap > 0) { - ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); - BUG_ON(ret); + unmap_data.count = segs_to_unmap; + BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); put_free_pages(blkif, pages, segs_to_unmap); } } diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index c94386a..8dcbced 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -74,6 +74,27 @@ static inline struct zram *dev_to_zram(struct device *dev) return (struct zram *)dev_to_disk(dev)->private_data; } +static ssize_t compact_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + unsigned long nr_migrated; + struct zram *zram = dev_to_zram(dev); + struct zram_meta *meta; + + down_read(&zram->init_lock); + if (!init_done(zram)) { + up_read(&zram->init_lock); + return -EINVAL; + } + + meta = zram->meta; + nr_migrated = zs_compact(meta->mem_pool); + atomic64_add(nr_migrated, &zram->stats.num_migrated); + up_read(&zram->init_lock); + + return len; +} + static ssize_t disksize_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1038,6 +1059,7 @@ static const struct block_device_operations zram_devops = { .owner = THIS_MODULE }; +static DEVICE_ATTR_WO(compact); static DEVICE_ATTR_RW(disksize); static DEVICE_ATTR_RO(initstate); static DEVICE_ATTR_WO(reset); @@ -1114,6 +1136,7 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_num_writes.attr, &dev_attr_failed_reads.attr, &dev_attr_failed_writes.attr, + 
&dev_attr_compact.attr, &dev_attr_invalid_io.attr, &dev_attr_notify_free.attr, &dev_attr_zero_pages.attr, diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 4f7e8d4..6de97b3 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -227,7 +227,6 @@ static void bt3c_receive(struct bt3c_info *info) iobase = info->p_dev->resource[0]->start; avail = bt3c_read(iobase, 0x7006); - //printk("bt3c_cs: receiving %d bytes\n", avail); bt3c_address(iobase, 0x7480); while (size < avail) { @@ -250,7 +249,6 @@ static void bt3c_receive(struct bt3c_info *info) bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L); inb(iobase + DATA_H); - //printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type); switch (bt_cb(info->rx_skb)->pkt_type) { @@ -364,7 +362,6 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) if (stat & 0x0001) bt3c_receive(info); if (stat & 0x0002) { - //BT_ERR("Ack (stat=0x%04x)", stat); clear_bit(XMIT_SENDING, &(info->tx_state)); bt3c_write_wakeup(info); } diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index d0741f3..4bba866 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -95,6 +95,78 @@ int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) } EXPORT_SYMBOL_GPL(btbcm_set_bdaddr); +int btbcm_patchram(struct hci_dev *hdev, const char *firmware) +{ + const struct hci_command_hdr *cmd; + const struct firmware *fw; + const u8 *fw_ptr; + size_t fw_size; + struct sk_buff *skb; + u16 opcode; + int err; + + err = request_firmware(&fw, firmware, &hdev->dev); + if (err < 0) { + BT_INFO("%s: BCM: Patch %s not found", hdev->name, firmware); + return err; + } + + /* Start Download */ + skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: BCM: Download Minidrv command failed (%d)", + hdev->name, err); + goto done; + } + kfree_skb(skb); + + /* 50 msec delay after Download Minidrv completes */ + msleep(50); + + fw_ptr = fw->data; + fw_size = fw->size; + + while (fw_size >= sizeof(*cmd)) { + const u8 *cmd_param; + + cmd = (struct hci_command_hdr *)fw_ptr; + fw_ptr += sizeof(*cmd); + fw_size -= sizeof(*cmd); + + if (fw_size < cmd->plen) { + BT_ERR("%s: BCM: Patch %s is corrupted", hdev->name, + firmware); + err = -EINVAL; + goto done; + } + + cmd_param = fw_ptr; + fw_ptr += cmd->plen; + fw_size -= cmd->plen; + + opcode = le16_to_cpu(cmd->opcode); + + skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: BCM: Patch command %04x failed (%d)", + hdev->name, opcode, err); + goto done; + } + kfree_skb(skb); + } + + /* 250 msec delay after Launch Ram completes */ + msleep(250); + +done: + release_firmware(fw); + return err; +} +EXPORT_SYMBOL(btbcm_patchram); + static int btbcm_reset(struct hci_dev *hdev) { struct sk_buff *skb; @@ -198,12 +270,8 @@ static const struct { int btbcm_setup_patchram(struct hci_dev *hdev) { - const struct hci_command_hdr *cmd; - const struct firmware *fw; - const u8 *fw_ptr; - size_t fw_size; char fw_name[64]; - u16 opcode, subver, rev, pid, vid; + u16 subver, rev, pid, vid; const char *hw_name = NULL; struct sk_buff *skb; struct hci_rp_read_local_version *ver; @@ -273,74 +341,19 @@ int btbcm_setup_patchram(struct hci_dev *hdev) hw_name ? 
: "BCM", (subver & 0x7000) >> 13, (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); - err = request_firmware(&fw, fw_name, &hdev->dev); - if (err < 0) { - BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name); + err = btbcm_patchram(hdev, fw_name); + if (err == -ENOENT) return 0; - } - - /* Start Download */ - skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - BT_ERR("%s: BCM: Download Minidrv command failed (%d)", - hdev->name, err); - goto reset; - } - kfree_skb(skb); - - /* 50 msec delay after Download Minidrv completes */ - msleep(50); - - fw_ptr = fw->data; - fw_size = fw->size; - - while (fw_size >= sizeof(*cmd)) { - const u8 *cmd_param; - - cmd = (struct hci_command_hdr *)fw_ptr; - fw_ptr += sizeof(*cmd); - fw_size -= sizeof(*cmd); - - if (fw_size < cmd->plen) { - BT_ERR("%s: BCM: patch %s is corrupted", hdev->name, - fw_name); - err = -EINVAL; - goto reset; - } - cmd_param = fw_ptr; - fw_ptr += cmd->plen; - fw_size -= cmd->plen; - - opcode = le16_to_cpu(cmd->opcode); - - skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - BT_ERR("%s: BCM: patch command %04x failed (%d)", - hdev->name, opcode, err); - goto reset; - } - kfree_skb(skb); - } - - /* 250 msec delay after Launch Ram completes */ - msleep(250); - -reset: /* Reset */ err = btbcm_reset(hdev); if (err) - goto done; + return err; /* Read Local Version Info */ skb = btbcm_read_local_version(hdev); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - goto done; - } + if (IS_ERR(skb)) + return PTR_ERR(skb); ver = (struct hci_rp_read_local_version *)skb->data; rev = le16_to_cpu(ver->hci_rev); @@ -355,10 +368,7 @@ reset: set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); -done: - release_firmware(fw); - - return err; + return 0; } EXPORT_SYMBOL_GPL(btbcm_setup_patchram); diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h index 34268ae..eb6ab5f 100644 --- a/drivers/bluetooth/btbcm.h +++ b/drivers/bluetooth/btbcm.h @@ -25,6 +25,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev); int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int btbcm_patchram(struct hci_dev *hdev, const char *firmware); int btbcm_setup_patchram(struct hci_dev *hdev); int btbcm_setup_apple(struct hci_dev *hdev); @@ -41,6 +42,11 @@ static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) return -EOPNOTSUPP; } +static inline int btbcm_patchram(struct hci_dev *hdev, const char *firmware) +{ + return -EOPNOTSUPP; +} + static inline int btbcm_setup_patchram(struct hci_dev *hdev) { return 0; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index de7b236..d21f3b4 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/usb.h> #include <linux/firmware.h> +#include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> @@ -57,6 +58,7 @@ static struct usb_driver btusb_driver; #define BTUSB_AMP 0x4000 #define BTUSB_QCA_ROME 0x8000 #define BTUSB_BCM_APPLE 0x10000 +#define BTUSB_REALTEK 0x20000 static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -288,6 +290,28 @@ static const struct usb_device_id blacklist_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), .driver_info = BTUSB_IGNORE }, + /* Realtek Bluetooth devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), + .driver_info = 
BTUSB_REALTEK }, + + /* Additional Realtek 8723AE Bluetooth devices */ + { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8723BE Bluetooth devices */ + { USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8821AE Bluetooth devices */ + { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3458), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, + { } /* Terminating entry */ }; @@ -892,7 +916,7 @@ static int btusb_open(struct hci_dev *hdev) */ if (data->setup_on_usb) { err = data->setup_on_usb(hdev); - if (err <0) + if (err < 0) return err; } @@ -1345,6 +1369,378 @@ static int btusb_setup_csr(struct hci_dev *hdev) return ret; } +#define RTL_FRAG_LEN 252 + +struct rtl_download_cmd { + __u8 index; + __u8 data[RTL_FRAG_LEN]; +} __packed; + +struct rtl_download_response { + __u8 status; + __u8 index; +} __packed; + +struct rtl_rom_version_evt { + __u8 status; + __u8 version; +} __packed; + +struct rtl_epatch_header { + __u8 signature[8]; + __le32 fw_version; + __le16 num_patches; +} __packed; + +#define RTL_EPATCH_SIGNATURE "Realtech" +#define RTL_ROM_LMP_3499 0x3499 +#define RTL_ROM_LMP_8723A 0x1200 +#define RTL_ROM_LMP_8723B 0x8723 +#define RTL_ROM_LMP_8821A 0x8821 +#define RTL_ROM_LMP_8761A 0x8761 + +static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) +{ + struct rtl_rom_version_evt *rom_version; + struct sk_buff *skb; + int ret; + + /* Read RTL ROM version command */ + skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Read ROM version failed (%ld)", + hdev->name, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*rom_version)) { + BT_ERR("%s: RTL version event length mismatch", hdev->name); + kfree_skb(skb); + return -EIO; + } + + rom_version = (struct rtl_rom_version_evt *)skb->data; + BT_INFO("%s: rom_version status=%x version=%x", + hdev->name, rom_version->status, rom_version->version); + + ret = rom_version->status; + if (ret == 0) + *version = rom_version->version; + + kfree_skb(skb); + return ret; +} + +static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, + const struct firmware *fw, + unsigned char **_buf) +{ + const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; + struct rtl_epatch_header *epatch_info; + unsigned char *buf; + int i, ret, len; + size_t min_size; + u8 opcode, length, data, rom_version = 0; + int project_id = -1; + const unsigned char *fwptr, *chip_id_base; + const unsigned char *patch_length_base, *patch_offset_base; + u32 patch_offset = 0; + u16 patch_length, num_patches; + const u16 project_id_to_lmp_subver[] = { + RTL_ROM_LMP_8723A, + RTL_ROM_LMP_8723B, + RTL_ROM_LMP_8821A, + RTL_ROM_LMP_8761A + }; + + ret = rtl_read_rom_version(hdev, &rom_version); + if (ret) + return -bt_to_errno(ret); + + min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; + if (fw->size < min_size) + return -EINVAL; + + fwptr = fw->data + fw->size - sizeof(extension_sig); + if 
(memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { + BT_ERR("%s: extension section signature mismatch", hdev->name); + return -EINVAL; + } + + /* Loop from the end of the firmware parsing instructions, until + * we find an instruction that identifies the "project ID" for the + * hardware supported by this firwmare file. + * Once we have that, we double-check that that project_id is suitable + * for the hardware we are working with. + */ + while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) { + opcode = *--fwptr; + length = *--fwptr; + data = *--fwptr; + + BT_DBG("check op=%x len=%x data=%x", opcode, length, data); + + if (opcode == 0xff) /* EOF */ + break; + + if (length == 0) { + BT_ERR("%s: found instruction with length 0", + hdev->name); + return -EINVAL; + } + + if (opcode == 0 && length == 1) { + project_id = data; + break; + } + + fwptr -= length; + } + + if (project_id < 0) { + BT_ERR("%s: failed to find version instruction", hdev->name); + return -EINVAL; + } + + if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) { + BT_ERR("%s: unknown project id %d", hdev->name, project_id); + return -EINVAL; + } + + if (lmp_subver != project_id_to_lmp_subver[project_id]) { + BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, + project_id_to_lmp_subver[project_id], lmp_subver); + return -EINVAL; + } + + epatch_info = (struct rtl_epatch_header *)fw->data; + if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { + BT_ERR("%s: bad EPATCH signature", hdev->name); + return -EINVAL; + } + + num_patches = le16_to_cpu(epatch_info->num_patches); + BT_DBG("fw_version=%x, num_patches=%d", + le32_to_cpu(epatch_info->fw_version), num_patches); + + /* After the rtl_epatch_header there is a funky patch metadata section. + * Assuming 2 patches, the layout is: + * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2 + * + * Find the right patch for this chip. + */ + min_size += 8 * num_patches; + if (fw->size < min_size) + return -EINVAL; + + chip_id_base = fw->data + sizeof(struct rtl_epatch_header); + patch_length_base = chip_id_base + (sizeof(u16) * num_patches); + patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); + for (i = 0; i < num_patches; i++) { + u16 chip_id = get_unaligned_le16(chip_id_base + + (i * sizeof(u16))); + if (chip_id == rom_version + 1) { + patch_length = get_unaligned_le16(patch_length_base + + (i * sizeof(u16))); + patch_offset = get_unaligned_le32(patch_offset_base + + (i * sizeof(u32))); + break; + } + } + + if (!patch_offset) { + BT_ERR("%s: didn't find patch for chip id %d", + hdev->name, rom_version); + return -EINVAL; + } + + BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); + min_size = patch_offset + patch_length; + if (fw->size < min_size) + return -EINVAL; + + /* Copy the firmware into a new buffer and write the version at + * the end. 
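
The per-chip patch selection just above depends on the metadata that follows struct rtl_epatch_header: three parallel little-endian arrays, one entry per patch, holding u16 chip IDs, then u16 patch lengths, then u32 patch offsets, and the entry whose chip ID equals rom_version + 1 is the one to use. A stand-alone sketch of that lookup, with hypothetical names (find_patch, rd16, rd32) and plain byte reads instead of get_unaligned_le16/32:

#include <stdint.h>
#include <stddef.h>

static uint16_t rd16(const uint8_t *p) { return p[0] | (p[1] << 8); }
static uint32_t rd32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* tbl points just past the epatch header; returns the matching patch
 * offset, or 0 when no entry matches (0 also means "not found" above). */
static uint32_t find_patch(const uint8_t *tbl, uint16_t num_patches,
			   uint8_t rom_version, uint16_t *len_out)
{
	const uint8_t *chip_ids = tbl;
	const uint8_t *lengths  = chip_ids + 2 * num_patches;
	const uint8_t *offsets  = lengths  + 2 * num_patches;
	uint16_t i;

	for (i = 0; i < num_patches; i++) {
		if (rd16(chip_ids + 2 * i) == (uint16_t)(rom_version + 1)) {
			*len_out = rd16(lengths + 2 * i);
			return rd32(offsets + 4 * i);
		}
	}
	return 0;
}

The driver then copies the selected region out of the firmware image and, as the hunk continues below, overwrites its last four bytes with the header's fw_version before download.
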
+ */ + len = patch_length; + buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); + + *_buf = buf; + return len; +} + +static int rtl_download_firmware(struct hci_dev *hdev, + const unsigned char *data, int fw_len) +{ + struct rtl_download_cmd *dl_cmd; + int frag_num = fw_len / RTL_FRAG_LEN + 1; + int frag_len = RTL_FRAG_LEN; + int ret = 0; + int i; + + dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); + if (!dl_cmd) + return -ENOMEM; + + for (i = 0; i < frag_num; i++) { + struct rtl_download_response *dl_resp; + struct sk_buff *skb; + + BT_DBG("download fw (%d/%d)", i, frag_num); + + dl_cmd->index = i; + if (i == (frag_num - 1)) { + dl_cmd->index |= 0x80; /* data end */ + frag_len = fw_len % RTL_FRAG_LEN; + } + memcpy(dl_cmd->data, data, frag_len); + + /* Send download command */ + skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: download fw command failed (%ld)", + hdev->name, PTR_ERR(skb)); + ret = -PTR_ERR(skb); + goto out; + } + + if (skb->len != sizeof(*dl_resp)) { + BT_ERR("%s: download fw event length mismatch", + hdev->name); + kfree_skb(skb); + ret = -EIO; + goto out; + } + + dl_resp = (struct rtl_download_response *)skb->data; + if (dl_resp->status != 0) { + kfree_skb(skb); + ret = bt_to_errno(dl_resp->status); + goto out; + } + + kfree_skb(skb); + data += RTL_FRAG_LEN; + } + +out: + kfree(dl_cmd); + return ret; +} + +static int btusb_setup_rtl8723a(struct hci_dev *hdev) +{ + struct btusb_data *data = dev_get_drvdata(&hdev->dev); + struct usb_device *udev = interface_to_usbdev(data->intf); + const struct firmware *fw; + int ret; + + BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name); + ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev); + if (ret < 0) { + BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name); + return ret; + } + + if (fw->size < 8) { + ret = -EINVAL; + goto out; + } + + /* Check that the firmware doesn't have the epatch signature + * (which is only for RTL8723B and newer). 
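
rtl_download_firmware() above streams the selected patch in RTL_FRAG_LEN (252-byte) pieces: every fragment is sent as a one-byte index followed by the data, and the final fragment carries the remainder of the payload with bit 7 of its index set as the "data end" marker. The fragment arithmetic on its own, as a small sketch with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define FRAG_LEN 252

/* Print the fragment plan for a payload of fw_len bytes, mirroring the
 * loop above: fw_len/252 + 1 fragments, the last one short. */
static void plan_fragments(int fw_len)
{
	int frag_num = fw_len / FRAG_LEN + 1;
	int i;

	for (i = 0; i < frag_num; i++) {
		int len = FRAG_LEN;
		unsigned index = i;

		if (i == frag_num - 1) {
			index |= 0x80;            /* data end marker */
			len = fw_len % FRAG_LEN;  /* remainder goes last */
		}
		printf("fragment %d: index 0x%02x, %d bytes\n", i, index, len);
	}
}

For example, a 600-byte patch is sent as 252 + 252 + 96 bytes, the last fragment with index 0x82; a payload that is an exact multiple of 252 bytes ends, exactly as in the loop above, with a zero-length terminating fragment.
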
+ */ + if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) { + BT_ERR("%s: unexpected EPATCH signature!", hdev->name); + ret = -EINVAL; + goto out; + } + + ret = rtl_download_firmware(hdev, fw->data, fw->size); + +out: + release_firmware(fw); + return ret; +} + +static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver, + const char *fw_name) +{ + struct btusb_data *data = dev_get_drvdata(&hdev->dev); + struct usb_device *udev = interface_to_usbdev(data->intf); + unsigned char *fw_data = NULL; + const struct firmware *fw; + int ret; + + BT_INFO("%s: rtl: loading %s", hdev->name, fw_name); + ret = request_firmware(&fw, fw_name, &udev->dev); + if (ret < 0) { + BT_ERR("%s: Failed to load %s", hdev->name, fw_name); + return ret; + } + + ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data); + if (ret < 0) + goto out; + + ret = rtl_download_firmware(hdev, fw_data, ret); + kfree(fw_data); + if (ret < 0) + goto out; + +out: + release_firmware(fw); + return ret; +} + +static int btusb_setup_realtek(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct hci_rp_read_local_version *resp; + u16 lmp_subver; + + skb = btusb_read_local_version(hdev); + if (IS_ERR(skb)) + return -PTR_ERR(skb); + + resp = (struct hci_rp_read_local_version *)skb->data; + BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x " + "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev, + resp->lmp_ver, resp->lmp_subver); + + lmp_subver = le16_to_cpu(resp->lmp_subver); + kfree_skb(skb); + + /* Match a set of subver values that correspond to stock firmware, + * which is not compatible with standard btusb. + * If matched, upload an alternative firmware that does conform to + * standard btusb. Once that firmware is uploaded, the subver changes + * to a different value. 
+ */ + switch (lmp_subver) { + case RTL_ROM_LMP_8723A: + case RTL_ROM_LMP_3499: + return btusb_setup_rtl8723a(hdev); + case RTL_ROM_LMP_8723B: + return btusb_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8723b_fw.bin"); + case RTL_ROM_LMP_8821A: + return btusb_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8821a_fw.bin"); + case RTL_ROM_LMP_8761A: + return btusb_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8761a_fw.bin"); + default: + BT_INFO("rtl: assuming no firmware upload needed."); + return 0; + } +} + static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, struct intel_version *ver) { @@ -2577,7 +2973,7 @@ static int btusb_setup_qca(struct hci_dev *hdev) int i, err; err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver, - sizeof(ver)); + sizeof(ver)); if (err < 0) return err; @@ -2776,6 +3172,9 @@ static int btusb_probe(struct usb_interface *intf, hdev->set_bdaddr = btusb_set_bdaddr_ath3012; } + if (id->driver_info & BTUSB_REALTEK) + hdev->setup = btusb_setup_realtek; + if (id->driver_info & BTUSB_AMP) { /* AMP controllers do not support SCO packets */ data->isoc = NULL; diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index 1b3f864..ec8fa0e 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c @@ -95,7 +95,6 @@ static void ath_hci_uart_work(struct work_struct *work) hci_uart_tx_wakeup(hu); } -/* Initialize protocol */ static int ath_open(struct hci_uart *hu) { struct ath_struct *ath; @@ -116,8 +115,7 @@ static int ath_open(struct hci_uart *hu) return 0; } -/* Flush protocol data */ -static int ath_flush(struct hci_uart *hu) +static int ath_close(struct hci_uart *hu) { struct ath_struct *ath = hu->priv; @@ -125,11 +123,17 @@ static int ath_flush(struct hci_uart *hu) skb_queue_purge(&ath->txq); + kfree_skb(ath->rx_skb); + + cancel_work_sync(&ath->ctxtsw); + + hu->priv = NULL; + kfree(ath); + return 0; } -/* Close protocol */ -static int ath_close(struct hci_uart *hu) +static int ath_flush(struct hci_uart *hu) { struct ath_struct *ath = hu->priv; @@ -137,19 +141,65 @@ static int ath_close(struct hci_uart *hu) skb_queue_purge(&ath->txq); - kfree_skb(ath->rx_skb); + return 0; +} - cancel_work_sync(&ath->ctxtsw); +static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + u8 buf[10]; + int err; + + buf[0] = 0x01; + buf[1] = 0x01; + buf[2] = 0x00; + buf[3] = sizeof(bdaddr_t); + memcpy(buf + 4, bdaddr, sizeof(bdaddr_t)); + + skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Change address command failed (%d)", + hdev->name, err); + return err; + } + kfree_skb(skb); - hu->priv = NULL; - kfree(ath); + return 0; +} + +static int ath_setup(struct hci_uart *hu) +{ + BT_DBG("hu %p", hu); + + hu->hdev->set_bdaddr = ath_set_bdaddr; return 0; } +static const struct h4_recv_pkt ath_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, +}; + +static int ath_recv(struct hci_uart *hu, const void *data, int count) +{ + struct ath_struct *ath = hu->priv; + + ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count, + ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts)); + if (IS_ERR(ath->rx_skb)) { + int err = PTR_ERR(ath->rx_skb); + BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err); + return err; + } + + return count; +} + #define HCI_OP_ATH_SLEEP 0xFC04 -/* Enqueue frame for transmittion */ static int 
ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct ath_struct *ath = hu->priv; @@ -159,8 +209,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) return 0; } - /* - * Update power management enable flag with parameters of + /* Update power management enable flag with parameters of * HCI sleep enable vendor specific HCI command. */ if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { @@ -190,37 +239,16 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu) return skb_dequeue(&ath->txq); } -static const struct h4_recv_pkt ath_recv_pkts[] = { - { H4_RECV_ACL, .recv = hci_recv_frame }, - { H4_RECV_SCO, .recv = hci_recv_frame }, - { H4_RECV_EVENT, .recv = hci_recv_frame }, -}; - -/* Recv data */ -static int ath_recv(struct hci_uart *hu, const void *data, int count) -{ - struct ath_struct *ath = hu->priv; - - ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count, - ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts)); - if (IS_ERR(ath->rx_skb)) { - int err = PTR_ERR(ath->rx_skb); - BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err); - return err; - } - - return count; -} - static const struct hci_uart_proto athp = { .id = HCI_UART_ATH3K, .name = "ATH3K", .open = ath_open, .close = ath_close, + .flush = ath_flush, + .setup = ath_setup, .recv = ath_recv, .enqueue = ath_enqueue, .dequeue = ath_dequeue, - .flush = ath_flush, }; int __init ath_init(void) diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index b854125..5340604 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -660,7 +660,7 @@ validate_group(struct perf_event *event) * Initialise the fake PMU. We only need to populate the * used_mask for the purposes of validation. */ - .used_mask = CPU_BITS_NONE, + .used_mask = { 0 }, }; if (!validate_event(event->pmu, &fake_pmu, leader)) diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index 11f7982..ebee57d 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c @@ -1,7 +1,7 @@ /* * OMAP L3 Interconnect error handling driver * - * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/ * Santosh Shilimkar <santosh.shilimkar@ti.com> * Sricharan <r.sricharan@ti.com> * @@ -233,7 +233,8 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3) } static const struct of_device_id l3_noc_match[] = { - {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data}, + {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data}, + {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data}, {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data}, {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data}, {}, diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h index 9525458..73431f8 100644 --- a/drivers/bus/omap_l3_noc.h +++ b/drivers/bus/omap_l3_noc.h @@ -1,7 +1,7 @@ /* * OMAP L3 Interconnect error handling driver header * - * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/ * Santosh Shilimkar <santosh.shilimkar@ti.com> * sricharan <r.sricharan@ti.com> * @@ -175,16 +175,14 @@ static struct l3_flagmux_data omap_l3_flagmux_clk2 = { }; -static struct l3_target_data omap_l3_target_data_clk3[] = { - {0x0100, "EMUSS",}, - {0x0300, "DEBUG SOURCE",}, - {0x0, "HOST CLK3",}, +static struct l3_target_data omap4_l3_target_data_clk3[] = { + {0x0100, "DEBUGSS",}, }; -static struct l3_flagmux_data 
omap_l3_flagmux_clk3 = { +static struct l3_flagmux_data omap4_l3_flagmux_clk3 = { .offset = 0x0200, - .l3_targ = omap_l3_target_data_clk3, - .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3), + .l3_targ = omap4_l3_target_data_clk3, + .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3), }; static struct l3_masters_data omap_l3_masters[] = { @@ -215,21 +213,49 @@ static struct l3_masters_data omap_l3_masters[] = { { 0x32, "USBHOSTFS"} }; -static struct l3_flagmux_data *omap_l3_flagmux[] = { +static struct l3_flagmux_data *omap4_l3_flagmux[] = { &omap_l3_flagmux_clk1, &omap_l3_flagmux_clk2, - &omap_l3_flagmux_clk3, + &omap4_l3_flagmux_clk3, }; -static const struct omap_l3 omap_l3_data = { - .l3_flagmux = omap_l3_flagmux, - .num_modules = ARRAY_SIZE(omap_l3_flagmux), +static const struct omap_l3 omap4_l3_data = { + .l3_flagmux = omap4_l3_flagmux, + .num_modules = ARRAY_SIZE(omap4_l3_flagmux), .l3_masters = omap_l3_masters, .num_masters = ARRAY_SIZE(omap_l3_masters), /* The 6 MSBs of register field used to distinguish initiator */ .mst_addr_mask = 0xFC, }; +/* OMAP5 data */ +static struct l3_target_data omap5_l3_target_data_clk3[] = { + {0x0100, "L3INSTR",}, + {0x0300, "DEBUGSS",}, + {0x0, "HOSTCLK3",}, +}; + +static struct l3_flagmux_data omap5_l3_flagmux_clk3 = { + .offset = 0x0200, + .l3_targ = omap5_l3_target_data_clk3, + .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3), +}; + +static struct l3_flagmux_data *omap5_l3_flagmux[] = { + &omap_l3_flagmux_clk1, + &omap_l3_flagmux_clk2, + &omap5_l3_flagmux_clk3, +}; + +static const struct omap_l3 omap5_l3_data = { + .l3_flagmux = omap5_l3_flagmux, + .num_modules = ARRAY_SIZE(omap5_l3_flagmux), + .l3_masters = omap_l3_masters, + .num_masters = ARRAY_SIZE(omap_l3_masters), + /* The 6 MSBs of register field used to distinguish initiator */ + .mst_addr_mask = 0x7E0, +}; + /* DRA7 data */ static struct l3_target_data dra_l3_target_data_clk1[] = { {0x2a00, "AES1",}, @@ -274,7 +300,7 @@ static struct l3_flagmux_data dra_l3_flagmux_clk1 = { static struct l3_target_data dra_l3_target_data_clk2[] = { {0x0, "HOST CLK1",}, - {0x0, "HOST CLK2",}, + {0x800000, "HOST CLK2",}, {0xdead, L3_TARGET_NOT_SUPPORTED,}, {0x3400, "SHA2_2",}, {0x0900, "BB2D",}, diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c index d1494ec..4b31f13 100644 --- a/drivers/char/hw_random/bcm63xx-rng.c +++ b/drivers/char/hw_random/bcm63xx-rng.c @@ -57,7 +57,7 @@ static void bcm63xx_rng_cleanup(struct hwrng *rng) val &= ~RNG_EN; __raw_writel(val, priv->regs + RNG_CTRL); - clk_didsable_unprepare(prov->clk); + clk_disable_unprepare(priv->clk); } static int bcm63xx_rng_data_present(struct hwrng *rng, int wait) @@ -97,14 +97,14 @@ static int bcm63xx_rng_probe(struct platform_device *pdev) priv->rng.name = pdev->name; priv->rng.init = bcm63xx_rng_init; priv->rng.cleanup = bcm63xx_rng_cleanup; - prov->rng.data_present = bcm63xx_rng_data_present; + priv->rng.data_present = bcm63xx_rng_data_present; priv->rng.data_read = bcm63xx_rng_data_read; priv->clk = devm_clk_get(&pdev->dev, "ipsec"); if (IS_ERR(priv->clk)) { - error = PTR_ERR(priv->clk); - dev_err(&pdev->dev, "no clock for device: %d\n", error); - return error; + ret = PTR_ERR(priv->clk); + dev_err(&pdev->dev, "no clock for device: %d\n", ret); + return ret; } if (!devm_request_mem_region(&pdev->dev, r->start, @@ -120,11 +120,11 @@ static int bcm63xx_rng_probe(struct platform_device *pdev) return -ENOMEM; } - error = devm_hwrng_register(&pdev->dev, &priv->rng); - if (error) { + ret = 
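
One small point about the omap_l3_noc data above: the OMAP4 and OMAP5 descriptions share the clk1/clk2 flag-mux data and the master table, and differ only in the clk3 targets and in mst_addr_mask, both masks still selecting a 6-bit initiator ID, just at a different position in the register field. A trivial check (using the GCC/Clang popcount builtin):

#include <stdio.h>

int main(void)
{
	unsigned omap4_mask = 0xFC;   /* bits 2..7  of the register field */
	unsigned omap5_mask = 0x7E0;  /* bits 5..10 of the register field */

	/* both print 6: each mask covers a 6-bit initiator id */
	printf("%d %d\n", __builtin_popcount(omap4_mask),
	       __builtin_popcount(omap5_mask));
	return 0;
}
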
devm_hwrng_register(&pdev->dev, &priv->rng); + if (ret) { dev_err(&pdev->dev, "failed to register rng device: %d\n", - error); - return error; + ret); + return ret; } dev_info(&pdev->dev, "registered RNG driver\n"); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 9bb5928..bf75f63 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2000,7 +2000,7 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v) seq_printf(m, " %x", intf->channels[i].address); seq_putc(m, '\n'); - return seq_has_overflowed(m); + return 0; } static int smi_ipmb_proc_open(struct inode *inode, struct file *file) @@ -2023,7 +2023,7 @@ static int smi_version_proc_show(struct seq_file *m, void *v) ipmi_version_major(&intf->bmc->id), ipmi_version_minor(&intf->bmc->id)); - return seq_has_overflowed(m); + return 0; } static int smi_version_proc_open(struct inode *inode, struct file *file) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5e90a18..8a45e92 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -942,8 +942,7 @@ static void sender(void *send_info, * If we are running to completion, start it and run * transactions until everything is clear. */ - smi_info->curr_msg = msg; - smi_info->waiting_msg = NULL; + smi_info->waiting_msg = msg; /* * Run to completion means we are single-threaded, no @@ -2244,7 +2243,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, acpi_handle handle; acpi_status status; unsigned long long tmp; - int rv; + int rv = -EINVAL; acpi_dev = pnp_acpi_device(dev); if (!acpi_dev) @@ -2262,8 +2261,10 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, /* _IFT tells us the interface type: KCS, BT, etc */ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { + dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n"); goto err_free; + } switch (tmp) { case 1: @@ -2276,6 +2277,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, info->si_type = SI_BT; break; case 4: /* SSIF, just ignore */ + rv = -ENODEV; goto err_free; default: dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); @@ -2336,7 +2338,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, err_free: kfree(info); - return -EINVAL; + return rv; } static void ipmi_pnp_remove(struct pnp_dev *dev) @@ -3080,7 +3082,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v) seq_printf(m, "%s\n", si_to_str[smi->si_type]); - return seq_has_overflowed(m); + return 0; } static int smi_type_proc_open(struct inode *inode, struct file *file) @@ -3153,7 +3155,7 @@ static int smi_params_proc_show(struct seq_file *m, void *v) smi->irq, smi->slave_addr); - return seq_has_overflowed(m); + return 0; } static int smi_params_proc_open(struct inode *inode, struct file *file) diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index f40e3bd..207689c 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -31,7 +31,6 @@ * interface into the I2C driver, I believe. */ -#include <linux/version.h> #if defined(MODVERSIONS) #include <linux/modversions.h> #endif @@ -166,6 +165,9 @@ enum ssif_stat_indexes { /* Number of watchdog pretimeouts. */ SSIF_STAT_watchdog_pretimeouts, + /* Number of alers received. */ + SSIF_STAT_alerts, + /* Always add statistics before this value, it must be last. 
*/ SSIF_NUM_STATS }; @@ -214,7 +216,16 @@ struct ssif_info { #define WDT_PRE_TIMEOUT_INT 0x08 unsigned char msg_flags; + u8 global_enables; bool has_event_buffer; + bool supports_alert; + + /* + * Used to tell what we should do with alerts. If we are + * waiting on a response, read the data immediately. + */ + bool got_alert; + bool waiting_alert; /* * If set to true, this will request events the next time the @@ -478,13 +489,13 @@ static int ipmi_ssif_thread(void *data) if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { result = i2c_smbus_write_block_data( - ssif_info->client, SSIF_IPMI_REQUEST, + ssif_info->client, ssif_info->i2c_command, ssif_info->i2c_data[0], ssif_info->i2c_data + 1); ssif_info->done_handler(ssif_info, result, NULL, 0); } else { result = i2c_smbus_read_block_data( - ssif_info->client, SSIF_IPMI_RESPONSE, + ssif_info->client, ssif_info->i2c_command, ssif_info->i2c_data); if (result < 0) ssif_info->done_handler(ssif_info, result, @@ -518,15 +529,12 @@ static int ssif_i2c_send(struct ssif_info *ssif_info, static void msg_done_handler(struct ssif_info *ssif_info, int result, unsigned char *data, unsigned int len); -static void retry_timeout(unsigned long data) +static void start_get(struct ssif_info *ssif_info) { - struct ssif_info *ssif_info = (void *) data; int rv; - if (ssif_info->stopping) - return; - ssif_info->rtc_us_timer = 0; + ssif_info->multi_pos = 0; rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, SSIF_IPMI_RESPONSE, @@ -540,6 +548,46 @@ static void retry_timeout(unsigned long data) } } +static void retry_timeout(unsigned long data) +{ + struct ssif_info *ssif_info = (void *) data; + unsigned long oflags, *flags; + bool waiting; + + if (ssif_info->stopping) + return; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + waiting = ssif_info->waiting_alert; + ssif_info->waiting_alert = false; + ipmi_ssif_unlock_cond(ssif_info, flags); + + if (waiting) + start_get(ssif_info); +} + + +static void ssif_alert(struct i2c_client *client, unsigned int data) +{ + struct ssif_info *ssif_info = i2c_get_clientdata(client); + unsigned long oflags, *flags; + bool do_get = false; + + ssif_inc_stat(ssif_info, alerts); + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + if (ssif_info->waiting_alert) { + ssif_info->waiting_alert = false; + del_timer(&ssif_info->retry_timer); + do_get = true; + } else if (ssif_info->curr_msg) { + ssif_info->got_alert = true; + } + ipmi_ssif_unlock_cond(ssif_info, flags); + if (do_get) + start_get(ssif_info); +} + static int start_resend(struct ssif_info *ssif_info); static void msg_done_handler(struct ssif_info *ssif_info, int result, @@ -559,9 +607,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, if (ssif_info->retries_left > 0) { ssif_inc_stat(ssif_info, receive_retries); + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + ssif_info->waiting_alert = true; + ssif_info->rtc_us_timer = SSIF_MSG_USEC; mod_timer(&ssif_info->retry_timer, jiffies + SSIF_MSG_JIFFIES); - ssif_info->rtc_us_timer = SSIF_MSG_USEC; + ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -581,9 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ssif_inc_stat(ssif_info, received_message_parts); /* Remove the multi-part read marker. 
*/ - for (i = 0; i < (len-2); i++) - ssif_info->data[i] = data[i+2]; len -= 2; + for (i = 0; i < len; i++) + ssif_info->data[i] = data[i+2]; ssif_info->multi_len = len; ssif_info->multi_pos = 1; @@ -610,9 +661,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, goto continue_op; } - blocknum = data[ssif_info->multi_len]; + blocknum = data[0]; - if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) { + if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { /* Received message too big, abort the operation. */ result = -E2BIG; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) @@ -622,15 +673,15 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } /* Remove the blocknum from the data. */ - for (i = 0; i < (len-1); i++) - ssif_info->data[i+ssif_info->multi_len] = data[i+1]; len--; + for (i = 0; i < len; i++) + ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; ssif_info->multi_len += len; if (blocknum == 0xff) { /* End of read */ len = ssif_info->multi_len; data = ssif_info->data; - } else if ((blocknum+1) != ssif_info->multi_pos) { + } else if (blocknum + 1 != ssif_info->multi_pos) { /* * Out of sequence block, just abort. Block * numbers start at zero for the second block, @@ -650,7 +701,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, if (rv < 0) { if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) pr_info(PFX - "Error from i2c_non_blocking_op(2)\n"); + "Error from ssif_i2c_send\n"); result = -EIO; } else @@ -830,7 +881,11 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, } if (ssif_info->multi_data) { - /* In the middle of a multi-data write. */ + /* + * In the middle of a multi-data write. See the comment + * in the SSIF_MULTI_n_PART case in the probe function + * for details on the intricacies of this. + */ int left; ssif_inc_stat(ssif_info, sent_messages_parts); @@ -864,15 +919,32 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, msg_done_handler(ssif_info, -EIO, NULL, 0); } } else { + unsigned long oflags, *flags; + bool got_alert; + ssif_inc_stat(ssif_info, sent_messages); ssif_inc_stat(ssif_info, sent_messages_parts); - /* Wait a jiffie then request the next message */ - ssif_info->retries_left = SSIF_RECV_RETRIES; - ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; - mod_timer(&ssif_info->retry_timer, - jiffies + SSIF_MSG_PART_JIFFIES); - return; + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + got_alert = ssif_info->got_alert; + if (got_alert) { + ssif_info->got_alert = false; + ssif_info->waiting_alert = false; + } + + if (got_alert) { + ipmi_ssif_unlock_cond(ssif_info, flags); + /* The alert already happened, try now. 
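
The multi-part read handling earlier in this hunk reduces to a small reassembly protocol: the first response carries a two-byte start marker that is stripped, each following response begins with a block number (counting from zero for the second block), and a block number of 0xff marks the final block, whose payload is still appended. A compact sketch of the middle/end step, with hypothetical names and no SMBus plumbing (272 stands in for IPMI_MAX_MSG_LENGTH):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define MAX_MSG 272   /* stand-in for IPMI_MAX_MSG_LENGTH */

struct reasm {
	uint8_t  buf[MAX_MSG];
	size_t   len;          /* bytes gathered so far */
	unsigned next_block;   /* 0 for the first middle block */
};

/* Feed one middle/end read into the buffer. Returns 1 when the 0xff
 * terminator completes the message, 0 to keep reading, -1 on an
 * out-of-sequence or oversized block (the abort cases above). */
static int reasm_feed(struct reasm *r, const uint8_t *data, size_t len)
{
	uint8_t blocknum = data[0];

	if (blocknum != 0xff && blocknum != r->next_block)
		return -1;
	if (r->len + len - 1 > MAX_MSG)
		return -1;

	memcpy(r->buf + r->len, data + 1, len - 1);
	r->len += len - 1;
	r->next_block++;

	return blocknum == 0xff ? 1 : 0;
}
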
*/ + retry_timeout((unsigned long) ssif_info); + } else { + /* Wait a jiffie then request the next message */ + ssif_info->waiting_alert = true; + ssif_info->retries_left = SSIF_RECV_RETRIES; + ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_MSG_PART_JIFFIES); + ipmi_ssif_unlock_cond(ssif_info, flags); + } } } @@ -881,6 +953,8 @@ static int start_resend(struct ssif_info *ssif_info) int rv; int command; + ssif_info->got_alert = false; + if (ssif_info->data_len > 32) { command = SSIF_IPMI_MULTI_PART_REQUEST_START; ssif_info->multi_data = ssif_info->data; @@ -915,7 +989,7 @@ static int start_send(struct ssif_info *ssif_info, return -E2BIG; ssif_info->retries_left = SSIF_SEND_RETRIES; - memcpy(ssif_info->data+1, data, len); + memcpy(ssif_info->data + 1, data, len); ssif_info->data_len = len; return start_resend(ssif_info); } @@ -1200,7 +1274,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v) { seq_puts(m, "ssif\n"); - return seq_has_overflowed(m); + return 0; } static int smi_type_proc_open(struct inode *inode, struct file *file) @@ -1243,6 +1317,8 @@ static int smi_stats_proc_show(struct seq_file *m, void *v) ssif_get_stat(ssif_info, events)); seq_printf(m, "watchdog_pretimeouts: %u\n", ssif_get_stat(ssif_info, watchdog_pretimeouts)); + seq_printf(m, "alerts: %u\n", + ssif_get_stat(ssif_info, alerts)); return 0; } @@ -1258,6 +1334,23 @@ static const struct file_operations smi_stats_proc_ops = { .release = single_release, }; +static int strcmp_nospace(char *s1, char *s2) +{ + while (*s1 && *s2) { + while (isspace(*s1)) + s1++; + while (isspace(*s2)) + s2++; + if (*s1 > *s2) + return 1; + if (*s1 < *s2) + return -1; + s1++; + s2++; + } + return 0; +} + static struct ssif_addr_info *ssif_info_find(unsigned short addr, char *adapter_name, bool match_null_name) @@ -1272,8 +1365,10 @@ restart: /* One is NULL and one is not */ continue; } - if (strcmp(info->adapter_name, adapter_name)) - /* Names to not match */ + if (adapter_name && + strcmp_nospace(info->adapter_name, + adapter_name)) + /* Names do not match */ continue; } found = info; @@ -1306,6 +1401,12 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev) return false; } +/* + * Global enables we care about. + */ +#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ + IPMI_BMC_EVT_MSG_INTR) + static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) { unsigned char msg[3]; @@ -1391,13 +1492,33 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) break; case SSIF_MULTI_2_PART: - if (ssif_info->max_xmit_msg_size > 64) - ssif_info->max_xmit_msg_size = 64; + if (ssif_info->max_xmit_msg_size > 63) + ssif_info->max_xmit_msg_size = 63; if (ssif_info->max_recv_msg_size > 62) ssif_info->max_recv_msg_size = 62; break; case SSIF_MULTI_n_PART: + /* + * The specification is rather confusing at + * this point, but I think I understand what + * is meant. At least I have a workable + * solution. With multi-part messages, you + * cannot send a message that is a multiple of + * 32-bytes in length, because the start and + * middle messages are 32-bytes and the end + * message must be at least one byte. You + * can't fudge on an extra byte, that would + * screw up things like fru data writes. So + * we limit the length to 63 bytes. That way + * a 32-byte message gets sent as a single + * part. 
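
Earlier in this hunk, the new strcmp_nospace() helper compares two strings while skipping any whitespace in either operand, so a hard-coded adapter name can match the kernel's adapter name whatever the spacing. A small user-space usage sketch (the adapter names here are made up), with the helper duplicated so it stands alone (plus an unsigned-char cast for user-space ctype):

#include <ctype.h>
#include <stdio.h>

static int strcmp_nospace(char *s1, char *s2)
{
	while (*s1 && *s2) {
		while (isspace((unsigned char)*s1))
			s1++;
		while (isspace((unsigned char)*s2))
			s2++;
		if (*s1 > *s2)
			return 1;
		if (*s1 < *s2)
			return -1;
		s1++;
		s2++;
	}
	return 0;
}

int main(void)
{
	/* 0: same name, spacing ignored */
	printf("%d\n", strcmp_nospace("SMBus I801 adapter", "SMBusI801adapter"));
	/* nonzero: genuinely different names */
	printf("%d\n", strcmp_nospace("SMBus I801 adapter", "SMBus PIIX4 adapter"));
	return 0;
}

Note that, as written, a name that is a strict prefix of the other also compares equal, since the loop stops at the first terminating NUL.
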
A larger message will be a 32-byte + * start and the next message is always going + * to be 1-31 bytes in length. Not ideal, but + * it should work. + */ + if (ssif_info->max_xmit_msg_size > 63) + ssif_info->max_xmit_msg_size = 63; break; default: @@ -1407,7 +1528,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) } else { no_support: /* Assume no multi-part or PEC support */ - pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", + pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", rv, len, resp[2]); ssif_info->max_xmit_msg_size = 32; @@ -1436,6 +1557,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) goto found; } + ssif_info->global_enables = resp[3]; + if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { ssif_info->has_event_buffer = true; /* buffer is already enabled, nothing to do. */ @@ -1444,18 +1567,37 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; - msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; + msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF; rv = do_cmd(client, 3, msg, &len, resp); if (rv || (len < 2)) { - pr_warn(PFX "Error getting global enables: %d %d %2.2x\n", + pr_warn(PFX "Error setting global enables: %d %d %2.2x\n", rv, len, resp[2]); rv = 0; /* Not fatal */ goto found; } - if (resp[2] == 0) + if (resp[2] == 0) { /* A successful return means the event buffer is supported. */ ssif_info->has_event_buffer = true; + ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF; + } + + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; + msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR; + rv = do_cmd(client, 3, msg, &len, resp); + if (rv || (len < 2)) { + pr_warn(PFX "Error setting global enables: %d %d %2.2x\n", + rv, len, resp[2]); + rv = 0; /* Not fatal */ + goto found; + } + + if (resp[2] == 0) { + /* A successful return means the alert is supported. */ + ssif_info->supports_alert = true; + ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR; + } found: ssif_info->intf_num = atomic_inc_return(&next_intf); @@ -1813,6 +1955,7 @@ static struct i2c_driver ssif_i2c_driver = { }, .probe = ssif_probe, .remove = ssif_remove, + .alert = ssif_alert, .id_table = ssif_id, .detect = ssif_detect }; @@ -1832,7 +1975,7 @@ static int init_ipmi_ssif(void) rv = new_ssif_client(addr[i], adapter_name[i], dbg[i], slave_addrs[i], SI_HARDCODED); - if (!rv) + if (rv) pr_err(PFX "Couldn't add hardcoded device at addr 0x%x\n", addr[i]); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 7a73a27..61c417b 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -158,9 +158,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int entered_state; struct cpuidle_state *target_state = &drv->states[index]; + bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); ktime_t time_start, time_end; s64 diff; + /* + * Tell the time framework to switch to a broadcast timer because our + * local timer will be shut down. If a local timer is used from another + * CPU as a broadcast timer, this call may fail if it is not available. 
+ */ + if (broadcast && tick_broadcast_enter()) + return -EBUSY; + trace_cpu_idle_rcuidle(index, dev->cpu); time_start = ktime_get(); @@ -169,6 +178,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, time_end = ktime_get(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + if (broadcast) { + if (WARN_ON_ONCE(!irqs_disabled())) + local_irq_disable(); + + tick_broadcast_exit(); + } + if (!cpuidle_state_is_coupled(dev, drv, entered_state)) local_irq_enable(); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fd7ac13..bda2cb0 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -437,6 +437,7 @@ config IMG_MDC_DMA config XGENE_DMA tristate "APM X-Gene DMA support" + depends on ARCH_XGENE || COMPILE_TEST select DMA_ENGINE select DMA_ENGINE_RAID select ASYNC_TX_ENABLE_CHANNEL_SWITCH diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 0e035a8..2890d74 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -571,11 +571,15 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) chan = private_candidate(&mask, device, NULL, NULL); if (chan) { + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); if (err) { pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); } } diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index f705798..ebd8a5f 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -673,6 +673,7 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, * Power management */ +#ifdef CONFIG_PM static int usb_dmac_runtime_suspend(struct device *dev) { struct usb_dmac *dmac = dev_get_drvdata(dev); @@ -690,6 +691,7 @@ static int usb_dmac_runtime_resume(struct device *dev) return usb_dmac_init(dmac); } +#endif /* CONFIG_PM */ static const struct dev_pm_ops usb_dmac_pm = { SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c index de67fce..e45d1f1 100644 --- a/drivers/extcon/extcon-usb-gpio.c +++ b/drivers/extcon/extcon-usb-gpio.c @@ -119,6 +119,18 @@ static int usb_extcon_probe(struct platform_device *pdev) return PTR_ERR(info->id_gpiod); } + info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable); + if (IS_ERR(info->edev)) { + dev_err(dev, "failed to allocate extcon device\n"); + return -ENOMEM; + } + + ret = devm_extcon_dev_register(dev, info->edev); + if (ret < 0) { + dev_err(dev, "failed to register extcon device\n"); + return ret; + } + ret = gpiod_set_debounce(info->id_gpiod, USB_GPIO_DEBOUNCE_MS * 1000); if (ret < 0) @@ -142,18 +154,6 @@ static int usb_extcon_probe(struct platform_device *pdev) return ret; } - info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable); - if (IS_ERR(info->edev)) { - dev_err(dev, "failed to allocate extcon device\n"); - return -ENOMEM; - } - - ret = devm_extcon_dev_register(dev, info->edev); - if (ret < 0) { - dev_err(dev, "failed to register extcon device\n"); - return ret; - } - platform_set_drvdata(pdev, info); device_init_wakeup(dev, 1); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 6e45a43..97b1616 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -499,19 +499,19 @@ static int __init dmi_present(const u8 *buf) buf += 16; if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) 
{ + if (smbios_ver) + dmi_ver = smbios_ver; + else + dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F); dmi_num = get_unaligned_le16(buf + 12); dmi_len = get_unaligned_le16(buf + 6); dmi_base = get_unaligned_le32(buf + 8); if (dmi_walk_early(dmi_decode) == 0) { if (smbios_ver) { - dmi_ver = smbios_ver; - pr_info("SMBIOS %d.%d%s present.\n", - dmi_ver >> 8, dmi_ver & 0xFF, - (dmi_ver < 0x0300) ? "" : ".x"); + pr_info("SMBIOS %d.%d present.\n", + dmi_ver >> 8, dmi_ver & 0xFF); } else { - dmi_ver = (buf[14] & 0xF0) << 4 | - (buf[14] & 0x0F); pr_info("Legacy DMI %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF); } diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 87b8e3b..5c55227 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -120,7 +120,8 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { kset_unregister(map_kset); - return entry; + map_kset = NULL; + return ERR_PTR(-ENOMEM); } memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, @@ -132,6 +133,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) if (ret) { kobject_put(&entry->kobj); kset_unregister(map_kset); + map_kset = NULL; return ERR_PTR(ret); } @@ -195,8 +197,6 @@ out_add_entry: entry = *(map_entries + j); kobject_put(&entry->kobj); } - if (map_kset) - kset_unregister(map_kset); out: return ret; } diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index cd1d5bf..b232397 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -1054,38 +1054,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank) dev_err(bank->dev, "Could not get gpio dbck\n"); } -static void -omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start, - unsigned int num) -{ - struct irq_chip_generic *gc; - struct irq_chip_type *ct; - - gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base, - handle_simple_irq); - if (!gc) { - dev_err(bank->dev, "Memory alloc failed for gc\n"); - return; - } - - ct = gc->chip_types; - - /* NOTE: No ack required, reading IRQ status clears it. 
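
Back in the dmi_scan.c hunk above, the legacy-DMI version is now decoded from the BCD revision byte at offset 14 of the _DMI_ anchor before the early table walk runs, presumably so dmi_decode() already sees the right version. The decode itself is just nibble shuffling; a worked example with a hypothetical revision byte of 0x24:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t rev = 0x24;   /* hypothetical BCD revision byte, meaning 2.4 */
	uint16_t dmi_ver = (rev & 0xF0) << 4 | (rev & 0x0F);

	/* prints "Legacy DMI 2.4 present." just like the driver message */
	printf("Legacy DMI %d.%d present.\n", dmi_ver >> 8, dmi_ver & 0xFF);
	return 0;
}
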
*/ - ct->chip.irq_mask = irq_gc_mask_set_bit; - ct->chip.irq_unmask = irq_gc_mask_clr_bit; - ct->chip.irq_set_type = omap_gpio_irq_type; - - if (bank->regs->wkup_en) - ct->chip.irq_set_wake = omap_gpio_wake_enable; - - ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride; - irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, - IRQ_NOREQUEST | IRQ_NOPROBE, 0); -} - static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) { - int j; static int gpio; int irq_base = 0; int ret; @@ -1132,6 +1102,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) } #endif + /* MPUIO is a bit different, reading IRQ status clears it */ + if (bank->is_mpuio) { + irqc->irq_ack = dummy_irq_chip.irq_ack; + irqc->irq_mask = irq_gc_mask_set_bit; + irqc->irq_unmask = irq_gc_mask_clr_bit; + if (!bank->regs->wkup_en) + irqc->irq_set_wake = NULL; + } + ret = gpiochip_irqchip_add(&bank->chip, irqc, irq_base, omap_gpio_irq_handler, IRQ_TYPE_NONE); @@ -1145,15 +1124,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, omap_gpio_irq_handler); - for (j = 0; j < bank->width; j++) { - int irq = irq_find_mapping(bank->chip.irqdomain, j); - if (bank->is_mpuio) { - omap_mpuio_alloc_gc(bank, irq, bank->width); - irq_set_chip_and_handler(irq, NULL, NULL); - set_irq_flags(irq, 0); - } - } - return 0; } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d2303d5..725d161 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -550,7 +550,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, length = min(agpio->pin_table_length, (u16)(pin_index + bits)); for (i = pin_index; i < length; ++i) { - unsigned pin = agpio->pin_table[i]; + int pin = agpio->pin_table[i]; struct acpi_gpio_connection *conn; struct gpio_desc *desc; bool found; diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 7722ed5..af3bc7a 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -551,6 +551,7 @@ static struct class gpio_class = { */ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) { + struct gpio_chip *chip; unsigned long flags; int status; const char *ioname = NULL; @@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) return -EINVAL; } + chip = desc->chip; + mutex_lock(&sysfs_lock); + /* check if chip is being removed */ + if (!chip || !chip->exported) { + status = -ENODEV; + goto fail_unlock; + } + spin_lock_irqsave(&gpio_lock, flags); if (!test_bit(FLAG_REQUESTED, &desc->flags) || test_bit(FLAG_EXPORT, &desc->flags)) { @@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip) { int status; struct device *dev; + struct gpio_desc *desc; + unsigned int i; mutex_lock(&sysfs_lock); dev = class_find_device(&gpio_class, NULL, chip, match_export); if (dev) { put_device(dev); device_unregister(dev); + /* prevent further gpiod exports */ chip->exported = false; status = 0; } else @@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip) if (status) chip_dbg(chip, "%s: status %d\n", __func__, status); + + /* unregister gpiod class devices owned by sysfs */ + for (i = 0; i < chip->ngpio; i++) { + desc = &chip->desc[i]; + if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) + gpiod_free(desc); + } } static int __init gpiolib_sysfs_init(void) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 69af73f..596ee5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -430,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm, BUG_ON(!dqm || !qpd); - BUG_ON(!list_empty(&qpd->queues_list)); + pr_debug("In func %s\n", __func__); - pr_debug("kfd: In func %s\n", __func__); + pr_debug("qpd->queues_list is %s\n", + list_empty(&qpd->queues_list) ? "empty" : "not empty"); retval = 0; mutex_lock(&dqm->lock); @@ -882,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, return -ENOMEM; } + init_sdma_vm(dqm, q, qpd); + retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval != 0) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 661c660..e469c4b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -728,9 +728,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute", dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz( dev->gpu->kgd)); + sysfs_show_64bit_prop(buffer, "local_mem_size", - dev->gpu->kfd2kgd->get_vmem_size( - dev->gpu->kgd)); + (unsigned long long int) 0); sysfs_show_32bit_prop(buffer, "fw_version", dev->gpu->kfd2kgd->get_fw_version( diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index c8a3447..af9662e 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) /* Reinitialize corresponding vblank timestamp if high-precision query * available. Skip this step if query unsupported or failed. Will - * reinitialize delayed at next vblank interrupt in that case. + * reinitialize delayed at next vblank interrupt in that case and + * assign 0 for now, to mark the vblanktimestamp as invalid. */ - if (rc) { - tslot = atomic_read(&vblank->count) + diff; - vblanktimestamp(dev, crtc, tslot) = t_vblank; - } + tslot = atomic_read(&vblank->count) + diff; + vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0}; smp_mb__before_atomic(); atomic_add(diff, &vblank->count); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c302ffb..a19d2c7 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -699,6 +699,16 @@ static int i915_drm_resume(struct drm_device *dev) intel_init_pch_refclk(dev); drm_mode_config_reset(dev); + /* + * Interrupts have to be enabled before any batches are run. If not the + * GPU will hang. i915_gem_init_hw() will initiate batches to + * update/restore the context. + * + * Modeset enabling in intel_modeset_init_hw() also needs working + * interrupts. + */ + intel_runtime_pm_enable_interrupts(dev_priv); + mutex_lock(&dev->struct_mutex); if (i915_gem_init_hw(dev)) { DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); @@ -706,9 +716,6 @@ static int i915_drm_resume(struct drm_device *dev) } mutex_unlock(&dev->struct_mutex); - /* We need working interrupts for modeset enabling ... 
*/ - intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(dev); spin_lock_irq(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3da1af4..773d1d2 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6074,6 +6074,8 @@ enum skl_disp_power_wells { #define GTFIFOCTL 0x120008 #define GT_FIFO_FREE_ENTRIES_MASK 0x7f #define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12) +#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11) #define HSW_IDICR 0x9008 #define IDIHASHMSK(x) (((x) & 0x3f) << 16) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d547d9c8..d0f3cbc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13635,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = { }; static struct intel_quirk intel_quirks[] = { - /* HP Mini needs pipe A force quirk (LP: #322104) */ - { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, - /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d023710..f27346e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1348,7 +1348,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->has_dp_encoder = true; pipe_config->has_drrs = false; - pipe_config->has_audio = intel_dp->has_audio; + pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { intel_fixed_panel_mode(intel_connector->panel.fixed_mode, @@ -2211,8 +2211,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, int dotclock; tmp = I915_READ(intel_dp->output_reg); - if (tmp & DP_AUDIO_OUTPUT_ENABLE) - pipe_config->has_audio = true; + + pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { if (tmp & DP_SYNC_HS_HIGH) @@ -3812,7 +3812,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (val == 0) break; - intel_dp->sink_rates[i] = val * 200; + /* Value read is in kHz while drm clock is saved in deca-kHz */ + intel_dp->sink_rates[i] = (val * 200) / 10; } intel_dp->num_sink_rates = i; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 5abda1d..fbcc7df 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -813,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) static const struct dmi_system_id intel_dual_link_lvds[] = { { .callback = intel_dual_link_lvds_callback, - .ident = "Apple MacBook Pro (Core i5/i7 Series)", + .ident = "Apple MacBook Pro 15\" (2010)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"), + }, + }, + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro 15\" (2011)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), }, }, + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro 15\" (2012)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"), + }, + }, { } /* terminating entry */ }; @@ -848,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) if (i915.lvds_channel_mode > 0) return 
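
The sink-rate conversion in the intel_dp.c hunk above, intel_dp->sink_rates[i] = (val * 200) / 10, turns a raw DPCD link-rate entry (apparently in 200 kHz units) into the deca-kHz unit the rest of the code stores. For instance, an entry of 8100 corresponds to 8100 * 200 kHz = 1.62 GHz and is stored as 162000:

#include <stdio.h>

int main(void)
{
	unsigned val = 8100;            /* hypothetical DPCD entry, 200 kHz units */
	int rate = (val * 200) / 10;    /* 162000, i.e. 1.62 GHz in deca-kHz */

	printf("sink rate: %d\n", rate);
	return 0;
}
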
i915.lvds_channel_mode == 2; + /* single channel LVDS is limited to 112 MHz */ + if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock + > 112999) + return true; + if (dmi_check_system(intel_dual_link_lvds)) return true; @@ -1111,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev) out: mutex_unlock(&dev->mode_config.mutex); + intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); + lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); DRM_DEBUG_KMS("detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? "dual" : "single"); @@ -1125,7 +1148,6 @@ out: } drm_connector_register(connector); - intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); intel_panel_setup_backlight(connector, INVALID_PIPE); return; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index ab5cc94..ff2a746 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -360,6 +360,14 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev, __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); + /* WaDisableShadowRegForCpd:chv */ + if (IS_CHERRYVIEW(dev)) { + __raw_i915_write32(dev_priv, GTFIFOCTL, + __raw_i915_read32(dev_priv, GTFIFOCTL) | + GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | + GT_FIFO_CTL_RC6_POLICY_STALL); + } + intel_uncore_forcewake_reset(dev, restore_forcewake); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index dac78ad..42b2ea3 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, else radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + /* if there is no audio, set MINM_OVER_MAXP */ + if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) + radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; if (rdev->family < CHIP_RV770) radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; /* use frac fb div on APUs */ diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index f57c1ab..dd39f43 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1761,17 +1761,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); int encoder_mode = atombios_get_encoder_mode(encoder); DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", radeon_encoder->encoder_id, mode, radeon_encoder->devices, radeon_encoder->active_device); - if (connector && (radeon_audio != 0) && + if ((radeon_audio != 0) && ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || - (ENCODER_MODE_IS_DP(encoder_mode) && - drm_detect_monitor_audio(radeon_connector_edid(connector))))) + ENCODER_MODE_IS_DP(encoder_mode))) radeon_audio_dpms(encoder, mode); switch (radeon_encoder->encoder_id) { diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 28faea9..a0c35bb 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -5822,7 +5822,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, 
rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); @@ -5837,7 +5837,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) /* restore context1-15 */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); - WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); for (i = 1; i < 16; i++) { if (i < 8) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 3adc2af..68fd9fc 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev, WREG32(DCCG_AUDIO_DTO1_MODULE, clock); } } - -void dce6_dp_enable(struct drm_encoder *encoder, bool enable) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - - if (!dig || !dig->afmt) - return; - - if (enable) { - WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, - EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); - WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, - EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ - EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ - EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ - EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ - } else { - WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); - } - - dig->afmt->enabled = enable; -} diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f848acf..05e6d6e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2485,7 +2485,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index c18d4ec..0926739 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset, WREG32(AFMT_AVI_INFO3 + offset, frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); - WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset, - HDMI_AVI_INFO_SEND | /* enable AVI info frames */ - HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */ - WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, - HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ - ~HDMI_AVI_INFO_LINE_MASK); + HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ + ~HDMI_AVI_INFO_LINE_MASK); } void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, @@ -370,9 
+366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset) WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, AFMT_AUDIO_CHANNEL_ENABLE(0xff)); + WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, + HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ + HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ + /* allow 60958 channel status and send audio packets fields to be updated */ - WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, - AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); + WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset, + AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); } @@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) return; if (enable) { - WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, - HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ - - WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, - HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ - HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, - HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ - HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, + HDMI_AVI_INFO_SEND | /* enable AVI info frames */ + HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ + HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ + HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ + WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + AFMT_AUDIO_SAMPLE_SEND); + } else { + WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, + HDMI_AVI_INFO_SEND | /* enable AVI info frames */ + HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */ + WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + ~AFMT_AUDIO_SAMPLE_SEND); + } } else { + WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + ~AFMT_AUDIO_SAMPLE_SEND); WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); } @@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if (!dig || !dig->afmt) return; - if (enable) { + if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; uint32_t val; + WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + AFMT_AUDIO_SAMPLE_SEND); + WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); - if (radeon_connector->con_priv) { + if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) { dig_connector = radeon_connector->con_priv; val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); @@ -457,6 +470,8 @@ void 
evergreen_dp_enable(struct drm_encoder *encoder, bool enable) EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ } else { WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); + WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, + ~AFMT_AUDIO_SAMPLE_SEND); } dig->afmt->enabled = enable; diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index e8a496f..aba2f42 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1282,7 +1282,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(6)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); @@ -1301,7 +1301,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) */ for (i = 1; i < 8; i++) { WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), + rdev->vm_manager.max_pfn - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), rdev->vm_manager.saved_table_addr[i]); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 8f6d862..25b4ac9 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1112,7 +1112,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index dd6606b..e85894a 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset, WREG32(HDMI0_AVI_INFO3 + offset, frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); + WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset, + HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */ + WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, - HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ - HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ + HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ + HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ - WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset, - HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */ } /* diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d2abe48..46eb0fa 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1673,7 +1673,6 @@ struct radeon_uvd { struct radeon_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; - void *saved_bo; atomic_t handles[RADEON_MAX_UVD_HANDLES]; struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; unsigned img_size[RADEON_MAX_UVD_HANDLES]; diff --git 
a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index fafd8ce..8dbf508 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = { static struct radeon_asic_ring rv770_uvd_ring = { .ib_execute = &uvd_v1_0_ib_execute, .emit_fence = &uvd_v2_2_fence_emit, - .emit_semaphore = &uvd_v1_0_semaphore_emit, + .emit_semaphore = &uvd_v2_2_semaphore_emit, .cs_parse = &radeon_uvd_cs_parse, .ring_test = &uvd_v1_0_ring_test, .ib_test = &uvd_v1_0_ib_test, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index cf0a90b..a3ca8cd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int uvd_v2_2_resume(struct radeon_device *rdev); void uvd_v2_2_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); +bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait); /* uvd v3.1 */ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index 48d49e6..dcb7796 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); -void dce6_dp_enable(struct drm_encoder *encoder, bool enable); static const u32 pin_offsets[7] = { @@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = { .set_avi_packet = evergreen_set_avi_packet, .set_audio_packet = dce4_set_audio_packet, .mode_set = radeon_audio_dp_mode_set, - .dpms = dce6_dp_enable, + .dpms = evergreen_dp_enable, }; static void radeon_audio_interface_init(struct radeon_device *rdev) @@ -461,30 +460,37 @@ void radeon_audio_detect(struct drm_connector *connector, if (!connector || !connector->encoder) return; + if (!radeon_encoder_is_digital(connector->encoder)) + return; + rdev = connector->encoder->dev->dev_private; + + if (!radeon_audio_chipset_supported(rdev)) + return; + radeon_encoder = to_radeon_encoder(connector->encoder); dig = radeon_encoder->enc_priv; - if (status == connector_status_connected) { - struct radeon_connector *radeon_connector; - int sink_type; - - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { - radeon_encoder->audio = NULL; - return; - } + if (!dig->afmt) + return; - radeon_connector = to_radeon_connector(connector); - sink_type = radeon_dp_getsinktype(radeon_connector); + if (status == connector_status_connected) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && - sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) + radeon_dp_getsinktype(radeon_connector) == + CONNECTOR_OBJECT_ID_DISPLAYPORT) radeon_encoder->audio = rdev->audio.dp_funcs; else radeon_encoder->audio = rdev->audio.hdmi_funcs; dig->afmt->pin = radeon_audio_get_pin(connector->encoder); - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + radeon_audio_enable(rdev, dig->afmt->pin, 0xf); + } else { + 
radeon_audio_enable(rdev, dig->afmt->pin, 0); + dig->afmt->pin = NULL; + } } else { radeon_audio_enable(rdev, dig->afmt->pin, 0); dig->afmt->pin = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index cebb65e..d17d251 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1379,8 +1379,10 @@ out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) + if (radeon_audio != 0) { + radeon_connector_get_edid(connector); radeon_audio_detect(connector, ret); + } exit: pm_runtime_mark_last_busy(connector->dev->dev); @@ -1717,8 +1719,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) + if (radeon_audio != 0) { + radeon_connector_get_edid(connector); radeon_audio_detect(connector, ret); + } out: pm_runtime_mark_last_busy(connector->dev->dev); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 4d0f96c..ab39b85 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->dma_reloc_idx = 0; /* FIXME: we assume that each relocs use 4 dwords */ p->nrelocs = chunk->length_dw / 4; - p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL); + p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list)); if (p->relocs == NULL) { return -ENOMEM; } @@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo } } kfree(parser->track); - kfree(parser->relocs); + drm_free_large(parser->relocs); drm_free_large(parser->vm_bos); for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 1017338..2b98ed3 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -666,6 +666,9 @@ radeon_dp_mst_probe(struct radeon_connector *radeon_connector) int ret; u8 msg[1]; + if (!radeon_mst) + return 0; + if (dig_connector->dpcd[DP_DPCD_REV] < 0x12) return 0; diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 0170137..eef006c 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -135,28 +135,31 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, while (it) { struct radeon_mn_node *node; struct radeon_bo *bo; - int r; + long r; node = container_of(it, struct radeon_mn_node, it); it = interval_tree_iter_next(it, start, end); list_for_each_entry(bo, &node->bos, mn_list) { + if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) + continue; + r = radeon_bo_reserve(bo, true); if (r) { - DRM_ERROR("(%d) failed to reserve user bo\n", r); + DRM_ERROR("(%ld) failed to reserve user bo\n", r); continue; } r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, MAX_SCHEDULE_TIMEOUT); - if (r) - DRM_ERROR("(%d) failed to wait for user bo\n", r); + if (r <= 0) + DRM_ERROR("(%ld) failed to wait for user bo\n", r); radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (r) - DRM_ERROR("(%d) failed to validate user bo\n", r); + DRM_ERROR("(%ld) failed to validate user bo\n", r); 
radeon_bo_unreserve(bo); } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index b292aca..edafd3c 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) { struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); struct radeon_ttm_tt *gtt = (void *)ttm; - struct scatterlist *sg; - int i; + struct sg_page_iter sg_iter; int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? @@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) /* free the sg table and pages again */ dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); - for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { - struct page *page = sg_page(sg); - + for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) { + struct page *page = sg_page_iter_page(&sg_iter); if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) set_page_dirty(page); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index c10b2ae..6edcb54 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev) int radeon_uvd_suspend(struct radeon_device *rdev) { - unsigned size; - void *ptr; - int i; + int i, r; if (rdev->uvd.vcpu_bo == NULL) return 0; - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) - if (atomic_read(&rdev->uvd.handles[i])) - break; + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + uint32_t handle = atomic_read(&rdev->uvd.handles[i]); + if (handle != 0) { + struct radeon_fence *fence; - if (i == RADEON_MAX_UVD_HANDLES) - return 0; + radeon_uvd_note_usage(rdev); - size = radeon_bo_size(rdev->uvd.vcpu_bo); - size -= rdev->uvd_fw->size; + r = radeon_uvd_get_destroy_msg(rdev, + R600_RING_TYPE_UVD_INDEX, handle, &fence); + if (r) { + DRM_ERROR("Error destroying UVD (%d)!\n", r); + continue; + } - ptr = rdev->uvd.cpu_addr; - ptr += rdev->uvd_fw->size; + radeon_fence_wait(fence, false); + radeon_fence_unref(&fence); - rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); - memcpy(rdev->uvd.saved_bo, ptr, size); + rdev->uvd.filp[i] = NULL; + atomic_set(&rdev->uvd.handles[i], 0); + } + } return 0; } @@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev) ptr = rdev->uvd.cpu_addr; ptr += rdev->uvd_fw->size; - if (rdev->uvd.saved_bo != NULL) { - memcpy(ptr, rdev->uvd.saved_bo, size); - kfree(rdev->uvd.saved_bo); - rdev->uvd.saved_bo = NULL; - } else - memset(ptr, 0, size); + memset(ptr, 0, size); return 0; } @@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) return 0; } +static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, + unsigned stream_type) +{ + switch (stream_type) { + case 0: /* H264 */ + case 1: /* VC1 */ + /* always supported */ + return 0; + + case 3: /* MPEG2 */ + case 4: /* MPEG4 */ + /* only since UVD 3 */ + if (p->rdev->family >= CHIP_PALM) + return 0; + + /* fall through */ + default: + DRM_ERROR("UVD codec not supported by hardware %d!\n", + stream_type); + return -EINVAL; + } +} + static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, unsigned offset, unsigned buf_sizes[]) { @@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - if (msg_type == 1) { - /* it's a decode msg, calc buffer sizes */ - r = radeon_uvd_cs_msg_decode(msg, 
buf_sizes); - /* calc image size (width * height) */ - img_size = msg[6] * msg[7]; + switch (msg_type) { + case 0: + /* it's a create msg, calc image size (width * height) */ + img_size = msg[7] * msg[8]; + + r = radeon_uvd_validate_codec(p, msg[4]); radeon_bo_kunmap(bo); if (r) return r; - } else if (msg_type == 2) { + /* try to alloc a new handle */ + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { + DRM_ERROR("Handle 0x%x already in use!\n", handle); + return -EINVAL; + } + + if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { + p->rdev->uvd.filp[i] = p->filp; + p->rdev->uvd.img_size[i] = img_size; + return 0; + } + } + + DRM_ERROR("No more free UVD handles!\n"); + return -EINVAL; + + case 1: + /* it's a decode msg, validate codec and calc buffer sizes */ + r = radeon_uvd_validate_codec(p, msg[4]); + if (!r) + r = radeon_uvd_cs_msg_decode(msg, buf_sizes); + radeon_bo_kunmap(bo); + if (r) + return r; + + /* validate the handle */ + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { + if (p->rdev->uvd.filp[i] != p->filp) { + DRM_ERROR("UVD handle collision detected!\n"); + return -EINVAL; + } + return 0; + } + } + + DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + return -ENOENT; + + case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); radeon_bo_kunmap(bo); return 0; - } else { - /* it's a create msg, calc image size (width * height) */ - img_size = msg[7] * msg[8]; - radeon_bo_kunmap(bo); - if (msg_type != 0) { - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); - return -EINVAL; - } - - /* it's a create msg, no special handling needed */ - } - - /* create or decode, validate the handle */ - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (atomic_read(&p->rdev->uvd.handles[i]) == handle) - return 0; - } + default: - /* handle not found try to alloc a new one */ - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { - p->rdev->uvd.filp[i] = p->filp; - p->rdev->uvd.img_size[i] = img_size; - return 0; - } + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + return -EINVAL; } - DRM_ERROR("No more free UVD handles!\n"); + BUG(); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 24f849f..0de5711 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, * * @p: parser context * @handle: handle to validate + * @allocated: allocated a new handle? * * Validates the handle and return the found session index or -EINVAL * we we don't have another free session index. 
*/ -int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) +static int radeon_vce_validate_handle(struct radeon_cs_parser *p, + uint32_t handle, bool *allocated) { unsigned i; + *allocated = false; + /* validate the handle */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (atomic_read(&p->rdev->vce.handles[i]) == handle) + if (atomic_read(&p->rdev->vce.handles[i]) == handle) { + if (p->rdev->vce.filp[i] != p->filp) { + DRM_ERROR("VCE handle collision detected!\n"); + return -EINVAL; + } return i; + } } /* handle not found try to alloc a new one */ @@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { p->rdev->vce.filp[i] = p->filp; p->rdev->vce.img_size[i] = 0; + *allocated = true; return i; } } @@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) int radeon_vce_cs_parse(struct radeon_cs_parser *p) { int session_idx = -1; - bool destroyed = false; + bool destroyed = false, created = false, allocated = false; uint32_t tmp, handle = 0; uint32_t *size = &tmp; - int i, r; + int i, r = 0; while (p->idx < p->chunk_ib->length_dw) { uint32_t len = radeon_get_ib_value(p, p->idx); @@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) if ((len < 8) || (len & 3)) { DRM_ERROR("invalid VCE command length (%d)!\n", len); - return -EINVAL; + r = -EINVAL; + goto out; } if (destroyed) { DRM_ERROR("No other command allowed after destroy!\n"); - return -EINVAL; + r = -EINVAL; + goto out; } switch (cmd) { case 0x00000001: // session handle = radeon_get_ib_value(p, p->idx + 2); - session_idx = radeon_vce_validate_handle(p, handle); + session_idx = radeon_vce_validate_handle(p, handle, + &allocated); if (session_idx < 0) return session_idx; size = &p->rdev->vce.img_size[session_idx]; @@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) break; case 0x01000001: // create + created = true; + if (!allocated) { + DRM_ERROR("Handle already in use!\n"); + r = -EINVAL; + goto out; + } + *size = radeon_get_ib_value(p, p->idx + 8) * radeon_get_ib_value(p, p->idx + 10) * 8 * 3 / 2; @@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, *size); if (r) - return r; + goto out; r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, *size / 3); if (r) - return r; + goto out; break; case 0x02000001: // destroy @@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, *size * 2); if (r) - return r; + goto out; break; case 0x05000004: // video bitstream buffer @@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, tmp); if (r) - return r; + goto out; break; case 0x05000005: // feedback buffer r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, 4096); if (r) - return r; + goto out; break; default: DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); - return -EINVAL; + r = -EINVAL; + goto out; } if (session_idx == -1) { DRM_ERROR("no session command at start of IB\n"); - return -EINVAL; + r = -EINVAL; + goto out; } p->idx += len / 4; } - if (destroyed) { - /* IB contains a destroy msg, free the handle */ + if (allocated && !created) { + DRM_ERROR("New session without create command!\n"); + r = -ENOENT; + } + +out: + if ((!r && destroyed) || (r && allocated)) { + /* + * IB contains a destroy msg or we have allocated an + * 
handle and got an error, anyway free the handle + */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); } - return 0; + return r; } /** diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 2a5a4a9..de42fc4 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, } mutex_lock(&vm->mutex); + soffset /= RADEON_GPU_PAGE_SIZE; + eoffset /= RADEON_GPU_PAGE_SIZE; + if (soffset || eoffset) { + struct interval_tree_node *it; + it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); + if (it && it != &bo_va->it) { + struct radeon_bo_va *tmp; + tmp = container_of(it, struct radeon_bo_va, it); + /* bo and tmp overlap, invalid offset */ + dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " + "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, + soffset, tmp->bo, tmp->it.start, tmp->it.last); + mutex_unlock(&vm->mutex); + return -EINVAL; + } + } + if (bo_va->it.start || bo_va->it.last) { if (bo_va->addr) { /* add a clone of the bo_va to clear the old address */ @@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, spin_lock(&vm->status_lock); list_add(&tmp->vm_status, &vm->freed); spin_unlock(&vm->status_lock); + + bo_va->addr = 0; } interval_tree_remove(&bo_va->it, &vm->va); @@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, bo_va->it.last = 0; } - soffset /= RADEON_GPU_PAGE_SIZE; - eoffset /= RADEON_GPU_PAGE_SIZE; if (soffset || eoffset) { - struct interval_tree_node *it; - it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); - if (it) { - struct radeon_bo_va *tmp; - tmp = container_of(it, struct radeon_bo_va, it); - /* bo and tmp overlap, invalid offset */ - dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " - "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, - soffset, tmp->bo, tmp->it.start, tmp->it.last); - mutex_unlock(&vm->mutex); - return -EINVAL; - } bo_va->it.start = soffset; bo_va->it.last = eoffset - 1; interval_tree_insert(&bo_va->it, &vm->va); @@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev, list_del(&bo_va->bo_list); mutex_lock(&vm->mutex); - interval_tree_remove(&bo_va->it, &vm->va); + if (bo_va->it.start || bo_va->it.last) + interval_tree_remove(&bo_va->it, &vm->va); spin_lock(&vm->status_lock); list_del(&bo_va->vm_status); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 01ee96a..c54d631 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -921,7 +921,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 3cf1e29..9ef2064 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -989,6 +989,9 @@ ((n) & 0x3FFF) << 16) /* UVD */ +#define UVD_SEMA_ADDR_LOW 0xef00 +#define UVD_SEMA_ADDR_HIGH 0xef04 +#define UVD_SEMA_CMD 0xef08 #define UVD_GPCOM_VCPU_CMD 0xef0c #define UVD_GPCOM_VCPU_DATA0 0xef10 
#define UVD_GPCOM_VCPU_DATA1 0xef14 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index b1d74bc..5326f75 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4303,7 +4303,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); @@ -4318,7 +4318,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) /* empty context1-15 */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); - WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); /* Assign the pt base to something valid for now; the pts used for * the VMs are determined by the application and setup and assigned * on the fly in the vm part of radeon_gart.c diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index b35bccf..ff8b83f 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2924,6 +2924,7 @@ struct si_dpm_quirk { static struct si_dpm_quirk si_dpm_quirk_list[] = { /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index e72b3cb..c6b1cbc 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_semaphore *semaphore, bool emit_wait) { - uint64_t addr = semaphore->gpu_addr; - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); - radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); - radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); - radeon_ring_write(ring, emit_wait ? 1 : 0); - - return true; + /* disable semaphores for UVD V1 hardware */ + return false; } /** diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 8919351..7ed778c 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, } /** + * uvd_v2_2_semaphore_emit - emit semaphore command + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * @semaphore: semaphore to emit commands for + * @emit_wait: true if we should emit a wait command + * + * Emit a semaphore command (either wait or signal) to the UVD ring. 
+ */ +bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); + radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); + radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); + radeon_ring_write(ring, emit_wait ? 1 : 0); + + return true; +} + +/** * uvd_v2_2_resume - memory controller programming * * @rdev: radeon_device pointer diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index ccb0ce0..4557f33 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1409,7 +1409,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data) struct vop *vop; struct resource *res; size_t alloc_size; - int ret; + int ret, irq; of_id = of_match_device(vop_driver_dt_match, dev); vop_data = of_id->data; @@ -1445,11 +1445,12 @@ static int vop_bind(struct device *dev, struct device *master, void *data) return ret; } - vop->irq = platform_get_irq(pdev, 0); - if (vop->irq < 0) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(dev, "cannot find irq for vop\n"); - return vop->irq; + return irq; } + vop->irq = (unsigned int)irq; spin_lock_init(&vop->reg_lock); spin_lock_init(&vop->irq_lock); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 1833abd..bfad15a 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -173,7 +173,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) drm->irq_enabled = true; /* syncpoints are used for full 32-bit hardware VBLANK counters */ - drm->vblank_disable_immediate = true; drm->max_vblank_count = 0xffffffff; err = drm_vblank_init(drm, drm->mode_config.num_crtc); diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index a04c49f..39ea67f 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -643,15 +643,6 @@ config BLK_DEV_TC86C001 help This driver adds support for Toshiba TC86C001 GOKU-S chip. -config BLK_DEV_CELLEB - tristate "Toshiba's Cell Reference Set IDE support" - depends on PPC_CELLEB - select BLK_DEV_IDEDMA_PCI - help - This driver provides support for the on-board IDE controller on - Toshiba Cell Reference Board. - If unsure, say Y. 
- endif # TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index a04ee82..2a8c417 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile @@ -38,7 +38,6 @@ obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o -obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c deleted file mode 100644 index 2a2d188..0000000 --- a/drivers/ide/scc_pata.c +++ /dev/null @@ -1,887 +0,0 @@ -/* - * Support for IDE interfaces on Celleb platform - * - * (C) Copyright 2006 TOSHIBA CORPORATION - * - * This code is based on drivers/ide/pci/siimage.c: - * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> - * Copyright (C) 2003 Red Hat - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#include <linux/types.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/ide.h> -#include <linux/init.h> - -#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4 - -#define SCC_PATA_NAME "scc IDE" - -#define TDVHSEL_MASTER 0x00000001 -#define TDVHSEL_SLAVE 0x00000004 - -#define MODE_JCUSFEN 0x00000080 - -#define CCKCTRL_ATARESET 0x00040000 -#define CCKCTRL_BUFCNT 0x00020000 -#define CCKCTRL_CRST 0x00010000 -#define CCKCTRL_OCLKEN 0x00000100 -#define CCKCTRL_ATACLKOEN 0x00000002 -#define CCKCTRL_LCLKEN 0x00000001 - -#define QCHCD_IOS_SS 0x00000001 - -#define QCHSD_STPDIAG 0x00020000 - -#define INTMASK_MSK 0xD1000012 -#define INTSTS_SERROR 0x80000000 -#define INTSTS_PRERR 0x40000000 -#define INTSTS_RERR 0x10000000 -#define INTSTS_ICERR 0x01000000 -#define INTSTS_BMSINT 0x00000010 -#define INTSTS_BMHE 0x00000008 -#define INTSTS_IOIRQS 0x00000004 -#define INTSTS_INTRQ 0x00000002 -#define INTSTS_ACTEINT 0x00000001 - -#define ECMODE_VALUE 0x01 - -static struct scc_ports { - unsigned long ctl, dma; - struct ide_host *host; /* for removing port from system */ -} scc_ports[MAX_HWIFS]; - -/* PIO transfer mode table */ -/* JCHST */ -static unsigned long JCHSTtbl[2][7] = { - {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */ - {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */ -}; - -/* JCHHT */ -static unsigned long JCHHTtbl[2][7] = { - {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */ - {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */ -}; - -/* JCHCT */ -static unsigned long JCHCTtbl[2][7] = { - {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */ - {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */ -}; - - -/* DMA transfer mode table */ -/* JCHDCTM/JCHDCTS */ -static unsigned long JCHDCTxtbl[2][7] = { - {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 
0x00}, /* 100MHz */ - {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */ -}; - -/* JCSTWTM/JCSTWTS */ -static unsigned long JCSTWTxtbl[2][7] = { - {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */ - {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */ -}; - -/* JCTSS */ -static unsigned long JCTSStbl[2][7] = { - {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */ - {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */ -}; - -/* JCENVT */ -static unsigned long JCENVTtbl[2][7] = { - {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */ - {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */ -}; - -/* JCACTSELS/JCACTSELM */ -static unsigned long JCACTSELtbl[2][7] = { - {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */ - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */ -}; - - -static u8 scc_ide_inb(unsigned long port) -{ - u32 data = in_be32((void*)port); - return (u8)data; -} - -static void scc_exec_command(ide_hwif_t *hwif, u8 cmd) -{ - out_be32((void *)hwif->io_ports.command_addr, cmd); - eieio(); - in_be32((void *)(hwif->dma_base + 0x01c)); - eieio(); -} - -static u8 scc_read_status(ide_hwif_t *hwif) -{ - return (u8)in_be32((void *)hwif->io_ports.status_addr); -} - -static u8 scc_read_altstatus(ide_hwif_t *hwif) -{ - return (u8)in_be32((void *)hwif->io_ports.ctl_addr); -} - -static u8 scc_dma_sff_read_status(ide_hwif_t *hwif) -{ - return (u8)in_be32((void *)(hwif->dma_base + 4)); -} - -static void scc_write_devctl(ide_hwif_t *hwif, u8 ctl) -{ - out_be32((void *)hwif->io_ports.ctl_addr, ctl); - eieio(); - in_be32((void *)(hwif->dma_base + 0x01c)); - eieio(); -} - -static void scc_ide_insw(unsigned long port, void *addr, u32 count) -{ - u16 *ptr = (u16 *)addr; - while (count--) { - *ptr++ = le16_to_cpu(in_be32((void*)port)); - } -} - -static void scc_ide_insl(unsigned long port, void *addr, u32 count) -{ - u16 *ptr = (u16 *)addr; - while (count--) { - *ptr++ = le16_to_cpu(in_be32((void*)port)); - *ptr++ = le16_to_cpu(in_be32((void*)port)); - } -} - -static void scc_ide_outb(u8 addr, unsigned long port) -{ - out_be32((void*)port, addr); -} - -static void -scc_ide_outsw(unsigned long port, void *addr, u32 count) -{ - u16 *ptr = (u16 *)addr; - while (count--) { - out_be32((void*)port, cpu_to_le16(*ptr++)); - } -} - -static void -scc_ide_outsl(unsigned long port, void *addr, u32 count) -{ - u16 *ptr = (u16 *)addr; - while (count--) { - out_be32((void*)port, cpu_to_le16(*ptr++)); - out_be32((void*)port, cpu_to_le16(*ptr++)); - } -} - -/** - * scc_set_pio_mode - set host controller for PIO mode - * @hwif: port - * @drive: drive - * - * Load the timing settings for this device mode into the - * controller. 
- */ - -static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) -{ - struct scc_ports *ports = ide_get_hwifdata(hwif); - unsigned long ctl_base = ports->ctl; - unsigned long cckctrl_port = ctl_base + 0xff0; - unsigned long piosht_port = ctl_base + 0x000; - unsigned long pioct_port = ctl_base + 0x004; - unsigned long reg; - int offset; - const u8 pio = drive->pio_mode - XFER_PIO_0; - - reg = in_be32((void __iomem *)cckctrl_port); - if (reg & CCKCTRL_ATACLKOEN) { - offset = 1; /* 133MHz */ - } else { - offset = 0; /* 100MHz */ - } - reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio]; - out_be32((void __iomem *)piosht_port, reg); - reg = JCHCTtbl[offset][pio]; - out_be32((void __iomem *)pioct_port, reg); -} - -/** - * scc_set_dma_mode - set host controller for DMA mode - * @hwif: port - * @drive: drive - * - * Load the timing settings for this device mode into the - * controller. - */ - -static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) -{ - struct scc_ports *ports = ide_get_hwifdata(hwif); - unsigned long ctl_base = ports->ctl; - unsigned long cckctrl_port = ctl_base + 0xff0; - unsigned long mdmact_port = ctl_base + 0x008; - unsigned long mcrcst_port = ctl_base + 0x00c; - unsigned long sdmact_port = ctl_base + 0x010; - unsigned long scrcst_port = ctl_base + 0x014; - unsigned long udenvt_port = ctl_base + 0x018; - unsigned long tdvhsel_port = ctl_base + 0x020; - int is_slave = drive->dn & 1; - int offset, idx; - unsigned long reg; - unsigned long jcactsel; - const u8 speed = drive->dma_mode; - - reg = in_be32((void __iomem *)cckctrl_port); - if (reg & CCKCTRL_ATACLKOEN) { - offset = 1; /* 133MHz */ - } else { - offset = 0; /* 100MHz */ - } - - idx = speed - XFER_UDMA_0; - - jcactsel = JCACTSELtbl[offset][idx]; - if (is_slave) { - out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]); - out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]); - jcactsel = jcactsel << 2; - out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel); - } else { - out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]); - out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]); - out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel); - } - reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]; - out_be32((void __iomem *)udenvt_port, reg); -} - -static void scc_dma_host_set(ide_drive_t *drive, int on) -{ - ide_hwif_t *hwif = drive->hwif; - u8 unit = drive->dn & 1; - u8 dma_stat = scc_dma_sff_read_status(hwif); - - if (on) - dma_stat |= (1 << (5 + unit)); - else - dma_stat &= ~(1 << (5 + unit)); - - scc_ide_outb(dma_stat, hwif->dma_base + 4); -} - -/** - * scc_dma_setup - begin a DMA phase - * @drive: target device - * @cmd: command - * - * Build an IDE DMA PRD (IDE speak for scatter gather table) - * and then set up the DMA transfer registers. - * - * Returns 0 on success. If a PIO fallback is required then 1 - * is returned. - */ - -static int scc_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - u32 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR; - u8 dma_stat; - - /* fall back to pio! 
*/ - if (ide_build_dmatable(drive, cmd) == 0) - return 1; - - /* PRD table */ - out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma); - - /* specify r/w */ - out_be32((void __iomem *)hwif->dma_base, rw); - - /* read DMA status for INTR & ERROR flags */ - dma_stat = scc_dma_sff_read_status(hwif); - - /* clear INTR & ERROR flags */ - out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6); - - return 0; -} - -static void scc_dma_start(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - u8 dma_cmd = scc_ide_inb(hwif->dma_base); - - /* start DMA */ - scc_ide_outb(dma_cmd | 1, hwif->dma_base); -} - -static int __scc_dma_end(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - u8 dma_stat, dma_cmd; - - /* get DMA command mode */ - dma_cmd = scc_ide_inb(hwif->dma_base); - /* stop DMA */ - scc_ide_outb(dma_cmd & ~1, hwif->dma_base); - /* get DMA status */ - dma_stat = scc_dma_sff_read_status(hwif); - /* clear the INTR & ERROR bits */ - scc_ide_outb(dma_stat | 6, hwif->dma_base + 4); - /* verify good DMA status */ - return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0; -} - -/** - * scc_dma_end - Stop DMA - * @drive: IDE drive - * - * Check and clear INT Status register. - * Then call __scc_dma_end(). - */ - -static int scc_dma_end(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - void __iomem *dma_base = (void __iomem *)hwif->dma_base; - unsigned long intsts_port = hwif->dma_base + 0x014; - u32 reg; - int dma_stat, data_loss = 0; - static int retry = 0; - - /* errata A308 workaround: Step5 (check data loss) */ - /* We don't check non ide_disk because it is limited to UDMA4 */ - if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr) - & ATA_ERR) && - drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) { - reg = in_be32((void __iomem *)intsts_port); - if (!(reg & INTSTS_ACTEINT)) { - printk(KERN_WARNING "%s: operation failed (transfer data loss)\n", - drive->name); - data_loss = 1; - if (retry++) { - struct request *rq = hwif->rq; - ide_drive_t *drive; - int i; - - /* ERROR_RESET and drive->crc_count are needed - * to reduce DMA transfer mode in retry process. 
- */ - if (rq) - rq->errors |= ERROR_RESET; - - ide_port_for_each_dev(i, drive, hwif) - drive->crc_count++; - } - } - } - - while (1) { - reg = in_be32((void __iomem *)intsts_port); - - if (reg & INTSTS_SERROR) { - printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME); - out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT); - - out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); - continue; - } - - if (reg & INTSTS_PRERR) { - u32 maea0, maec0; - unsigned long ctl_base = hwif->config_data; - - maea0 = in_be32((void __iomem *)(ctl_base + 0xF50)); - maec0 = in_be32((void __iomem *)(ctl_base + 0xF54)); - - printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0); - - out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT); - - out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); - continue; - } - - if (reg & INTSTS_RERR) { - printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME); - out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT); - - out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); - continue; - } - - if (reg & INTSTS_ICERR) { - out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); - - printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME); - out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT); - continue; - } - - if (reg & INTSTS_BMSINT) { - printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME); - out_be32((void __iomem *)intsts_port, INTSTS_BMSINT); - - ide_do_reset(drive); - continue; - } - - if (reg & INTSTS_BMHE) { - out_be32((void __iomem *)intsts_port, INTSTS_BMHE); - continue; - } - - if (reg & INTSTS_ACTEINT) { - out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT); - continue; - } - - if (reg & INTSTS_IOIRQS) { - out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS); - continue; - } - break; - } - - dma_stat = __scc_dma_end(drive); - if (data_loss) - dma_stat |= 2; /* emulate DMA error (to retry command) */ - return dma_stat; -} - -/* returns 1 if dma irq issued, 0 otherwise */ -static int scc_dma_test_irq(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014); - - /* SCC errata A252,A308 workaround: Step4 */ - if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr) - & ATA_ERR) && - (int_stat & INTSTS_INTRQ)) - return 1; - - /* SCC errata A308 workaround: Step5 (polling IOIRQS) */ - if (int_stat & INTSTS_IOIRQS) - return 1; - - return 0; -} - -static u8 scc_udma_filter(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - u8 mask = hwif->ultra_mask; - - /* errata A308 workaround: limit non ide_disk drive to UDMA4 */ - if ((drive->media != ide_disk) && (mask & 0xE0)) { - printk(KERN_INFO "%s: limit %s to UDMA4\n", - SCC_PATA_NAME, drive->name); - mask = ATA_UDMA4; - } - - return mask; -} - -/** - * setup_mmio_scc - map CTRL/BMID region - * @dev: PCI device we are configuring - * @name: device name - * - */ - -static int setup_mmio_scc (struct pci_dev *dev, const char *name) -{ - void __iomem *ctl_addr; - void __iomem *dma_addr; - int i, ret; - - for (i = 0; i < MAX_HWIFS; i++) { - if (scc_ports[i].ctl == 0) - break; - } - if (i >= MAX_HWIFS) - return -ENOMEM; - - ret = pci_request_selected_regions(dev, (1 << 2) - 1, name); - if (ret < 0) { - printk(KERN_ERR "%s: can't reserve resources\n", name); - return ret; - } - - ctl_addr = pci_ioremap_bar(dev, 0); - if (!ctl_addr) - goto fail_0; - - dma_addr = pci_ioremap_bar(dev, 1); - if (!dma_addr) - goto fail_1; - - pci_set_master(dev); - scc_ports[i].ctl = 
(unsigned long)ctl_addr; - scc_ports[i].dma = (unsigned long)dma_addr; - pci_set_drvdata(dev, (void *) &scc_ports[i]); - - return 1; - - fail_1: - iounmap(ctl_addr); - fail_0: - return -ENOMEM; -} - -static int scc_ide_setup_pci_device(struct pci_dev *dev, - const struct ide_port_info *d) -{ - struct scc_ports *ports = pci_get_drvdata(dev); - struct ide_host *host; - struct ide_hw hw, *hws[] = { &hw }; - int i, rc; - - memset(&hw, 0, sizeof(hw)); - for (i = 0; i <= 8; i++) - hw.io_ports_array[i] = ports->dma + 0x20 + i * 4; - hw.irq = dev->irq; - hw.dev = &dev->dev; - - rc = ide_host_add(d, hws, 1, &host); - if (rc) - return rc; - - ports->host = host; - - return 0; -} - -/** - * init_setup_scc - set up an SCC PATA Controller - * @dev: PCI device - * @d: IDE port info - * - * Perform the initial set up for this device. - */ - -static int init_setup_scc(struct pci_dev *dev, const struct ide_port_info *d) -{ - unsigned long ctl_base; - unsigned long dma_base; - unsigned long cckctrl_port; - unsigned long intmask_port; - unsigned long mode_port; - unsigned long ecmode_port; - u32 reg = 0; - struct scc_ports *ports; - int rc; - - rc = pci_enable_device(dev); - if (rc) - goto end; - - rc = setup_mmio_scc(dev, d->name); - if (rc < 0) - goto end; - - ports = pci_get_drvdata(dev); - ctl_base = ports->ctl; - dma_base = ports->dma; - cckctrl_port = ctl_base + 0xff0; - intmask_port = dma_base + 0x010; - mode_port = ctl_base + 0x024; - ecmode_port = ctl_base + 0xf00; - - /* controller initialization */ - reg = 0; - out_be32((void*)cckctrl_port, reg); - reg |= CCKCTRL_ATACLKOEN; - out_be32((void*)cckctrl_port, reg); - reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN; - out_be32((void*)cckctrl_port, reg); - reg |= CCKCTRL_CRST; - out_be32((void*)cckctrl_port, reg); - - for (;;) { - reg = in_be32((void*)cckctrl_port); - if (reg & CCKCTRL_CRST) - break; - udelay(5000); - } - - reg |= CCKCTRL_ATARESET; - out_be32((void*)cckctrl_port, reg); - - out_be32((void*)ecmode_port, ECMODE_VALUE); - out_be32((void*)mode_port, MODE_JCUSFEN); - out_be32((void*)intmask_port, INTMASK_MSK); - - rc = scc_ide_setup_pci_device(dev, d); - - end: - return rc; -} - -static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) -{ - struct ide_io_ports *io_ports = &drive->hwif->io_ports; - - if (valid & IDE_VALID_FEATURE) - scc_ide_outb(tf->feature, io_ports->feature_addr); - if (valid & IDE_VALID_NSECT) - scc_ide_outb(tf->nsect, io_ports->nsect_addr); - if (valid & IDE_VALID_LBAL) - scc_ide_outb(tf->lbal, io_ports->lbal_addr); - if (valid & IDE_VALID_LBAM) - scc_ide_outb(tf->lbam, io_ports->lbam_addr); - if (valid & IDE_VALID_LBAH) - scc_ide_outb(tf->lbah, io_ports->lbah_addr); - if (valid & IDE_VALID_DEVICE) - scc_ide_outb(tf->device, io_ports->device_addr); -} - -static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) -{ - struct ide_io_ports *io_ports = &drive->hwif->io_ports; - - if (valid & IDE_VALID_ERROR) - tf->error = scc_ide_inb(io_ports->feature_addr); - if (valid & IDE_VALID_NSECT) - tf->nsect = scc_ide_inb(io_ports->nsect_addr); - if (valid & IDE_VALID_LBAL) - tf->lbal = scc_ide_inb(io_ports->lbal_addr); - if (valid & IDE_VALID_LBAM) - tf->lbam = scc_ide_inb(io_ports->lbam_addr); - if (valid & IDE_VALID_LBAH) - tf->lbah = scc_ide_inb(io_ports->lbah_addr); - if (valid & IDE_VALID_DEVICE) - tf->device = scc_ide_inb(io_ports->device_addr); -} - -static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd, - void *buf, unsigned int len) -{ - unsigned long data_addr = 
drive->hwif->io_ports.data_addr; - - len++; - - if (drive->io_32bit) { - scc_ide_insl(data_addr, buf, len / 4); - - if ((len & 3) >= 2) - scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1); - } else - scc_ide_insw(data_addr, buf, len / 2); -} - -static void scc_output_data(ide_drive_t *drive, struct ide_cmd *cmd, - void *buf, unsigned int len) -{ - unsigned long data_addr = drive->hwif->io_ports.data_addr; - - len++; - - if (drive->io_32bit) { - scc_ide_outsl(data_addr, buf, len / 4); - - if ((len & 3) >= 2) - scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1); - } else - scc_ide_outsw(data_addr, buf, len / 2); -} - -/** - * init_mmio_iops_scc - set up the iops for MMIO - * @hwif: interface to set up - * - */ - -static void init_mmio_iops_scc(ide_hwif_t *hwif) -{ - struct pci_dev *dev = to_pci_dev(hwif->dev); - struct scc_ports *ports = pci_get_drvdata(dev); - unsigned long dma_base = ports->dma; - - ide_set_hwifdata(hwif, ports); - - hwif->dma_base = dma_base; - hwif->config_data = ports->ctl; -} - -/** - * init_iops_scc - set up iops - * @hwif: interface to set up - * - * Do the basic setup for the SCC hardware interface - * and then do the MMIO setup. - */ - -static void init_iops_scc(ide_hwif_t *hwif) -{ - struct pci_dev *dev = to_pci_dev(hwif->dev); - - hwif->hwif_data = NULL; - if (pci_get_drvdata(dev) == NULL) - return; - init_mmio_iops_scc(hwif); -} - -static int scc_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d) -{ - return ide_allocate_dma_engine(hwif); -} - -static u8 scc_cable_detect(ide_hwif_t *hwif) -{ - return ATA_CBL_PATA80; -} - -/** - * init_hwif_scc - set up hwif - * @hwif: interface to set up - * - * We do the basic set up of the interface structure. The SCC - * requires several custom handlers so we override the default - * ide DMA handlers appropriately. 
- */ - -static void init_hwif_scc(ide_hwif_t *hwif) -{ - /* PTERADD */ - out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); - - if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN) - hwif->ultra_mask = ATA_UDMA6; /* 133MHz */ - else - hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ -} - -static const struct ide_tp_ops scc_tp_ops = { - .exec_command = scc_exec_command, - .read_status = scc_read_status, - .read_altstatus = scc_read_altstatus, - .write_devctl = scc_write_devctl, - - .dev_select = ide_dev_select, - .tf_load = scc_tf_load, - .tf_read = scc_tf_read, - - .input_data = scc_input_data, - .output_data = scc_output_data, -}; - -static const struct ide_port_ops scc_port_ops = { - .set_pio_mode = scc_set_pio_mode, - .set_dma_mode = scc_set_dma_mode, - .udma_filter = scc_udma_filter, - .cable_detect = scc_cable_detect, -}; - -static const struct ide_dma_ops scc_dma_ops = { - .dma_host_set = scc_dma_host_set, - .dma_setup = scc_dma_setup, - .dma_start = scc_dma_start, - .dma_end = scc_dma_end, - .dma_test_irq = scc_dma_test_irq, - .dma_lost_irq = ide_dma_lost_irq, - .dma_timer_expiry = ide_dma_sff_timer_expiry, - .dma_sff_read_status = scc_dma_sff_read_status, -}; - -static const struct ide_port_info scc_chipset = { - .name = "sccIDE", - .init_iops = init_iops_scc, - .init_dma = scc_init_dma, - .init_hwif = init_hwif_scc, - .tp_ops = &scc_tp_ops, - .port_ops = &scc_port_ops, - .dma_ops = &scc_dma_ops, - .host_flags = IDE_HFLAG_SINGLE, - .irq_flags = IRQF_SHARED, - .pio_mask = ATA_PIO4, - .chipset = ide_pci, -}; - -/** - * scc_init_one - pci layer discovery entry - * @dev: PCI device - * @id: ident table entry - * - * Called by the PCI code when it finds an SCC PATA controller. - * We then use the IDE PCI generic helper to do most of the work. - */ - -static int scc_init_one(struct pci_dev *dev, const struct pci_device_id *id) -{ - return init_setup_scc(dev, &scc_chipset); -} - -/** - * scc_remove - pci layer remove entry - * @dev: PCI device - * - * Called by the PCI code when it removes an SCC PATA controller. 
- */ - -static void scc_remove(struct pci_dev *dev) -{ - struct scc_ports *ports = pci_get_drvdata(dev); - struct ide_host *host = ports->host; - - ide_host_remove(host); - - iounmap((void*)ports->dma); - iounmap((void*)ports->ctl); - pci_release_selected_regions(dev, (1 << 2) - 1); - memset(ports, 0, sizeof(*ports)); -} - -static const struct pci_device_id scc_pci_tbl[] = { - { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 }, - { 0, }, -}; -MODULE_DEVICE_TABLE(pci, scc_pci_tbl); - -static struct pci_driver scc_pci_driver = { - .name = "SCC IDE", - .id_table = scc_pci_tbl, - .probe = scc_init_one, - .remove = scc_remove, -}; - -static int __init scc_ide_init(void) -{ - return ide_pci_register_driver(&scc_pci_driver); -} - -static void __exit scc_ide_exit(void) -{ - pci_unregister_driver(&scc_pci_driver); -} - -module_init(scc_ide_init); -module_exit(scc_ide_exit); - -MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE"); -MODULE_LICENSE("GPL"); diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c index 7f55a6d..c6d5a3a 100644 --- a/drivers/iio/accel/mma9551_core.c +++ b/drivers/iio/accel/mma9551_core.c @@ -389,7 +389,12 @@ int mma9551_read_config_words(struct i2c_client *client, u8 app_id, { int ret, i; int len_words = len / sizeof(u16); - __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS]; + __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2]; + + if (len_words > ARRAY_SIZE(be_buf)) { + dev_err(&client->dev, "Invalid buffer size %d\n", len); + return -EINVAL; + } ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG, reg, NULL, 0, (u8 *) be_buf, len); @@ -424,7 +429,12 @@ int mma9551_read_status_words(struct i2c_client *client, u8 app_id, { int ret, i; int len_words = len / sizeof(u16); - __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS]; + __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2]; + + if (len_words > ARRAY_SIZE(be_buf)) { + dev_err(&client->dev, "Invalid buffer size %d\n", len); + return -EINVAL; + } ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS, reg, NULL, 0, (u8 *) be_buf, len); @@ -459,7 +469,12 @@ int mma9551_write_config_words(struct i2c_client *client, u8 app_id, { int i; int len_words = len / sizeof(u16); - __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS]; + __be16 be_buf[(MMA9551_MAX_MAILBOX_DATA_REGS - 1) / 2]; + + if (len_words > ARRAY_SIZE(be_buf)) { + dev_err(&client->dev, "Invalid buffer size %d\n", len); + return -EINVAL; + } for (i = 0; i < len_words; i++) be_buf[i] = cpu_to_be16(buf[i]); diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index 2df1af7..365a109 100644 --- a/drivers/iio/accel/mma9553.c +++ b/drivers/iio/accel/mma9553.c @@ -54,6 +54,7 @@ #define MMA9553_MASK_CONF_STEPCOALESCE GENMASK(7, 0) #define MMA9553_REG_CONF_ACTTHD 0x0E +#define MMA9553_MAX_ACTTHD GENMASK(15, 0) /* Pedometer status registers (R-only) */ #define MMA9553_REG_STATUS 0x00 @@ -316,22 +317,19 @@ static int mma9553_set_config(struct mma9553_data *data, u16 reg, static int mma9553_read_activity_stepcnt(struct mma9553_data *data, u8 *activity, u16 *stepcnt) { - u32 status_stepcnt; - u16 status; + u16 buf[2]; int ret; ret = mma9551_read_status_words(data->client, MMA9551_APPID_PEDOMETER, - MMA9553_REG_STATUS, sizeof(u32), - (u16 *) &status_stepcnt); + MMA9553_REG_STATUS, sizeof(u32), buf); if (ret < 0) { dev_err(&data->client->dev, "error reading status and stepcnt\n"); return ret; } - status = status_stepcnt & MMA9553_MASK_CONF_WORD; - *activity = mma9553_get_bits(status, MMA9553_MASK_STATUS_ACTIVITY); - *stepcnt = 
status_stepcnt >> 16; + *activity = mma9553_get_bits(buf[0], MMA9553_MASK_STATUS_ACTIVITY); + *stepcnt = buf[1]; return 0; } @@ -872,6 +870,9 @@ static int mma9553_write_event_value(struct iio_dev *indio_dev, case IIO_EV_INFO_PERIOD: switch (chan->type) { case IIO_ACTIVITY: + if (val < 0 || val > MMA9553_ACTIVITY_THD_TO_SEC( + MMA9553_MAX_ACTTHD)) + return -EINVAL; mutex_lock(&data->mutex); ret = mma9553_set_config(data, MMA9553_REG_CONF_ACTTHD, &data->conf.actthd, @@ -971,7 +972,8 @@ static const struct iio_chan_spec_ext_info mma9553_ext_info[] = { .modified = 1, \ .channel2 = _chan2, \ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \ - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBHEIGHT), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBHEIGHT) | \ + BIT(IIO_CHAN_INFO_ENABLE), \ .event_spec = mma9553_activity_events, \ .num_event_specs = ARRAY_SIZE(mma9553_activity_events), \ .ext_info = mma9553_ext_info, \ diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 58d1d13..211b132 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -546,6 +546,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &accel_info; + mutex_init(&adata->tb.buf_lock); st_sensors_power_enable(indio_dev); diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 08bcfb0..56008a8 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -53,39 +53,42 @@ static const struct iio_chan_spec const axp288_adc_channels[] = { .channel = 0, .address = AXP288_TS_ADC_H, .datasheet_name = "TS_PIN", + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }, { .indexed = 1, .type = IIO_TEMP, .channel = 1, .address = AXP288_PMIC_ADC_H, .datasheet_name = "PMIC_TEMP", + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }, { .indexed = 1, .type = IIO_TEMP, .channel = 2, .address = AXP288_GP_ADC_H, .datasheet_name = "GPADC", + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }, { .indexed = 1, .type = IIO_CURRENT, .channel = 3, .address = AXP20X_BATT_CHRG_I_H, .datasheet_name = "BATT_CHG_I", - .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }, { .indexed = 1, .type = IIO_CURRENT, .channel = 4, .address = AXP20X_BATT_DISCHRG_I_H, .datasheet_name = "BATT_DISCHRG_I", - .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }, { .indexed = 1, .type = IIO_VOLTAGE, .channel = 5, .address = AXP20X_BATT_V_H, .datasheet_name = "BATT_V", - .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }, }; @@ -151,9 +154,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, chan->address)) dev_err(&indio_dev->dev, "TS pin restore\n"); break; - case IIO_CHAN_INFO_PROCESSED: - ret = axp288_adc_read_channel(val, chan->address, info->regmap); - break; default: ret = -EINVAL; } diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c index 51e2a83..115f6e9 100644 --- a/drivers/iio/adc/cc10001_adc.c +++ b/drivers/iio/adc/cc10001_adc.c @@ -35,8 +35,9 @@ #define CC10001_ADC_EOC_SET BIT(0) #define CC10001_ADC_CHSEL_SAMPLED 0x0c -#define CC10001_ADC_POWER_UP 0x10 -#define CC10001_ADC_POWER_UP_SET BIT(0) +#define CC10001_ADC_POWER_DOWN 0x10 +#define CC10001_ADC_POWER_DOWN_SET BIT(0) + #define CC10001_ADC_DEBUG 0x14 #define CC10001_ADC_DATA_COUNT 0x20 @@ -62,7 +63,6 @@ struct cc10001_adc_device { u16 *buf; struct mutex lock; 
- unsigned long channel_map; unsigned int start_delay_ns; unsigned int eoc_delay_ns; }; @@ -79,6 +79,18 @@ static inline u32 cc10001_adc_read_reg(struct cc10001_adc_device *adc_dev, return readl(adc_dev->reg_base + reg); } +static void cc10001_adc_power_up(struct cc10001_adc_device *adc_dev) +{ + cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, 0); + ndelay(adc_dev->start_delay_ns); +} + +static void cc10001_adc_power_down(struct cc10001_adc_device *adc_dev) +{ + cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, + CC10001_ADC_POWER_DOWN_SET); +} + static void cc10001_adc_start(struct cc10001_adc_device *adc_dev, unsigned int channel) { @@ -88,6 +100,7 @@ static void cc10001_adc_start(struct cc10001_adc_device *adc_dev, val = (channel & CC10001_ADC_CH_MASK) | CC10001_ADC_MODE_SINGLE_CONV; cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val); + udelay(1); val = cc10001_adc_read_reg(adc_dev, CC10001_ADC_CONFIG); val = val | CC10001_ADC_START_CONV; cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val); @@ -129,6 +142,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p) struct iio_dev *indio_dev; unsigned int delay_ns; unsigned int channel; + unsigned int scan_idx; bool sample_invalid; u16 *data; int i; @@ -139,20 +153,17 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p) mutex_lock(&adc_dev->lock); - cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, - CC10001_ADC_POWER_UP_SET); - - /* Wait for 8 (6+2) clock cycles before activating START */ - ndelay(adc_dev->start_delay_ns); + cc10001_adc_power_up(adc_dev); /* Calculate delay step for eoc and sampled data */ delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT; i = 0; sample_invalid = false; - for_each_set_bit(channel, indio_dev->active_scan_mask, + for_each_set_bit(scan_idx, indio_dev->active_scan_mask, indio_dev->masklength) { + channel = indio_dev->channels[scan_idx].channel; cc10001_adc_start(adc_dev, channel); data[i] = cc10001_adc_poll_done(indio_dev, channel, delay_ns); @@ -166,7 +177,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p) } done: - cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0); + cc10001_adc_power_down(adc_dev); mutex_unlock(&adc_dev->lock); @@ -185,11 +196,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev, unsigned int delay_ns; u16 val; - cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, - CC10001_ADC_POWER_UP_SET); - - /* Wait for 8 (6+2) clock cycles before activating START */ - ndelay(adc_dev->start_delay_ns); + cc10001_adc_power_up(adc_dev); /* Calculate delay step for eoc and sampled data */ delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT; @@ -198,7 +205,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev, val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns); - cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0); + cc10001_adc_power_down(adc_dev); return val; } @@ -224,7 +231,7 @@ static int cc10001_adc_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SCALE: ret = regulator_get_voltage(adc_dev->reg); - if (ret) + if (ret < 0) return ret; *val = ret / 1000; @@ -255,22 +262,22 @@ static const struct iio_info cc10001_adc_info = { .update_scan_mode = &cc10001_update_scan_mode, }; -static int cc10001_adc_channel_init(struct iio_dev *indio_dev) +static int cc10001_adc_channel_init(struct iio_dev *indio_dev, + unsigned long channel_map) { - struct cc10001_adc_device *adc_dev = iio_priv(indio_dev); struct iio_chan_spec *chan_array, *timestamp; unsigned int bit, idx = 0; - 
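(Illustrative aside, not part of the patch: the channel_map handling below boils down to clearing the DT-reserved bits out of a full mask and then sizing the channel array from the surviving bits plus one slot for the soft timestamp. A minimal userspace sketch of that arithmetic, assuming 8 hardware channels and a made-up "adc-reserved-channels" value of 0x30:

#include <stdio.h>

int main(void)
{
	unsigned long channel_map = 0xffUL;	/* GENMASK(7, 0): all 8 channels, 8 assumed */
	unsigned long reserved = 0x30UL;	/* hypothetical adc-reserved-channels from DT */

	channel_map &= ~reserved;		/* 0xcf: channels 4 and 5 dropped */

	/* the driver uses bitmap_weight(); popcount is the userspace stand-in.
	 * The +1 mirrors the extra slot kept for the soft timestamp channel. */
	printf("data channels %d, num_channels %d\n",
	       __builtin_popcountl(channel_map),
	       __builtin_popcountl(channel_map) + 1);
	return 0;
}
)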
indio_dev->num_channels = bitmap_weight(&adc_dev->channel_map, - CC10001_ADC_NUM_CHANNELS); + indio_dev->num_channels = bitmap_weight(&channel_map, + CC10001_ADC_NUM_CHANNELS) + 1; - chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels + 1, + chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels, sizeof(struct iio_chan_spec), GFP_KERNEL); if (!chan_array) return -ENOMEM; - for_each_set_bit(bit, &adc_dev->channel_map, CC10001_ADC_NUM_CHANNELS) { + for_each_set_bit(bit, &channel_map, CC10001_ADC_NUM_CHANNELS) { struct iio_chan_spec *chan = &chan_array[idx]; chan->type = IIO_VOLTAGE; @@ -305,6 +312,7 @@ static int cc10001_adc_probe(struct platform_device *pdev) unsigned long adc_clk_rate; struct resource *res; struct iio_dev *indio_dev; + unsigned long channel_map; int ret; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev)); @@ -313,9 +321,9 @@ static int cc10001_adc_probe(struct platform_device *pdev) adc_dev = iio_priv(indio_dev); - adc_dev->channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0); + channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0); if (!of_property_read_u32(node, "adc-reserved-channels", &ret)) - adc_dev->channel_map &= ~ret; + channel_map &= ~ret; adc_dev->reg = devm_regulator_get(&pdev->dev, "vref"); if (IS_ERR(adc_dev->reg)) @@ -361,7 +369,7 @@ static int cc10001_adc_probe(struct platform_device *pdev) adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES; /* Setup the ADC channels available on the device */ - ret = cc10001_adc_channel_init(indio_dev); + ret = cc10001_adc_channel_init(indio_dev, channel_map); if (ret < 0) goto err_disable_clk; diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index efbfd12..8d9c9b9 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -60,12 +60,12 @@ struct mcp320x { struct spi_message msg; struct spi_transfer transfer[2]; - u8 tx_buf; - u8 rx_buf[2]; - struct regulator *reg; struct mutex lock; const struct mcp320x_chip_info *chip_info; + + u8 tx_buf ____cacheline_aligned; + u8 rx_buf[2]; }; static int mcp320x_channel_to_tx_data(int device_index, diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c index 3211729..0c4618b 100644 --- a/drivers/iio/adc/qcom-spmi-vadc.c +++ b/drivers/iio/adc/qcom-spmi-vadc.c @@ -18,6 +18,7 @@ #include <linux/iio/iio.h> #include <linux/interrupt.h> #include <linux/kernel.h> +#include <linux/math64.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -471,11 +472,11 @@ static s32 vadc_calibrate(struct vadc_priv *vadc, const struct vadc_channel_prop *prop, u16 adc_code) { const struct vadc_prescale_ratio *prescale; - s32 voltage; + s64 voltage; voltage = adc_code - vadc->graph[prop->calibration].gnd; voltage *= vadc->graph[prop->calibration].dx; - voltage = voltage / vadc->graph[prop->calibration].dy; + voltage = div64_s64(voltage, vadc->graph[prop->calibration].dy); if (prop->calibration == VADC_CALIB_ABSOLUTE) voltage += vadc->graph[prop->calibration].dx; @@ -487,7 +488,7 @@ static s32 vadc_calibrate(struct vadc_priv *vadc, voltage = voltage * prescale->den; - return voltage / prescale->num; + return div64_s64(voltage, prescale->num); } static int vadc_decimation_from_dt(u32 value) diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index a221f73..ce93bd8 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -856,6 +856,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev, 
switch (chan->address) { case XADC_REG_VCCINT: case XADC_REG_VCCAUX: + case XADC_REG_VREFP: case XADC_REG_VCCBRAM: case XADC_REG_VCCPINT: case XADC_REG_VCCPAUX: @@ -996,7 +997,7 @@ static const struct iio_event_spec xadc_voltage_events[] = { .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \ .scan_index = (_scan_index), \ .scan_type = { \ - .sign = 'u', \ + .sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \ .realbits = 12, \ .storagebits = 16, \ .shift = 4, \ @@ -1008,7 +1009,7 @@ static const struct iio_event_spec xadc_voltage_events[] = { static const struct iio_chan_spec xadc_channels[] = { XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP), XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true), - XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCINT, "vccaux", true), + XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true), XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true), XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true), XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true), diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h index c7487e8..54adc50 100644 --- a/drivers/iio/adc/xilinx-xadc.h +++ b/drivers/iio/adc/xilinx-xadc.h @@ -145,9 +145,9 @@ static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg, #define XADC_REG_MAX_VCCPINT 0x28 #define XADC_REG_MAX_VCCPAUX 0x29 #define XADC_REG_MAX_VCCO_DDR 0x2a -#define XADC_REG_MIN_VCCPINT 0x2b -#define XADC_REG_MIN_VCCPAUX 0x2c -#define XADC_REG_MIN_VCCO_DDR 0x2d +#define XADC_REG_MIN_VCCPINT 0x2c +#define XADC_REG_MIN_VCCPAUX 0x2d +#define XADC_REG_MIN_VCCO_DDR 0x2e #define XADC_REG_CONF0 0x40 #define XADC_REG_CONF1 0x41 diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index edd13d2..8dd0477 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -304,8 +304,6 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, struct st_sensors_platform_data *of_pdata; int err = 0; - mutex_init(&sdata->tb.buf_lock); - /* If OF/DT pdata exists, it will take precedence of anything else */ of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata); if (of_pdata) diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 21395f2..ffe9664 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -400,6 +400,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &gyro_info; + mutex_init(&gdata->tb.buf_lock); st_sensors_power_enable(indio_dev); diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c index 847ca56..55c267b 100644 --- a/drivers/iio/kfifo_buf.c +++ b/drivers/iio/kfifo_buf.c @@ -38,7 +38,8 @@ static int iio_request_update_kfifo(struct iio_buffer *r) kfifo_free(&buf->kf); ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum, buf->buffer.length); - buf->update_needed = false; + if (ret >= 0) + buf->update_needed = false; } else { kfifo_reset_out(&buf->kf); } diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 91ecc46..ef60bae 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -43,8 +43,6 @@ struct prox_state { static const struct iio_chan_spec prox_channels[] = { { .type = IIO_PROXIMITY, - .modified = 1, - .channel2 = IIO_NO_MOD, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE) | 
@@ -253,7 +251,6 @@ static int hid_prox_probe(struct platform_device *pdev) struct iio_dev *indio_dev; struct prox_state *prox_state; struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; - struct iio_chan_spec *channels; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct prox_state)); @@ -272,20 +269,21 @@ static int hid_prox_probe(struct platform_device *pdev) return ret; } - channels = kmemdup(prox_channels, sizeof(prox_channels), GFP_KERNEL); - if (!channels) { + indio_dev->channels = kmemdup(prox_channels, sizeof(prox_channels), + GFP_KERNEL); + if (!indio_dev->channels) { dev_err(&pdev->dev, "failed to duplicate channels\n"); return -ENOMEM; } - ret = prox_parse_report(pdev, hsdev, channels, + ret = prox_parse_report(pdev, hsdev, + (struct iio_chan_spec *)indio_dev->channels, HID_USAGE_SENSOR_PROX, prox_state); if (ret) { dev_err(&pdev->dev, "failed to setup attributes\n"); goto error_free_dev_mem; } - indio_dev->channels = channels; indio_dev->num_channels = ARRAY_SIZE(prox_channels); indio_dev->dev.parent = &pdev->dev; diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 8ade473..2e56f81 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -369,6 +369,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &magn_info; + mutex_init(&mdata->tb.buf_lock); st_sensors_power_enable(indio_dev); diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c index 7c623e2..a2602d8 100644 --- a/drivers/iio/pressure/bmp280.c +++ b/drivers/iio/pressure/bmp280.c @@ -172,6 +172,7 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data, var2 = (((((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1]))) * ((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1])))) >> 12) * ((s32)(s16)le16_to_cpu(buf[T3]))) >> 14; + data->t_fine = var1 + var2; return (data->t_fine * 5 + 128) >> 8; } diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c index 7bb8d4c..3cf0bd6 100644 --- a/drivers/iio/pressure/hid-sensor-press.c +++ b/drivers/iio/pressure/hid-sensor-press.c @@ -47,8 +47,6 @@ struct press_state { static const struct iio_chan_spec press_channels[] = { { .type = IIO_PRESSURE, - .modified = 1, - .channel2 = IIO_NO_MOD, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE) | diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 97baf40d..e881fa6 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -417,6 +417,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &press_info; + mutex_init(&press_data->tb.buf_lock); st_sensors_power_enable(indio_dev); diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index f80da50..38339d2 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac, } sgid_addr, dgid_addr; - ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid); - if (ret) - return ret; - - ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid); - if (ret) - return ret; + rdma_gid2ip(&sgid_addr._sockaddr, sgid); + rdma_gid2ip(&dgid_addr._sockaddr, dgid); memset(&dev_addr, 0, sizeof(dev_addr)); @@ -512,10 +507,8 @@ int 
rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id) struct sockaddr_in6 _sockaddr_in6; } gid_addr; - ret = rdma_gid2ip(&gid_addr._sockaddr, sgid); + rdma_gid2ip(&gid_addr._sockaddr, sgid); - if (ret) - return ret; memset(&dev_addr, 0, sizeof(dev_addr)); ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id); if (ret) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index e28a494..0c14191 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) return cm_id_priv; } -static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask) +static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask) { int i; - for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++) - ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] & - ((unsigned long *) mask)[i]; + for (i = 0; i < IB_CM_COMPARE_SIZE; i++) + dst[i] = src[i] & mask[i]; } static int cm_compare_data(struct ib_cm_compare_data *src_data, struct ib_cm_compare_data *dst_data) { - u8 src[IB_CM_COMPARE_SIZE]; - u8 dst[IB_CM_COMPARE_SIZE]; + u32 src[IB_CM_COMPARE_SIZE]; + u32 dst[IB_CM_COMPARE_SIZE]; if (!src_data || !dst_data) return 0; cm_mask_copy(src, src_data->data, dst_data->mask); cm_mask_copy(dst, dst_data->data, src_data->mask); - return memcmp(src, dst, IB_CM_COMPARE_SIZE); + return memcmp(src, dst, sizeof(src)); } -static int cm_compare_private_data(u8 *private_data, +static int cm_compare_private_data(u32 *private_data, struct ib_cm_compare_data *dst_data) { - u8 src[IB_CM_COMPARE_SIZE]; + u32 src[IB_CM_COMPARE_SIZE]; if (!dst_data) return 0; cm_mask_copy(src, private_data, dst_data->mask); - return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE); + return memcmp(src, dst_data->data, sizeof(src)); } /* @@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) static struct cm_id_private * cm_find_listen(struct ib_device *device, __be64 service_id, - u8 *private_data) + u32 *private_data) { struct rb_node *node = cm.listen_service_table.rb_node; struct cm_id_private *cm_id_priv; @@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, cm_mask_copy(cm_id_priv->compare_data->data, compare_data->data, compare_data->mask); memcpy(cm_id_priv->compare_data->mask, compare_data->mask, - IB_CM_COMPARE_SIZE); + sizeof(compare_data->mask)); } cm_id->state = IB_CM_LISTEN; diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h index be068f4..8b76f0e 100644 --- a/drivers/infiniband/core/cm_msgs.h +++ b/drivers/infiniband/core/cm_msgs.h @@ -103,7 +103,7 @@ struct cm_req_msg { /* local ACK timeout:5, rsvd:3 */ u8 alt_offset139; - u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE]; + u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)]; } __attribute__ ((packed)); @@ -801,7 +801,7 @@ struct cm_sidr_req_msg { __be16 rsvd; __be64 service_id; - u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE]; + u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)]; } __attribute__ ((packed)); struct cm_sidr_rep_msg { diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d570030..06441a4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id memcpy(&ib->sib_addr, &path->dgid, 16); } +static __be16 ss_get_port(const struct 
sockaddr_storage *ss) +{ + if (ss->ss_family == AF_INET) + return ((struct sockaddr_in *)ss)->sin_port; + else if (ss->ss_family == AF_INET6) + return ((struct sockaddr_in6 *)ss)->sin6_port; + BUG(); +} + static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, struct cma_hdr *hdr) { - struct sockaddr_in *listen4, *ip4; + struct sockaddr_in *ip4; - listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr; ip4 = (struct sockaddr_in *) &id->route.addr.src_addr; - ip4->sin_family = listen4->sin_family; + ip4->sin_family = AF_INET; ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; - ip4->sin_port = listen4->sin_port; + ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr); ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr; - ip4->sin_family = listen4->sin_family; + ip4->sin_family = AF_INET; ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; ip4->sin_port = hdr->port; } @@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, struct cma_hdr *hdr) { - struct sockaddr_in6 *listen6, *ip6; + struct sockaddr_in6 *ip6; - listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr; ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr; - ip6->sin6_family = listen6->sin6_family; + ip6->sin6_family = AF_INET6; ip6->sin6_addr = hdr->dst_addr.ip6; - ip6->sin6_port = listen6->sin6_port; + ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr); ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr; - ip6->sin6_family = listen6->sin6_family; + ip6->sin6_family = AF_INET6; ip6->sin6_addr = hdr->src_addr.ip6; ip6->sin6_port = hdr->port; } diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index b85ddbc..e6ffa2e 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c @@ -33,7 +33,7 @@ #include "iwpm_util.h" -static const char iwpm_ulib_name[] = "iWarpPortMapperUser"; +static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser"; static int iwpm_ulib_version = 3; static int iwpm_user_pid = IWPM_PID_UNDEFINED; static atomic_t echo_nlmsg_seq; @@ -468,7 +468,8 @@ add_mapping_response_exit: } EXPORT_SYMBOL(iwpm_add_mapping_cb); -/* netlink attribute policy for the response to add and query mapping request */ +/* netlink attribute policy for the response to add and query mapping request + * and response with remote address info */ static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = { [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 }, [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) }, @@ -559,6 +560,76 @@ query_mapping_response_exit: } EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb); +/* + * iwpm_remote_info_cb - Process a port mapper message, containing + * the remote connecting peer address info + */ +int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX]; + struct sockaddr_storage *local_sockaddr, *remote_sockaddr; + struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr; + struct iwpm_remote_info *rem_info; + const char *msg_type; + u8 nl_client; + int ret = -EINVAL; + + msg_type = "Remote Mapping info"; + if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX, + resp_query_policy, nltb, msg_type)) + return ret; + + nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type); + if (!iwpm_valid_client(nl_client)) { + pr_info("%s: 
Invalid port mapper client = %d\n", + __func__, nl_client); + return ret; + } + atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); + + local_sockaddr = (struct sockaddr_storage *) + nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]); + remote_sockaddr = (struct sockaddr_storage *) + nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]); + mapped_loc_sockaddr = (struct sockaddr_storage *) + nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]); + mapped_rem_sockaddr = (struct sockaddr_storage *) + nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]); + + if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family || + mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) { + pr_info("%s: Sockaddr family doesn't match the requested one\n", + __func__); + return ret; + } + rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC); + if (!rem_info) { + pr_err("%s: Unable to allocate a remote info\n", __func__); + ret = -ENOMEM; + return ret; + } + memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr, + sizeof(struct sockaddr_storage)); + memcpy(&rem_info->remote_sockaddr, remote_sockaddr, + sizeof(struct sockaddr_storage)); + memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr, + sizeof(struct sockaddr_storage)); + rem_info->nl_client = nl_client; + + iwpm_add_remote_info(rem_info); + + iwpm_print_sockaddr(local_sockaddr, + "remote_info: Local sockaddr:"); + iwpm_print_sockaddr(mapped_loc_sockaddr, + "remote_info: Mapped local sockaddr:"); + iwpm_print_sockaddr(remote_sockaddr, + "remote_info: Remote sockaddr:"); + iwpm_print_sockaddr(mapped_rem_sockaddr, + "remote_info: Mapped remote sockaddr:"); + return ret; +} +EXPORT_SYMBOL(iwpm_remote_info_cb); + /* netlink attribute policy for the received request for mapping info */ static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = { [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING, diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 69e9f84..a626795 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -33,8 +33,10 @@ #include "iwpm_util.h" -#define IWPM_HASH_BUCKET_SIZE 512 -#define IWPM_HASH_BUCKET_MASK (IWPM_HASH_BUCKET_SIZE - 1) +#define IWPM_MAPINFO_HASH_SIZE 512 +#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1) +#define IWPM_REMINFO_HASH_SIZE 64 +#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1) static LIST_HEAD(iwpm_nlmsg_req_list); static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); @@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); static struct hlist_head *iwpm_hash_bucket; static DEFINE_SPINLOCK(iwpm_mapinfo_lock); +static struct hlist_head *iwpm_reminfo_bucket; +static DEFINE_SPINLOCK(iwpm_reminfo_lock); + static DEFINE_MUTEX(iwpm_admin_lock); static struct iwpm_admin_data iwpm_admin; int iwpm_init(u8 nl_client) { + int ret = 0; if (iwpm_valid_client(nl_client)) return -EINVAL; mutex_lock(&iwpm_admin_lock); if (atomic_read(&iwpm_admin.refcount) == 0) { - iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE * + iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE * sizeof(struct hlist_head), GFP_KERNEL); if (!iwpm_hash_bucket) { - mutex_unlock(&iwpm_admin_lock); + ret = -ENOMEM; pr_err("%s Unable to create mapinfo hash table\n", __func__); - return -ENOMEM; + goto init_exit; + } + iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE * + sizeof(struct hlist_head), GFP_KERNEL); + if (!iwpm_reminfo_bucket) { + kfree(iwpm_hash_bucket); + ret = -ENOMEM; + pr_err("%s Unable to create reminfo hash table\n", __func__); 
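(Illustrative aside, not part of the patch: both tables allocated here are sized to powers of two, which is why the masks defined above (SIZE - 1) can pick a bucket without a modulo. A stand-alone sketch with a made-up hash value:

#include <stdio.h>
#include <stdint.h>

#define REMINFO_HASH_SIZE 64			/* mirrors IWPM_REMINFO_HASH_SIZE */
#define REMINFO_HASH_MASK (REMINFO_HASH_SIZE - 1)

int main(void)
{
	uint32_t hash = 0x9e3779b9;		/* pretend jhash/jhash_2words output */

	/* masking works only because the table size is a power of two */
	printf("bucket %u of %u\n", hash & REMINFO_HASH_MASK, REMINFO_HASH_SIZE);
	return 0;
}
)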
+ goto init_exit; } } atomic_inc(&iwpm_admin.refcount); +init_exit: mutex_unlock(&iwpm_admin_lock); - iwpm_set_valid(nl_client, 1); - return 0; + if (!ret) { + iwpm_set_valid(nl_client, 1); + pr_debug("%s: Mapinfo and reminfo tables are created\n", + __func__); + } + return ret; } EXPORT_SYMBOL(iwpm_init); static void free_hash_bucket(void); +static void free_reminfo_bucket(void); int iwpm_exit(u8 nl_client) { @@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client) } if (atomic_dec_and_test(&iwpm_admin.refcount)) { free_hash_bucket(); - pr_debug("%s: Mapinfo hash table is destroyed\n", __func__); + free_reminfo_bucket(); + pr_debug("%s: Resources are destroyed\n", __func__); } mutex_unlock(&iwpm_admin_lock); iwpm_set_valid(nl_client, 0); @@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client) } EXPORT_SYMBOL(iwpm_exit); -static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *, +static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *, struct sockaddr_storage *); int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, @@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, struct hlist_head *hash_bucket_head; struct iwpm_mapping_info *map_info; unsigned long flags; + int ret = -EINVAL; if (!iwpm_valid_client(nl_client)) - return -EINVAL; + return ret; map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL); if (!map_info) { pr_err("%s: Unable to allocate a mapping info\n", __func__); @@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, spin_lock_irqsave(&iwpm_mapinfo_lock, flags); if (iwpm_hash_bucket) { - hash_bucket_head = get_hash_bucket_head( + hash_bucket_head = get_mapinfo_hash_bucket( &map_info->local_sockaddr, &map_info->mapped_sockaddr); - hlist_add_head(&map_info->hlist_node, hash_bucket_head); + if (hash_bucket_head) { + hlist_add_head(&map_info->hlist_node, hash_bucket_head); + ret = 0; + } } spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); - return 0; + return ret; } EXPORT_SYMBOL(iwpm_create_mapinfo); @@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr, spin_lock_irqsave(&iwpm_mapinfo_lock, flags); if (iwpm_hash_bucket) { - hash_bucket_head = get_hash_bucket_head( + hash_bucket_head = get_mapinfo_hash_bucket( local_sockaddr, mapped_local_addr); + if (!hash_bucket_head) + goto remove_mapinfo_exit; + hlist_for_each_entry_safe(map_info, tmp_hlist_node, hash_bucket_head, hlist_node) { @@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr, } } } +remove_mapinfo_exit: spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); return ret; } @@ -166,7 +195,7 @@ static void free_hash_bucket(void) /* remove all the mapinfo data from the list */ spin_lock_irqsave(&iwpm_mapinfo_lock, flags); - for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { + for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { hlist_for_each_entry_safe(map_info, tmp_hlist_node, &iwpm_hash_bucket[i], hlist_node) { @@ -180,6 +209,96 @@ static void free_hash_bucket(void) spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); } +static void free_reminfo_bucket(void) +{ + struct hlist_node *tmp_hlist_node; + struct iwpm_remote_info *rem_info; + unsigned long flags; + int i; + + /* remove all the remote info from the list */ + spin_lock_irqsave(&iwpm_reminfo_lock, flags); + for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) { + hlist_for_each_entry_safe(rem_info, tmp_hlist_node, + &iwpm_reminfo_bucket[i], hlist_node) { + + hlist_del_init(&rem_info->hlist_node); + kfree(rem_info); + } + } + 
/* free the hash list */ + kfree(iwpm_reminfo_bucket); + iwpm_reminfo_bucket = NULL; + spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); +} + +static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *, + struct sockaddr_storage *); + +void iwpm_add_remote_info(struct iwpm_remote_info *rem_info) +{ + struct hlist_head *hash_bucket_head; + unsigned long flags; + + spin_lock_irqsave(&iwpm_reminfo_lock, flags); + if (iwpm_reminfo_bucket) { + hash_bucket_head = get_reminfo_hash_bucket( + &rem_info->mapped_loc_sockaddr, + &rem_info->mapped_rem_sockaddr); + if (hash_bucket_head) + hlist_add_head(&rem_info->hlist_node, hash_bucket_head); + } + spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); +} + +int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr, + struct sockaddr_storage *mapped_rem_addr, + struct sockaddr_storage *remote_addr, + u8 nl_client) +{ + struct hlist_node *tmp_hlist_node; + struct hlist_head *hash_bucket_head; + struct iwpm_remote_info *rem_info = NULL; + unsigned long flags; + int ret = -EINVAL; + + if (!iwpm_valid_client(nl_client)) { + pr_info("%s: Invalid client = %d\n", __func__, nl_client); + return ret; + } + spin_lock_irqsave(&iwpm_reminfo_lock, flags); + if (iwpm_reminfo_bucket) { + hash_bucket_head = get_reminfo_hash_bucket( + mapped_loc_addr, + mapped_rem_addr); + if (!hash_bucket_head) + goto get_remote_info_exit; + hlist_for_each_entry_safe(rem_info, tmp_hlist_node, + hash_bucket_head, hlist_node) { + + if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr, + mapped_loc_addr) && + !iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr, + mapped_rem_addr)) { + + memcpy(remote_addr, &rem_info->remote_sockaddr, + sizeof(struct sockaddr_storage)); + iwpm_print_sockaddr(remote_addr, + "get_remote_info: Remote sockaddr:"); + + hlist_del_init(&rem_info->hlist_node); + kfree(rem_info); + ret = 0; + break; + } + } + } +get_remote_info_exit: + spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); + return ret; +} +EXPORT_SYMBOL(iwpm_get_remote_info); + struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq, u8 nl_client, gfp_t gfp) { @@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr) return hash; } -static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage - *local_sockaddr, - struct sockaddr_storage - *mapped_sockaddr) +static int get_hash_bucket(struct sockaddr_storage *a_sockaddr, + struct sockaddr_storage *b_sockaddr, u32 *hash) { - u32 local_hash, mapped_hash, hash; + u32 a_hash, b_hash; - if (local_sockaddr->ss_family == AF_INET) { - local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr); - mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr); + if (a_sockaddr->ss_family == AF_INET) { + a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr); + b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr); - } else if (local_sockaddr->ss_family == AF_INET6) { - local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr); - mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr); + } else if (a_sockaddr->ss_family == AF_INET6) { + a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr); + b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr); } else { pr_err("%s: Invalid sockaddr family\n", __func__); - return NULL; + return -EINVAL; } - if (local_hash == mapped_hash) /* if port mapper isn't available */ - hash = local_hash; + if (a_hash == b_hash) /* if port mapper isn't available */ + *hash = a_hash; else - hash = 
jhash_2words(local_hash, mapped_hash, 0); + *hash = jhash_2words(a_hash, b_hash, 0); + return 0; +} + +static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage + *local_sockaddr, struct sockaddr_storage + *mapped_sockaddr) +{ + u32 hash; + int ret; - return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK]; + ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash); + if (ret) + return NULL; + return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK]; +} + +static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage + *mapped_loc_sockaddr, struct sockaddr_storage + *mapped_rem_sockaddr) +{ + u32 hash; + int ret; + + ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash); + if (ret) + return NULL; + return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK]; } static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) @@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) } skb_num++; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); - for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { + for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], hlist_node) { if (map_info->nl_client != nl_client) @@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void) spin_lock_irqsave(&iwpm_mapinfo_lock, flags); if (iwpm_hash_bucket) { - for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { + for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { if (!hlist_empty(&iwpm_hash_bucket[i])) { full_bucket = 1; break; diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h index 9777c86..ee2d9ff 100644 --- a/drivers/infiniband/core/iwpm_util.h +++ b/drivers/infiniband/core/iwpm_util.h @@ -76,6 +76,14 @@ struct iwpm_mapping_info { u8 nl_client; }; +struct iwpm_remote_info { + struct hlist_node hlist_node; + struct sockaddr_storage remote_sockaddr; + struct sockaddr_storage mapped_loc_sockaddr; + struct sockaddr_storage mapped_rem_sockaddr; + u8 nl_client; +}; + struct iwpm_admin_data { atomic_t refcount; atomic_t nlmsg_seq; @@ -128,6 +136,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request); int iwpm_get_nlmsg_seq(void); /** + * iwpm_add_reminfo - Add remote address info of the connecting peer + * to the remote info hash table + * @reminfo: The remote info to be added + */ +void iwpm_add_remote_info(struct iwpm_remote_info *reminfo); + +/** * iwpm_valid_client - Check if the port mapper client is valid * @nl_client: The index of the netlink client * diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 8b8cc6f..40becdb 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page( int remove_existing_mapping = 0; int ret = 0; - mutex_lock(&umem->odp_data->umem_mutex); /* * Note: we avoid writing if seq is different from the initial seq, to * handle case of a racing notifier. 
This check also allows us to bail @@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page( } out: - mutex_unlock(&umem->odp_data->umem_mutex); - /* On Demand Paging - avoid pinning the page */ if (umem->context->invalidate_range || !stored_page) put_page(page); @@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); user_virt += npages << PAGE_SHIFT; + mutex_lock(&umem->odp_data->umem_mutex); for (j = 0; j < npages; ++j) { ret = ib_umem_odp_map_dma_single_page( umem, k, base_virt_addr, local_page_list[j], @@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, break; k++; } + mutex_unlock(&umem->odp_data->umem_mutex); if (ret < 0) { /* Release left over pages when handling errors. */ @@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt, * faults from completion. We might be racing with other * invalidations, so we must make sure we free each page only * once. */ + mutex_lock(&umem->odp_data->umem_mutex); for (addr = virt; addr < bound; addr += (u64)umem->page_size) { idx = (addr - ib_umem_start(umem)) / PAGE_SIZE; - mutex_lock(&umem->odp_data->umem_mutex); if (umem->odp_data->page_list[idx]) { struct page *page = umem->odp_data->page_list[idx]; - struct page *head_page = compound_head(page); dma_addr_t dma = umem->odp_data->dma_list[idx]; dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK; @@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt, ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma & ODP_WRITE_ALLOWED_BIT) + if (dma & ODP_WRITE_ALLOWED_BIT) { + struct page *head_page = compound_head(page); /* * set_page_dirty prefers being called with * the page lock. However, MMU notifiers are @@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt, * be removed. 
*/ set_page_dirty(head_page); + } /* on demand pinning support */ if (!umem->context->invalidate_range) put_page(page); umem->odp_data->page_list[idx] = NULL; umem->odp_data->dma_list[idx] = 0; } - mutex_unlock(&umem->odp_data->umem_mutex); } + mutex_unlock(&umem->odp_data->umem_mutex); } EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 57176dd..3ad8dc7 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep, sizeof(ep->com.mapped_remote_addr)); } +static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep) +{ + int ret; + + print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep "); + print_addr(&child_ep->com, __func__, "get_remote_addr child_ep "); + + ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr, + &child_ep->com.mapped_remote_addr, + &child_ep->com.remote_addr, RDMA_NL_C4IW); + if (ret) + PDBG("Unable to find remote peer addr info - err %d\n", ret); + + return ret; +} + static void best_mtu(const unsigned short *mtus, unsigned short mtu, unsigned int *idx, int use_ts, int ipv6) { @@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep) if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { opt2 |= T5_OPT_2_VALID_F; opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); - opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ + opt2 |= T5_ISS_F; } t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); @@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) status, status2errno(status)); if (is_neg_adv(status)) { - dev_warn(&dev->rdev.lldi.pdev->dev, - "Connection problems for atid %u status %u (%s)\n", - atid, status, neg_adv_str(status)); + PDBG("%s Connection problems for atid %u status %u (%s)\n", + __func__, atid, status, neg_adv_str(status)); + ep->stats.connect_neg_adv++; + mutex_lock(&dev->rdev.stats.lock); + dev->rdev.stats.neg_adv++; + mutex_unlock(&dev->rdev.stats.lock); return 0; } @@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, u32 isn = (prandom_u32() & ~7UL) - 1; opt2 |= T5_OPT_2_VALID_F; opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); - opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ + opt2 |= T5_ISS_F; rpl5 = (void *)rpl; memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); if (peer2peer) @@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) state_set(&child_ep->com, CONNECTING); child_ep->com.dev = dev; child_ep->com.cm_id = NULL; + + /* + * The mapped_local and mapped_remote addresses get setup with + * the actual 4-tuple. The local address will be based on the + * actual local address of the connection, but on the port number + * of the parent listening endpoint. The remote address is + * setup based on a query to the IWPM since we don't know what it + * originally was before mapping. If no mapping was done, then + * mapped_remote == remote, and mapped_local == local. 
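+ *
+ * Purely illustrative example (addresses and ports made up): if the
+ * parent listener bound 10.0.0.1:8000 and the port mapper exposed it
+ * on the wire as 10.0.0.1:31000, a peer arriving from 10.0.0.2:45000
+ * would leave this child endpoint with
+ *   mapped_local_addr  = 10.0.0.1:31000  (actual wire 4-tuple)
+ *   local_addr         = 10.0.0.1:8000   (wire IP, listener's port)
+ *   mapped_remote_addr = 10.0.0.2:45000  (actual wire 4-tuple)
+ *   remote_addr        = the peer's pre-mapping address returned by the
+ *                        IWPM query, or 10.0.0.2:45000 again if no
+ *                        mapping information exists.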
+ */ if (iptype == 4) { struct sockaddr_in *sin = (struct sockaddr_in *) - &child_ep->com.local_addr; + &child_ep->com.mapped_local_addr; + sin->sin_family = PF_INET; sin->sin_port = local_port; sin->sin_addr.s_addr = *(__be32 *)local_ip; - sin = (struct sockaddr_in *)&child_ep->com.remote_addr; + + sin = (struct sockaddr_in *)&child_ep->com.local_addr; + sin->sin_family = PF_INET; + sin->sin_port = ((struct sockaddr_in *) + &parent_ep->com.local_addr)->sin_port; + sin->sin_addr.s_addr = *(__be32 *)local_ip; + + sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr; sin->sin_family = PF_INET; sin->sin_port = peer_port; sin->sin_addr.s_addr = *(__be32 *)peer_ip; } else { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) - &child_ep->com.local_addr; + &child_ep->com.mapped_local_addr; + sin6->sin6_family = PF_INET6; sin6->sin6_port = local_port; memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); - sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; + + sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; + sin6->sin6_family = PF_INET6; + sin6->sin6_port = ((struct sockaddr_in6 *) + &parent_ep->com.local_addr)->sin6_port; + memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); + + sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr; sin6->sin6_family = PF_INET6; sin6->sin6_port = peer_port; memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); } + memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr, + sizeof(child_ep->com.remote_addr)); + get_remote_addr(parent_ep, child_ep); + c4iw_get_ep(&parent_ep->com); child_ep->parent_ep = parent_ep; child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); @@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) ep = lookup_tid(t, tid); if (is_neg_adv(req->status)) { - dev_warn(&dev->rdev.lldi.pdev->dev, - "Negative advice on abort - tid %u status %d (%s)\n", - ep->hwtid, req->status, neg_adv_str(req->status)); + PDBG("%s Negative advice on abort- tid %u status %d (%s)\n", + __func__, ep->hwtid, req->status, + neg_adv_str(req->status)); + ep->stats.abort_neg_adv++; + mutex_lock(&dev->rdev.stats.lock); + dev->rdev.stats.neg_adv++; + mutex_unlock(&dev->rdev.stats.lock); return 0; } PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, @@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, * TP will ignore any value > 0 for MSS index. 
*/ req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF)); - req->cookie = (unsigned long)skb; + req->cookie = (uintptr_t)skb; set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); @@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) return 0; } if (is_neg_adv(req->status)) { - dev_warn(&dev->rdev.lldi.pdev->dev, - "Negative advice on abort - tid %u status %d (%s)\n", - ep->hwtid, req->status, neg_adv_str(req->status)); + PDBG("%s Negative advice on abort- tid %u status %d (%s)\n", + __func__, ep->hwtid, req->status, + neg_adv_str(req->status)); + ep->stats.abort_neg_adv++; + dev->rdev.stats.neg_adv++; kfree_skb(skb); return 0; } diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index ab7692a..68ddb37 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, FW_RI_RES_WR_NRES_V(1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); - res_wr->cookie = (unsigned long) &wr_wait; + res_wr->cookie = (uintptr_t)&wr_wait; res = res_wr->res; res->u.cq.restype = FW_RI_RES_TYPE_CQ; res->u.cq.op = FW_RI_RES_OP_RESET; @@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, FW_RI_RES_WR_NRES_V(1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); - res_wr->cookie = (unsigned long) &wr_wait; + res_wr->cookie = (uintptr_t)&wr_wait; res = res_wr->res; res->u.cq.restype = FW_RI_RES_TYPE_CQ; res->u.cq.op = FW_RI_RES_OP_WRITE; @@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, goto err4; cq->gen = 1; - cq->gts = rdev->lldi.gts_reg; cq->rdev = rdev; if (user) { - cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) + - (cq->cqid << rdev->cqshift); - cq->ugts &= PAGE_MASK; + u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK; + + cq->ugts = (u64)rdev->bar2_pa + off; + } else if (is_t4(rdev->lldi.adapter_type)) { + cq->gts = rdev->lldi.gts_reg; + cq->qid_mask = -1U; + } else { + u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12; + + cq->gts = rdev->bar2_kva + off; + cq->qid_mask = rdev->qpmask; } return 0; err4: @@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, } PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n", __func__, chp->cq.cqid, chp, chp->cq.size, - chp->cq.memsize, - (unsigned long long) chp->cq.dma_addr); + chp->cq.memsize, (unsigned long long) chp->cq.dma_addr); return &chp->ibcq; err5: kfree(mm2); diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 8fb295e..7e895d7 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = { [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, + [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb}, [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} }; @@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v) int prev_ts_set = 0; int idx, end; -#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000) +#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000) idx = 
atomic_read(&dev->rdev.wr_log_idx) & (dev->rdev.wr_log_size - 1); @@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v) dev->rdev.stats.act_ofld_conn_fails); seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", dev->rdev.stats.pas_ofld_conn_fails); + seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv); seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird); return 0; } @@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data) cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p qp %p state %d flags 0x%lx " "history 0x%lx hwtid %d atid %d " + "conn_na %u abort_na %u " "%pI4:%d/%d <-> %pI4:%d/%d\n", ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state, ep->com.flags, ep->com.history, ep->hwtid, ep->atid, + ep->stats.connect_neg_adv, + ep->stats.abort_neg_adv, &lsin->sin_addr, ntohs(lsin->sin_port), ntohs(mapped_lsin->sin_port), &rsin->sin_addr, ntohs(rsin->sin_port), @@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data) cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p qp %p state %d flags 0x%lx " "history 0x%lx hwtid %d atid %d " + "conn_na %u abort_na %u " "%pI6:%d/%d <-> %pI6:%d/%d\n", ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state, ep->com.flags, ep->com.history, ep->hwtid, ep->atid, + ep->stats.connect_neg_adv, + ep->stats.abort_neg_adv, &lsin6->sin6_addr, ntohs(lsin6->sin6_port), ntohs(mapped_lsin6->sin6_port), &rsin6->sin6_addr, ntohs(rsin6->sin6_port), @@ -765,6 +773,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) c4iw_init_dev_ucontext(rdev, &rdev->uctx); /* + * This implementation assumes udb_density == ucq_density! Eventually + * we might need to support this but for now fail the open. Also the + * cqid and qpid range must match for now. + */ + if (rdev->lldi.udb_density != rdev->lldi.ucq_density) { + pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n", + pci_name(rdev->lldi.pdev), rdev->lldi.udb_density, + rdev->lldi.ucq_density); + err = -EINVAL; + goto err1; + } + if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || + rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) { + pr_err(MOD "%s: unsupported qp and cq id ranges " + "qp start %u size %u cq start %u size %u\n", + pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start, + rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size, + rdev->lldi.vr->cq.size); + err = -EINVAL; + goto err1; + } + + /* * qpshift is the number of bits to shift the qpid left in order * to get the correct address of the doorbell for that qp. 
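 * (Illustrative arithmetic, with assumed numbers rather than values from
 * this patch: if a 4 KiB doorbell page carries 128 user doorbells, each
 * qp's doorbell slot is 4096 / 128 = 32 bytes wide, so the shift is 5 and
 * the doorbell for a given qpid lives at the doorbell base + (qpid << 5).)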
*/ @@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, rdev->lldi.vr->cq.size); - PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu " + PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu " "qpmask 0x%x cqshift %lu cqmask 0x%x\n", (unsigned)pci_resource_len(rdev->lldi.pdev, 2), - (u64)pci_resource_start(rdev->lldi.pdev, 2), + (void *)pci_resource_start(rdev->lldi.pdev, 2), rdev->lldi.db_reg, rdev->lldi.gts_reg, rdev->qpshift, rdev->qpmask, @@ -1355,7 +1386,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list) t4_sq_host_wq_pidx(&qp->wq), t4_sq_wq_size(&qp->wq)); if (ret) { - pr_err(KERN_ERR MOD "%s: Fatal error - " + pr_err(MOD "%s: Fatal error - " "DB overflow recovery failed - " "error syncing SQ qid %u\n", pci_name(ctx->lldi.pdev), qp->wq.sq.qid); @@ -1371,7 +1402,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list) t4_rq_wq_size(&qp->wq)); if (ret) { - pr_err(KERN_ERR MOD "%s: Fatal error - " + pr_err(MOD "%s: Fatal error - " "DB overflow recovery failed - " "error syncing RQ qid %u\n", pci_name(ctx->lldi.pdev), qp->wq.rq.qid); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index d87e165..97bb555 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -137,6 +137,7 @@ struct c4iw_stats { u64 tcam_full; u64 act_ofld_conn_fails; u64 pas_ofld_conn_fails; + u64 neg_adv; }; struct c4iw_hw_queue { @@ -814,6 +815,11 @@ struct c4iw_listen_ep { int backlog; }; +struct c4iw_ep_stats { + unsigned connect_neg_adv; + unsigned abort_neg_adv; +}; + struct c4iw_ep { struct c4iw_ep_common com; struct c4iw_ep *parent_ep; @@ -846,6 +852,7 @@ struct c4iw_ep { unsigned int retry_count; int snd_win; int rcv_win; + struct c4iw_ep_stats stats; }; static inline void print_addr(struct c4iw_ep_common *epc, const char *func, diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 3ef0cf9..cff815b 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, if (i == (num_wqe-1)) { req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F); - req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; + req->wr.wr_lo = (__force __be64)&wr_wait; } else req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); req->wr.wr_mid = cpu_to_be32( @@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) mhp->attr.zbva = 0; mhp->attr.va_fbo = 0; mhp->attr.page_size = 0; - mhp->attr.len = ~0UL; + mhp->attr.len = ~0ULL; mhp->attr.pbl_size = 0; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, FW_RI_STAG_NSMR, mhp->attr.perms, - mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0); + mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0); if (ret) goto err1; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 15cae5a..389ced3 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, FW_RI_RES_WR_NRES_V(2) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); - res_wr->cookie = (unsigned long) &wr_wait; + res_wr->cookie = (uintptr_t)&wr_wait; res = res_wr->res; res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; res->u.sqrq.op = FW_RI_RES_OP_WRITE; @@ -1209,7 +1209,7 
@@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, wqe->flowid_len16 = cpu_to_be32( FW_WR_FLOWID_V(ep->hwtid) | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); - wqe->cookie = (unsigned long) &ep->com.wr_wait; + wqe->cookie = (uintptr_t)&ep->com.wr_wait; wqe->u.fini.type = FW_RI_TYPE_FINI; ret = c4iw_ofld_send(&rhp->rdev, skb); @@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) FW_WR_FLOWID_V(qhp->ep->hwtid) | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); - wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait; + wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait; wqe->u.init.type = FW_RI_TYPE_INIT; wqe->u.init.mpareqbit_p2ptype = @@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); insert_mmap(ucontext, mm2); mm3->key = uresp.sq_db_gts_key; - mm3->addr = (__force unsigned long) qhp->wq.sq.udb; + mm3->addr = (__force unsigned long)qhp->wq.sq.udb; mm3->len = PAGE_SIZE; insert_mmap(ucontext, mm3); mm4->key = uresp.rq_db_gts_key; - mm4->addr = (__force unsigned long) qhp->wq.rq.udb; + mm4->addr = (__force unsigned long)qhp->wq.rq.udb; mm4->len = PAGE_SIZE; insert_mmap(ucontext, mm4); if (mm5) { diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 871cdca..7f2a6c2 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -539,6 +539,7 @@ struct t4_cq { size_t memsize; __be64 bits_type_ts; u32 cqid; + u32 qid_mask; int vector; u16 size; /* including status page */ u16 cidx; @@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se) set_bit(CQ_ARMED, &cq->flags); while (cq->cidx_inc > CIDXINC_M) { val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | - INGRESSQID_V(cq->cqid); + INGRESSQID_V(cq->cqid & cq->qid_mask); writel(val, cq->gts); cq->cidx_inc -= CIDXINC_M; } val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | - INGRESSQID_V(cq->cqid); + INGRESSQID_V(cq->cqid & cq->qid_mask); writel(val, cq->gts); cq->cidx_inc = 0; return 0; @@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq) u32 val; val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | - INGRESSQID_V(cq->cqid); + INGRESSQID_V(cq->cqid & cq->qid_mask); writel(val, cq->gts); cq->cidx_inc = 0; } diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 5e53327..343e8daf 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -848,6 +848,8 @@ enum { /* TCP congestion control algorithms */ #define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) #define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) -#define CONG_CNTRL_VALID (1 << 18) +#define T5_ISS_S 18 +#define T5_ISS_V(x) ((x) << T5_ISS_S) +#define T5_ISS_F T5_ISS_V(1U) #endif /* _T4FW_RI_API_H_ */ diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c index 120aedf..cec1815 100644 --- a/drivers/infiniband/hw/ehca/ehca_mcast.c +++ b/drivers/infiniband/hw/ehca/ehca_mcast.c @@ -77,7 +77,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) return -EINVAL; } - memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); + memcpy(&my_gid, gid->raw, sizeof(union ib_gid)); subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); interface_id = be64_to_cpu(my_gid.global.interface_id); @@ -114,7 +114,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) return 
-EINVAL; } - memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); + memcpy(&my_gid, gid->raw, sizeof(union ib_gid)); subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); interface_id = be64_to_cpu(my_gid.global.interface_id); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 57070c5..cc64400 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1569,8 +1569,7 @@ static void reset_gids_task(struct work_struct *work) MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); if (err) - pr_warn(KERN_WARNING - "set port %d command failed\n", gw->port); + pr_warn("set port %d command failed\n", gw->port); } mlx4_free_cmd_mailbox(dev, mailbox); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 4d7024b..d35f62d 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1392,7 +1392,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, if (ah->ah_flags & IB_AH_GRH) { if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { - pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n", + pr_err("sgid_index (%u) too large. max is %d\n", ah->grh.sgid_index, gen->port[port - 1].gid_table_len); return -EINVAL; } diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 3b2a6dc..9f9d5c5 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = { [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, + [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb}, [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 6f09a72..72b4341 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic, memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); } +static void record_sockaddr_info(struct sockaddr_storage *addr_info, + nes_addr_t *ip_addr, u16 *port_num) +{ + struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info; + + if (in_addr->sin_family == AF_INET) { + *ip_addr = ntohl(in_addr->sin_addr.s_addr); + *port_num = ntohs(in_addr->sin_port); + } +} + /* * nes_record_pm_msg - Save the received mapping info */ static void nes_record_pm_msg(struct nes_cm_info *cm_info, struct iwpm_sa_data *pm_msg) { - struct sockaddr_in *mapped_loc_addr = - (struct sockaddr_in *)&pm_msg->mapped_loc_addr; - struct sockaddr_in *mapped_rem_addr = - (struct sockaddr_in *)&pm_msg->mapped_rem_addr; - - if (mapped_loc_addr->sin_family == AF_INET) { - cm_info->mapped_loc_addr = - ntohl(mapped_loc_addr->sin_addr.s_addr); - cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port); - } - if (mapped_rem_addr->sin_family == AF_INET) { - cm_info->mapped_rem_addr = - ntohl(mapped_rem_addr->sin_addr.s_addr); - cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port); - } + record_sockaddr_info(&pm_msg->mapped_loc_addr, + &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port); + + record_sockaddr_info(&pm_msg->mapped_rem_addr, + &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port); +} + +/* + * 
nes_get_reminfo - Get the address info of the remote connecting peer + */ +static int nes_get_remote_addr(struct nes_cm_node *cm_node) +{ + struct sockaddr_storage mapped_loc_addr, mapped_rem_addr; + struct sockaddr_storage remote_addr; + int ret; + + nes_create_sockaddr(htonl(cm_node->mapped_loc_addr), + htons(cm_node->mapped_loc_port), &mapped_loc_addr); + nes_create_sockaddr(htonl(cm_node->mapped_rem_addr), + htons(cm_node->mapped_rem_port), &mapped_rem_addr); + + ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr, + &remote_addr, RDMA_NL_NES); + if (ret) + nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n"); + else + record_sockaddr_info(&remote_addr, &cm_node->rem_addr, + &cm_node->rem_port); + return ret; } /** @@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, return NULL; /* set our node specific transport info */ - cm_node->loc_addr = cm_info->loc_addr; + if (listener) { + cm_node->loc_addr = listener->loc_addr; + cm_node->loc_port = listener->loc_port; + } else { + cm_node->loc_addr = cm_info->loc_addr; + cm_node->loc_port = cm_info->loc_port; + } cm_node->rem_addr = cm_info->rem_addr; - cm_node->loc_port = cm_info->loc_port; cm_node->rem_port = cm_info->rem_port; cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; @@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, cm_node->state = NES_CM_STATE_ESTABLISHED; if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; + nes_get_remote_addr(cm_node); handle_rcv_mpa(cm_node, skb); } else { /* rcvd ACK only */ dev_kfree_skb_any(skb); diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index ffd48bf..7df16f7 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -903,7 +903,7 @@ struct qib_devdata { /* PCI Device ID (here for NodeInfo) */ u16 deviceid; /* for write combining settings */ - unsigned long wc_cookie; + int wc_cookie; unsigned long wc_base; unsigned long wc_len; @@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit); extern u32 qib_cpulist_count; extern unsigned long *qib_cpulist; -extern unsigned qib_wc_pat; extern unsigned qib_cc_table_size; int qib_init(struct qib_devdata *, int); int init_chip_wc_pat(struct qib_devdata *dd, u32); diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 9ea6c44..7258818 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma, vma->vm_flags &= ~VM_MAYREAD; vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; - if (qib_wc_pat) + /* We used PAT if wc_cookie == 0 */ + if (!dd->wc_cookie) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 0d2ba59..4b927809 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd) qib_6120_config_ctxts(dd); qib_set_ctxtcnt(dd); - if (qib_wc_pat) { - ret = init_chip_wc_pat(dd, 0); - if (ret) - goto bail; - } + ret = init_chip_wc_pat(dd, 0); + if (ret) + goto bail; set_6120_baseaddrs(dd); /* set chip access pointers now */ ret = 0; diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c 
index 22affda..00b2af2 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd) qib_7220_config_ctxts(dd); qib_set_ctxtcnt(dd); /* needed for PAT setup */ - if (qib_wc_pat) { - ret = init_chip_wc_pat(dd, 0); - if (ret) - goto bail; - } + ret = init_chip_wc_pat(dd, 0); + if (ret) + goto bail; set_7220_baseaddrs(dd); /* set chip access pointers now */ ret = 0; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index ef97b71..f32b462 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) unsigned features, pidx, sbufcnt; int ret, mtu; u32 sbufs, updthresh; + resource_size_t vl15off; /* pport structs are contiguous, allocated after devdata */ ppd = (struct qib_pportdata *)(dd + 1); @@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd) qib_7322_config_ctxts(dd); qib_set_ctxtcnt(dd); - if (qib_wc_pat) { - resource_size_t vl15off; - /* - * We do not set WC on the VL15 buffers to avoid - * a rare problem with unaligned writes from - * interrupt-flushed store buffers, so we need - * to map those separately here. We can't solve - * this for the rarely used mtrr case. - */ - ret = init_chip_wc_pat(dd, 0); - if (ret) - goto bail; + /* + * We do not set WC on the VL15 buffers to avoid + * a rare problem with unaligned writes from + * interrupt-flushed store buffers, so we need + * to map those separately here. We can't solve + * this for the rarely used mtrr case. + */ + ret = init_chip_wc_pat(dd, 0); + if (ret) + goto bail; - /* vl15 buffers start just after the 4k buffers */ - vl15off = dd->physaddr + (dd->piobufbase >> 32) + - dd->piobcnt4k * dd->align4k; - dd->piovl15base = ioremap_nocache(vl15off, - NUM_VL15_BUFS * dd->align4k); - if (!dd->piovl15base) { - ret = -ENOMEM; - goto bail; - } + /* vl15 buffers start just after the 4k buffers */ + vl15off = dd->physaddr + (dd->piobufbase >> 32) + + dd->piobcnt4k * dd->align4k; + dd->piovl15base = ioremap_nocache(vl15off, + NUM_VL15_BUFS * dd->align4k); + if (!dd->piovl15base) { + ret = -ENOMEM; + goto bail; } + qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ ret = 0; diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 2ee3695..7e00470 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); unsigned qib_cc_table_size; module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); -/* - * qib_wc_pat parameter: - * 0 is WC via MTRR - * 1 is WC via PAT - * If PAT initialization fails, code reverts back to MTRR - */ -unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ -module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); -MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); static void verify_interrupt(unsigned long); @@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd) spin_unlock(&dd->pport[pidx].cc_shadow_lock); } - if (!qib_wc_pat) - qib_disable_wc(dd); + qib_disable_wc(dd); if (dd->pioavailregs_dma) { dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, @@ -1547,14 +1537,12 @@ 
static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto bail; } - if (!qib_wc_pat) { - ret = qib_enable_wc(dd); - if (ret) { - qib_dev_err(dd, - "Write combining not enabled (err %d): performance may be poor\n", - -ret); - ret = 0; - } + ret = qib_enable_wc(dd); + if (ret) { + qib_dev_err(dd, + "Write combining not enabled (err %d): performance may be poor\n", + -ret); + ret = 0; } qib_verify_pioperf(dd); diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c index 81b225f..edd0ddb 100644 --- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c +++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c @@ -116,21 +116,10 @@ int qib_enable_wc(struct qib_devdata *dd) } if (!ret) { - int cookie; - - cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0); - if (cookie < 0) { - { - qib_devinfo(dd->pcidev, - "mtrr_add() WC for PIO bufs failed (%d)\n", - cookie); - ret = -EINVAL; - } - } else { - dd->wc_cookie = cookie; - dd->wc_base = (unsigned long) pioaddr; - dd->wc_len = (unsigned long) piolen; - } + dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen); + if (dd->wc_cookie < 0) + /* use error from routine */ + ret = dd->wc_cookie; } return ret; @@ -142,18 +131,7 @@ int qib_enable_wc(struct qib_devdata *dd) */ void qib_disable_wc(struct qib_devdata *dd) { - if (dd->wc_cookie) { - int r; - - r = mtrr_del(dd->wc_cookie, dd->wc_base, - dd->wc_len); - if (r < 0) - qib_devinfo(dd->pcidev, - "mtrr_del(%lx, %lx, %lx) failed: %d\n", - dd->wc_cookie, dd->wc_base, - dd->wc_len, r); - dd->wc_cookie = 0; /* even on failure */ - } + arch_phys_wc_del(dd->wc_cookie); } /** diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 56959ad..cf32a778 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i rx->rx_ring[i].mapping, GFP_KERNEL)) { ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); - ret = -ENOMEM; - goto err_count; + ret = -ENOMEM; + goto err_count; } ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); if (ret) { diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index a1cbba9..3465faf 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state) static void put_pasid_state_wait(struct pasid_state *pasid_state) { + atomic_dec(&pasid_state->count); wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); free_pasid_state(pasid_state); } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 9f7e1d3..66a803b 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -224,14 +224,7 @@ #define RESUME_TERMINATE (1 << 0) #define TTBCR2_SEP_SHIFT 15 -#define TTBCR2_SEP_MASK 0x7 - -#define TTBCR2_ADDR_32 0 -#define TTBCR2_ADDR_36 1 -#define TTBCR2_ADDR_40 2 -#define TTBCR2_ADDR_42 3 -#define TTBCR2_ADDR_44 4 -#define TTBCR2_ADDR_48 5 +#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) #define TTBRn_HI_ASID_SHIFT 16 @@ -793,26 +786,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); if (smmu->version > ARM_SMMU_V1) { reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; - switch (smmu->va_size) { - case 32: - reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); - break; - case 36: - reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); - break; - 
case 40: - reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); - break; - case 42: - reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); - break; - case 44: - reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); - break; - case 48: - reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); - break; - } + reg |= TTBCR2_SEP_UPSTREAM; writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); } } else { diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 4015560..cab2145 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1004,20 +1004,18 @@ static int rk_iommu_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF static const struct of_device_id rk_iommu_dt_ids[] = { { .compatible = "rockchip,iommu" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); -#endif static struct platform_driver rk_iommu_driver = { .probe = rk_iommu_probe, .remove = rk_iommu_remove, .driver = { .name = "rk_iommu", - .of_match_table = of_match_ptr(rk_iommu_dt_ids), + .of_match_table = rk_iommu_dt_ids, }, }; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 7b315e3..01999d7 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -82,19 +82,6 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock); #define NR_GIC_CPU_IF 8 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; -/* - * Supported arch specific GIC irq extension. - * Default make them NULL. - */ -struct irq_chip gic_arch_extn = { - .irq_eoi = NULL, - .irq_mask = NULL, - .irq_unmask = NULL, - .irq_retrigger = NULL, - .irq_set_type = NULL, - .irq_set_wake = NULL, -}; - #ifndef MAX_GIC_NR #define MAX_GIC_NR 1 #endif @@ -167,34 +154,16 @@ static int gic_peek_irq(struct irq_data *d, u32 offset) static void gic_mask_irq(struct irq_data *d) { - unsigned long flags; - - raw_spin_lock_irqsave(&irq_controller_lock, flags); gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR); - if (gic_arch_extn.irq_mask) - gic_arch_extn.irq_mask(d); - raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } static void gic_unmask_irq(struct irq_data *d) { - unsigned long flags; - - raw_spin_lock_irqsave(&irq_controller_lock, flags); - if (gic_arch_extn.irq_unmask) - gic_arch_extn.irq_unmask(d); gic_poke_irq(d, GIC_DIST_ENABLE_SET); - raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } static void gic_eoi_irq(struct irq_data *d) { - if (gic_arch_extn.irq_eoi) { - raw_spin_lock(&irq_controller_lock); - gic_arch_extn.irq_eoi(d); - raw_spin_unlock(&irq_controller_lock); - } - writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); } @@ -251,8 +220,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type) { void __iomem *base = gic_dist_base(d); unsigned int gicirq = gic_irq(d); - unsigned long flags; - int ret; /* Interrupt configuration for SGIs can't be changed */ if (gicirq < 16) @@ -263,25 +230,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) type != IRQ_TYPE_EDGE_RISING) return -EINVAL; - raw_spin_lock_irqsave(&irq_controller_lock, flags); - - if (gic_arch_extn.irq_set_type) - gic_arch_extn.irq_set_type(d, type); - - ret = gic_configure_irq(gicirq, type, base, NULL); - - raw_spin_unlock_irqrestore(&irq_controller_lock, flags); - - return ret; -} - -static int gic_retrigger(struct irq_data *d) -{ - if (gic_arch_extn.irq_retrigger) - return gic_arch_extn.irq_retrigger(d); - - /* the genirq layer expects 0 if we can't retrigger in hardware */ - return 0; + return gic_configure_irq(gicirq, type, base, NULL); } #ifdef CONFIG_SMP @@ -312,21 +261,6 @@ static int 
gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, } #endif -#ifdef CONFIG_PM -static int gic_set_wake(struct irq_data *d, unsigned int on) -{ - int ret = -ENXIO; - - if (gic_arch_extn.irq_set_wake) - ret = gic_arch_extn.irq_set_wake(d, on); - - return ret; -} - -#else -#define gic_set_wake NULL -#endif - static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { u32 irqstat, irqnr; @@ -385,11 +319,9 @@ static struct irq_chip gic_chip = { .irq_unmask = gic_unmask_irq, .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, - .irq_retrigger = gic_retrigger, #ifdef CONFIG_SMP .irq_set_affinity = gic_set_affinity, #endif - .irq_set_wake = gic_set_wake, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, }; @@ -1055,7 +987,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, set_handle_irq(gic_handle_irq); } - gic_chip.flags |= gic_arch_extn.flags; gic_dist_init(gic); gic_cpu_init(gic); gic_pm_init(gic); diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c index 51c485d..f67bbd8 100644 --- a/drivers/irqchip/irq-tegra.c +++ b/drivers/irqchip/irq-tegra.c @@ -264,7 +264,7 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &tegra_ictlr_chip, - &info->base[ictlr]); + info->base[ictlr]); } parent_args = *args; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 9eeea19..5503e43 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc, switch (r) { /* async */ - case -EINPROGRESS: case -EBUSY: wait_for_completion(&ctx->restart); reinit_completion(&ctx->restart); + /* fall through*/ + case -EINPROGRESS: ctx->req = NULL; ctx->cc_sector++; continue; @@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); struct crypt_config *cc = io->cc; - if (error == -EINPROGRESS) + if (error == -EINPROGRESS) { + complete(&ctx->restart); return; + } if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); @@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); if (!atomic_dec_and_test(&ctx->cc_pending)) - goto done; + return; if (bio_data_dir(io->base_bio) == READ) kcryptd_crypt_read_done(io); else kcryptd_crypt_write_io_submit(io, 1); -done: - if (!completion_done(&ctx->restart)) - complete(&ctx->restart); } static void kcryptd_crypt(struct work_struct *work) diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index c8a18e4..720ceeb 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size) goto err_unlock_md_type; } - if (dm_get_md_type(md) == DM_TYPE_NONE) + if (dm_get_md_type(md) == DM_TYPE_NONE) { /* Initial table load: acquire type of table. 
*/ dm_set_md_type(md, dm_table_get_type(t)); - else if (dm_get_md_type(md) != dm_table_get_type(t)) { + + /* setup md->queue to reflect md's type (may block) */ + r = dm_setup_md_queue(md); + if (r) { + DMWARN("unable to set up device queue for new table."); + goto err_unlock_md_type; + } + } else if (dm_get_md_type(md) != dm_table_get_type(t)) { DMWARN("can't change device type after initial table load."); r = -EINVAL; goto err_unlock_md_type; } - /* setup md->queue to reflect md's type (may block) */ - r = dm_setup_md_queue(md); - if (r) { - DMWARN("unable to set up device queue for new table."); - goto err_unlock_md_type; - } dm_unlock_md_type(md); /* stage inactive table */ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f8c7ca3..a930b72 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) dm_put(md); } -static void free_rq_clone(struct request *clone) +static void free_rq_clone(struct request *clone, bool must_be_mapped) { struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; + WARN_ON_ONCE(must_be_mapped && !clone->q); + blk_rq_unprep_clone(clone); - if (clone->q->mq_ops) + if (md->type == DM_TYPE_MQ_REQUEST_BASED) + /* stacked on blk-mq queue(s) */ tio->ti->type->release_clone_rq(clone); else if (!md->queue->mq_ops) /* request_fn queue stacked on request_fn queue(s) */ free_clone_request(md, clone); + /* + * NOTE: for the blk-mq queue stacked on request_fn queue(s) case: + * no need to call free_clone_request() because we leverage blk-mq by + * allocating the clone at the end of the blk-mq pdu (see: clone_rq) + */ if (!md->queue->mq_ops) free_rq_tio(tio); @@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error) rq->sense_len = clone->sense_len; } - free_rq_clone(clone); + free_rq_clone(clone, true); if (!rq->q->mq_ops) blk_end_request_all(rq, error); else @@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq) } if (clone) - free_rq_clone(clone); + free_rq_clone(clone, false); } /* @@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md) { struct request_queue *q = NULL; - if (md->queue->elevator) - return 0; - /* Fully initialize the queue */ q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); if (!q) diff --git a/drivers/md/md.c b/drivers/md/md.c index d4f31e1..593a024 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4818,12 +4818,12 @@ static void md_free(struct kobject *ko) if (mddev->sysfs_state) sysfs_put(mddev->sysfs_state); + if (mddev->queue) + blk_cleanup_queue(mddev->queue); if (mddev->gendisk) { del_gendisk(mddev->gendisk); put_disk(mddev->gendisk); } - if (mddev->queue) - blk_cleanup_queue(mddev->queue); kfree(mddev); } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 2cb59a6..6a68ef5 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -188,8 +188,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) } dev[j] = rdev1; - disk_stack_limits(mddev->gendisk, rdev1->bdev, - rdev1->data_offset << 9); + if (mddev->queue) + disk_stack_limits(mddev->gendisk, rdev1->bdev, + rdev1->data_offset << 9); if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) conf->has_merge_bvec = 1; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 77dfd72..1ba97fd 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1078,9 +1078,6 @@ again: pr_debug("skip op %ld on disc %d for sector %llu\n", bi->bi_rw, i, (unsigned long 
long)sh->sector); clear_bit(R5_LOCKED, &sh->dev[i].flags); - if (sh->batch_head) - set_bit(STRIPE_BATCH_ERR, - &sh->batch_head->state); set_bit(STRIPE_HANDLE, &sh->state); } @@ -1971,17 +1968,30 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) put_cpu(); } +static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) +{ + struct stripe_head *sh; + + sh = kmem_cache_zalloc(sc, gfp); + if (sh) { + spin_lock_init(&sh->stripe_lock); + spin_lock_init(&sh->batch_lock); + INIT_LIST_HEAD(&sh->batch_list); + INIT_LIST_HEAD(&sh->lru); + atomic_set(&sh->count, 1); + } + return sh; +} static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) { struct stripe_head *sh; - sh = kmem_cache_zalloc(conf->slab_cache, gfp); + + sh = alloc_stripe(conf->slab_cache, gfp); if (!sh) return 0; sh->raid_conf = conf; - spin_lock_init(&sh->stripe_lock); - if (grow_buffers(sh, gfp)) { shrink_buffers(sh); kmem_cache_free(conf->slab_cache, sh); @@ -1990,13 +2000,8 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) sh->hash_lock_index = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; /* we just created an active stripe so... */ - atomic_set(&sh->count, 1); atomic_inc(&conf->active_stripes); - INIT_LIST_HEAD(&sh->lru); - spin_lock_init(&sh->batch_lock); - INIT_LIST_HEAD(&sh->batch_list); - sh->batch_head = NULL; release_stripe(sh); conf->max_nr_stripes++; return 1; @@ -2060,6 +2065,35 @@ static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags) return ret; } +static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) +{ + unsigned long cpu; + int err = 0; + + mddev_suspend(conf->mddev); + get_online_cpus(); + for_each_present_cpu(cpu) { + struct raid5_percpu *percpu; + struct flex_array *scribble; + + percpu = per_cpu_ptr(conf->percpu, cpu); + scribble = scribble_alloc(new_disks, + new_sectors / STRIPE_SECTORS, + GFP_NOIO); + + if (scribble) { + flex_array_free(percpu->scribble); + percpu->scribble = scribble; + } else { + err = -ENOMEM; + break; + } + } + put_online_cpus(); + mddev_resume(conf->mddev); + return err; +} + static int resize_stripes(struct r5conf *conf, int newsize) { /* Make all the stripes able to hold 'newsize' devices. 
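(A short worked example for the resize_chunks() sizing above, with illustrative numbers only: the per-CPU scribble flex_array holds chunk_sectors / STRIPE_SECTORS elements, each sized for the number of member disks. With 4 KiB pages STRIPE_SECTORS is 8, so a hypothetical 128 KiB chunk (256 sectors) needs 256 / 8 = 32 elements per CPU; growing either the chunk size or the disk count therefore forces the arrays to be reallocated, under mddev_suspend(), before the reshape is allowed to start.)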
@@ -2088,7 +2122,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) struct stripe_head *osh, *nsh; LIST_HEAD(newstripes); struct disk_info *ndisks; - unsigned long cpu; int err; struct kmem_cache *sc; int i; @@ -2109,13 +2142,11 @@ static int resize_stripes(struct r5conf *conf, int newsize) return -ENOMEM; for (i = conf->max_nr_stripes; i; i--) { - nsh = kmem_cache_zalloc(sc, GFP_KERNEL); + nsh = alloc_stripe(sc, GFP_KERNEL); if (!nsh) break; nsh->raid_conf = conf; - spin_lock_init(&nsh->stripe_lock); - list_add(&nsh->lru, &newstripes); } if (i) { @@ -2142,13 +2173,11 @@ static int resize_stripes(struct r5conf *conf, int newsize) lock_device_hash_lock(conf, hash)); osh = get_free_stripe(conf, hash); unlock_device_hash_lock(conf, hash); - atomic_set(&nsh->count, 1); + for(i=0; i<conf->pool_size; i++) { nsh->dev[i].page = osh->dev[i].page; nsh->dev[i].orig_page = osh->dev[i].page; } - for( ; i<newsize; i++) - nsh->dev[i].page = NULL; nsh->hash_lock_index = hash; kmem_cache_free(conf->slab_cache, osh); cnt++; @@ -2174,25 +2203,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) } else err = -ENOMEM; - get_online_cpus(); - for_each_present_cpu(cpu) { - struct raid5_percpu *percpu; - struct flex_array *scribble; - - percpu = per_cpu_ptr(conf->percpu, cpu); - scribble = scribble_alloc(newsize, conf->chunk_sectors / - STRIPE_SECTORS, GFP_NOIO); - - if (scribble) { - flex_array_free(percpu->scribble); - percpu->scribble = scribble; - } else { - err = -ENOMEM; - break; - } - } - put_online_cpus(); - /* Step 4, return new stripes to service */ while(!list_empty(&newstripes)) { nsh = list_entry(newstripes.next, struct stripe_head, lru); @@ -2212,7 +2222,8 @@ static int resize_stripes(struct r5conf *conf, int newsize) conf->slab_cache = sc; conf->active_name = 1-conf->active_name; - conf->pool_size = newsize; + if (!err) + conf->pool_size = newsize; return err; } @@ -2434,7 +2445,7 @@ static void raid5_end_write_request(struct bio *bi, int error) } rdev_dec_pending(rdev, conf->mddev); - if (sh->batch_head && !uptodate) + if (sh->batch_head && !uptodate && !replacement) set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) @@ -3278,7 +3289,9 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, /* reconstruct-write isn't being forced */ return 0; for (i = 0; i < s->failed; i++) { - if (!test_bit(R5_UPTODATE, &fdev[i]->flags) && + if (s->failed_num[i] != sh->pd_idx && + s->failed_num[i] != sh->qd_idx && + !test_bit(R5_UPTODATE, &fdev[i]->flags) && !test_bit(R5_OVERWRITE, &fdev[i]->flags)) return 1; } @@ -3298,6 +3311,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, */ BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); BUG_ON(test_bit(R5_Wantread, &dev->flags)); + BUG_ON(sh->batch_head); if ((s->uptodate == disks - 1) && (s->failed && (disk_idx == s->failed_num[0] || disk_idx == s->failed_num[1]))) { @@ -3366,7 +3380,6 @@ static void handle_stripe_fill(struct stripe_head *sh, { int i; - BUG_ON(sh->batch_head); /* look for blocks to read/compute, skip this if a compute * is already in flight, or if the stripe contents are in the * midst of changing due to a write @@ -4198,15 +4211,9 @@ static void check_break_stripe_batch_list(struct stripe_head *sh) return; head_sh = sh; - do { - sh = list_first_entry(&sh->batch_list, - struct stripe_head, batch_list); - BUG_ON(sh == head_sh); - } while (!test_bit(STRIPE_DEGRADED, &sh->state)); - while (sh != head_sh) { - next = 
list_first_entry(&sh->batch_list, - struct stripe_head, batch_list); + list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { + list_del_init(&sh->batch_list); set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, @@ -4226,8 +4233,6 @@ static void check_break_stripe_batch_list(struct stripe_head *sh) set_bit(STRIPE_HANDLE, &sh->state); release_stripe(sh); - - sh = next; } } @@ -6221,8 +6226,11 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu percpu->spare_page = alloc_page(GFP_KERNEL); if (!percpu->scribble) percpu->scribble = scribble_alloc(max(conf->raid_disks, - conf->previous_raid_disks), conf->chunk_sectors / - STRIPE_SECTORS, GFP_KERNEL); + conf->previous_raid_disks), + max(conf->chunk_sectors, + conf->prev_chunk_sectors) + / STRIPE_SECTORS, + GFP_KERNEL); if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { free_scratch_buffer(conf, percpu); @@ -7198,6 +7206,15 @@ static int check_reshape(struct mddev *mddev) if (!check_stripe_cache(mddev)) return -ENOSPC; + if (mddev->new_chunk_sectors > mddev->chunk_sectors || + mddev->delta_disks > 0) + if (resize_chunks(conf, + conf->previous_raid_disks + + max(0, mddev->delta_disks), + max(mddev->new_chunk_sectors, + mddev->chunk_sectors) + ) < 0) + return -ENOMEM; return resize_stripes(conf, (conf->previous_raid_disks + mddev->delta_disks)); } diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 9c64b5d..110fd70 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -116,8 +116,8 @@ static struct mcam_format_struct { .planar = false, }, { - .desc = "UYVY 4:2:2", - .pixelformat = V4L2_PIX_FMT_UYVY, + .desc = "YVYU 4:2:2", + .pixelformat = V4L2_PIX_FMT_YVYU, .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, .bpp = 2, .planar = false, @@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam) switch (fmt->pixelformat) { case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YVYU: widthy = fmt->width * 2; widthuv = 0; break; @@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam) case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YVU420: mcam_reg_write_mask(cam, REG_CTRL0, - C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK); + C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK); break; case V4L2_PIX_FMT_YUYV: mcam_reg_write_mask(cam, REG_CTRL0, - C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK); + C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK); break; - case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YVYU: mcam_reg_write_mask(cam, REG_CTRL0, - C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK); + C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK); break; case V4L2_PIX_FMT_JPEG: mcam_reg_write_mask(cam, REG_CTRL0, diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h index aa0c6ea..7ffdf4d 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.h +++ b/drivers/media/platform/marvell-ccic/mcam-core.h @@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam); #define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */ #define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */ #define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */ -#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */ -#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */ -#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */ -#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */ +#define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping 
*/ +#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */ +#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */ +#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */ /* Bayer bits 18,19 if needed */ #define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */ #define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */ diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c index 9351f64..6460f8e 100644 --- a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c @@ -135,6 +135,8 @@ #define VIN_MAX_WIDTH 2048 #define VIN_MAX_HEIGHT 2048 +#define TIMEOUT_MS 100 + enum chip_id { RCAR_GEN2, RCAR_H1, @@ -820,7 +822,10 @@ static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv) if (priv->state == STOPPING) { priv->request_to_stop = true; spin_unlock_irq(&priv->lock); - wait_for_completion(&priv->capture_stop); + if (!wait_for_completion_timeout( + &priv->capture_stop, + msecs_to_jiffies(TIMEOUT_MS))) + priv->state = STOPPED; spin_lock_irq(&priv->lock); } } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 2c25271..60f7141 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) md->reset_done &= ~type; } +int mmc_access_rpmb(struct mmc_queue *mq) +{ + struct mmc_blk_data *md = mq->data; + /* + * If this is a RPMB partition access, return ture + */ + if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) + return true; + + return false; +} + static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 236d194..8efa368 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) return BLKPREP_KILL; } - if (mq && mmc_card_removed(mq->card)) + if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) return BLKPREP_KILL; req->cmd_flags |= REQ_DONTPREP; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 5752d50..99e6521 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *); extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); extern void mmc_packed_clean(struct mmc_queue *); +extern int mmc_access_rpmb(struct mmc_queue *); + #endif diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index c296bc0..92e7671 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2651,6 +2651,7 @@ int mmc_pm_notify(struct notifier_block *notify_block, switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 1; spin_unlock_irqrestore(&host->lock, flags); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 38b2926..5f5adaf 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -589,9 +589,11 @@ static int dw_mci_idmac_init(struct dw_mci *host) host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); /* Forward link the descriptor list */ - for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) + for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) { p->des3 = cpu_to_le32(host->sg_dma + (sizeof(struct idmac_desc) * (i + 1))); 
+ p->des1 = 0; + } /* Set the last descriptor as the end-of-ring descriptor */ p->des3 = cpu_to_le32(host->sg_dma); @@ -1300,7 +1302,8 @@ static int dw_mci_get_cd(struct mmc_host *mmc) int gpio_cd = mmc_gpio_get_cd(mmc); /* Use platform get_cd function, else try onboard card detect */ - if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) + if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) || + (mmc->caps & MMC_CAP_NONREMOVABLE)) present = 1; else if (!IS_ERR_VALUE(gpio_cd)) present = gpio_cd; diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 2b6ef6b..7eff087 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1408,7 +1408,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) host = mmc_priv(mmc); host->mmc = mmc; host->addr = reg; - host->timeout = msecs_to_jiffies(1000); + host->timeout = msecs_to_jiffies(10000); host->ccs_enable = !pd || !pd->ccs_unsupported; host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 7c8b169..3af137f 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -223,7 +223,7 @@ static int m25p_probe(struct spi_device *spi) */ if (data && data->type) flash_name = data->type; - else if (!strcmp(spi->modalias, "nor-jedec")) + else if (!strcmp(spi->modalias, "spi-nor")) flash_name = NULL; /* auto-detect */ else flash_name = spi->modalias; @@ -255,7 +255,7 @@ static int m25p_remove(struct spi_device *spi) * since most of these flash are compatible to some extent, and their * differences can often be differentiated by the JEDEC read-ID command, we * encourage new users to add support to the spi-nor library, and simply bind - * against a generic string here (e.g., "nor-jedec"). + * against a generic string here (e.g., "jedec,spi-nor"). * * Many flash names are kept here in this list (as well as in spi-nor.c) to * keep them available as module aliases for existing platforms. @@ -305,7 +305,7 @@ static const struct spi_device_id m25p_ids[] = { * Generic support for SPI NOR that can be identified by the JEDEC READ * ID opcode (0x9F). Use this, if possible. */ - {"nor-jedec"}, + {"spi-nor"}, { }, }; MODULE_DEVICE_TABLE(spi, m25p_ids); diff --git a/drivers/mtd/tests/readtest.c b/drivers/mtd/tests/readtest.c index a3196b7..58df07a 100644 --- a/drivers/mtd/tests/readtest.c +++ b/drivers/mtd/tests/readtest.c @@ -191,9 +191,11 @@ static int __init mtd_readtest_init(void) err = ret; } - err = mtdtest_relax(); - if (err) + ret = mtdtest_relax(); + if (ret) { + err = ret; goto out; + } } if (err) diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index db2c05b..c9eb78f 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -310,6 +310,8 @@ static void ubiblock_do_work(struct work_struct *work) blk_rq_map_sg(req->q, req, pdu->usgl.sg); ret = ubiblock_read(pdu); + rq_flush_dcache_pages(req); + blk_mq_end_request(req, ret); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 78dde56..d5fe5d5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -82,6 +82,8 @@ #include <net/bond_3ad.h> #include <net/bond_alb.h> +#include "bonding_priv.h" + /*---------------------------- Module parameters ----------------------------*/ /* monitor all links that often (in milliseconds). 
<=0 disables monitoring */ @@ -4542,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void) int bond_create(struct net *net, const char *name) { struct net_device *bond_dev; + struct bonding *bond; + struct alb_bond_info *bond_info; int res; rtnl_lock(); @@ -4555,6 +4559,14 @@ int bond_create(struct net *net, const char *name) return -ENOMEM; } + /* + * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX. + * It is set to 0 by default which is wrong. + */ + bond = netdev_priv(bond_dev); + bond_info = &(BOND_ALB_INFO(bond)); + bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; + dev_net_set(bond_dev, net); bond_dev->rtnl_link_ops = &bond_link_ops; diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 62694cf..b20b35ac 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -4,6 +4,7 @@ #include <net/netns/generic.h> #include <net/bonding.h> +#include "bonding_priv.h" static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h new file mode 100644 index 0000000..5a4d81a --- /dev/null +++ b/drivers/net/bonding/bonding_priv.h @@ -0,0 +1,25 @@ +/* + * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'. + * + * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes + * NCM: Network and Communications Management, Inc. + * + * BUT, I'm the one who modified it for ethernet, so: + * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov + * + * This software may be used and distributed according to the terms + * of the GNU Public License, incorporated herein by reference. + * + */ + +#ifndef _BONDING_PRIV_H +#define _BONDING_PRIV_H + +#define DRV_VERSION "3.7.1" +#define DRV_RELDATE "April 27, 2011" +#define DRV_NAME "bonding" +#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" + +#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" + +#endif diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 58808f6..e8c96b8 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -112,7 +112,7 @@ config PCH_CAN config CAN_GRCAN tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" - depends on OF + depends on OF && HAS_DMA ---help--- Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. 
Note that the driver supports little endian, even though little diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 4643914..8b17a90 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR)) { - netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n", + netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", msg->u.rx_can_header.flag); stats->rx_errors++; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 6bddfe0..fc55e8e 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -509,10 +509,11 @@ static int xcan_rx(struct net_device *ndev) cf->can_id |= CAN_RTR_FLAG; } - if (!(id_xcan & XCAN_IDR_SRR_MASK)) { - data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET); - data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET); + /* DW1/DW2 must always be read to remove message from RXFIFO */ + data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET); + data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET); + if (!(cf->can_id & CAN_RTR_FLAG)) { /* Change Xilinx CAN data format to socketCAN data format */ if (cf->can_dlc > 0) *(__be32 *)(cf->data) = cpu_to_be32(data[0]); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index af639ab..cf309aa9 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1469,6 +1469,9 @@ static void __exit mv88e6xxx_cleanup(void) #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) unregister_switch_driver(&mv88e6171_switch_driver); #endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) + unregister_switch_driver(&mv88e6352_switch_driver); +#endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) unregister_switch_driver(&mv88e6123_61_65_switch_driver); #endif diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index b36ee9e..d686b9c 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c @@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec) char *s; if (!ecard_readchunk(&cd, ec, 0xf5, 0)) { - printk(KERN_ERR "%s: unable to read podule description string\n", + printk(KERN_ERR "%s: unable to read module description string\n", dev_name(&ec->dev)); goto no_addr; } diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h index eba070f..89cd11d 100644 --- a/drivers/net/ethernet/altera/altera_msgdmahw.h +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h @@ -58,15 +58,12 @@ struct msgdma_extended_desc { /* Tx buffer control flags */ #define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ - MSGDMA_DESC_CTL_TR_ERR_IRQ | \ MSGDMA_DESC_CTL_GO) -#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \ - MSGDMA_DESC_CTL_GO) +#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO) #define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ MSGDMA_DESC_CTL_TR_COMP_IRQ | \ - MSGDMA_DESC_CTL_TR_ERR_IRQ | \ MSGDMA_DESC_CTL_GO) #define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 90a7630..da48e66 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit) "RCV pktstatus %08X 
pktlength %08X\n", pktstatus, pktlength); + /* DMA trasfer from TSE starts with 2 aditional bytes for + * IP payload alignment. Status returned by get_rx_status() + * contains DMA transfer length. Packet is 2 bytes shorter. + */ + pktlength -= 2; + count++; next_entry = (++priv->rx_cons) % priv->rx_ring_size; @@ -777,6 +783,8 @@ static int init_phy(struct net_device *dev) struct altera_tse_private *priv = netdev_priv(dev); struct phy_device *phydev; struct device_node *phynode; + bool fixed_link = false; + int rc = 0; /* Avoid init phy in case of no phy present */ if (!priv->phy_iface) @@ -789,13 +797,32 @@ static int init_phy(struct net_device *dev) phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); if (!phynode) { - netdev_dbg(dev, "no phy-handle found\n"); - if (!priv->mdio) { - netdev_err(dev, - "No phy-handle nor local mdio specified\n"); - return -ENODEV; + /* check if a fixed-link is defined in device-tree */ + if (of_phy_is_fixed_link(priv->device->of_node)) { + rc = of_phy_register_fixed_link(priv->device->of_node); + if (rc < 0) { + netdev_err(dev, "cannot register fixed PHY\n"); + return rc; + } + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + phynode = of_node_get(priv->device->of_node); + fixed_link = true; + + netdev_dbg(dev, "fixed-link detected\n"); + phydev = of_phy_connect(dev, phynode, + &altera_tse_adjust_link, + 0, priv->phy_iface); + } else { + netdev_dbg(dev, "no phy-handle found\n"); + if (!priv->mdio) { + netdev_err(dev, "No phy-handle nor local mdio specified\n"); + return -ENODEV; + } + phydev = connect_local_phy(dev); } - phydev = connect_local_phy(dev); } else { netdev_dbg(dev, "phy-handle found\n"); phydev = of_phy_connect(dev, phynode, @@ -819,10 +846,10 @@ static int init_phy(struct net_device *dev) /* Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent - * device as well. + * device as well. If a fixed-link is used the phy_id is always 0. * Note: phydev->phy_id is the result of reading the UID PHY registers. 
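(A hedged, self-contained sketch of the fixed-link pattern the init_phy() hunk above adopts — not the driver's exact code. When the MAC's device-tree node carries a fixed-link description instead of a phy-handle, the MAC node itself is registered as a fixed PHY and handed to of_phy_connect(); such a PHY legitimately reports phy_id == 0, which is why the UID sanity check below now tolerates it for fixed links. Function and parameter names here are made up for illustration.)

	#include <linux/netdevice.h>
	#include <linux/of.h>
	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	/* Illustrative only; error handling trimmed to the essentials. */
	static struct phy_device *sketch_connect_phy(struct net_device *dev,
						     struct device_node *mac_np,
						     void (*adjust)(struct net_device *),
						     phy_interface_t iface)
	{
		struct device_node *phy_np;

		phy_np = of_parse_phandle(mac_np, "phy-handle", 0);
		if (!phy_np && of_phy_is_fixed_link(mac_np)) {
			if (of_phy_register_fixed_link(mac_np) < 0)
				return NULL;
			/* the MAC node doubles as the PHY node for fixed links */
			phy_np = of_node_get(mac_np);
		}
		if (!phy_np)
			return NULL;

		return of_phy_connect(dev, phy_np, adjust, 0, iface);
	}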
*/ - if (phydev->phy_id == 0) { + if ((phydev->phy_id == 0) && !fixed_link) { netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id); phy_disconnect(phydev); return -ENODEV; diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index c638c85..4269160 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -179,7 +179,8 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" - depends on (OF_NET || ACPI) && HAS_IOMEM + depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA + depends on ARM64 || COMPILE_TEST select PHYLIB select AMD_XGBE_PHY select BITREVERSE diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig index f4054d24..19e38af 100644 --- a/drivers/net/ethernet/apm/xgene/Kconfig +++ b/drivers/net/ethernet/apm/xgene/Kconfig @@ -1,6 +1,7 @@ config NET_XGENE tristate "APM X-Gene SoC Ethernet Driver" depends on HAS_DMA + depends on ARCH_XGENE || COMPILE_TEST select PHYLIB help This is the Ethernet driver for the on-chip ethernet interface on the diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 8e262e2..dea29ee 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -25,8 +25,7 @@ config ARC_EMAC_CORE config ARC_EMAC tristate "ARC EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ - depends on OF_NET + depends on OF_IRQ && OF_NET && HAS_DMA ---help--- On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x non-standard on-chip ethernet device ARC EMAC 10/100 is used. @@ -35,7 +34,7 @@ config ARC_EMAC config EMAC_ROCKCHIP tristate "Rockchip EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET && REGULATOR + depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA ---help--- Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. 
This selects Rockchip SoC glue layer support for the diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h index 74df16a..88a6271 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h @@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw); #define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 #define TWSI_CTRL_SW_LDSTART 0x800 #define TWSI_CTRL_HW_LDSTART 0x1000 -#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F #define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 #define TWSI_CTRL_LD_EXIST 0x400000 #define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 7e3d87a..e2c043e 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters { u32 jbr; /* RO # of xmited jabber count*/ u32 bytes; /* RO # of xmited byte count */ u32 pok; /* RO # of xmited good pkt */ - u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ + u32 uc; /* RO (0x4f0) # of xmited unicast pkt */ }; struct bcm_sysport_mib { diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index de77d3a..21e3c38 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1260,7 +1260,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight) /* Poll again if more events arrived in the meantime */ if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX)) - return handled; + return weight; if (handled < weight) { napi_complete(napi); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 355d5fe..a3b0f7a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -521,6 +521,7 @@ struct bnx2x_fp_txdata { }; enum bnx2x_tpa_mode_t { + TPA_MODE_DISABLED, TPA_MODE_LRO, TPA_MODE_GRO }; @@ -589,7 +590,6 @@ struct bnx2x_fastpath { /* TPA related */ struct bnx2x_agg_info *tpa_info; - u8 disable_tpa; #ifdef BNX2X_STOP_ON_ERROR u64 tpa_queue_used; #endif @@ -1545,9 +1545,7 @@ struct bnx2x { #define USING_MSIX_FLAG (1 << 5) #define USING_MSI_FLAG (1 << 6) #define DISABLE_MSI_FLAG (1 << 7) -#define TPA_ENABLE_FLAG (1 << 8) #define NO_MCP_FLAG (1 << 9) -#define GRO_ENABLE_FLAG (1 << 10) #define MF_FUNC_DIS (1 << 11) #define OWN_CNIC_IRQ (1 << 12) #define NO_ISCSI_OOO_FLAG (1 << 13) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2f63467..ec56a9b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) u16 frag_size, pages; #ifdef BNX2X_STOP_ON_ERROR /* sanity check */ - if (fp->disable_tpa && + if (fp->mode == TPA_MODE_DISABLED && (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) - BNX2X_ERR("START/STOP packet while disable_tpa type %x\n", + BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n", CQE_TYPE(cqe_fp_type)); #endif @@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) DP(NETIF_MSG_IFUP, "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); - if (!fp->disable_tpa) { + if (fp->mode != TPA_MODE_DISABLED) { /* Fill the per-aggregation pool */ for (i = 0; i < MAX_AGG_QS(bp); i++) { struct bnx2x_agg_info 
*tpa_info = @@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", j); bnx2x_free_tpa_pool(bp, fp, i); - fp->disable_tpa = 1; + fp->mode = TPA_MODE_DISABLED; break; } dma_unmap_addr_set(first_buf, mapping, 0); @@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) ring_prod); bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); - fp->disable_tpa = 1; + fp->mode = TPA_MODE_DISABLED; ring_prod = 0; break; } @@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) bnx2x_free_rx_bds(fp); - if (!fp->disable_tpa) + if (fp->mode != TPA_MODE_DISABLED) bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); } } @@ -2477,17 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) /* set the tpa flag for each queue. The tpa flag determines the queue * minimal size so it must be set prior to queue memory allocation */ - fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || - (bp->flags & GRO_ENABLE_FLAG && - bnx2x_mtu_allows_gro(bp->dev->mtu))); - if (bp->flags & TPA_ENABLE_FLAG) + if (bp->dev->features & NETIF_F_LRO) fp->mode = TPA_MODE_LRO; - else if (bp->flags & GRO_ENABLE_FLAG) + else if (bp->dev->features & NETIF_F_GRO && + bnx2x_mtu_allows_gro(bp->dev->mtu)) fp->mode = TPA_MODE_GRO; + else + fp->mode = TPA_MODE_DISABLED; - /* We don't want TPA on an FCoE L2 ring */ - if (IS_FCOE_FP(fp)) - fp->disable_tpa = 1; + /* We don't want TPA if it's disabled in bp + * or if this is an FCoE L2 ring. + */ + if (bp->disable_tpa || IS_FCOE_FP(fp)) + fp->mode = TPA_MODE_DISABLED; } int bnx2x_load_cnic(struct bnx2x *bp) @@ -2608,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* * Zero fastpath structures preserving invariants like napi, which are * allocated only once, fp index, max_cos, bp pointer. - * Also set fp->disable_tpa and txdata_ptr. + * Also set fp->mode and txdata_ptr. */ DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); for_each_queue(bp, i) @@ -3247,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi) if ((bp->state == BNX2X_STATE_CLOSED) || (bp->state == BNX2X_STATE_ERROR) || - (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) + (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) return LL_FLUSH_FAILED; if (!bnx2x_fp_lock_poll(fp)) @@ -4543,7 +4545,7 @@ alloc_mem_err: * In these cases we disable the queue * Min size is different for OOO, TPA and non-TPA queues */ - if (ring_size < (fp->disable_tpa ? + if (ring_size < (fp->mode == TPA_MODE_DISABLED ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { /* release memory allocated for this queue */ bnx2x_free_fp_mem_at(bp, index); @@ -4784,6 +4786,11 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) { struct bnx2x *bp = netdev_priv(dev); + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n"); + return -EPERM; + } + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { BNX2X_ERR("Can't perform change MTU during parity recovery\n"); return -EAGAIN; @@ -4809,66 +4816,71 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); + if (pci_num_vf(bp->pdev)) { + netdev_features_t changed = dev->features ^ features; + + /* Revert the requested changes in features if they + * would require internal reload of PF in bnx2x_set_features(). 
+ */ + if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { + features &= ~NETIF_F_RXCSUM; + features |= dev->features & NETIF_F_RXCSUM; + } + + if (changed & NETIF_F_LOOPBACK) { + features &= ~NETIF_F_LOOPBACK; + features |= dev->features & NETIF_F_LOOPBACK; + } + } + /* TPA requires Rx CSUM offloading */ if (!(features & NETIF_F_RXCSUM)) { features &= ~NETIF_F_LRO; features &= ~NETIF_F_GRO; } - /* Note: do not disable SW GRO in kernel when HW GRO is off */ - if (bp->disable_tpa) - features &= ~NETIF_F_LRO; - return features; } int bnx2x_set_features(struct net_device *dev, netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); - u32 flags = bp->flags; - u32 changes; + netdev_features_t changes = features ^ dev->features; bool bnx2x_reload = false; + int rc; - if (features & NETIF_F_LRO) - flags |= TPA_ENABLE_FLAG; - else - flags &= ~TPA_ENABLE_FLAG; - - if (features & NETIF_F_GRO) - flags |= GRO_ENABLE_FLAG; - else - flags &= ~GRO_ENABLE_FLAG; - - if (features & NETIF_F_LOOPBACK) { - if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { - bp->link_params.loopback_mode = LOOPBACK_BMAC; - bnx2x_reload = true; - } - } else { - if (bp->link_params.loopback_mode != LOOPBACK_NONE) { - bp->link_params.loopback_mode = LOOPBACK_NONE; - bnx2x_reload = true; + /* VFs or non SRIOV PFs should be able to change loopback feature */ + if (!pci_num_vf(bp->pdev)) { + if (features & NETIF_F_LOOPBACK) { + if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { + bp->link_params.loopback_mode = LOOPBACK_BMAC; + bnx2x_reload = true; + } + } else { + if (bp->link_params.loopback_mode != LOOPBACK_NONE) { + bp->link_params.loopback_mode = LOOPBACK_NONE; + bnx2x_reload = true; + } } } - changes = flags ^ bp->flags; - /* if GRO is changed while LRO is enabled, don't force a reload */ - if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) - changes &= ~GRO_ENABLE_FLAG; + if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO)) + changes &= ~NETIF_F_GRO; /* if GRO is changed while HW TPA is off, don't force a reload */ - if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa) - changes &= ~GRO_ENABLE_FLAG; + if ((changes & NETIF_F_GRO) && bp->disable_tpa) + changes &= ~NETIF_F_GRO; if (changes) bnx2x_reload = true; - bp->flags = flags; - if (bnx2x_reload) { - if (bp->recovery_state == BNX2X_RECOVERY_DONE) - return bnx2x_reload_if_running(dev); + if (bp->recovery_state == BNX2X_RECOVERY_DONE) { + dev->features = features; + rc = bnx2x_reload_if_running(dev); + return rc ? 
rc : 1; + } /* else: bnx2x_nic_load() will be called at end of recovery */ } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index adcacda..d7a7175 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, { int i; - if (fp->disable_tpa) + if (fp->mode == TPA_MODE_DISABLED) return; for (i = 0; i < last; i++) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index e3d853c..48ed005 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev, "set ring params command parameters: rx_pending = %d, tx_pending = %d\n", ering->rx_pending, ering->tx_pending); + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "VFs are enabled, can not change ring parameters\n"); + return -EPERM; + } + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { DP(BNX2X_MSG_ETHTOOL, "Handling parity error recovery. Try again later\n"); @@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev, u8 is_serdes, link_up; int rc, cnt = 0; + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "VFs are enabled, can not perform self test\n"); + return; + } + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); @@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev, channels->rx_count, channels->tx_count, channels->other_count, channels->combined_count); + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n"); + return -EPERM; + } + /* We don't support separate rx / tx channels. * We don't allow setting 'other' channels. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b9f85fcc..fd52ce9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); } - if (!fp->disable_tpa) { + if (fp->mode != TPA_MODE_DISABLED) { __set_bit(BNX2X_Q_FLG_TPA, &flags); __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); if (fp->mode == TPA_MODE_GRO) @@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, u16 sge_sz = 0; u16 tpa_agg_size = 0; - if (!fp->disable_tpa) { + if (fp->mode != TPA_MODE_DISABLED) { pause->sge_th_lo = SGE_TH_LO(bp); pause->sge_th_hi = SGE_TH_HI(bp); @@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp) /* This flag is relevant for E1x only. * E2 doesn't have a TPA configuration in a function level. */ - flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; + flags |= (bp->dev->features & NETIF_F_LRO) ? 
FUNC_FLG_TPA : 0; func_init.func_flgs = flags; func_init.pf_id = BP_FUNC(bp); @@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) /* Set TPA flags */ if (bp->disable_tpa) { - bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); + bp->dev->hw_features &= ~NETIF_F_LRO; bp->dev->features &= ~NETIF_F_LRO; - } else { - bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); - bp->dev->features |= NETIF_F_LRO; } if (CHIP_IS_E1(bp)) @@ -13371,6 +13368,17 @@ static int bnx2x_init_one(struct pci_dev *pdev, bool is_vf; int cnic_cnt; + /* Management FW 'remembers' living interfaces. Allow it some time + * to forget previously living interfaces, allowing a proper re-load. + */ + if (is_kdump_kernel()) { + ktime_t now = ktime_get_boottime(); + ktime_t fw_ready_time = ktime_set(5, 0); + + if (ktime_before(now, fw_ready_time)) + msleep(ktime_ms_delta(fw_ready_time, now)); + } + /* An estimated maximum supported CoS number according to the chip * version. * We will try to roughly estimate the maximum number of CoSes this chip diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 15b2d16..06b8c0d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); /* select tpa mode to request */ - if (!fp->disable_tpa) { + if (fp->mode != TPA_MODE_DISABLED) { flags |= VFPF_QUEUE_FLG_TPA; flags |= VFPF_QUEUE_FLG_TPA_IPV6; if (fp->mode == TPA_MODE_GRO) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 1270b18..069952f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - tp->pcierr_recovery = true; + /* We needn't recover from permanent error */ + if (state == pci_channel_io_frozen) + tp->pcierr_recovery = true; /* We probably don't have netdev yet */ if (!netdev || !netif_running(netdev)) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 9f53872..61aa570 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -707,6 +707,9 @@ static void gem_rx_refill(struct macb *bp) /* properly align Ethernet header */ skb_reserve(skb, NET_IP_ALIGN); + } else { + bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); + bp->rx_ring[entry].ctrl = 0; } } @@ -978,7 +981,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) struct macb_queue *queue = dev_id; struct macb *bp = queue->bp; struct net_device *dev = bp->dev; - u32 status; + u32 status, ctrl; status = queue_readl(queue, ISR); @@ -1034,6 +1037,15 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) * add that if/when we get our hands on a full-blown MII PHY. 
*/ + if (status & MACB_BIT(RXUBR)) { + ctrl = macb_readl(bp, NCR); + macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); + macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(RXUBR)); + } + if (status & MACB_BIT(ISR_ROVR)) { /* We missed at least one packet */ if (macb_is_gem(bp)) @@ -1473,9 +1485,9 @@ static void macb_init_rings(struct macb *bp) for (i = 0; i < TX_RING_SIZE; i++) { bp->queues[0].tx_ring[i].addr = 0; bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); - bp->queues[0].tx_head = 0; - bp->queues[0].tx_tail = 0; } + bp->queues[0].tx_head = 0; + bp->queues[0].tx_tail = 0; bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); bp->rx_tail = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 5959e3a..e8578a7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, memoffset = (mtype * (edc_size * 1024 * 1024)); else { mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, - MA_EXT_MEMORY1_BAR_A)); + MA_EXT_MEMORY0_BAR_A)); memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index fb0bc3c..a6dcbf8 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4846,7 +4846,8 @@ err: } static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u32 filter_mask) + struct net_device *dev, u32 filter_mask, + int nlflags) { struct be_adapter *adapter = netdev_priv(dev); int status = 0; @@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return ndo_dflt_bridge_getlink(skb, pid, seq, dev, hsw_mode == PORT_FWD_TYPE_VEPA ? 
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, - 0, 0); + 0, 0, nlflags); } #ifdef CONFIG_BE2NET_VXLAN diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f6a3a7a..66d47e4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev) rcntl |= 0x40000000 | 0x00000020; /* RGMII, RMII or MII */ - if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) + if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) rcntl |= (1 << 6); else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) rcntl |= (1 << 8); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 291c870..2a0dc12 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void) { int ret = 0; - if (atomic_inc_and_test(&ehea_memory_hooks_registered)) + if (atomic_inc_return(&ehea_memory_hooks_registered) > 1) return 0; ret = ehea_create_busmap(); @@ -3381,12 +3381,14 @@ out3: out2: unregister_reboot_notifier(&ehea_reboot_nb); out: + atomic_dec(&ehea_memory_hooks_registered); return ret; } static void ehea_unregister_memory_hooks(void) { - if (atomic_read(&ehea_memory_hooks_registered)) + /* Only remove the hooks if we've registered them */ + if (atomic_read(&ehea_memory_hooks_registered) == 0) return; unregister_reboot_notifier(&ehea_reboot_nb); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index cd7675a..1813476 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) return -EINVAL; for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) - if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) + if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) break; if (i == IBMVETH_NUM_BUFF_POOLS) @@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { adapter->rx_buff_pool[i].active = 1; - if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { + if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { dev->mtu = new_mtu; vio_cmo_set_dev_desired(viodev, ibmveth_get_desired_dma diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 5d9ceb1..0abc942 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -40,6 +40,7 @@ #include <linux/ptp_classify.h> #include <linux/mii.h> #include <linux/mdio.h> +#include <linux/pm_qos.h> #include "hw.h" struct e1000_info; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 1b0661e..c754b20 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -610,7 +610,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, unsigned int total_bytes = 0, total_packets = 0; u16 cleaned_count = fm10k_desc_unused(rx_ring); - do { + while (likely(total_packets < budget)) { union fm10k_rx_desc *rx_desc; /* return some buffers to hardware, one at a time is too slow */ @@ -659,7 +659,7 @@ static bool fm10k_clean_rx_irq(struct 
fm10k_q_vector *q_vector, /* update budget accounting */ total_packets++; - } while (likely(total_packets < budget)); + } /* place incomplete frames back on ring for completion */ rx_ring->skb = skb; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 24481cd..a54c144 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8053,10 +8053,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, #ifdef HAVE_BRIDGE_FILTER static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 __always_unused filter_mask) + u32 __always_unused filter_mask, int nlflags) #else static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev) + struct net_device *dev, int nlflags) #endif /* HAVE_BRIDGE_FILTER */ { struct i40e_netdev_priv *np = netdev_priv(dev); @@ -8078,7 +8078,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, if (!veb) return 0; - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, + nlflags); } #endif /* HAVE_BRIDGE_ATTRIBS */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8457d03..a0a9b1f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1036,7 +1036,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; if (q_vector->rx.ring) - adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL; + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; netif_napi_del(&q_vector->napi); @@ -1207,6 +1207,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, q_vector = adapter->q_vector[v_idx]; if (!q_vector) q_vector = kzalloc(size, GFP_KERNEL); + else + memset(q_vector, 0, size); if (!q_vector) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d3f4b0c..5be12a0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask) + u32 filter_mask, int nlflags) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, - adapter->bridge_mode, 0, 0); + adapter->bridge_mode, 0, 0, nlflags); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a16d267..e71cdde 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3612,7 +3612,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); if (!dst_mac || is_link_local_ether_addr(dst_mac)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index af829c5..7ace07d 100644 --- 
a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!np) { dev_err(&pdev->dev, "missing phy-handle\n"); - return -EINVAL; + err = -EINVAL; + goto err_netdev; } of_property_read_u32(np, "reg", &pep->phy_addr); pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); @@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) pep->smi_bus = mdiobus_alloc(); if (pep->smi_bus == NULL) { err = -ENOMEM; - goto err_base; + goto err_netdev; } pep->smi_bus->priv = pep; pep->smi_bus->name = "pxa168_eth smi"; @@ -1551,13 +1552,10 @@ err_mdiobus: mdiobus_unregister(pep->smi_bus); err_free_mdio: mdiobus_free(pep->smi_bus); -err_base: - iounmap(pep->base); err_netdev: free_netdev(dev); err_clk: - clk_disable(clk); - clk_put(clk); + clk_disable_unprepare(clk); return err; } @@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev) if (pep->phy) phy_disconnect(pep->phy); if (pep->clk) { - clk_disable(pep->clk); - clk_put(pep->clk); - pep->clk = NULL; + clk_disable_unprepare(pep->clk); } - iounmap(pep->base); - pep->base = NULL; mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); unregister_netdev(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 3f44e2b..a2ddf3d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) struct mlx4_en_priv *priv = netdev_priv(dev); /* check if requested function is supported by the device */ - if ((hfunc == ETH_RSS_HASH_TOP && - !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || - (hfunc == ETH_RSS_HASH_XOR && - !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) - return -EINVAL; + if (hfunc == ETH_RSS_HASH_TOP) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) + return -EINVAL; + if (!(dev->features & NETIF_F_RXHASH)) + en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); + return 0; + } else if (hfunc == ETH_RSS_HASH_XOR) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)) + return -EINVAL; + if (dev->features & NETIF_F_RXHASH) + en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); + return 0; + } - priv->rss_hash_fn = hfunc; - if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH)) - en_warn(priv, - "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); - if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH)) - en_warn(priv, - "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); - return 0; + return -EINVAL; } static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, @@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, priv->prof->rss_rings = rss_rings; if (key) memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc != ETH_RSS_HASH_NO_CHANGE) + priv->rss_hash_fn = hfunc; if (port_up) { err = mlx4_en_start_port(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0f1afc0..32f5ec7 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work) if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) mlx4_en_ptp_overflow_check(mdev); + mlx4_en_recover_from_oom(priv); queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); } @@ -1721,7 +1722,7 @@ mac_err: cq_err: while (rx_index--) { mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); - mlx4_en_free_affinity_hint(priv, i); + mlx4_en_free_affinity_hint(priv, rx_index); } for (i = 0; i < priv->rx_ring_num; i++) mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 54f0e5a..0a56f01 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -139,7 +139,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num) int i; int offset = next - start; - for (i = 0; i <= num; i++) { + for (i = 0; i < num; i++) { ret += be64_to_cpu(*curr); curr += offset; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4fdd3c3..2a77a6b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); } +static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) +{ + BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); + return ring->prod == ring->cons; +} + static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) { *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); @@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, ring->cons, ring->prod); /* Unmap and free Rx buffers */ - BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); - while (ring->cons != ring->prod) { + while (!mlx4_en_is_ring_empty(ring)) { index = ring->cons & ring->size_mask; en_dbg(DRV, priv, "Processing descriptor:%d\n", index); mlx4_en_free_rx_desc(priv, ring, index); @@ -491,6 +496,23 @@ err_allocator: return err; } +/* We recover from out of memory by scheduling our napi poll + * function (mlx4_en_process_cq), which tries to allocate + * all missing RX buffers (call to mlx4_en_refill_rx_buffers). 
+ */ +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) +{ + int ring; + + if (!priv->port_up) + return; + + for (ring = 0; ring < priv->rx_ring_num; ring++) { + if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) + napi_reschedule(&priv->rx_cq[ring]->napi); + } +} + void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1783705..f7bf312 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; ring->queue_index = queue_index; - if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index)) - cpumask_set_cpu(queue_index, &ring->affinity_mask); + if (queue_index < priv->num_tx_rings_p_up) + cpumask_set_cpu_local_first(queue_index, + priv->mdev->dev->numa_node, + &ring->affinity_mask); *pring = ring; return 0; @@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, &ring->qp, &ring->qp_state); - if (!user_prio && cpu_online(ring->queue_index)) + if (!cpumask_empty(&ring->affinity_mask)) netif_set_xps_queue(priv->dev, &ring->affinity_mask, ring->queue_index); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index a407981..e30bf57 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); #define MLX4_GET(dest, source, offset) \ do { \ void *__p = (char *) (source) + (offset); \ + u64 val; \ switch (sizeof (dest)) { \ case 1: (dest) = *(u8 *) __p; break; \ case 2: (dest) = be16_to_cpup(__p); break; \ case 4: (dest) = be32_to_cpup(__p); break; \ - case 8: (dest) = be64_to_cpup(__p); break; \ + case 8: val = get_unaligned((u64 *)__p); \ + (dest) = be64_to_cpu(val); break; \ default: __buggy_use_of_MLX4_GET(); \ } \ } while (0) @@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id) * swaps each 4-byte word before passing it back to * us. Therefore we need to swab it before printing. 
*/ - for (i = 0; i < 4; ++i) - ((u32 *) board_id)[i] = - swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + u32 *bid_u32 = (u32 *)board_id; + + for (i = 0; i < 4; ++i) { + u32 *addr; + u32 val; + + addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); + val = get_unaligned(addr); + val = swab32(val); + put_unaligned(val, &bid_u32[i]); + } } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 9de3021..d021f07 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride, int node); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index c7f28bf..92fce1b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2845,7 +2845,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, { int err; int eqn = vhcr->in_modifier; - int res_id = (slave << 8) | eqn; + int res_id = (slave << 10) | eqn; struct mlx4_eq_context *eqc = inbox->buf; int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; int mtt_size = eq_get_mtt_size(eqc); @@ -3051,7 +3051,7 @@ int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { int eqn = vhcr->in_modifier; - int res_id = eqn | (slave << 8); + int res_id = eqn | (slave << 10); struct res_eq *eq; int err; @@ -3108,7 +3108,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) return 0; mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); - res_id = (slave << 8) | event_eq->eqn; + res_id = (slave << 10) | event_eq->eqn; err = get_res(dev, slave, res_id, RES_EQ, &req); if (err) goto unlock; @@ -3131,7 +3131,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) memcpy(mailbox->buf, (u8 *) eqe, 28); - in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16); + in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, @@ -3157,7 +3157,7 @@ int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { int eqn = vhcr->in_modifier; - int res_id = eqn | (slave << 8); + int res_id = eqn | (slave << 10); struct res_eq *eq; int err; @@ -4714,13 +4714,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) break; case RES_EQ_HW: - err = mlx4_cmd(dev, slave, eqn & 0xff, + err = mlx4_cmd(dev, slave, eqn & 0x3ff, 1, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", - slave, eqn); + slave, eqn & 0x3ff); atomic_dec(&eq->mtt->ref_count); state = RES_EQ_RESERVED; break; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 1412f5a..2bae502 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -69,11 +69,7 @@ #include <net/ip.h> #include <net/tcp.h> #include <asm/byteorder.h> -#include <asm/io.h> 
#include <asm/processor.h> -#ifdef CONFIG_MTRR -#include <asm/mtrr.h> -#endif #include <net/busy_poll.h> #include "myri10ge_mcp.h" @@ -242,8 +238,7 @@ struct myri10ge_priv { unsigned int rdma_tags_available; int intr_coal_delay; __be32 __iomem *intr_coal_delay_ptr; - int mtrr; - int wc_enabled; + int wc_cookie; int down_cnt; wait_queue_head_t down_wq; struct work_struct watchdog_work; @@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", "tx_heartbeat_errors", "tx_window_errors", /* device-specific stats */ - "tx_boundary", "WC", "irq", "MSI", "MSIX", + "tx_boundary", "irq", "MSI", "MSIX", "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", "serial_number", "watchdog_resets", #ifdef CONFIG_MYRI10GE_DCA @@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i] = ((u64 *)&link_stats)[i]; data[i++] = (unsigned int)mgp->tx_boundary; - data[i++] = (unsigned int)mgp->wc_enabled; data[i++] = (unsigned int)mgp->pdev->irq; data[i++] = (unsigned int)mgp->msi_enabled; data[i++] = (unsigned int)mgp->msix_enabled; @@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mgp->board_span = pci_resource_len(pdev, 0); mgp->iomem_base = pci_resource_start(pdev, 0); - mgp->mtrr = -1; - mgp->wc_enabled = 0; -#ifdef CONFIG_MTRR - mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span, - MTRR_TYPE_WRCOMB, 1); - if (mgp->mtrr >= 0) - mgp->wc_enabled = 1; -#endif + mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span); mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); if (mgp->sram == NULL) { dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", @@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto abort_with_state; } if (mgp->msix_enabled) - dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n", + dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", mgp->num_slices, mgp->tx_boundary, mgp->fw_name, - (mgp->wc_enabled ? "Enabled" : "Disabled")); + (mgp->wc_cookie > 0 ? "Enabled" : "Disabled")); else - dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", + dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", mgp->msi_enabled ? "MSI" : "xPIC", pdev->irq, mgp->tx_boundary, mgp->fw_name, - (mgp->wc_enabled ? "Enabled" : "Disabled")); + (mgp->wc_cookie > 0 ? 
"Enabled" : "Disabled")); board_number++; return 0; @@ -4175,10 +4162,7 @@ abort_with_ioremap: iounmap(mgp->sram); abort_with_mtrr: -#ifdef CONFIG_MTRR - if (mgp->mtrr >= 0) - mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); -#endif + arch_phys_wc_del(mgp->wc_cookie); dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), mgp->cmd, mgp->cmd_bus); @@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev) pci_restore_state(pdev); iounmap(mgp->sram); - -#ifdef CONFIG_MTRR - if (mgp->mtrr >= 0) - mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); -#endif + arch_phys_wc_del(mgp->wc_cookie); myri10ge_free_slices(mgp); kfree(mgp->msix_vectors); dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 5c40683..7b43a3b 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) int i, j; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - spin_lock(&adapter->tx_clean_lock); + spin_lock_bh(&adapter->tx_clean_lock); cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; @@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) } cmd_buf++; } - spin_unlock(&adapter->tx_clean_lock); + spin_unlock_bh(&adapter->tx_clean_lock); } void netxen_free_sw_resources(struct netxen_adapter *adapter) @@ -1764,7 +1764,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) int done = 0; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; - if (!spin_trylock(&adapter->tx_clean_lock)) + if (!spin_trylock_bh(&adapter->tx_clean_lock)) return 1; sw_consumer = tx_ring->sw_consumer; @@ -1819,7 +1819,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); - spin_unlock(&adapter->tx_clean_lock); + spin_unlock_bh(&adapter->tx_clean_lock); return done; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index f66641d..6af028d 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -912,6 +912,8 @@ qca_spi_probe(struct spi_device *spi_device) qca->spi_dev = spi_device; qca->legacy_mode = legacy_mode; + spi_set_drvdata(spi_device, qcaspi_devs); + mac = of_get_mac_address(spi_device->dev.of_node); if (mac) @@ -944,8 +946,6 @@ qca_spi_probe(struct spi_device *spi_device) return -EFAULT; } - spi_set_drvdata(spi_device, qcaspi_devs); - qcaspi_init_device_debugfs(qca); return 0; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c70ab40..3df51fa 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6884,7 +6884,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp, rtl8169_start_xmit(nskb, tp->dev); } while (segs); - dev_kfree_skb(skb); + dev_consume_skb_any(skb); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb_checksum_help(skb) < 0) goto drop; @@ -6896,7 +6896,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp, drop: stats = &tp->dev->stats; stats->tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); } } diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index a570a60..ec25153 100644 --- 
a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4176,14 +4176,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev, static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask) + u32 filter_mask, int nlflags) { struct rocker_port *rocker_port = netdev_priv(dev); u16 mode = BRIDGE_MODE_UNDEF; u32 mask = BR_LEARNING | BR_LEARNING_SYNC; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, - rocker_port->brport_flags, mask); + rocker_port->brport_flags, mask, + nlflags); } static int rocker_port_get_phys_port_name(struct net_device *dev, diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 14b363a..630f0b7 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2238,9 +2238,10 @@ static int smc_drv_probe(struct platform_device *pdev) const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; - struct resource *res, *ires; + struct resource *res; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; + unsigned long irq_resflags; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); @@ -2332,16 +2333,19 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_free_netdev; } - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) { + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq <= 0) { ret = -ENODEV; goto out_release_io; } - - ndev->irq = ires->start; - - if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) - irq_flags = ires->flags & IRQF_TRIGGER_MASK; + /* + * If this platform does not specify any special irqflags, or if + * the resource supplies a trigger, override the irqflags with + * the trigger flags from the resource. 
+ */ + irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); + if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) + irq_flags = irq_resflags & IRQF_TRIGGER_MASK; ret = smc_request_attrib(pdev, ndev); if (ret) diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 41047c9..959aeea 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2418,9 +2418,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev) struct net_device *dev; struct smsc911x_data *pdata; struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); - struct resource *res, *irq_res; + struct resource *res; unsigned int intcfg = 0; - int res_size, irq_flags; + int res_size, irq, irq_flags; int retval; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -2434,8 +2434,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev) } res_size = resource_size(res); - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq_res) { + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { pr_warn("Could not allocate irq resource\n"); retval = -ENODEV; goto out_0; @@ -2455,8 +2455,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); pdata = netdev_priv(dev); - dev->irq = irq_res->start; - irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; + dev->irq = irq; + irq_flags = irq_get_trigger_type(irq); pdata->ioaddr = ioremap_nocache(res->start, res_size); pdata->dev = dev; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 705bbdf..68aec5c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -23,6 +23,7 @@ *******************************************************************************/ #include <linux/platform_device.h> +#include <linux/module.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_net.h> diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 2bef655..9b7e0a3 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); - if (ndev && slave->open) + if (ndev && slave->open && + slave->link_interface != SGMII_LINK_MAC_PHY && + slave->link_interface != XGMII_LINK_MAC_PHY) netif_carrier_on(ndev); } else { writel(mac_control, GBE_REG_ADDR(slave, emac_regs, @@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, cpsw_ale_control_set(gbe_dev->ale, slave->port_num, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); - if (ndev) + if (ndev && + slave->link_interface != SGMII_LINK_MAC_PHY && + slave->link_interface != XGMII_LINK_MAC_PHY) netif_carrier_off(ndev); } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 690a4c3..af2694d 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -707,8 +707,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p->app0 |= STS_CTRL_APP0_SOP; cur_p->len = skb_headlen(skb); - cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, - DMA_TO_DEVICE); + cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); cur_p->app4 = (unsigned long)skb; for (ii 
= 0; ii < num_frag; ii++) { diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a10b316..41071d3 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info; struct hv_netvsc_packet { /* Bookkeeping stuff */ u32 status; - bool part_of_skb; bool is_data_pkt; bool xmit_more; /* from skb */ @@ -612,6 +611,15 @@ struct multi_send_data { u32 count; /* counter of batched packets */ }; +/* The context of the netvsc device */ +struct net_device_context { + /* point back to our device context */ + struct hv_device *device_ctx; + struct delayed_work dwork; + struct work_struct work; + u32 msg_enable; /* debug level */ +}; + /* Per netvsc device */ struct netvsc_device { struct hv_device *dev; @@ -667,6 +675,9 @@ struct netvsc_device { struct multi_send_data msd[NR_CPUS]; u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ + + /* The net device context */ + struct net_device_context *nd_ctx; }; /* NdisInitialize message */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 2e8ad06..ea091bc 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -826,7 +826,6 @@ int netvsc_send(struct hv_device *device, u16 q_idx = packet->q_idx; u32 pktlen = packet->total_data_buflen, msd_len = 0; unsigned int section_index = NETVSC_INVALID_INDEX; - struct sk_buff *skb = NULL; unsigned long flag; struct multi_send_data *msdp; struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; @@ -889,11 +888,6 @@ int netvsc_send(struct hv_device *device, } else { packet->page_buf_cnt = 0; packet->total_data_buflen += msd_len; - if (!packet->part_of_skb) { - skb = (struct sk_buff *)(unsigned long)packet-> - send_completion_tid; - packet->send_completion_tid = 0; - } } if (msdp->pkt) @@ -929,12 +923,8 @@ int netvsc_send(struct hv_device *device, if (cur_send) ret = netvsc_send_pkt(cur_send, net_device); - if (ret != 0) { - if (section_index != NETVSC_INVALID_INDEX) - netvsc_free_send_slot(net_device, section_index); - } else if (skb) { - dev_kfree_skb_any(skb); - } + if (ret != 0 && section_index != NETVSC_INVALID_INDEX) + netvsc_free_send_slot(net_device, section_index); return ret; } @@ -1197,6 +1187,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) */ ndev = net_device->ndev; + /* Add netvsc_device context to netvsc_device */ + net_device->nd_ctx = netdev_priv(ndev); + /* Initialize the NetVSC channel extension */ init_completion(&net_device->channel_init_wait); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a3a9d38..5993c7e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -40,18 +40,21 @@ #include "hyperv_net.h" -struct net_device_context { - /* point back to our device context */ - struct hv_device *device_ctx; - struct delayed_work dwork; - struct work_struct work; -}; #define RING_SIZE_MIN 64 static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); +static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR; + +static int debug = -1; +module_param(debug, int, S_IRUGO); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + static void do_set_multicast(struct work_struct *w) { struct net_device_context *ndevctx = @@ -235,9 +238,6 @@ void 
netvsc_xmit_completion(void *context) struct sk_buff *skb = (struct sk_buff *) (unsigned long)packet->send_completion_tid; - if (!packet->part_of_skb) - kfree(packet); - if (skb) dev_kfree_skb_any(skb); } @@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 net_trans_info; u32 hash; u32 skb_length; - u32 head_room; u32 pkt_sz; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; @@ -402,7 +401,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) check_size: skb_length = skb->len; - head_room = skb_headroom(skb); num_data_pgs = netvsc_get_slots(skb) + 2; if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", @@ -421,20 +419,14 @@ check_size: pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; - if (head_room < pkt_sz) { - packet = kmalloc(pkt_sz, GFP_ATOMIC); - if (!packet) { - /* out of memory, drop packet */ - netdev_err(net, "unable to alloc hv_netvsc_packet\n"); - ret = -ENOMEM; - goto drop; - } - packet->part_of_skb = false; - } else { - /* Use the headroom for building up the packet */ - packet = (struct hv_netvsc_packet *)skb->head; - packet->part_of_skb = true; + ret = skb_cow_head(skb, pkt_sz); + if (ret) { + netdev_err(net, "unable to alloc hv_netvsc_packet\n"); + ret = -ENOMEM; + goto drop; } + /* Use the headroom for building up the packet */ + packet = (struct hv_netvsc_packet *)skb->head; packet->status = 0; packet->xmit_more = skb->xmit_more; @@ -591,8 +583,6 @@ drop: net->stats.tx_bytes += skb_length; net->stats.tx_packets++; } else { - if (packet && !packet->part_of_skb) - kfree(packet); if (ret != -EAGAIN) { dev_kfree_skb_any(skb); net->stats.tx_dropped++; @@ -888,6 +878,11 @@ static int netvsc_probe(struct hv_device *dev, net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; + net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); + if (netif_msg_probe(net_device_ctx)) + netdev_dbg(net, "netvsc msg_enable: %d\n", + net_device_ctx->msg_enable); + hv_set_drvdata(dev, net); INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 0d92efe..9118cea 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -429,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev, rndis_msg = pkt->data; - dump_rndis_message(dev, rndis_msg); + if (netif_msg_rx_err(net_dev->nd_ctx)) + dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 3802665..67d00fb 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -85,6 +85,7 @@ struct at86rf230_local { struct ieee802154_hw *hw; struct at86rf2xx_chip_data *data; struct regmap *regmap; + int slp_tr; struct completion state_complete; struct at86rf230_state_change state; @@ -95,163 +96,164 @@ struct at86rf230_local { unsigned long cal_timeout; s8 max_frame_retries; bool is_tx; + bool is_tx_from_off; u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; }; -#define RG_TRX_STATUS (0x01) -#define SR_TRX_STATUS 0x01, 0x1f, 0 -#define SR_RESERVED_01_3 0x01, 0x20, 5 -#define SR_CCA_STATUS 0x01, 0x40, 6 -#define SR_CCA_DONE 0x01, 0x80, 7 -#define RG_TRX_STATE (0x02) -#define SR_TRX_CMD 0x02, 0x1f, 0 -#define SR_TRAC_STATUS 0x02, 
0xe0, 5 -#define RG_TRX_CTRL_0 (0x03) -#define SR_CLKM_CTRL 0x03, 0x07, 0 -#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 -#define SR_PAD_IO_CLKM 0x03, 0x30, 4 -#define SR_PAD_IO 0x03, 0xc0, 6 -#define RG_TRX_CTRL_1 (0x04) -#define SR_IRQ_POLARITY 0x04, 0x01, 0 -#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 -#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 -#define SR_RX_BL_CTRL 0x04, 0x10, 4 -#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 -#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 -#define SR_PA_EXT_EN 0x04, 0x80, 7 -#define RG_PHY_TX_PWR (0x05) -#define SR_TX_PWR 0x05, 0x0f, 0 -#define SR_PA_LT 0x05, 0x30, 4 -#define SR_PA_BUF_LT 0x05, 0xc0, 6 -#define RG_PHY_RSSI (0x06) -#define SR_RSSI 0x06, 0x1f, 0 -#define SR_RND_VALUE 0x06, 0x60, 5 -#define SR_RX_CRC_VALID 0x06, 0x80, 7 -#define RG_PHY_ED_LEVEL (0x07) -#define SR_ED_LEVEL 0x07, 0xff, 0 -#define RG_PHY_CC_CCA (0x08) -#define SR_CHANNEL 0x08, 0x1f, 0 -#define SR_CCA_MODE 0x08, 0x60, 5 -#define SR_CCA_REQUEST 0x08, 0x80, 7 -#define RG_CCA_THRES (0x09) -#define SR_CCA_ED_THRES 0x09, 0x0f, 0 -#define SR_RESERVED_09_1 0x09, 0xf0, 4 -#define RG_RX_CTRL (0x0a) -#define SR_PDT_THRES 0x0a, 0x0f, 0 -#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 -#define RG_SFD_VALUE (0x0b) -#define SR_SFD_VALUE 0x0b, 0xff, 0 -#define RG_TRX_CTRL_2 (0x0c) -#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 -#define SR_SUB_MODE 0x0c, 0x04, 2 -#define SR_BPSK_QPSK 0x0c, 0x08, 3 -#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 -#define SR_RESERVED_0c_5 0x0c, 0x60, 5 -#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 -#define RG_ANT_DIV (0x0d) -#define SR_ANT_CTRL 0x0d, 0x03, 0 -#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 -#define SR_ANT_DIV_EN 0x0d, 0x08, 3 -#define SR_RESERVED_0d_2 0x0d, 0x70, 4 -#define SR_ANT_SEL 0x0d, 0x80, 7 -#define RG_IRQ_MASK (0x0e) -#define SR_IRQ_MASK 0x0e, 0xff, 0 -#define RG_IRQ_STATUS (0x0f) -#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 -#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 -#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 -#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 -#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 -#define SR_IRQ_5_AMI 0x0f, 0x20, 5 -#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 -#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 -#define RG_VREG_CTRL (0x10) -#define SR_RESERVED_10_6 0x10, 0x03, 0 -#define SR_DVDD_OK 0x10, 0x04, 2 -#define SR_DVREG_EXT 0x10, 0x08, 3 -#define SR_RESERVED_10_3 0x10, 0x30, 4 -#define SR_AVDD_OK 0x10, 0x40, 6 -#define SR_AVREG_EXT 0x10, 0x80, 7 -#define RG_BATMON (0x11) -#define SR_BATMON_VTH 0x11, 0x0f, 0 -#define SR_BATMON_HR 0x11, 0x10, 4 -#define SR_BATMON_OK 0x11, 0x20, 5 -#define SR_RESERVED_11_1 0x11, 0xc0, 6 -#define RG_XOSC_CTRL (0x12) -#define SR_XTAL_TRIM 0x12, 0x0f, 0 -#define SR_XTAL_MODE 0x12, 0xf0, 4 -#define RG_RX_SYN (0x15) -#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 -#define SR_RESERVED_15_2 0x15, 0x70, 4 -#define SR_RX_PDT_DIS 0x15, 0x80, 7 -#define RG_XAH_CTRL_1 (0x17) -#define SR_RESERVED_17_8 0x17, 0x01, 0 -#define SR_AACK_PROM_MODE 0x17, 0x02, 1 -#define SR_AACK_ACK_TIME 0x17, 0x04, 2 -#define SR_RESERVED_17_5 0x17, 0x08, 3 -#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 -#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 -#define SR_CSMA_LBT_MODE 0x17, 0x40, 6 -#define SR_RESERVED_17_1 0x17, 0x80, 7 -#define RG_FTN_CTRL (0x18) -#define SR_RESERVED_18_2 0x18, 0x7f, 0 -#define SR_FTN_START 0x18, 0x80, 7 -#define RG_PLL_CF (0x1a) -#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 -#define SR_PLL_CF_START 0x1a, 0x80, 7 -#define RG_PLL_DCU (0x1b) -#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 -#define SR_RESERVED_1b_2 0x1b, 0x40, 6 -#define SR_PLL_DCU_START 0x1b, 0x80, 7 -#define RG_PART_NUM (0x1c) -#define SR_PART_NUM 
0x1c, 0xff, 0 -#define RG_VERSION_NUM (0x1d) -#define SR_VERSION_NUM 0x1d, 0xff, 0 -#define RG_MAN_ID_0 (0x1e) -#define SR_MAN_ID_0 0x1e, 0xff, 0 -#define RG_MAN_ID_1 (0x1f) -#define SR_MAN_ID_1 0x1f, 0xff, 0 -#define RG_SHORT_ADDR_0 (0x20) -#define SR_SHORT_ADDR_0 0x20, 0xff, 0 -#define RG_SHORT_ADDR_1 (0x21) -#define SR_SHORT_ADDR_1 0x21, 0xff, 0 -#define RG_PAN_ID_0 (0x22) -#define SR_PAN_ID_0 0x22, 0xff, 0 -#define RG_PAN_ID_1 (0x23) -#define SR_PAN_ID_1 0x23, 0xff, 0 -#define RG_IEEE_ADDR_0 (0x24) -#define SR_IEEE_ADDR_0 0x24, 0xff, 0 -#define RG_IEEE_ADDR_1 (0x25) -#define SR_IEEE_ADDR_1 0x25, 0xff, 0 -#define RG_IEEE_ADDR_2 (0x26) -#define SR_IEEE_ADDR_2 0x26, 0xff, 0 -#define RG_IEEE_ADDR_3 (0x27) -#define SR_IEEE_ADDR_3 0x27, 0xff, 0 -#define RG_IEEE_ADDR_4 (0x28) -#define SR_IEEE_ADDR_4 0x28, 0xff, 0 -#define RG_IEEE_ADDR_5 (0x29) -#define SR_IEEE_ADDR_5 0x29, 0xff, 0 -#define RG_IEEE_ADDR_6 (0x2a) -#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 -#define RG_IEEE_ADDR_7 (0x2b) -#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 -#define RG_XAH_CTRL_0 (0x2c) -#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 -#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 -#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 -#define RG_CSMA_SEED_0 (0x2d) -#define SR_CSMA_SEED_0 0x2d, 0xff, 0 -#define RG_CSMA_SEED_1 (0x2e) -#define SR_CSMA_SEED_1 0x2e, 0x07, 0 -#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 -#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 -#define SR_AACK_SET_PD 0x2e, 0x20, 5 -#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 -#define RG_CSMA_BE (0x2f) -#define SR_MIN_BE 0x2f, 0x0f, 0 -#define SR_MAX_BE 0x2f, 0xf0, 4 +#define RG_TRX_STATUS (0x01) +#define SR_TRX_STATUS 0x01, 0x1f, 0 +#define SR_RESERVED_01_3 0x01, 0x20, 5 +#define SR_CCA_STATUS 0x01, 0x40, 6 +#define SR_CCA_DONE 0x01, 0x80, 7 +#define RG_TRX_STATE (0x02) +#define SR_TRX_CMD 0x02, 0x1f, 0 +#define SR_TRAC_STATUS 0x02, 0xe0, 5 +#define RG_TRX_CTRL_0 (0x03) +#define SR_CLKM_CTRL 0x03, 0x07, 0 +#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 +#define SR_PAD_IO_CLKM 0x03, 0x30, 4 +#define SR_PAD_IO 0x03, 0xc0, 6 +#define RG_TRX_CTRL_1 (0x04) +#define SR_IRQ_POLARITY 0x04, 0x01, 0 +#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 +#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 +#define SR_RX_BL_CTRL 0x04, 0x10, 4 +#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 +#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 +#define SR_PA_EXT_EN 0x04, 0x80, 7 +#define RG_PHY_TX_PWR (0x05) +#define SR_TX_PWR 0x05, 0x0f, 0 +#define SR_PA_LT 0x05, 0x30, 4 +#define SR_PA_BUF_LT 0x05, 0xc0, 6 +#define RG_PHY_RSSI (0x06) +#define SR_RSSI 0x06, 0x1f, 0 +#define SR_RND_VALUE 0x06, 0x60, 5 +#define SR_RX_CRC_VALID 0x06, 0x80, 7 +#define RG_PHY_ED_LEVEL (0x07) +#define SR_ED_LEVEL 0x07, 0xff, 0 +#define RG_PHY_CC_CCA (0x08) +#define SR_CHANNEL 0x08, 0x1f, 0 +#define SR_CCA_MODE 0x08, 0x60, 5 +#define SR_CCA_REQUEST 0x08, 0x80, 7 +#define RG_CCA_THRES (0x09) +#define SR_CCA_ED_THRES 0x09, 0x0f, 0 +#define SR_RESERVED_09_1 0x09, 0xf0, 4 +#define RG_RX_CTRL (0x0a) +#define SR_PDT_THRES 0x0a, 0x0f, 0 +#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 +#define RG_SFD_VALUE (0x0b) +#define SR_SFD_VALUE 0x0b, 0xff, 0 +#define RG_TRX_CTRL_2 (0x0c) +#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 +#define SR_SUB_MODE 0x0c, 0x04, 2 +#define SR_BPSK_QPSK 0x0c, 0x08, 3 +#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 +#define SR_RESERVED_0c_5 0x0c, 0x60, 5 +#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 +#define RG_ANT_DIV (0x0d) +#define SR_ANT_CTRL 0x0d, 0x03, 0 +#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 +#define SR_ANT_DIV_EN 0x0d, 0x08, 3 +#define SR_RESERVED_0d_2 0x0d, 0x70, 4 +#define SR_ANT_SEL 0x0d, 
0x80, 7 +#define RG_IRQ_MASK (0x0e) +#define SR_IRQ_MASK 0x0e, 0xff, 0 +#define RG_IRQ_STATUS (0x0f) +#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 +#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 +#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 +#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 +#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 +#define SR_IRQ_5_AMI 0x0f, 0x20, 5 +#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 +#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 +#define RG_VREG_CTRL (0x10) +#define SR_RESERVED_10_6 0x10, 0x03, 0 +#define SR_DVDD_OK 0x10, 0x04, 2 +#define SR_DVREG_EXT 0x10, 0x08, 3 +#define SR_RESERVED_10_3 0x10, 0x30, 4 +#define SR_AVDD_OK 0x10, 0x40, 6 +#define SR_AVREG_EXT 0x10, 0x80, 7 +#define RG_BATMON (0x11) +#define SR_BATMON_VTH 0x11, 0x0f, 0 +#define SR_BATMON_HR 0x11, 0x10, 4 +#define SR_BATMON_OK 0x11, 0x20, 5 +#define SR_RESERVED_11_1 0x11, 0xc0, 6 +#define RG_XOSC_CTRL (0x12) +#define SR_XTAL_TRIM 0x12, 0x0f, 0 +#define SR_XTAL_MODE 0x12, 0xf0, 4 +#define RG_RX_SYN (0x15) +#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 +#define SR_RESERVED_15_2 0x15, 0x70, 4 +#define SR_RX_PDT_DIS 0x15, 0x80, 7 +#define RG_XAH_CTRL_1 (0x17) +#define SR_RESERVED_17_8 0x17, 0x01, 0 +#define SR_AACK_PROM_MODE 0x17, 0x02, 1 +#define SR_AACK_ACK_TIME 0x17, 0x04, 2 +#define SR_RESERVED_17_5 0x17, 0x08, 3 +#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 +#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 +#define SR_CSMA_LBT_MODE 0x17, 0x40, 6 +#define SR_RESERVED_17_1 0x17, 0x80, 7 +#define RG_FTN_CTRL (0x18) +#define SR_RESERVED_18_2 0x18, 0x7f, 0 +#define SR_FTN_START 0x18, 0x80, 7 +#define RG_PLL_CF (0x1a) +#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 +#define SR_PLL_CF_START 0x1a, 0x80, 7 +#define RG_PLL_DCU (0x1b) +#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 +#define SR_RESERVED_1b_2 0x1b, 0x40, 6 +#define SR_PLL_DCU_START 0x1b, 0x80, 7 +#define RG_PART_NUM (0x1c) +#define SR_PART_NUM 0x1c, 0xff, 0 +#define RG_VERSION_NUM (0x1d) +#define SR_VERSION_NUM 0x1d, 0xff, 0 +#define RG_MAN_ID_0 (0x1e) +#define SR_MAN_ID_0 0x1e, 0xff, 0 +#define RG_MAN_ID_1 (0x1f) +#define SR_MAN_ID_1 0x1f, 0xff, 0 +#define RG_SHORT_ADDR_0 (0x20) +#define SR_SHORT_ADDR_0 0x20, 0xff, 0 +#define RG_SHORT_ADDR_1 (0x21) +#define SR_SHORT_ADDR_1 0x21, 0xff, 0 +#define RG_PAN_ID_0 (0x22) +#define SR_PAN_ID_0 0x22, 0xff, 0 +#define RG_PAN_ID_1 (0x23) +#define SR_PAN_ID_1 0x23, 0xff, 0 +#define RG_IEEE_ADDR_0 (0x24) +#define SR_IEEE_ADDR_0 0x24, 0xff, 0 +#define RG_IEEE_ADDR_1 (0x25) +#define SR_IEEE_ADDR_1 0x25, 0xff, 0 +#define RG_IEEE_ADDR_2 (0x26) +#define SR_IEEE_ADDR_2 0x26, 0xff, 0 +#define RG_IEEE_ADDR_3 (0x27) +#define SR_IEEE_ADDR_3 0x27, 0xff, 0 +#define RG_IEEE_ADDR_4 (0x28) +#define SR_IEEE_ADDR_4 0x28, 0xff, 0 +#define RG_IEEE_ADDR_5 (0x29) +#define SR_IEEE_ADDR_5 0x29, 0xff, 0 +#define RG_IEEE_ADDR_6 (0x2a) +#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 +#define RG_IEEE_ADDR_7 (0x2b) +#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 +#define RG_XAH_CTRL_0 (0x2c) +#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 +#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 +#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 +#define RG_CSMA_SEED_0 (0x2d) +#define SR_CSMA_SEED_0 0x2d, 0xff, 0 +#define RG_CSMA_SEED_1 (0x2e) +#define SR_CSMA_SEED_1 0x2e, 0x07, 0 +#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 +#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 +#define SR_AACK_SET_PD 0x2e, 0x20, 5 +#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 +#define RG_CSMA_BE (0x2f) +#define SR_MIN_BE 0x2f, 0x0f, 0 +#define SR_MAX_BE 0x2f, 0xf0, 4 #define CMD_REG 0x80 #define CMD_REG_MASK 0x3f @@ -292,6 +294,8 @@ struct at86rf230_local { #define 
STATE_BUSY_RX_AACK_NOCLK 0x1E #define STATE_TRANSITION_IN_PROGRESS 0x1F +#define TRX_STATE_MASK (0x1F) + #define AT86RF2XX_NUMREGS 0x3F static void @@ -336,6 +340,14 @@ at86rf230_write_subreg(struct at86rf230_local *lp, return regmap_update_bits(lp->regmap, addr, mask, data << shift); } +static inline void +at86rf230_slp_tr_rising_edge(struct at86rf230_local *lp) +{ + gpio_set_value(lp->slp_tr, 1); + udelay(1); + gpio_set_value(lp->slp_tr, 0); +} + static bool at86rf230_reg_writeable(struct device *dev, unsigned int reg) { @@ -509,7 +521,7 @@ at86rf230_async_state_assert(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; const u8 *buf = ctx->buf; - const u8 trx_state = buf[1] & 0x1f; + const u8 trx_state = buf[1] & TRX_STATE_MASK; /* Assert state change */ if (trx_state != ctx->to_state) { @@ -609,11 +621,17 @@ at86rf230_async_state_delay(void *context) switch (ctx->to_state) { case STATE_RX_AACK_ON: tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC); + /* state change from TRX_OFF to RX_AACK_ON to do a + * calibration, we need to reset the timeout for the + * next one. + */ + lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; goto change; + case STATE_TX_ARET_ON: case STATE_TX_ON: tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC); - /* state change from TRX_OFF to TX_ON to do a - * calibration, we need to reset the timeout for the + /* state change from TRX_OFF to TX_ON or ARET_ON to do + * a calibration, we need to reset the timeout for the * next one. */ lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; @@ -667,7 +685,7 @@ at86rf230_async_state_change_start(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; u8 *buf = ctx->buf; - const u8 trx_state = buf[1] & 0x1f; + const u8 trx_state = buf[1] & TRX_STATE_MASK; int rc; /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */ @@ -773,16 +791,6 @@ at86rf230_tx_on(void *context) } static void -at86rf230_tx_trac_error(void *context) -{ - struct at86rf230_state_change *ctx = context; - struct at86rf230_local *lp = ctx->lp; - - at86rf230_async_state_change(lp, ctx, STATE_TX_ON, - at86rf230_tx_on, true); -} - -static void at86rf230_tx_trac_check(void *context) { struct at86rf230_state_change *ctx = context; @@ -791,12 +799,12 @@ at86rf230_tx_trac_check(void *context) const u8 trac = (buf[1] & 0xe0) >> 5; /* If trac status is different than zero we need to do a state change - * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver - * state to TX_ON. + * to STATE_FORCE_TRX_OFF then STATE_RX_AACK_ON to recover the + * transceiver. */ if (trac) at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, - at86rf230_tx_trac_error, true); + at86rf230_tx_on, true); else at86rf230_tx_on(context); } @@ -941,13 +949,18 @@ at86rf230_write_frame_complete(void *context) u8 *buf = ctx->buf; int rc; - buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; - buf[1] = STATE_BUSY_TX; ctx->trx.len = 2; - ctx->msg.complete = NULL; - rc = spi_async(lp->spi, &ctx->msg); - if (rc) - at86rf230_async_error(lp, ctx, rc); + + if (gpio_is_valid(lp->slp_tr)) { + at86rf230_slp_tr_rising_edge(lp); + } else { + buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; + buf[1] = STATE_BUSY_TX; + ctx->msg.complete = NULL; + rc = spi_async(lp->spi, &ctx->msg); + if (rc) + at86rf230_async_error(lp, ctx, rc); + } } static void @@ -993,12 +1006,21 @@ at86rf230_xmit_start(void *context) * are in STATE_TX_ON. 
The pfad differs here, so we change * the complete handler. */ - if (lp->tx_aret) - at86rf230_async_state_change(lp, ctx, STATE_TX_ON, - at86rf230_xmit_tx_on, false); - else + if (lp->tx_aret) { + if (lp->is_tx_from_off) { + lp->is_tx_from_off = false; + at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, + at86rf230_xmit_tx_on, + false); + } else { + at86rf230_async_state_change(lp, ctx, STATE_TX_ON, + at86rf230_xmit_tx_on, + false); + } + } else { at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_write_frame, false); + } } static int @@ -1017,11 +1039,13 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) * to TX_ON, the lp->cal_timeout should be reinit by state_delay * function then to start in the next 5 minutes. */ - if (time_is_before_jiffies(lp->cal_timeout)) + if (time_is_before_jiffies(lp->cal_timeout)) { + lp->is_tx_from_off = true; at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, at86rf230_xmit_start, false); - else + } else { at86rf230_xmit_start(ctx); + } return 0; } @@ -1037,9 +1061,6 @@ at86rf230_ed(struct ieee802154_hw *hw, u8 *level) static int at86rf230_start(struct ieee802154_hw *hw) { - struct at86rf230_local *lp = hw->priv; - - lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON); } @@ -1673,6 +1694,7 @@ static int at86rf230_probe(struct spi_device *spi) lp = hw->priv; lp->hw = hw; lp->spi = spi; + lp->slp_tr = slp_tr; hw->parent = &spi->dev; hw->vif_data_size = sizeof(*lp); ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b227a13..9f59f17 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -599,10 +599,18 @@ static int macvlan_open(struct net_device *dev) goto del_unicast; } + if (dev->flags & IFF_PROMISC) { + err = dev_set_promiscuity(lowerdev, 1); + if (err < 0) + goto clear_multi; + } + hash_add: macvlan_hash_add(vlan); return 0; +clear_multi: + dev_set_allmulti(lowerdev, -1); del_unicast: dev_uc_del(lowerdev, dev->dev_addr); out: @@ -638,6 +646,9 @@ static int macvlan_stop(struct net_device *dev) if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(lowerdev, -1); + dev_uc_del(lowerdev, dev->dev_addr); hash_del: @@ -696,6 +707,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) if (dev->flags & IFF_UP) { if (change & IFF_ALLMULTI) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (change & IFF_PROMISC) + dev_set_promiscuity(lowerdev, + dev->flags & IFF_PROMISC ? 1 : -1); + } } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 8fadaa1..70641d2 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -27,6 +27,7 @@ config AMD_PHY config AMD_XGBE_PHY tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs" depends on (OF || ACPI) && HAS_IOMEM + depends on ARM64 || COMPILE_TEST ---help--- Currently supports the AMD 10GbE PHY diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 49ce7ec..53d1815 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. 
*/ - gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low); + gpio_set_value_cansleep(bitbang->mdo, + 1 ^ bitbang->mdo_active_low); return; } @@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low; + return gpio_get_value_cansleep(bitbang->mdio) ^ + bitbang->mdio_active_low; } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low); + gpio_set_value_cansleep(bitbang->mdo, + what ^ bitbang->mdo_active_low); else - gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low); + gpio_set_value_cansleep(bitbang->mdio, + what ^ bitbang->mdio_active_low); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low); + gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low); } static struct mdiobb_ops mdio_gpio_ops = { @@ -164,7 +168,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, if (!new_bus->irq[i]) new_bus->irq[i] = PHY_POLL; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); + if (bus_id != -1) + snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); + else + strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); if (devm_gpio_request(dev, bitbang->mdc, "mdc")) goto out_free_bus; diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 1a87a58..66edd99 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -12,33 +12,30 @@ #include <linux/module.h> #include <linux/phy.h> #include <linux/mdio-mux.h> -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #define DRV_VERSION "1.1" #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" -#define MDIO_MUX_GPIO_MAX_BITS 8 - struct mdio_mux_gpio_state { - struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS]; - unsigned int num_gpios; + struct gpio_descs *gpios; void *mux_handle; }; static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, void *data) { - int values[MDIO_MUX_GPIO_MAX_BITS]; - unsigned int n; struct mdio_mux_gpio_state *s = data; + int values[s->gpios->ndescs]; + unsigned int n; if (current_child == desired_child) return 0; - for (n = 0; n < s->num_gpios; n++) { + for (n = 0; n < s->gpios->ndescs; n++) values[n] = (desired_child >> n) & 1; - } - gpiod_set_array_cansleep(s->num_gpios, s->gpio, values); + + gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values); return 0; } @@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, static int mdio_mux_gpio_probe(struct platform_device *pdev) { struct mdio_mux_gpio_state *s; - int num_gpios; - unsigned int n; int r; - if (!pdev->dev.of_node) - return -ENODEV; - - num_gpios = of_gpio_count(pdev->dev.of_node); - if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS) - return -ENODEV; - s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; - s->num_gpios = num_gpios; - - for (n = 0; n < num_gpios; ) { - struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n, - GPIOD_OUT_LOW); - if (IS_ERR(gpio)) { - r = 
PTR_ERR(gpio); - goto err; - } - s->gpio[n] = gpio; - n++; - } + s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); + if (IS_ERR(s->gpios)) + return PTR_ERR(s->gpios); r = mdio_mux_init(&pdev->dev, mdio_mux_gpio_switch_fn, &s->mux_handle, s); - if (r == 0) { - pdev->dev.platform_data = s; - return 0; - } -err: - while (n) { - n--; - gpiod_put(s->gpio[n]); + if (r != 0) { + gpiod_put_array(s->gpios); + return r; } - return r; + + pdev->dev.platform_data = s; + return 0; } static int mdio_mux_gpio_remove(struct platform_device *pdev) { - unsigned int n; struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); mdio_mux_uninit(s->mux_handle); - for (n = 0; n < s->num_gpios; n++) - gpiod_put(s->gpio[n]); + gpiod_put_array(s->gpios); return 0; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 1190fd8..ebdc357 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -548,7 +548,8 @@ static int kszphy_probe(struct phy_device *phydev) } clk = devm_clk_get(&phydev->dev, "rmii-ref"); - if (!IS_ERR(clk)) { + /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */ + if (!IS_ERR_OR_NULL(clk)) { unsigned long rate = clk_get_rate(clk); bool rmii_ref_clk_sel_25_mhz; diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index 911b216..05005c6 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c @@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, struct blkcipher_desc desc = { .tfm = state->arc4 }; unsigned ccount; int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; - int sanity = 0; struct scatterlist sg_in[1], sg_out[1]; if (isize <= PPP_HDRLEN + MPPE_OVHD) { @@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, "mppe_decompress[%d]: ENCRYPTED bit not set!\n", state->unit); state->sanity_errors += 100; - sanity = 1; + goto sanity_error; } if (!state->stateful && !flushed) { printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " "stateless mode!\n", state->unit); state->sanity_errors += 100; - sanity = 1; + goto sanity_error; } if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " "flag packet!\n", state->unit); state->sanity_errors += 100; - sanity = 1; - } - - if (sanity) { - if (state->sanity_errors < SANITY_MAX) - return DECOMP_ERROR; - else - /* - * Take LCP down if the peer is sending too many bogons. - * We don't want to do this for a single or just a few - * instances since it could just be due to packet corruption. - */ - return DECOMP_FATALERROR; + goto sanity_error; } /* @@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, */ if (!state->stateful) { + /* Discard late packet */ + if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE + > MPPE_CCOUNT_SPACE / 2) { + state->sanity_errors++; + goto sanity_error; + } + /* RFC 3078, sec 8.1. Rekey for every packet. */ while (state->ccount != ccount) { mppe_rekey(state, 0); @@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, state->sanity_errors >>= 1; return osize; + +sanity_error: + if (state->sanity_errors < SANITY_MAX) + return DECOMP_ERROR; + else + /* Take LCP down if the peer is sending too many bogons. + * We don't want to do this for a single or just a few + * instances since it could just be due to packet corruption. 
+ */ + return DECOMP_FATALERROR; } /* diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index aa1dd92..b62a5e3 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -465,6 +465,10 @@ static void pppoe_unbind_sock_work(struct work_struct *work) struct sock *sk = sk_pppox(po); lock_sock(sk); + if (po->pppoe_dev) { + dev_put(po->pppoe_dev); + po->pppoe_dev = NULL; + } pppox_unbind_sock(sk); release_sock(sk); sock_put(sk); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ac4d03b..aafa1a1 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -4116,6 +4116,7 @@ static struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, {} }; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 733f4fe..3c86b10 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, struct net_device *net) { struct usbnet *dev = netdev_priv(net); - int length; + unsigned int length; struct urb *urb = NULL; struct skb_data *entry; struct driver_info *info = dev->driver_info; @@ -1413,7 +1413,7 @@ not_drop: } } else netif_dbg(dev, tx_queued, dev->net, - "> tx, len %d, type 0x%x\n", length, skb->protocol); + "> tx, len %u, type 0x%x\n", length, skb->protocol); #ifdef CONFIG_PM deferred: #endif diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 154116a..27a5f95 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -730,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, /* Only change unicasts */ if (!(is_multicast_ether_addr(f->eth_addr) || is_zero_ether_addr(f->eth_addr))) { - int rc = vxlan_fdb_replace(f, ip, port, vni, + notify |= vxlan_fdb_replace(f, ip, port, vni, ifindex); - - if (rc < 0) - return rc; - notify |= rc; } else return -EOPNOTSUPP; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 0acd079..3ad79bb 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1103,28 +1103,14 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, struct sk_buff *skb; struct ath_frame_info *fi; struct ieee80211_tx_info *info; - struct ieee80211_vif *vif; struct ath_hw *ah = sc->sc_ah; if (sc->tx99_state || !ah->tpc_enabled) return MAX_RATE_POWER; skb = bf->bf_mpdu; - info = IEEE80211_SKB_CB(skb); - vif = info->control.vif; - - if (!vif) { - max_power = sc->cur_chan->cur_txpower; - goto out; - } - - if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) { - max_power = min_t(u8, sc->cur_chan->cur_txpower, - 2 * vif->bss_conf.txpower); - goto out; - } - fi = get_frame_info(skb); + info = IEEE80211_SKB_CB(skb); if (!AR_SREV_9300_20_OR_LATER(ah)) { int txpower = fi->tx_power; @@ -1161,25 +1147,26 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, txpower -= 2; txpower = max(txpower, 0); - max_power = min_t(u8, ah->tx_power[rateidx], - 2 * vif->bss_conf.txpower); - max_power = min_t(u8, max_power, txpower); + max_power = min_t(u8, ah->tx_power[rateidx], txpower); + + /* XXX: clamp minimum TX power at 1 for AR9160 since if + * max_power is set to 0, frames are transmitted at max + * TX power + */ + if (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) + max_power = 1; } else if (!bf->bf_state.bfs_paprd) { if (rateidx < 8 && 
(info->flags & IEEE80211_TX_CTL_STBC)) max_power = min_t(u8, ah->tx_power_stbc[rateidx], - 2 * vif->bss_conf.txpower); + fi->tx_power); else max_power = min_t(u8, ah->tx_power[rateidx], - 2 * vif->bss_conf.txpower); - max_power = min(max_power, fi->tx_power); + fi->tx_power); } else { max_power = ah->paprd_training_power; } -out: - /* XXX: clamp minimum TX power at 1 for AR9160 since if max_power - * is set to 0, frames are transmitted at max TX power - */ - return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power; + + return max_power; } static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, @@ -2129,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct ath_node *an = NULL; enum ath9k_key_type keytype; bool short_preamble = false; + u8 txpower; /* * We check if Short Preamble is needed for the CTS rate by @@ -2145,6 +2133,16 @@ static void setup_frame_info(struct ieee80211_hw *hw, if (sta) an = (struct ath_node *) sta->drv_priv; + if (tx_info->control.vif) { + struct ieee80211_vif *vif = tx_info->control.vif; + + txpower = 2 * vif->bss_conf.txpower; + } else { + struct ath_softc *sc = hw->priv; + + txpower = sc->cur_chan->cur_txpower; + } + memset(fi, 0, sizeof(*fi)); fi->txq = -1; if (hw_key) @@ -2155,7 +2153,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, fi->keyix = ATH9K_TXKEYIX_INVALID; fi->keytype = keytype; fi->framelen = framelen; - fi->tx_power = MAX_RATE_POWER; + fi->tx_power = txpower; if (!rate) return; diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index bfdf3fa..62db2e5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -244,6 +244,7 @@ enum iwl_ucode_tlv_flag { * longer than the passive one, which is essential for fragmented scan. * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR + * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power. * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, * regardless of the band or the number of the probes. FW will calculate * the actual dwell time. @@ -260,6 +261,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9), IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), + IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11), IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), IWL_UCODE_TLV_API_SCD_CFG = BIT(15), IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 6dfed12..56254a8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -421,8 +421,9 @@ struct iwl_trans_txq_scd_cfg { * * All the handlers MUST be implemented * - * @start_hw: starts the HW- from that point on, the HW can send interrupts - * May sleep + * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken + * out of a low power state. From that point on, the HW can send + * interrupts. May sleep. * @op_mode_leave: Turn off the HW RF kill indication if on * May sleep * @start_fw: allocates and inits all the resources for the transport @@ -432,10 +433,11 @@ struct iwl_trans_txq_scd_cfg { * the SCD base address in SRAM, then provide it here, or 0 otherwise. * May sleep * @stop_device: stops the whole device (embedded CPU put to reset) and stops - * the HW. From that point on, the HW will be in low power but will still - * issue interrupt if the HW RF kill is triggered. This callback must do - * the right thing and not crash even if start_hw() was called but not - * start_fw(). May sleep + * the HW. If low_power is true, the NIC will be put in low power state. + * From that point on, the HW will be stopped but will still issue an + * interrupt if the HW RF kill switch is triggered. + * This callback must do the right thing and not crash even if %start_hw() + * was called but not &start_fw(). May sleep. * @d3_suspend: put the device into the correct mode for WoWLAN during * suspend. This is optional, if not implemented WoWLAN will not be * supported. This callback may sleep. @@ -491,14 +493,14 @@ struct iwl_trans_txq_scd_cfg { */ struct iwl_trans_ops { - int (*start_hw)(struct iwl_trans *iwl_trans); + int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power); void (*op_mode_leave)(struct iwl_trans *iwl_trans); int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); int (*update_sf)(struct iwl_trans *trans, struct iwl_sf_region *st_fwrd_space); void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); - void (*stop_device)(struct iwl_trans *trans); + void (*stop_device)(struct iwl_trans *trans, bool low_power); void (*d3_suspend)(struct iwl_trans *trans, bool test); int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, @@ -652,11 +654,16 @@ static inline void iwl_trans_configure(struct iwl_trans *trans, trans->ops->configure(trans, trans_cfg); } -static inline int iwl_trans_start_hw(struct iwl_trans *trans) +static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power) { might_sleep(); - return trans->ops->start_hw(trans); + return trans->ops->start_hw(trans, low_power); +} + +static inline int iwl_trans_start_hw(struct iwl_trans *trans) +{ + return trans->ops->start_hw(trans, true); } static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) @@ -703,15 +710,21 @@ static inline int iwl_trans_update_sf(struct iwl_trans *trans, return 0; } -static inline void iwl_trans_stop_device(struct iwl_trans *trans) +static inline void _iwl_trans_stop_device(struct iwl_trans *trans, + bool low_power) { might_sleep(); - trans->ops->stop_device(trans); + trans->ops->stop_device(trans, low_power); trans->state = IWL_TRANS_NO_FW; } +static inline void iwl_trans_stop_device(struct iwl_trans *trans) +{ + _iwl_trans_stop_device(trans, true); +} + static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) { might_sleep(); diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index a6c48c7..1b1b2bf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ 
b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1726,7 +1726,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, results->matched_profiles = le32_to_cpu(query->matched_profiles); memcpy(results->matches, query->matches, sizeof(results->matches)); -#ifdef CPTCFG_IWLWIFI_DEBUGFS +#ifdef CONFIG_IWLWIFI_DEBUGFS mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 4fc0938b..b1baa33 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -298,6 +298,40 @@ struct iwl_uapsd_misbehaving_ap_notif { } __packed; /** + * struct iwl_reduce_tx_power_cmd - TX power reduction command + * REDUCE_TX_POWER_CMD = 0x9f + * @flags: (reserved for future implementation) + * @mac_context_id: id of the mac ctx for which we are reducing TX power. + * @pwr_restriction: TX power restriction in dBms. + */ +struct iwl_reduce_tx_power_cmd { + u8 flags; + u8 mac_context_id; + __le16 pwr_restriction; +} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ + +/** + * struct iwl_dev_tx_power_cmd - TX power reduction command + * REDUCE_TX_POWER_CMD = 0x9f + * @set_mode: 0 - MAC tx power, 1 - device tx power + * @mac_context_id: id of the mac ctx for which we are reducing TX power. + * @pwr_restriction: TX power restriction in 1/8 dBms. + * @dev_24: device TX power restriction in 1/8 dBms + * @dev_52_low: device TX power restriction upper band - low + * @dev_52_high: device TX power restriction upper band - high + */ +struct iwl_dev_tx_power_cmd { + __le32 set_mode; + __le32 mac_context_id; + __le16 pwr_restriction; + __le16 dev_24; + __le16 dev_52_low; + __le16 dev_52_high; +} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */ + +#define IWL_DEV_MAX_TX_POWER 0x7FFF + +/** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) * @id_and_color: MAC contex identifier diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 4f81dcf..d6cced4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -122,46 +122,6 @@ enum iwl_scan_complete_status { SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C, }; -/** - * struct iwl_scan_results_notif - scan results for one channel - * ( SCAN_RESULTS_NOTIFICATION = 0x83 ) - * @channel: which channel the results are from - * @band: 0 for 5.2 GHz, 1 for 2.4 GHz - * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request - * @num_probe_not_sent: # of request that weren't sent due to not enough time - * @duration: duration spent in channel, in usecs - * @statistics: statistics gathered for this channel - */ -struct iwl_scan_results_notif { - u8 channel; - u8 band; - u8 probe_status; - u8 num_probe_not_sent; - __le32 duration; - __le32 statistics[SCAN_RESULTS_STATISTICS]; -} __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */ - -/** - * struct iwl_scan_complete_notif - notifies end of scanning (all channels) - * ( SCAN_COMPLETE_NOTIFICATION = 0x84 ) - * @scanned_channels: number of channels scanned (and number of valid results) - * @status: one of SCAN_COMP_STATUS_* - * @bt_status: BT on/off status - * @last_channel: last channel that was scanned - * @tsf_low: TSF timer (lower half) in usecs - * @tsf_high: TSF timer (higher half) in usecs - * @results: array of scan results, only "scanned_channels" of them are valid - */ -struct iwl_scan_complete_notif { - u8 scanned_channels; - u8 status; 
- u8 bt_status; - u8 last_channel; - __le32 tsf_low; - __le32 tsf_high; - struct iwl_scan_results_notif results[]; -} __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */ - /* scan offload */ #define IWL_SCAN_MAX_BLACKLIST_LEN 64 #define IWL_SCAN_SHORT_BLACKLIST_LEN 16 @@ -554,7 +514,7 @@ struct iwl_scan_req_unified_lmac { } __packed; /** - * struct iwl_lmac_scan_results_notif - scan results for one channel - + * struct iwl_scan_results_notif - scan results for one channel - * SCAN_RESULT_NTF_API_S_VER_3 * @channel: which channel the results are from * @band: 0 for 5.2 GHz, 1 for 2.4 GHz @@ -562,7 +522,7 @@ struct iwl_scan_req_unified_lmac { * @num_probe_not_sent: # of request that weren't sent due to not enough time * @duration: duration spent in channel, in usecs */ -struct iwl_lmac_scan_results_notif { +struct iwl_scan_results_notif { u8 channel; u8 band; u8 probe_status; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index aab68cb..01b1da6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -281,19 +281,6 @@ struct iwl_tx_ant_cfg_cmd { __le32 valid; } __packed; -/** - * struct iwl_reduce_tx_power_cmd - TX power reduction command - * REDUCE_TX_POWER_CMD = 0x9f - * @flags: (reserved for future implementation) - * @mac_context_id: id of the mac ctx for which we are reducing TX power. - * @pwr_restriction: TX power restriction in dBms. - */ -struct iwl_reduce_tx_power_cmd { - u8 flags; - u8 mac_context_id; - __le16 pwr_restriction; -} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ - /* * Calibration control struct. * Sent as part of the phy configuration command. diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index bc5eac4..df86963 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -322,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating)) + if (WARN_ON_ONCE(mvm->calibrating)) return 0; iwl_init_notification_wait(&mvm->notif_wait, @@ -396,8 +396,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) */ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, MVM_UCODE_CALIB_TIMEOUT); - if (!ret) - mvm->init_ucode_complete = true; if (ret && iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); @@ -494,15 +492,6 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, mvm->fw_dump_desc = desc; - /* stop recording */ - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); - } else { - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); - /* wait before we collect the data till the DBGC stop */ - udelay(100); - } - queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); return 0; @@ -658,25 +647,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm) * module loading, load init ucode now * (for example, if we were in RFKILL) */ - if (!mvm->init_ucode_complete) { - ret = iwl_run_init_mvm_ucode(mvm, false); - if (ret && !iwlmvm_mod_params.init_dbg) { - IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); - /* this can't happen */ - if (WARN_ON(ret > 0)) - ret = -ERFKILL; - goto error; - } - if (!iwlmvm_mod_params.init_dbg) { - /* - * should stop and start HW since that INIT - * image just loaded - */ - iwl_trans_stop_device(mvm->trans); - ret = iwl_trans_start_hw(mvm->trans); - if (ret) - return ret; - } + ret = iwl_run_init_mvm_ucode(mvm, false); + if (ret && !iwlmvm_mod_params.init_dbg) { + IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); + /* this can't happen */ + if (WARN_ON(ret > 0)) + ret = -ERFKILL; + goto error; + } + if (!iwlmvm_mod_params.init_dbg) { + /* + * Stop and start the transport without entering low power + * mode. This will save the state of other components on the + * device that are triggered by the INIT firwmare (MFUART). 
+ */ + _iwl_trans_stop_device(mvm->trans, false); + _iwl_trans_start_hw(mvm->trans, false); + if (ret) + return ret; } if (iwlmvm_mod_params.init_dbg) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 8455517..40265b9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -1322,7 +1322,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); iwl_mvm_d0i3_enable_tx(mvm, NULL); - ret = iwl_mvm_update_quotas(mvm, false, NULL); + ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", ret); @@ -1471,8 +1471,8 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) return NULL; } -static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - s8 tx_power) +static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, s8 tx_power) { /* FW is in charge of regulatory enforcement */ struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = { @@ -1485,6 +1485,26 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, &reduce_txpwr_cmd); } +static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + s16 tx_power) +{ + struct iwl_dev_tx_power_cmd cmd = { + .set_mode = 0, + .mac_context_id = + cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), + .pwr_restriction = cpu_to_le16(8 * tx_power), + }; + + if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV)) + return iwl_mvm_set_tx_power_old(mvm, vif, tx_power); + + if (tx_power == IWL_DEFAULT_MAX_TX_POWER) + cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + + return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, + sizeof(cmd), &cmd); +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index d5522a1..cf70f68 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -603,7 +603,6 @@ struct iwl_mvm { enum iwl_ucode_type cur_ucode; bool ucode_loaded; - bool init_ucode_complete; bool calibrating; u32 error_event_table; u32 log_event_table; diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index a08b03d..1c66297 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -865,6 +865,16 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) return; mutex_lock(&mvm->mutex); + + /* stop recording */ + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); + } else { + iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); + /* wait before we collect the data till the DBGC stop */ + udelay(100); + } + iwl_mvm_fw_error_dump(mvm); /* start recording again if the firmware is not crashed */ diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 78ec7db..d6314dd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -478,6 +478,11 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, if (vif->type != NL80211_IFTYPE_STATION) return; + if (sig == 0) { + IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); + return; + } + mvmvif->bf_data.ave_beacon_signal = sig; /* BT Coex */ diff --git 
a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 74e1c86..1075a21 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -319,7 +319,7 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_scan_complete_notif *notif = (void *)pkt->data; + struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; IWL_DEBUG_SCAN(mvm, "Scan offload iteration complete: status=0x%x scanned channels=%d\n", diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 2de8fbf..47bbf57 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -5,8 +5,8 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,8 +31,8 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct page *page; + struct page *page = NULL; dma_addr_t phys; u32 size; u8 power; @@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) DMA_FROM_DEVICE); if (dma_mapping_error(trans->dev, phys)) { __free_pages(page, order); + page = NULL; continue; } IWL_INFO(trans, @@ -1020,7 +1021,7 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) iwl_pcie_tx_start(trans, scd_addr); } -static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill, was_hw_rfkill; @@ -1115,7 +1116,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) { if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) - iwl_trans_pcie_stop_device(trans); + iwl_trans_pcie_stop_device(trans, true); } static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) @@ -1200,7 +1201,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { bool hw_rfkill; int err; diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index f0188c8..2721cf8 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request, do { status = usb_control_msg(udev, pipe, request, reqtype, value, - index, pdata, len, 0); 
/*max. timeout*/ + index, pdata, len, 1000); if (status < 0) { /* firmware download is checksumed, don't retry */ if ((value >= FW_8192C_START_ADDRESS && diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index 8be2096..deeaed5 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -348,7 +348,7 @@ int superio_fixup_irq(struct pci_dev *pcidev) BUG(); return -1; } - printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %pf\n", + printk(KERN_DEBUG "superio_fixup_irq(%s) ven 0x%x dev 0x%x from %ps\n", pci_name(pcidev), pcidev->vendor, pcidev->device, __builtin_return_address(0)); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 89dca77..18ee208 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p) EXPORT_SYMBOL_GPL(devm_pinctrl_put); int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, - bool dup, bool locked) + bool dup) { int i, ret; struct pinctrl_maps *maps_node; @@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, maps_node->maps = maps; } - if (!locked) - mutex_lock(&pinctrl_maps_mutex); + mutex_lock(&pinctrl_maps_mutex); list_add_tail(&maps_node->node, &pinctrl_maps); - if (!locked) - mutex_unlock(&pinctrl_maps_mutex); + mutex_unlock(&pinctrl_maps_mutex); return 0; } @@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, int pinctrl_register_mappings(struct pinctrl_map const *maps, unsigned num_maps) { - return pinctrl_register_map(maps, num_maps, true, false); + return pinctrl_register_map(maps, num_maps, true); } void pinctrl_unregister_map(struct pinctrl_map const *map) diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 75476b3..b24ea84 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, } int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, - bool dup, bool locked); + bool dup); void pinctrl_unregister_map(struct pinctrl_map const *map); extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index eda13de..0bbf7d7 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, dt_map->num_maps = num_maps; list_add_tail(&dt_map->node, &p->dt_maps); - return pinctrl_register_map(map, num_maps, false, true); + return pinctrl_register_map(map, num_maps, false); } struct pinctrl_dev *of_pinctrl_get(struct device_node *np) diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 493294c..474812e 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -881,6 +881,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset, if (!mtk_eint_get_mask(pctl, eint_num)) { mtk_eint_mask(d); unmask = 1; + } else { + unmask = 0; } clr_bit = 0xff << eint_offset; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c index 42f930f..03aa58c 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c @@ -364,7 +364,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = { MPP_FUNCTION(0x5, "audio", "mclk"), MPP_FUNCTION(0x6, 
"uart0", "cts")), MPP_MODE(63, - MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "spi0", "sck"), MPP_FUNCTION(0x2, "tclk", NULL)), MPP_MODE(64, diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index b2d2221..ae4115e 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -260,6 +260,7 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function, val = 1; } + val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT; val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT; @@ -417,7 +418,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin, return ret; val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT; - val = pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT; + val |= pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT; ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val); if (ret < 0) @@ -466,12 +467,13 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev, seq_puts(s, " ---"); } else { - if (!pad->input_enabled) { + if (pad->input_enabled) { ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS); - if (!ret) { - ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; - pad->out_value = ret; - } + if (ret < 0) + return; + + ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; + pad->out_value = ret; } seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in"); diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index 8f36c5f..211b942 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c @@ -370,6 +370,7 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function, } } + val = val << PMIC_MPP_REG_MODE_DIR_SHIFT; val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT; val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK; @@ -576,10 +577,11 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev, if (pad->input_enabled) { ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS); - if (!ret) { - ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; - pad->out_value = ret; - } + if (ret < 0) + return; + + ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; + pad->out_value = ret; } seq_printf(s, " %-4s", pad->output_enabled ? 
"out" : "in"); diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 594c918..1ef02da 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -372,7 +372,8 @@ static int acerhdf_bind(struct thermal_zone_device *thermal, return 0; if (thermal_zone_bind_cooling_device(thermal, 0, cdev, - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT)) { + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT)) { pr_err("error binding cooling dev\n"); return -EINVAL; } diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index b3d419a..b496db8 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -830,6 +830,13 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) */ static const struct dmi_system_id no_hw_rfkill_list[] = { { + .ident = "Lenovo G40-30", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"), + }, + }, + { .ident = "Lenovo Yoga 2 11 / 13 / Pro", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 7769575..9bb9ad6 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2115,7 +2115,7 @@ static int hotkey_mask_get(void) return 0; } -void static hotkey_mask_warn_incomplete_mask(void) +static void hotkey_mask_warn_incomplete_mask(void) { /* log only what the user can fix... */ const u32 wantedmask = hotkey_driver_mask & diff --git a/drivers/power/axp288_fuel_gauge.c b/drivers/power/axp288_fuel_gauge.c index ca1cc5a..bd1dbfe 100644 --- a/drivers/power/axp288_fuel_gauge.c +++ b/drivers/power/axp288_fuel_gauge.c @@ -1149,6 +1149,7 @@ static struct platform_driver axp288_fuel_gauge_driver = { module_platform_driver(axp288_fuel_gauge_driver); +MODULE_AUTHOR("Ramakrishna Pallala <ramakrishna.pallala@intel.com>"); MODULE_AUTHOR("Todd Brandt <todd.e.brandt@linux.intel.com>"); MODULE_DESCRIPTION("Xpower AXP288 Fuel Gauge Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c index a57433d..b6b9837 100644 --- a/drivers/power/bq27x00_battery.c +++ b/drivers/power/bq27x00_battery.c @@ -1109,6 +1109,14 @@ static void __exit bq27x00_battery_exit(void) } module_exit(bq27x00_battery_exit); +#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM +MODULE_ALIAS("platform:bq27000-battery"); +#endif + +#ifdef CONFIG_BATTERY_BQ27X00_I2C +MODULE_ALIAS("i2c:bq27000-battery"); +#endif + MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); MODULE_DESCRIPTION("BQ27x00 battery monitor driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c index 2da9ed8..8a971b3 100644 --- a/drivers/power/collie_battery.c +++ b/drivers/power/collie_battery.c @@ -347,7 +347,7 @@ static int collie_bat_probe(struct ucb1x00_dev *dev) goto err_psy_reg_main; } - psy_main_cfg.drv_data = &collie_bat_bu; + psy_bu_cfg.drv_data = &collie_bat_bu; collie_bat_bu.psy = power_supply_register(&dev->ucb->dev, &collie_bat_bu_desc, &psy_bu_cfg); diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index aad9c33..17d93a7 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -41,6 +41,7 @@ config POWER_RESET_AXXIA config POWER_RESET_BRCMSTB bool "Broadcom STB reset driver" depends on ARM || MIPS || COMPILE_TEST + depends on MFD_SYSCON default ARCH_BRCMSTB help This driver provides restart 
support for Broadcom STB boards. diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index 01c7055..ca461eb 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -212,9 +212,9 @@ static int at91_reset_platform_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 ); at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (IS_ERR(at91_ramc_base[idx])) { + if (!at91_ramc_base[idx]) { dev_err(&pdev->dev, "Could not map ram controller address\n"); - return PTR_ERR(at91_ramc_base[idx]); + return -ENOMEM; } } diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c index 7ef193b..1e08195 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -120,18 +120,7 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer) static void ltc2952_poweroff_start_wde(struct ltc2952_poweroff *data) { - if (hrtimer_start(&data->timer_wde, data->wde_interval, - HRTIMER_MODE_REL)) { - /* - * The device will not toggle the watchdog reset, - * thus shut down is only safe if the PowerPath controller - * has a long enough time-off before triggering a hardware - * power-off. - * - * Only sending a warning as the system will power-off anyway - */ - dev_err(data->dev, "unable to start the timer\n"); - } + hrtimer_start(&data->timer_wde, data->wde_interval, HRTIMER_MODE_REL); } static enum hrtimer_restart @@ -165,9 +154,8 @@ static irqreturn_t ltc2952_poweroff_handler(int irq, void *dev_id) } if (gpiod_get_value(data->gpio_trigger)) { - if (hrtimer_start(&data->timer_trigger, data->trigger_delay, - HRTIMER_MODE_REL)) - dev_err(data->dev, "unable to start the wait timer\n"); + hrtimer_start(&data->timer_trigger, data->trigger_delay, + HRTIMER_MODE_REL); } else { hrtimer_cancel(&data->timer_trigger); /* omitting return value check, timer should have been valid */ diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 6149ae0..0fe4ad8 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -164,6 +164,16 @@ config RTC_DRV_ABB5ZES3 This driver can also be built as a module. If so, the module will be called rtc-ab-b5ze-s3. +config RTC_DRV_ABX80X + tristate "Abracon ABx80x" + help + If you say yes here you get support for Abracon AB080X and AB180X + families of ultra-low-power battery- and capacitor-backed real-time + clock chips. + + This driver can also be built as a module. If so, the module + will be called rtc-abx80x. + config RTC_DRV_AS3722 tristate "ams AS3722 RTC driver" depends on MFD_AS3722 diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index c31731c..2b82e2b 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o +obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c new file mode 100644 index 0000000..4337c3b --- /dev/null +++ b/drivers/rtc/rtc-abx80x.c @@ -0,0 +1,307 @@ +/* + * A driver for the I2C members of the Abracon AB x8xx RTC family, + * and compatible: AB 1805 and AB 0805 + * + * Copyright 2014-2015 Macq S.A. 
+ * + * Author: Philippe De Muyter <phdm@macqel.be> + * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/bcd.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/rtc.h> + +#define ABX8XX_REG_HTH 0x00 +#define ABX8XX_REG_SC 0x01 +#define ABX8XX_REG_MN 0x02 +#define ABX8XX_REG_HR 0x03 +#define ABX8XX_REG_DA 0x04 +#define ABX8XX_REG_MO 0x05 +#define ABX8XX_REG_YR 0x06 +#define ABX8XX_REG_WD 0x07 + +#define ABX8XX_REG_CTRL1 0x10 +#define ABX8XX_CTRL_WRITE BIT(1) +#define ABX8XX_CTRL_12_24 BIT(6) + +#define ABX8XX_REG_CFG_KEY 0x1f +#define ABX8XX_CFG_KEY_MISC 0x9d + +#define ABX8XX_REG_ID0 0x28 + +#define ABX8XX_REG_TRICKLE 0x20 +#define ABX8XX_TRICKLE_CHARGE_ENABLE 0xa0 +#define ABX8XX_TRICKLE_STANDARD_DIODE 0x8 +#define ABX8XX_TRICKLE_SCHOTTKY_DIODE 0x4 + +static u8 trickle_resistors[] = {0, 3, 6, 11}; + +enum abx80x_chip {AB0801, AB0803, AB0804, AB0805, + AB1801, AB1803, AB1804, AB1805, ABX80X}; + +struct abx80x_cap { + u16 pn; + bool has_tc; +}; + +static struct abx80x_cap abx80x_caps[] = { + [AB0801] = {.pn = 0x0801}, + [AB0803] = {.pn = 0x0803}, + [AB0804] = {.pn = 0x0804, .has_tc = true}, + [AB0805] = {.pn = 0x0805, .has_tc = true}, + [AB1801] = {.pn = 0x1801}, + [AB1803] = {.pn = 0x1803}, + [AB1804] = {.pn = 0x1804, .has_tc = true}, + [AB1805] = {.pn = 0x1805, .has_tc = true}, + [ABX80X] = {.pn = 0} +}; + +static struct i2c_driver abx80x_driver; + +static int abx80x_enable_trickle_charger(struct i2c_client *client, + u8 trickle_cfg) +{ + int err; + + /* + * Write the configuration key register to enable access to the Trickle + * register + */ + err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY, + ABX8XX_CFG_KEY_MISC); + if (err < 0) { + dev_err(&client->dev, "Unable to write configuration key\n"); + return -EIO; + } + + err = i2c_smbus_write_byte_data(client, ABX8XX_REG_TRICKLE, + ABX8XX_TRICKLE_CHARGE_ENABLE | + trickle_cfg); + if (err < 0) { + dev_err(&client->dev, "Unable to write trickle register\n"); + return -EIO; + } + + return 0; +} + +static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char buf[8]; + int err; + + err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH, + sizeof(buf), buf); + if (err < 0) { + dev_err(&client->dev, "Unable to read date\n"); + return -EIO; + } + + tm->tm_sec = bcd2bin(buf[ABX8XX_REG_SC] & 0x7F); + tm->tm_min = bcd2bin(buf[ABX8XX_REG_MN] & 0x7F); + tm->tm_hour = bcd2bin(buf[ABX8XX_REG_HR] & 0x3F); + tm->tm_wday = buf[ABX8XX_REG_WD] & 0x7; + tm->tm_mday = bcd2bin(buf[ABX8XX_REG_DA] & 0x3F); + tm->tm_mon = bcd2bin(buf[ABX8XX_REG_MO] & 0x1F) - 1; + tm->tm_year = bcd2bin(buf[ABX8XX_REG_YR]) + 100; + + err = rtc_valid_tm(tm); + if (err < 0) + dev_err(&client->dev, "retrieved date/time is not valid.\n"); + + return err; +} + +static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char buf[8]; + int err; + + if (tm->tm_year < 100) + return -EINVAL; + + buf[ABX8XX_REG_HTH] = 0; + buf[ABX8XX_REG_SC] = bin2bcd(tm->tm_sec); + buf[ABX8XX_REG_MN] = bin2bcd(tm->tm_min); + buf[ABX8XX_REG_HR] = bin2bcd(tm->tm_hour); + buf[ABX8XX_REG_DA] = bin2bcd(tm->tm_mday); + buf[ABX8XX_REG_MO] = bin2bcd(tm->tm_mon + 1); + buf[ABX8XX_REG_YR] = 
bin2bcd(tm->tm_year - 100); + buf[ABX8XX_REG_WD] = tm->tm_wday; + + err = i2c_smbus_write_i2c_block_data(client, ABX8XX_REG_HTH, + sizeof(buf), buf); + if (err < 0) { + dev_err(&client->dev, "Unable to write to date registers\n"); + return -EIO; + } + + return 0; +} + +static const struct rtc_class_ops abx80x_rtc_ops = { + .read_time = abx80x_rtc_read_time, + .set_time = abx80x_rtc_set_time, +}; + +static int abx80x_dt_trickle_cfg(struct device_node *np) +{ + const char *diode; + int trickle_cfg = 0; + int i, ret; + u32 tmp; + + ret = of_property_read_string(np, "abracon,tc-diode", &diode); + if (ret) + return ret; + + if (!strcmp(diode, "standard")) + trickle_cfg |= ABX8XX_TRICKLE_STANDARD_DIODE; + else if (!strcmp(diode, "schottky")) + trickle_cfg |= ABX8XX_TRICKLE_SCHOTTKY_DIODE; + else + return -EINVAL; + + ret = of_property_read_u32(np, "abracon,tc-resistor", &tmp); + if (ret) + return ret; + + for (i = 0; i < sizeof(trickle_resistors); i++) + if (trickle_resistors[i] == tmp) + break; + + if (i == sizeof(trickle_resistors)) + return -EINVAL; + + return (trickle_cfg | i); +} + +static int abx80x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device_node *np = client->dev.of_node; + struct rtc_device *rtc; + int i, data, err, trickle_cfg = -EINVAL; + char buf[7]; + unsigned int part = id->driver_data; + unsigned int partnumber; + unsigned int majrev, minrev; + unsigned int lot; + unsigned int wafer; + unsigned int uid; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_ID0, + sizeof(buf), buf); + if (err < 0) { + dev_err(&client->dev, "Unable to read partnumber\n"); + return -EIO; + } + + partnumber = (buf[0] << 8) | buf[1]; + majrev = buf[2] >> 3; + minrev = buf[2] & 0x7; + lot = ((buf[4] & 0x80) << 2) | ((buf[6] & 0x80) << 1) | buf[3]; + uid = ((buf[4] & 0x7f) << 8) | buf[5]; + wafer = (buf[6] & 0x7c) >> 2; + dev_info(&client->dev, "model %04x, revision %u.%u, lot %x, wafer %x, uid %x\n", + partnumber, majrev, minrev, lot, wafer, uid); + + data = i2c_smbus_read_byte_data(client, ABX8XX_REG_CTRL1); + if (data < 0) { + dev_err(&client->dev, "Unable to read control register\n"); + return -EIO; + } + + err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CTRL1, + ((data & ~ABX8XX_CTRL_12_24) | + ABX8XX_CTRL_WRITE)); + if (err < 0) { + dev_err(&client->dev, "Unable to write control register\n"); + return -EIO; + } + + /* part autodetection */ + if (part == ABX80X) { + for (i = 0; abx80x_caps[i].pn; i++) + if (partnumber == abx80x_caps[i].pn) + break; + if (abx80x_caps[i].pn == 0) { + dev_err(&client->dev, "Unknown part: %04x\n", + partnumber); + return -EINVAL; + } + part = i; + } + + if (partnumber != abx80x_caps[part].pn) { + dev_err(&client->dev, "partnumber mismatch %04x != %04x\n", + partnumber, abx80x_caps[part].pn); + return -EINVAL; + } + + if (np && abx80x_caps[part].has_tc) + trickle_cfg = abx80x_dt_trickle_cfg(np); + + if (trickle_cfg > 0) { + dev_info(&client->dev, "Enabling trickle charger: %02x\n", + trickle_cfg); + abx80x_enable_trickle_charger(client, trickle_cfg); + } + + rtc = devm_rtc_device_register(&client->dev, abx80x_driver.driver.name, + &abx80x_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + i2c_set_clientdata(client, rtc); + + return 0; +} + +static int abx80x_remove(struct i2c_client *client) +{ + return 0; +} + +static const struct i2c_device_id abx80x_id[] = { + { "abx80x", ABX80X }, + { "ab0801", AB0801 }, + { 
"ab0803", AB0803 }, + { "ab0804", AB0804 }, + { "ab0805", AB0805 }, + { "ab1801", AB1801 }, + { "ab1803", AB1803 }, + { "ab1804", AB1804 }, + { "ab1805", AB1805 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, abx80x_id); + +static struct i2c_driver abx80x_driver = { + .driver = { + .name = "rtc-abx80x", + }, + .probe = abx80x_probe, + .remove = abx80x_remove, + .id_table = abx80x_id, +}; + +module_i2c_driver(abx80x_driver); + +MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>"); +MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>"); +MODULE_DESCRIPTION("Abracon ABX80X RTC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c index 43e04af..4b62d1a 100644 --- a/drivers/rtc/rtc-armada38x.c +++ b/drivers/rtc/rtc-armada38x.c @@ -40,6 +40,13 @@ struct armada38x_rtc { void __iomem *regs; void __iomem *regs_soc; spinlock_t lock; + /* + * While setting the time, the RTC TIME register should not be + * accessed. Setting the RTC time involves sleeping during + * 100ms, so a mutex instead of a spinlock is used to protect + * it + */ + struct mutex mutex_time; int irq; }; @@ -57,10 +64,9 @@ static void rtc_delayed_write(u32 val, struct armada38x_rtc *rtc, int offset) static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct armada38x_rtc *rtc = dev_get_drvdata(dev); - unsigned long time, time_check, flags; - - spin_lock_irqsave(&rtc->lock, flags); + unsigned long time, time_check; + mutex_lock(&rtc->mutex_time); time = readl(rtc->regs + RTC_TIME); /* * WA for failing time set attempts. As stated in HW ERRATA if @@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm) if ((time_check - time) > 1) time_check = readl(rtc->regs + RTC_TIME); - spin_unlock_irqrestore(&rtc->lock, flags); + mutex_unlock(&rtc->mutex_time); rtc_time_to_tm(time_check, tm); @@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm) * then wait for 100ms before writing to the time register to be * sure that the data will be taken into account. 
*/ - spin_lock_irqsave(&rtc->lock, flags); - + mutex_lock(&rtc->mutex_time); rtc_delayed_write(0, rtc, RTC_STATUS); - - spin_unlock_irqrestore(&rtc->lock, flags); - msleep(100); - - spin_lock_irqsave(&rtc->lock, flags); - rtc_delayed_write(time, rtc, RTC_TIME); + mutex_unlock(&rtc->mutex_time); - spin_unlock_irqrestore(&rtc->lock, flags); out: return ret; } @@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev) return -ENOMEM; spin_lock_init(&rtc->lock); + mutex_init(&rtc->mutex_time); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc"); rtc->regs = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index c43aca6..0fc3fe5 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -667,6 +667,8 @@ static struct raw3215_info *raw3215_alloc_info(void) info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); if (!info->buffer || !info->inbuf) { + kfree(info->inbuf); + kfree(info->buffer); kfree(info); return NULL; } diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 7600639..add419d 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); -static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id); /* Functions */ @@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) } /* Now complete the io */ + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - twa_unmap_scsi_data(tw_dev, request_id); } /* Check for valid status after each drain */ @@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm } } /* End twa_load_sgl() */ -/* This function will perform a pci-dma mapping for a scatter gather list */ -static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) -{ - int use_sg; - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - use_sg = scsi_dma_map(cmd); - if (!use_sg) - return 0; - else if (use_sg < 0) { - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End twa_map_scsi_sg_data() */ - /* This function will poll for a response interrupt of a request */ static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) { @@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) (tw_dev->state[i] != TW_S_INITIAL) && (tw_dev->state[i] != TW_S_COMPLETED)) { if (tw_dev->srb[i]) { - tw_dev->srb[i]->result = (DID_RESET << 16); - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - twa_unmap_scsi_data(tw_dev, i); + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + cmd->result = (DID_RESET << 16); + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); } } } @@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void 
(*done)(struct scsi_ /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: + scsi_dma_unmap(SCpnt); twa_free_request_id(tw_dev, request_id); - twa_unmap_scsi_data(tw_dev, request_id); break; case 1: - tw_dev->state[request_id] = TW_S_COMPLETED; - twa_free_request_id(tw_dev, request_id); - twa_unmap_scsi_data(tw_dev, request_id); SCpnt->result = (DID_ERROR << 16); + scsi_dma_unmap(SCpnt); done(SCpnt); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); retval = 0; } out: @@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); } else { - sg_count = twa_map_scsi_sg_data(tw_dev, request_id); - if (sg_count == 0) + sg_count = scsi_dma_map(srb); + if (sg_count < 0) goto out; scsi_for_each_sg(srb, sg, sg_count, i) { @@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code) return(table[index].text); } /* End twa_string_lookup() */ -/* This function will perform a pci-dma unmap */ -static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) -{ - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End twa_unmap_scsi_data() */ - /* This function gets called when a disk is coming on-line */ static int twa_slave_configure(struct scsi_device *sdev) { diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h index 040f721..0fdc83c 100644 --- a/drivers/scsi/3w-9xxx.h +++ b/drivers/scsi/3w-9xxx.h @@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = { #define TW_CURRENT_DRIVER_BUILD 0 #define TW_CURRENT_DRIVER_BRANCH 0 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SINGLE 1 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_9550SX_DRAIN_COMPLETED 0xFFFF #define TW_SECTOR_SIZE 512 diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 2361772..f837485 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) return 0; } /* End twl_post_command_packet() */ -/* This function will perform a pci-dma mapping for a scatter gather list */ -static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) -{ - int use_sg; - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - use_sg = scsi_dma_map(cmd); - if (!use_sg) - return 0; - else if (use_sg < 0) { - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End twl_map_scsi_sg_data() */ - /* This function hands scsi cdb's to the firmware */ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) { @@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, if (!sglistarg) { /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { - sg_count = twl_map_scsi_sg_data(tw_dev, request_id); - if (sg_count == 0) + sg_count = scsi_dma_map(srb); + if (sg_count <= 0) 
goto out; scsi_for_each_sg(srb, sg, sg_count, i) { @@ -1102,15 +1082,6 @@ out: return retval; } /* End twl_initialize_device_extension() */ -/* This function will perform a pci-dma unmap */ -static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) -{ - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End twl_unmap_scsi_data() */ - /* This function will handle attention interrupts */ static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) { @@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance) } /* Now complete the io */ + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twl_free_request_id(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - twl_unmap_scsi_data(tw_dev, request_id); } /* Check for another response interrupt */ @@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res if ((tw_dev->state[i] != TW_S_FINISHED) && (tw_dev->state[i] != TW_S_INITIAL) && (tw_dev->state[i] != TW_S_COMPLETED)) { - if (tw_dev->srb[i]) { - tw_dev->srb[i]->result = (DID_RESET << 16); - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - twl_unmap_scsi_data(tw_dev, i); + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + if (cmd) { + cmd->result = (DID_RESET << 16); + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); } } } @@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); if (retval) { tw_dev->state[request_id] = TW_S_COMPLETED; diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h index d474892..fec6449 100644 --- a/drivers/scsi/3w-sas.h +++ b/drivers/scsi/3w-sas.h @@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] = #define TW_CURRENT_DRIVER_BUILD 0 #define TW_CURRENT_DRIVER_BRANCH 0 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_SECTOR_SIZE 512 #define TW_MAX_UNITS 32 diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index c75f204..2940bd7 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev) return 0; } /* End tw_initialize_device_extension() */ -static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) -{ - int use_sg; - - dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n"); - - use_sg = scsi_dma_map(cmd); - if (use_sg < 0) { - printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End tw_map_scsi_sg_data() */ - -static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) -{ - dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End tw_unmap_scsi_data() */ - /* This function will reset a device extension */ static int tw_reset_device_extension(TW_Device_Extension *tw_dev) { @@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev) srb = tw_dev->srb[i]; if (srb != NULL) { srb->result = (DID_RESET << 
16); - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]); + scsi_dma_unmap(srb); + srb->scsi_done(srb); } } } @@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id) command_packet->byte8.io.lba = lba; command_packet->byte6.block_count = num_sectors; - use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); - if (!use_sg) + use_sg = scsi_dma_map(srb); + if (use_sg <= 0) return 1; scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { @@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - switch (*command) { case READ_10: case READ_6: @@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance) /* Now complete the io */ if ((error != TW_ISR_DONT_COMPLETE)) { + scsi_dma_unmap(tw_dev->srb[request_id]); + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); tw_dev->state[request_id] = TW_S_COMPLETED; tw_state_request_finish(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); } } diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h index 29b0b84e..6f65e66 100644 --- a/drivers/scsi/3w-xxxx.h +++ b/drivers/scsi/3w-xxxx.h @@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] = #define TW_AEN_SMART_FAIL 0x000F #define TW_AEN_SBUF_FAIL 0x0024 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SINGLE 1 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_ALIGNMENT_6000 64 /* 64 bytes */ #define TW_ALIGNMENT_7000 4 /* 4 bytes */ diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index ec43276..b95d277 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -375,9 +375,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) u8 lun = cmd->device->lun; unsigned long flags; int bufflen = scsi_bufflen(cmd); - int mbo; + int mbo, sg_count; struct mailbox *mb = aha1542->mb; struct ccb *ccb = aha1542->ccb; + struct chain *cptr; if (*cmd->cmnd == REQUEST_SENSE) { /* Don't do the command - we have the sense data already */ @@ -397,6 +398,13 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); } #endif + if (bufflen) { /* allocate memory before taking host_lock */ + sg_count = scsi_sg_count(cmd); + cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA); + if (!cptr) + return SCSI_MLQUEUE_HOST_BUSY; + } + /* Use the outgoing mailboxes in a round-robin fashion, because this is how the host adapter will scan for them */ @@ -441,19 +449,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) if (bufflen) { struct scatterlist *sg; - struct chain *cptr; - int i, sg_count = scsi_sg_count(cmd); + int i; ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ - cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count, - GFP_KERNEL | GFP_DMA); - cptr = (struct chain *) cmd->host_scribble; - if (cptr == NULL) { - /* free the claimed mailbox slot */ - aha1542->int_cmds[mbo] = NULL; - spin_unlock_irqrestore(sh->host_lock, flags); - return SCSI_MLQUEUE_HOST_BUSY; - } + cmd->host_scribble = (void *)cptr; scsi_for_each_sg(cmd, 
sg, sg_count, i) { any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) + sg->offset); diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 262ab83..9f77d23 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -226,6 +226,7 @@ static struct { {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, {"Promise", "", NULL, BLIST_SPARSELUN}, + {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 60aae01..6efab1c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, */ if (*bflags & BLIST_MAX_512) blk_queue_max_hw_sectors(sdev->request_queue, 512); + /* + * Max 1024 sector transfer length for targets that report incorrect + * max/optimal lengths and relied on the old block layer safe default + */ + else if (*bflags & BLIST_MAX_1024) + blk_queue_max_hw_sectors(sdev->request_queue, 1024); /* * Some devices may not want to have a start command automatically diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index cd4c293..fe8875f 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c @@ -80,9 +80,10 @@ static int __init sh_pm_runtime_init(void) if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { if (!of_machine_is_compatible("renesas,emev2") && !of_machine_is_compatible("renesas,r7s72100") && - !of_machine_is_compatible("renesas,r8a73a4") && #ifndef CONFIG_PM_GENERIC_DOMAINS_OF + !of_machine_is_compatible("renesas,r8a73a4") && !of_machine_is_compatible("renesas,r8a7740") && + !of_machine_is_compatible("renesas,sh73a0") && #endif !of_machine_is_compatible("renesas,r8a7778") && !of_machine_is_compatible("renesas,r8a7779") && @@ -90,9 +91,7 @@ static int __init sh_pm_runtime_init(void) !of_machine_is_compatible("renesas,r8a7791") && !of_machine_is_compatible("renesas,r8a7792") && !of_machine_is_compatible("renesas,r8a7793") && - !of_machine_is_compatible("renesas,r8a7794") && - !of_machine_is_compatible("renesas,sh7372") && - !of_machine_is_compatible("renesas,sh73a0")) + !of_machine_is_compatible("renesas,r8a7794")) return 0; } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 198f96b..72b0590 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -78,6 +78,7 @@ config SPI_ATMEL config SPI_BCM2835 tristate "BCM2835 SPI controller" depends on ARCH_BCM2835 || COMPILE_TEST + depends on GPIOLIB help This selects a driver for the Broadcom BCM2835 SPI master. @@ -302,7 +303,7 @@ config SPI_FSL_SPI config SPI_FSL_DSPI tristate "Freescale DSPI controller" select REGMAP_MMIO - depends on SOC_VF610 || COMPILE_TEST + depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST help This enables support for the Freescale DSPI controller in master mode. VF610 platform uses the controller. 
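
[Editor's note] The aha1542 hunk above moves the GFP_KERNEL | GFP_DMA allocation of the scatter-gather chain in front of the host_lock acquisition and fails early with SCSI_MLQUEUE_HOST_BUSY, so the driver no longer sleeps in an allocator while holding a spinlock. The fragment below is only a user-space analogue of that ordering, not driver code; the names slot_table, queue_cmd and E_BUSY are invented for illustration.

/* Allocate before locking: the potentially slow allocation happens first,
 * and the lock only covers the cheap slot assignment.  Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSLOTS 8
#define E_BUSY 1

static pthread_spinlock_t lock;
static void *slot_table[NSLOTS];

static int queue_cmd(const void *payload, size_t len)
{
        void *buf = malloc(len);        /* done before taking the lock */
        if (!buf)
                return E_BUSY;          /* nothing claimed yet, nothing to undo */
        memcpy(buf, payload, len);

        pthread_spin_lock(&lock);       /* lock held only for the cheap part */
        for (int i = 0; i < NSLOTS; i++) {
                if (!slot_table[i]) {
                        slot_table[i] = buf;
                        pthread_spin_unlock(&lock);
                        return 0;
                }
        }
        pthread_spin_unlock(&lock);
        free(buf);                      /* no free slot: back out the allocation */
        return E_BUSY;
}

int main(void)
{
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        printf("queue_cmd: %d\n", queue_cmd("hello", 6));
        return 0;
}
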
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index f63864a..37875cf 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -164,13 +164,12 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master, unsigned long xfer_time_us) { struct bcm2835_spi *bs = spi_master_get_devdata(master); - unsigned long timeout = jiffies + - max(4 * xfer_time_us * HZ / 1000000, 2uL); + /* set timeout to 1 second of maximum polling */ + unsigned long timeout = jiffies + HZ; /* enable HW block without interrupts */ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); - /* set timeout to 4x the expected time, or 2 jiffies */ /* loop until finished the transfer */ while (bs->rx_len) { /* read from fifo as much as possible */ diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index 5ef6638..840a498 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -180,7 +180,6 @@ int spi_bitbang_setup(struct spi_device *spi) { struct spi_bitbang_cs *cs = spi->controller_state; struct spi_bitbang *bitbang; - int retval; unsigned long flags; bitbang = spi_master_get_devdata(spi->master); @@ -197,9 +196,11 @@ int spi_bitbang_setup(struct spi_device *spi) if (!cs->txrx_word) return -EINVAL; - retval = bitbang->setup_transfer(spi, NULL); - if (retval < 0) - return retval; + if (bitbang->setup_transfer) { + int retval = bitbang->setup_transfer(spi, NULL); + if (retval < 0) + return retval; + } dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); @@ -295,9 +296,11 @@ static int spi_bitbang_transfer_one(struct spi_master *master, /* init (-1) or override (1) transfer params */ if (do_setup != 0) { - status = bitbang->setup_transfer(spi, t); - if (status < 0) - break; + if (bitbang->setup_transfer) { + status = bitbang->setup_transfer(spi, t); + if (status < 0) + break; + } if (do_setup == -1) do_setup = 0; } diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index 9c46a30..896add8 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c @@ -24,6 +24,7 @@ #include <linux/of_address.h> #include <linux/spi/spi.h> #include <linux/types.h> +#include <linux/platform_device.h> #include "spi-fsl-cpm.h" #include "spi-fsl-lib.h" @@ -269,17 +270,6 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) if (mspi->flags & SPI_CPM2) { pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); out_be16(spi_base, pram_ofs); - } else { - struct spi_pram __iomem *pram = spi_base; - u16 rpbase = in_be16(&pram->rpbase); - - /* Microcode relocation patch applied? 
*/ - if (rpbase) { - pram_ofs = rpbase; - } else { - pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); - out_be16(spi_base, pram_ofs); - } } iounmap(spi_base); @@ -292,7 +282,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) struct device_node *np = dev->of_node; const u32 *iprop; int size; - unsigned long pram_ofs; unsigned long bds_ofs; if (!(mspi->flags & SPI_CPM_MODE)) @@ -319,8 +308,26 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) } } - pram_ofs = fsl_spi_cpm_get_pram(mspi); - if (IS_ERR_VALUE(pram_ofs)) { + if (mspi->flags & SPI_CPM1) { + struct resource *res; + void *pram; + + res = platform_get_resource(to_platform_device(dev), + IORESOURCE_MEM, 1); + pram = devm_ioremap_resource(dev, res); + if (IS_ERR(pram)) + mspi->pram = NULL; + else + mspi->pram = pram; + } else { + unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi); + + if (IS_ERR_VALUE(pram_ofs)) + mspi->pram = NULL; + else + mspi->pram = cpm_muram_addr(pram_ofs); + } + if (mspi->pram == NULL) { dev_err(dev, "can't allocate spi parameter ram\n"); goto err_pram; } @@ -346,8 +353,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) goto err_dummy_rx; } - mspi->pram = cpm_muram_addr(pram_ofs); - mspi->tx_bd = cpm_muram_addr(bds_ofs); mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); @@ -375,7 +380,8 @@ err_dummy_rx: err_dummy_tx: cpm_muram_free(bds_ofs); err_bds: - cpm_muram_free(pram_ofs); + if (!(mspi->flags & SPI_CPM1)) + cpm_muram_free(cpm_muram_offset(mspi->pram)); err_pram: fsl_spi_free_dummy_rx(); return -ENOMEM; diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index d0a73a0..80d245a 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -359,14 +359,16 @@ static void fsl_espi_rw_trans(struct spi_message *m, struct fsl_espi_transfer *trans, u8 *rx_buff) { struct fsl_espi_transfer *espi_trans = trans; - unsigned int n_tx = espi_trans->n_tx; - unsigned int n_rx = espi_trans->n_rx; + unsigned int total_len = espi_trans->len; struct spi_transfer *t; u8 *local_buf; u8 *rx_buf = rx_buff; unsigned int trans_len; unsigned int addr; - int i, pos, loop; + unsigned int tx_only; + unsigned int rx_pos = 0; + unsigned int pos; + int i, loop; local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); if (!local_buf) { @@ -374,36 +376,48 @@ static void fsl_espi_rw_trans(struct spi_message *m, return; } - for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { - trans_len = n_rx - pos; - if (trans_len > SPCOM_TRANLEN_MAX - n_tx) - trans_len = SPCOM_TRANLEN_MAX - n_tx; + for (pos = 0, loop = 0; pos < total_len; pos += trans_len, loop++) { + trans_len = total_len - pos; i = 0; + tx_only = 0; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->tx_buf) { memcpy(local_buf + i, t->tx_buf, t->len); i += t->len; + if (!t->rx_buf) + tx_only += t->len; } } + /* Add additional TX bytes to compensate SPCOM_TRANLEN_MAX */ + if (loop > 0) + trans_len += tx_only; + + if (trans_len > SPCOM_TRANLEN_MAX) + trans_len = SPCOM_TRANLEN_MAX; + + /* Update device offset */ if (pos > 0) { addr = fsl_espi_cmd2addr(local_buf); - addr += pos; + addr += rx_pos; fsl_espi_addr2cmd(addr, local_buf); } - espi_trans->n_tx = n_tx; - espi_trans->n_rx = trans_len; - espi_trans->len = trans_len + n_tx; + espi_trans->len = trans_len; espi_trans->tx_buf = local_buf; espi_trans->rx_buf = local_buf; fsl_espi_do_trans(m, espi_trans); - memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); + /* If there is at least one RX byte then copy it to rx_buf */ + if (tx_only < SPCOM_TRANLEN_MAX) + memcpy(rx_buf + 
rx_pos, espi_trans->rx_buf + tx_only, + trans_len - tx_only); + + rx_pos += trans_len - tx_only; if (loop > 0) - espi_trans->actual_length += espi_trans->len - n_tx; + espi_trans->actual_length += espi_trans->len - tx_only; else espi_trans->actual_length += espi_trans->len; } @@ -418,6 +432,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master, u8 *rx_buf = NULL; unsigned int n_tx = 0; unsigned int n_rx = 0; + unsigned int xfer_len = 0; struct fsl_espi_transfer espi_trans; list_for_each_entry(t, &m->transfers, transfer_list) { @@ -427,11 +442,13 @@ static int fsl_espi_do_one_msg(struct spi_master *master, n_rx += t->len; rx_buf = t->rx_buf; } + if ((t->tx_buf) || (t->rx_buf)) + xfer_len += t->len; } espi_trans.n_tx = n_tx; espi_trans.n_rx = n_rx; - espi_trans.len = n_tx + n_rx; + espi_trans.len = xfer_len; espi_trans.actual_length = 0; espi_trans.status = 0; diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 4df8942..d1a5b9f 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -1210,6 +1210,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; struct spi_transfer *t; + int status; spi = m->spi; mcspi = spi_master_get_devdata(master); @@ -1229,7 +1230,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, tx_buf ? "tx" : "", rx_buf ? "rx" : "", t->bits_per_word); - return -EINVAL; + status = -EINVAL; + goto out; } if (m->is_dma_mapped || len < DMA_MIN_BYTES) @@ -1241,7 +1243,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, if (dma_mapping_error(mcspi->dev, t->tx_dma)) { dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", 'T', len); - return -EINVAL; + status = -EINVAL; + goto out; } } if (mcspi_dma->dma_rx && rx_buf != NULL) { @@ -1253,14 +1256,19 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, if (tx_buf != NULL) dma_unmap_single(mcspi->dev, t->tx_dma, len, DMA_TO_DEVICE); - return -EINVAL; + status = -EINVAL; + goto out; } } } omap2_mcspi_work(mcspi, m); + /* spi_finalize_current_message() changes the status inside the + * spi_message, save the status here. */ + status = m->status; +out: spi_finalize_current_message(master); - return 0; + return status; } static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index d5d7d22..50910d8 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -583,6 +583,15 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) rx_dev = master->dma_rx->device->dev; list_for_each_entry(xfer, &msg->transfers, transfer_list) { + /* + * Restore the original value of tx_buf or rx_buf if they are + * NULL. 
+ */ + if (xfer->tx_buf == master->dummy_tx) + xfer->tx_buf = NULL; + if (xfer->rx_buf == master->dummy_rx) + xfer->rx_buf = NULL; + if (!master->can_dma(master, msg->spi, xfer)) continue; diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c index 8199b0a..1cf24e4 100644 --- a/drivers/staging/gdm724x/gdm_mux.c +++ b/drivers/staging/gdm724x/gdm_mux.c @@ -158,7 +158,7 @@ static int up_to_host(struct mux_rx *r) unsigned int start_flag; unsigned int payload_size; unsigned short packet_type; - int dummy_cnt; + int total_len; u32 packet_size_sum = r->offset; int index; int ret = TO_HOST_INVALID_PACKET; @@ -176,10 +176,10 @@ static int up_to_host(struct mux_rx *r) break; } - dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4); + total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4); if (len - packet_size_sum < - MUX_HEADER_SIZE + payload_size + dummy_cnt) { + total_len) { pr_err("invalid payload : %d %d %04x\n", payload_size, len, packet_type); break; @@ -202,7 +202,7 @@ static int up_to_host(struct mux_rx *r) break; } - packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt; + packet_size_sum += total_len; if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) { ret = r->callback(NULL, 0, @@ -361,7 +361,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index, struct mux_pkt_header *mux_header; struct mux_tx *t = NULL; static u32 seq_num = 1; - int dummy_cnt; int total_len; int ret; unsigned long flags; @@ -374,9 +373,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index, spin_lock_irqsave(&mux_dev->write_lock, flags); - dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4); - - total_len = len + MUX_HEADER_SIZE + dummy_cnt; + total_len = ALIGN(MUX_HEADER_SIZE + len, 4); t = alloc_mux_tx(total_len); if (!t) { @@ -392,7 +389,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index, mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]); memcpy(t->buf+MUX_HEADER_SIZE, data, len); - memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt); + memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE - + len); t->len = total_len; t->callback = cb; diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig index b78643f..072dac0 100644 --- a/drivers/staging/media/omap4iss/Kconfig +++ b/drivers/staging/media/omap4iss/Kconfig @@ -2,6 +2,7 @@ config VIDEO_OMAP4 bool "OMAP 4 Camera support" depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4 depends on HAS_DMA + select MFD_SYSCON select VIDEOBUF2_DMA_CONTIG ---help--- Driver for an OMAP 4 ISS controller. diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index e0ad5e5..7ced940 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c @@ -17,6 +17,7 @@ #include <linux/dma-mapping.h> #include <linux/i2c.h> #include <linux/interrupt.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -1386,6 +1387,16 @@ static int iss_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iss); + /* + * TODO: When implementing DT support switch to syscon regmap lookup by + * phandle. 
+ */ + iss->syscon = syscon_regmap_lookup_by_compatible("syscon"); + if (IS_ERR(iss->syscon)) { + ret = PTR_ERR(iss->syscon); + goto error; + } + /* Clocks */ ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP); if (ret < 0) diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h index 734cfee..35df8b4 100644 --- a/drivers/staging/media/omap4iss/iss.h +++ b/drivers/staging/media/omap4iss/iss.h @@ -29,6 +29,8 @@ #include "iss_ipipe.h" #include "iss_resizer.h" +struct regmap; + #define to_iss_device(ptr_module) \ container_of(ptr_module, struct iss_device, ptr_module) #define to_device(ptr_module) \ @@ -79,6 +81,7 @@ struct iss_reg { /* * struct iss_device - ISS device structure. + * @syscon: Regmap for the syscon register space * @crashed: Bitmask of crashed entities (indexed by entity ID) */ struct iss_device { @@ -93,6 +96,7 @@ struct iss_device { struct resource *res[OMAP4_ISS_MEM_LAST]; void __iomem *regs[OMAP4_ISS_MEM_LAST]; + struct regmap *syscon; u64 raw_dmamask; diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c index 7c3d55d..748607f 100644 --- a/drivers/staging/media/omap4iss/iss_csiphy.c +++ b/drivers/staging/media/omap4iss/iss_csiphy.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include <linux/device.h> +#include <linux/regmap.h> #include "../../../../arch/arm/mach-omap2/control.h" @@ -140,9 +141,11 @@ int omap4iss_csiphy_config(struct iss_device *iss, * - bit [18] : CSIPHY1 CTRLCLK enable * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2 */ - cam_rx_ctrl = omap4_ctrl_pad_readl( - OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX); - + /* + * TODO: When implementing DT support specify the CONTROL_CAMERA_RX + * register offset in the syscon property instead of hardcoding it. 
+ */ + regmap_read(iss->syscon, 0x68, &cam_rx_ctrl); if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) { cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK | @@ -166,8 +169,7 @@ int omap4iss_csiphy_config(struct iss_device *iss, cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK; } - omap4_ctrl_pad_writel(cam_rx_ctrl, - OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX); + regmap_write(iss->syscon, 0x68, cam_rx_ctrl); /* Reset used lane count */ csi2->phy->used_data_lanes = 0; diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index 42fba3f..cb0b63877 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -1900,23 +1900,20 @@ static int r871x_mp_ioctl_hdl(struct net_device *dev, struct mp_ioctl_handler *phandler; struct mp_ioctl_param *poidparam; unsigned long BytesRead, BytesWritten, BytesNeeded; - u8 *pparmbuf = NULL, bset; + u8 *pparmbuf, bset; u16 len; uint status; int ret = 0; - if ((!p->length) || (!p->pointer)) { - ret = -EINVAL; - goto _r871x_mp_ioctl_hdl_exit; - } + if ((!p->length) || (!p->pointer)) + return -EINVAL; + bset = (u8)(p->flags & 0xFFFF); len = p->length; - pparmbuf = NULL; pparmbuf = memdup_user(p->pointer, len); - if (IS_ERR(pparmbuf)) { - ret = PTR_ERR(pparmbuf); - goto _r871x_mp_ioctl_hdl_exit; - } + if (IS_ERR(pparmbuf)) + return PTR_ERR(pparmbuf); + poidparam = (struct mp_ioctl_param *)pparmbuf; if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) { ret = -EINVAL; diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 3c7ea95..dbbb2f8 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -1250,7 +1250,7 @@ err_enable: return -ENODEV; } -static void __exit lynxfb_pci_remove(struct pci_dev *pdev) +static void lynxfb_pci_remove(struct pci_dev *pdev) { struct fb_info *info; struct lynx_share *share; diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c index 1cdcf49..e00c060 100644 --- a/drivers/staging/vt6655/card.c +++ b/drivers/staging/vt6655/card.c @@ -362,12 +362,16 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, u8 bb_type) * Return Value: none */ bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate, - u64 qwBSSTimestamp, u64 qwLocalTSF) + u64 qwBSSTimestamp) { + u64 local_tsf; u64 qwTSFOffset = 0; - if (qwBSSTimestamp != qwLocalTSF) { - qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF); + CARDbGetCurrentTSF(pDevice, &local_tsf); + + if (qwBSSTimestamp != local_tsf) { + qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, + local_tsf); /* adjust TSF, HW's TSF add TSF Offset reg */ VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset); VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32)); diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h index 2dfc419..16cca49 100644 --- a/drivers/staging/vt6655/card.h +++ b/drivers/staging/vt6655/card.h @@ -83,7 +83,7 @@ bool CARDbRadioPowerOff(struct vnt_private *); bool CARDbRadioPowerOn(struct vnt_private *); bool CARDbSetPhyParameter(struct vnt_private *, u8); bool CARDbUpdateTSF(struct vnt_private *, unsigned char byRxRate, - u64 qwBSSTimestamp, u64 qwLocalTSF); + u64 qwBSSTimestamp); bool CARDbSetBeaconPeriod(struct vnt_private *, unsigned short wBeaconInterval); #endif /* __CARD_H__ */ diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 4bb4f8e..0343ae3 100644 --- 
a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -912,7 +912,11 @@ static int vnt_int_report_rate(struct vnt_private *priv, if (!(tsr1 & TSR1_TERR)) { info->status.rates[0].idx = idx; - info->flags |= IEEE80211_TX_STAT_ACK; + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + else + info->flags |= IEEE80211_TX_STAT_ACK; } return 0; @@ -937,9 +941,6 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx) /* Only the status of first TD in the chain is correct */ if (pTD->m_td1TD1.byTCR & TCR_STP) { if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) { - - vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1); - if (!(byTsr1 & TSR1_TERR)) { if (byTsr0 != 0) { pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n", @@ -958,6 +959,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx) (int)uIdx, byTsr1, byTsr0); } } + + vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1); + device_free_tx_buf(pDevice, pTD); pDevice->iTDUsed[uIdx]--; } @@ -989,10 +993,8 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc) skb->len, DMA_TO_DEVICE); } - if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) + if (skb) ieee80211_tx_status_irqsafe(pDevice->hw, skb); - else - dev_kfree_skb_irq(skb); pTDInfo->skb_dma = 0; pTDInfo->skb = NULL; @@ -1204,14 +1206,6 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) if (dma_idx == TYPE_AC0DMA) head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; - priv->iTDUsed[dma_idx]++; - - /* Take ownership */ - wmb(); - head_td->m_td0TD0.f1Owner = OWNED_BY_NIC; - - /* get Next */ - wmb(); priv->apCurrTD[dma_idx] = head_td->next; spin_unlock_irqrestore(&priv->lock, flags); @@ -1232,11 +1226,18 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); + /* Poll Transmit the adapter */ + wmb(); + head_td->m_td0TD0.f1Owner = OWNED_BY_NIC; + wmb(); /* second memory barrier */ + if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) MACvTransmitAC0(priv->PortOffset); else MACvTransmit0(priv->PortOffset); + priv->iTDUsed[dma_idx]++; + spin_unlock_irqrestore(&priv->lock, flags); return 0; @@ -1416,9 +1417,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->current_aid = conf->aid; - if (changed & BSS_CHANGED_BSSID) + if (changed & BSS_CHANGED_BSSID) { + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid); + spin_unlock_irqrestore(&priv->lock, flags); + } + if (changed & BSS_CHANGED_BASIC_RATES) { priv->basic_rates = conf->basic_rates; @@ -1477,7 +1485,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) { if (conf->assoc) { CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, - conf->sync_device_ts, conf->sync_tsf); + conf->sync_tsf); CARDbSetBeaconPeriod(priv, conf->beacon_int); diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index f6c2cf8..5c58996 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -805,10 +805,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) vnt_schedule_command(priv, WLAN_CMD_SETPOWER); } - if (current_rate > RATE_11M) - pkt_type = priv->packet_type; - else + if (current_rate > RATE_11M) { + if (info->band == IEEE80211_BAND_5GHZ) { + pkt_type = PK_TYPE_11A; + } else { + if 
(tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + pkt_type = PK_TYPE_11GB; + else + pkt_type = PK_TYPE_11GA; + } + } else { pkt_type = PK_TYPE_11B; + } spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index dbbb437..118938e 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -42,6 +42,17 @@ config THERMAL_OF Say 'Y' here if you need to build thermal infrastructure based on device tree. +config THERMAL_WRITABLE_TRIPS + bool "Enable writable trip points" + help + This option allows the system integrator to choose whether + trip temperatures can be changed from userspace. The + writable trips need to be specified when setting up the + thermal zone but the choice here takes precedence. + + Say 'Y' here if you would like to allow userspace tools to + change trip temperatures. + choice prompt "Default Thermal governor" default THERMAL_DEFAULT_GOV_STEP_WISE @@ -71,6 +82,14 @@ config THERMAL_DEFAULT_GOV_USER_SPACE Select this if you want to let the user space manage the platform thermals. +config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR + bool "power_allocator" + select THERMAL_GOV_POWER_ALLOCATOR + help + Select this if you want to control temperature based on + system and device power allocation. This governor can only + operate on cooling devices that implement the power API. + endchoice config THERMAL_GOV_FAIR_SHARE @@ -99,6 +118,12 @@ config THERMAL_GOV_USER_SPACE help Enable this to let the user space manage the platform thermals. +config THERMAL_GOV_POWER_ALLOCATOR + bool "Power allocator thermal governor" + help + Enable this to manage platform thermals by dynamically + allocating and limiting power to devices. + config CPU_THERMAL bool "generic cpu cooling support" depends on CPU_FREQ @@ -136,6 +161,14 @@ config THERMAL_EMULATION because userland can easily disable the thermal policy by simply flooding this sysfs node with low temperature values. +config HISI_THERMAL + tristate "Hisilicon thermal driver" + depends on ARCH_HISI && CPU_THERMAL && OF + help + Enable this to plug hisilicon's thermal sensor driver into the Linux + thermal framework. cpufreq is used as the cooling device to throttle + CPUs when the passive trip is crossed. + config IMX_THERMAL tristate "Temperature sensor driver for Freescale i.MX SoCs" depends on CPU_THERMAL @@ -321,4 +354,15 @@ depends on ARCH_STI && OF source "drivers/thermal/st/Kconfig" endmenu +config QCOM_SPMI_TEMP_ALARM + tristate "Qualcomm SPMI PMIC Temperature Alarm" + depends on OF && SPMI && IIO + select REGMAP_SPMI + help + This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) + PMIC devices. It shows up in sysfs as a thermal sensor with multiple + trip points. The temperature reported by the thermal sensor reflects the + real time die temperature if an ADC is present or an estimate of the + temperature based upon the over temperature stage value. 
+ endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 873ca15..535dfee 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -14,6 +14,7 @@ thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o +thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o # cpufreq cooling thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o @@ -22,6 +23,7 @@ thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o # platform thermal drivers +obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o @@ -41,3 +43,4 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ obj-$(CONFIG_ST_THERMAL) += st/ obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o +obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index f65f0d1..6509c61 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -26,10 +26,13 @@ #include <linux/thermal.h> #include <linux/cpufreq.h> #include <linux/err.h> +#include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/cpu_cooling.h> +#include <trace/events/thermal.h> + /* * Cooling state <-> CPUFreq frequency * @@ -45,6 +48,19 @@ */ /** + * struct power_table - frequency to power conversion + * @frequency: frequency in KHz + * @power: power in mW + * + * This structure is built when the cooling device registers and helps + * in translating frequency to power and viceversa. + */ +struct power_table { + u32 frequency; + u32 power; +}; + +/** * struct cpufreq_cooling_device - data for cooling device with cpufreq * @id: unique integer value corresponding to each cpufreq_cooling_device * registered. @@ -58,6 +74,15 @@ * cpufreq frequencies. * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. * @node: list_head to link all cpufreq_cooling_device together. + * @last_load: load measured by the latest call to cpufreq_get_actual_power() + * @time_in_idle: previous reading of the absolute time that this cpu was idle + * @time_in_idle_timestamp: wall time of the last invocation of + * get_cpu_idle_time_us() + * @dyn_power_table: array of struct power_table for frequency to power + * conversion, sorted in ascending order. + * @dyn_power_table_entries: number of entries in the @dyn_power_table array + * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered + * @plat_get_static_power: callback to calculate the static power * * This structure is required for keeping information of each registered * cpufreq_cooling_device. 
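
[Editor's note] The struct power_table comment above describes a table, sorted in ascending order, that maps each OPP frequency (kHz) to a power figure (mW) so the driver can translate between the two; the cpu_freq_to_power()/cpu_power_to_freq() helpers added later in this patch pick the largest entry that does not exceed the input. The stand-alone sketch below mirrors that lookup convention; the table contents are made-up numbers, not values from any real CPU.

/* Stand-alone sketch of the frequency <-> power table lookup. */
#include <stdio.h>
#include <stdint.h>

struct power_entry { uint32_t freq_khz; uint32_t power_mw; };

static const struct power_entry table[] = {       /* ascending order, invented values */
        {  500000,  80 },
        { 1000000, 210 },
        { 1500000, 450 },
        { 2000000, 820 },
};
static const int nentries = sizeof(table) / sizeof(table[0]);

/* power of the largest entry whose frequency does not exceed freq_khz */
static uint32_t freq_to_power(uint32_t freq_khz)
{
        int i;

        for (i = 1; i < nentries; i++)
                if (freq_khz < table[i].freq_khz)
                        break;
        return table[i - 1].power_mw;
}

/* frequency of the largest entry whose power does not exceed budget_mw */
static uint32_t power_to_freq(uint32_t budget_mw)
{
        int i;

        for (i = 1; i < nentries; i++)
                if (budget_mw < table[i].power_mw)
                        break;
        return table[i - 1].freq_khz;
}

int main(void)
{
        printf("1.2 GHz -> %u mW\n", freq_to_power(1200000));
        printf("500 mW budget -> %u kHz\n", power_to_freq(500));
        return 0;
}
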
@@ -71,6 +96,13 @@ struct cpufreq_cooling_device { unsigned int *freq_table; /* In descending order */ struct cpumask allowed_cpus; struct list_head node; + u32 last_load; + u64 *time_in_idle; + u64 *time_in_idle_timestamp; + struct power_table *dyn_power_table; + int dyn_power_table_entries; + struct device *cpu_dev; + get_static_t plat_get_static_power; }; static DEFINE_IDR(cpufreq_idr); static DEFINE_MUTEX(cooling_cpufreq_lock); @@ -186,23 +218,237 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, unsigned long max_freq = 0; struct cpufreq_cooling_device *cpufreq_dev; - if (event != CPUFREQ_ADJUST) - return 0; + switch (event) { - mutex_lock(&cooling_cpufreq_lock); - list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { - if (!cpumask_test_cpu(policy->cpu, - &cpufreq_dev->allowed_cpus)) + case CPUFREQ_ADJUST: + mutex_lock(&cooling_cpufreq_lock); + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { + if (!cpumask_test_cpu(policy->cpu, + &cpufreq_dev->allowed_cpus)) + continue; + + max_freq = cpufreq_dev->cpufreq_val; + + if (policy->max != max_freq) + cpufreq_verify_within_limits(policy, 0, + max_freq); + } + mutex_unlock(&cooling_cpufreq_lock); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +/** + * build_dyn_power_table() - create a dynamic power to frequency table + * @cpufreq_device: the cpufreq cooling device in which to store the table + * @capacitance: dynamic power coefficient for these cpus + * + * Build a dynamic power to frequency table for this cpu and store it + * in @cpufreq_device. This table will be used in cpu_power_to_freq() and + * cpu_freq_to_power() to convert between power and frequency + * efficiently. Power is stored in mW, frequency in KHz. The + * resulting table is in ascending order. + * + * Return: 0 on success, -E* on error. + */ +static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, + u32 capacitance) +{ + struct power_table *power_table; + struct dev_pm_opp *opp; + struct device *dev = NULL; + int num_opps = 0, cpu, i, ret = 0; + unsigned long freq; + + rcu_read_lock(); + + for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { + dev = get_cpu_device(cpu); + if (!dev) { + dev_warn(&cpufreq_device->cool_dev->device, + "No cpu device for cpu %d\n", cpu); continue; + } + + num_opps = dev_pm_opp_get_opp_count(dev); + if (num_opps > 0) { + break; + } else if (num_opps < 0) { + ret = num_opps; + goto unlock; + } + } - max_freq = cpufreq_dev->cpufreq_val; + if (num_opps == 0) { + ret = -EINVAL; + goto unlock; + } - if (policy->max != max_freq) - cpufreq_verify_within_limits(policy, 0, max_freq); + power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); + if (!power_table) { + ret = -ENOMEM; + goto unlock; } - mutex_unlock(&cooling_cpufreq_lock); - return 0; + for (freq = 0, i = 0; + opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); + freq++, i++) { + u32 freq_mhz, voltage_mv; + u64 power; + + freq_mhz = freq / 1000000; + voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; + + /* + * Do the multiplication with MHz and millivolt so as + * to not overflow. 
+ */ + power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv; + do_div(power, 1000000000); + + /* frequency is stored in power_table in KHz */ + power_table[i].frequency = freq / 1000; + + /* power is stored in mW */ + power_table[i].power = power; + } + + if (i == 0) { + ret = PTR_ERR(opp); + goto unlock; + } + + cpufreq_device->cpu_dev = dev; + cpufreq_device->dyn_power_table = power_table; + cpufreq_device->dyn_power_table_entries = i; + +unlock: + rcu_read_unlock(); + return ret; +} + +static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device, + u32 freq) +{ + int i; + struct power_table *pt = cpufreq_device->dyn_power_table; + + for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++) + if (freq < pt[i].frequency) + break; + + return pt[i - 1].power; +} + +static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device, + u32 power) +{ + int i; + struct power_table *pt = cpufreq_device->dyn_power_table; + + for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++) + if (power < pt[i].power) + break; + + return pt[i - 1].frequency; +} + +/** + * get_load() - get load for a cpu since last updated + * @cpufreq_device: &struct cpufreq_cooling_device for this cpu + * @cpu: cpu number + * + * Return: The average load of cpu @cpu in percentage since this + * function was last called. + */ +static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu) +{ + u32 load; + u64 now, now_idle, delta_time, delta_idle; + + now_idle = get_cpu_idle_time(cpu, &now, 0); + delta_idle = now_idle - cpufreq_device->time_in_idle[cpu]; + delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu]; + + if (delta_time <= delta_idle) + load = 0; + else + load = div64_u64(100 * (delta_time - delta_idle), delta_time); + + cpufreq_device->time_in_idle[cpu] = now_idle; + cpufreq_device->time_in_idle_timestamp[cpu] = now; + + return load; +} + +/** + * get_static_power() - calculate the static power consumed by the cpus + * @cpufreq_device: struct &cpufreq_cooling_device for this cpu cdev + * @tz: thermal zone device in which we're operating + * @freq: frequency in KHz + * @power: pointer in which to store the calculated static power + * + * Calculate the static power consumed by the cpus described by + * @cpu_actor running at frequency @freq. This function relies on a + * platform specific function that should have been provided when the + * actor was registered. If it wasn't, the static power is assumed to + * be negligible. The calculated static power is stored in @power. + * + * Return: 0 on success, -E* on failure. + */ +static int get_static_power(struct cpufreq_cooling_device *cpufreq_device, + struct thermal_zone_device *tz, unsigned long freq, + u32 *power) +{ + struct dev_pm_opp *opp; + unsigned long voltage; + struct cpumask *cpumask = &cpufreq_device->allowed_cpus; + unsigned long freq_hz = freq * 1000; + + if (!cpufreq_device->plat_get_static_power || + !cpufreq_device->cpu_dev) { + *power = 0; + return 0; + } + + rcu_read_lock(); + + opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, + true); + voltage = dev_pm_opp_get_voltage(opp); + + rcu_read_unlock(); + + if (voltage == 0) { + dev_warn_ratelimited(cpufreq_device->cpu_dev, + "Failed to get voltage for frequency %lu: %ld\n", + freq_hz, IS_ERR(opp) ? 
PTR_ERR(opp) : 0); + return -EINVAL; + } + + return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay, + voltage, power); +} + +/** + * get_dynamic_power() - calculate the dynamic power + * @cpufreq_device: &cpufreq_cooling_device for this cdev + * @freq: current frequency + * + * Return: the dynamic power consumed by the cpus described by + * @cpufreq_device. + */ +static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device, + unsigned long freq) +{ + u32 raw_cpu_power; + + raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq); + return (raw_cpu_power * cpufreq_device->last_load) / 100; } /* cpufreq cooling device callback functions are defined below */ @@ -280,8 +526,205 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, return 0; } +/** + * cpufreq_get_requested_power() - get the current power + * @cdev: &thermal_cooling_device pointer + * @tz: a valid thermal zone device pointer + * @power: pointer in which to store the resulting power + * + * Calculate the current power consumption of the cpus in milliwatts + * and store it in @power. This function should actually calculate + * the requested power, but it's hard to get the frequency that + * cpufreq would have assigned if there were no thermal limits. + * Instead, we calculate the current power on the assumption that the + * immediate future will look like the immediate past. + * + * We use the current frequency and the average load since this + * function was last called. In reality, there could have been + * multiple opps since this function was last called and that affects + * the load calculation. While it's not perfectly accurate, this + * simplification is good enough and works. REVISIT this, as more + * complex code may be needed if experiments show that it's not + * accurate enough. + * + * Return: 0 on success, -E* if getting the static power failed. 
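The static half of the estimate comes from the platform's plat_get_static_power hook, called from get_static_power() above. A platform might implement it roughly as in this sketch; the leakage model is invented purely for illustration, and only the signature mirrors the call site (voltage arrives in microvolts from dev_pm_opp_get_voltage(), interval is the zone's passive_delay):

    static int example_get_static_power(cpumask_t *cpumask, int interval,
                                        unsigned long voltage, u32 *power)
    {
            /* Made-up leakage model: a few mW per cpu, scaled with voltage. */
            *power = cpumask_weight(cpumask) * (u32)(voltage / 100000);

            return 0;
    }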
+ */ +static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, + u32 *power) +{ + unsigned long freq; + int i = 0, cpu, ret; + u32 static_power, dynamic_power, total_load = 0; + struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; + u32 *load_cpu = NULL; + + cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask); + + /* + * All the CPUs are offline, thus the requested power by + * the cdev is 0 + */ + if (cpu >= nr_cpu_ids) { + *power = 0; + return 0; + } + + freq = cpufreq_quick_get(cpu); + + if (trace_thermal_power_cpu_get_power_enabled()) { + u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus); + + load_cpu = devm_kcalloc(&cdev->device, ncpus, sizeof(*load_cpu), + GFP_KERNEL); + } + + for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { + u32 load; + + if (cpu_online(cpu)) + load = get_load(cpufreq_device, cpu); + else + load = 0; + + total_load += load; + if (trace_thermal_power_cpu_limit_enabled() && load_cpu) + load_cpu[i] = load; + + i++; + } + + cpufreq_device->last_load = total_load; + + dynamic_power = get_dynamic_power(cpufreq_device, freq); + ret = get_static_power(cpufreq_device, tz, freq, &static_power); + if (ret) { + if (load_cpu) + devm_kfree(&cdev->device, load_cpu); + return ret; + } + + if (load_cpu) { + trace_thermal_power_cpu_get_power( + &cpufreq_device->allowed_cpus, + freq, load_cpu, i, dynamic_power, static_power); + + devm_kfree(&cdev->device, load_cpu); + } + + *power = static_power + dynamic_power; + return 0; +} + +/** + * cpufreq_state2power() - convert a cpu cdev state to power consumed + * @cdev: &thermal_cooling_device pointer + * @tz: a valid thermal zone device pointer + * @state: cooling device state to be converted + * @power: pointer in which to store the resulting power + * + * Convert cooling device state @state into power consumption in + * milliwatts assuming 100% load. Store the calculated power in + * @power. + * + * Return: 0 on success, -EINVAL if the cooling device state could not + * be converted into a frequency or other -E* if there was an error + * when calculating the static power. + */ +static int cpufreq_state2power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, + unsigned long state, u32 *power) +{ + unsigned int freq, num_cpus; + cpumask_t cpumask; + u32 static_power, dynamic_power; + int ret; + struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; + + cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask); + num_cpus = cpumask_weight(&cpumask); + + /* None of our cpus are online, so no power */ + if (num_cpus == 0) { + *power = 0; + return 0; + } + + freq = cpufreq_device->freq_table[state]; + if (!freq) + return -EINVAL; + + dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus; + ret = get_static_power(cpufreq_device, tz, freq, &static_power); + if (ret) + return ret; + + *power = static_power + dynamic_power; + return 0; +} + +/** + * cpufreq_power2state() - convert power to a cooling device state + * @cdev: &thermal_cooling_device pointer + * @tz: a valid thermal zone device pointer + * @power: power in milliwatts to be converted + * @state: pointer in which to store the resulting state + * + * Calculate a cooling device state for the cpus described by @cdev + * that would allow them to consume at most @power mW and store it in + * @state. Note that this calculation depends on external factors + * such as the cpu load or the current static power. 
Calling this + * function with the same power as input can yield different cooling + * device states depending on those external factors. + * + * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if + * the calculated frequency could not be converted to a valid state. + * The latter should not happen unless the frequencies available to + * cpufreq have changed since the initialization of the cpu cooling + * device. + */ +static int cpufreq_power2state(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, u32 power, + unsigned long *state) +{ + unsigned int cpu, cur_freq, target_freq; + int ret; + s32 dyn_power; + u32 last_load, normalised_power, static_power; + struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; + + cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask); + + /* None of our cpus are online */ + if (cpu >= nr_cpu_ids) + return -ENODEV; + + cur_freq = cpufreq_quick_get(cpu); + ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power); + if (ret) + return ret; + + dyn_power = power - static_power; + dyn_power = dyn_power > 0 ? dyn_power : 0; + last_load = cpufreq_device->last_load ?: 1; + normalised_power = (dyn_power * 100) / last_load; + target_freq = cpu_power_to_freq(cpufreq_device, normalised_power); + + *state = cpufreq_cooling_get_level(cpu, target_freq); + if (*state == THERMAL_CSTATE_INVALID) { + dev_warn_ratelimited(&cdev->device, + "Failed to convert %dKHz for cpu %d into a cdev state\n", + target_freq, cpu); + return -EINVAL; + } + + trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus, + target_freq, *state, power); + return 0; +} + /* Bind cpufreq callbacks to thermal cooling device ops */ -static struct thermal_cooling_device_ops const cpufreq_cooling_ops = { +static struct thermal_cooling_device_ops cpufreq_cooling_ops = { .get_max_state = cpufreq_get_max_state, .get_cur_state = cpufreq_get_cur_state, .set_cur_state = cpufreq_set_cur_state, @@ -311,6 +754,9 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table, * @np: a valid struct device_node to the cooling device device tree node * @clip_cpus: cpumask of cpus where the frequency constraints will happen. * Normally this should be same as cpufreq policy->related_cpus. + * @capacitance: dynamic power coefficient for these cpus + * @plat_static_func: function to calculate the static power consumed by these + * cpus (optional) * * This interface function registers the cpufreq cooling device with the name * "thermal-cpufreq-%x". 
This api can support multiple instances of cpufreq @@ -322,13 +768,14 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table, */ static struct thermal_cooling_device * __cpufreq_cooling_register(struct device_node *np, - const struct cpumask *clip_cpus) + const struct cpumask *clip_cpus, u32 capacitance, + get_static_t plat_static_func) { struct thermal_cooling_device *cool_dev; struct cpufreq_cooling_device *cpufreq_dev; char dev_name[THERMAL_NAME_LENGTH]; struct cpufreq_frequency_table *pos, *table; - unsigned int freq, i; + unsigned int freq, i, num_cpus; int ret; table = cpufreq_frequency_get_table(cpumask_first(clip_cpus)); @@ -341,6 +788,23 @@ __cpufreq_cooling_register(struct device_node *np, if (!cpufreq_dev) return ERR_PTR(-ENOMEM); + num_cpus = cpumask_weight(clip_cpus); + cpufreq_dev->time_in_idle = kcalloc(num_cpus, + sizeof(*cpufreq_dev->time_in_idle), + GFP_KERNEL); + if (!cpufreq_dev->time_in_idle) { + cool_dev = ERR_PTR(-ENOMEM); + goto free_cdev; + } + + cpufreq_dev->time_in_idle_timestamp = + kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp), + GFP_KERNEL); + if (!cpufreq_dev->time_in_idle_timestamp) { + cool_dev = ERR_PTR(-ENOMEM); + goto free_time_in_idle; + } + /* Find max levels */ cpufreq_for_each_valid_entry(pos, table) cpufreq_dev->max_level++; @@ -349,7 +813,7 @@ __cpufreq_cooling_register(struct device_node *np, cpufreq_dev->max_level, GFP_KERNEL); if (!cpufreq_dev->freq_table) { cool_dev = ERR_PTR(-ENOMEM); - goto free_cdev; + goto free_time_in_idle_timestamp; } /* max_level is an index, not a counter */ @@ -357,6 +821,20 @@ __cpufreq_cooling_register(struct device_node *np, cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); + if (capacitance) { + cpufreq_cooling_ops.get_requested_power = + cpufreq_get_requested_power; + cpufreq_cooling_ops.state2power = cpufreq_state2power; + cpufreq_cooling_ops.power2state = cpufreq_power2state; + cpufreq_dev->plat_get_static_power = plat_static_func; + + ret = build_dyn_power_table(cpufreq_dev, capacitance); + if (ret) { + cool_dev = ERR_PTR(ret); + goto free_table; + } + } + ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); if (ret) { cool_dev = ERR_PTR(ret); @@ -402,6 +880,10 @@ remove_idr: release_idr(&cpufreq_idr, cpufreq_dev->id); free_table: kfree(cpufreq_dev->freq_table); +free_time_in_idle_timestamp: + kfree(cpufreq_dev->time_in_idle_timestamp); +free_time_in_idle: + kfree(cpufreq_dev->time_in_idle); free_cdev: kfree(cpufreq_dev); @@ -422,7 +904,7 @@ free_cdev: struct thermal_cooling_device * cpufreq_cooling_register(const struct cpumask *clip_cpus) { - return __cpufreq_cooling_register(NULL, clip_cpus); + return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL); } EXPORT_SYMBOL_GPL(cpufreq_cooling_register); @@ -446,11 +928,78 @@ of_cpufreq_cooling_register(struct device_node *np, if (!np) return ERR_PTR(-EINVAL); - return __cpufreq_cooling_register(np, clip_cpus); + return __cpufreq_cooling_register(np, clip_cpus, 0, NULL); } EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); /** + * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions + * @clip_cpus: cpumask of cpus where the frequency constraints will happen + * @capacitance: dynamic power coefficient for these cpus + * @plat_static_func: function to calculate the static power consumed by these + * cpus (optional) + * + * This interface function registers the cpufreq cooling device with + * the name "thermal-cpufreq-%x". This api can support multiple + * instances of cpufreq cooling devices. 
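For a platform driver, wiring this up might look roughly like the sketch below; the capacitance value is invented and example_get_static_power is the hypothetical hook sketched earlier:

    struct thermal_cooling_device *cdev;

    /* Hypothetical registration with the power extensions enabled. */
    cdev = cpufreq_power_cooling_register(cpu_possible_mask,
                                          105, /* dynamic coefficient */
                                          example_get_static_power);
    if (IS_ERR(cdev))
            pr_err("failed to register cpufreq cooling device\n");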
Using this function, the + * cooling device will implement the power extensions by using a + * simple cpu power model. The cpus must have registered their OPPs + * using the OPP library. + * + * An optional @plat_static_func may be provided to calculate the + * static power consumed by these cpus. If the platform's static + * power consumption is unknown or negligible, make it NULL. + * + * Return: a valid struct thermal_cooling_device pointer on success, + * on failure, it returns a corresponding ERR_PTR(). + */ +struct thermal_cooling_device * +cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance, + get_static_t plat_static_func) +{ + return __cpufreq_cooling_register(NULL, clip_cpus, capacitance, + plat_static_func); +} +EXPORT_SYMBOL(cpufreq_power_cooling_register); + +/** + * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions + * @np: a valid struct device_node to the cooling device device tree node + * @clip_cpus: cpumask of cpus where the frequency constraints will happen + * @capacitance: dynamic power coefficient for these cpus + * @plat_static_func: function to calculate the static power consumed by these + * cpus (optional) + * + * This interface function registers the cpufreq cooling device with + * the name "thermal-cpufreq-%x". This api can support multiple + * instances of cpufreq cooling devices. Using this API, the cpufreq + * cooling device will be linked to the device tree node provided. + * Using this function, the cooling device will implement the power + * extensions by using a simple cpu power model. The cpus must have + * registered their OPPs using the OPP library. + * + * An optional @plat_static_func may be provided to calculate the + * static power consumed by these cpus. If the platform's static + * power consumption is unknown or negligible, make it NULL. + * + * Return: a valid struct thermal_cooling_device pointer on success, + * on failure, it returns a corresponding ERR_PTR(). + */ +struct thermal_cooling_device * +of_cpufreq_power_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus, + u32 capacitance, + get_static_t plat_static_func) +{ + if (!np) + return ERR_PTR(-EINVAL); + + return __cpufreq_cooling_register(np, clip_cpus, capacitance, + plat_static_func); +} +EXPORT_SYMBOL(of_cpufreq_power_cooling_register); + +/** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. * @cdev: thermal cooling device pointer. * @@ -475,6 +1024,8 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) thermal_cooling_device_unregister(cpufreq_dev->cool_dev); release_idr(&cpufreq_idr, cpufreq_dev->id); + kfree(cpufreq_dev->time_in_idle_timestamp); + kfree(cpufreq_dev->time_in_idle); kfree(cpufreq_dev->freq_table); kfree(cpufreq_dev); } diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c index 20adfbe..2fb273c 100644 --- a/drivers/thermal/db8500_thermal.c +++ b/drivers/thermal/db8500_thermal.c @@ -76,7 +76,7 @@ static int db8500_cdev_bind(struct thermal_zone_device *thermal, upper = lower = i > max_state ? max_state : i; ret = thermal_zone_bind_cooling_device(thermal, i, cdev, - upper, lower); + upper, lower, THERMAL_WEIGHT_DEFAULT); dev_info(&cdev->device, "%s bind to %d: %d-%s\n", cdev->type, i, ret, ret ? 
"fail" : "succeed"); diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c index 6e0a3fb..c2c10bb 100644 --- a/drivers/thermal/fair_share.c +++ b/drivers/thermal/fair_share.c @@ -59,17 +59,17 @@ static int get_trip_level(struct thermal_zone_device *tz) } static long get_target_state(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int weight, int level) + struct thermal_cooling_device *cdev, int percentage, int level) { unsigned long max_state; cdev->ops->get_max_state(cdev, &max_state); - return (long)(weight * level * max_state) / (100 * tz->trips); + return (long)(percentage * level * max_state) / (100 * tz->trips); } /** - * fair_share_throttle - throttles devices asscciated with the given zone + * fair_share_throttle - throttles devices associated with the given zone * @tz - thermal_zone_device * * Throttling Logic: This uses three parameters to calculate the new @@ -77,7 +77,7 @@ static long get_target_state(struct thermal_zone_device *tz, * * Parameters used for Throttling: * P1. max_state: Maximum throttle state exposed by the cooling device. - * P2. weight[i]/100: + * P2. percentage[i]/100: * How 'effective' the 'i'th device is, in cooling the given zone. * P3. cur_trip_level/max_no_of_trips: * This describes the extent to which the devices should be throttled. @@ -88,28 +88,33 @@ static long get_target_state(struct thermal_zone_device *tz, */ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) { - const struct thermal_zone_params *tzp; - struct thermal_cooling_device *cdev; struct thermal_instance *instance; - int i; + int total_weight = 0; + int total_instance = 0; int cur_trip_level = get_trip_level(tz); - if (!tz->tzp || !tz->tzp->tbp) - return -EINVAL; + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + if (instance->trip != trip) + continue; + + total_weight += instance->weight; + total_instance++; + } - tzp = tz->tzp; + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + int percentage; + struct thermal_cooling_device *cdev = instance->cdev; - for (i = 0; i < tzp->num_tbps; i++) { - if (!tzp->tbp[i].cdev) + if (instance->trip != trip) continue; - cdev = tzp->tbp[i].cdev; - instance = get_thermal_instance(tz, cdev, trip); - if (!instance) - continue; + if (!total_weight) + percentage = 100 / total_instance; + else + percentage = (instance->weight * 100) / total_weight; - instance->target = get_target_state(tz, cdev, - tzp->tbp[i].weight, cur_trip_level); + instance->target = get_target_state(tz, cdev, percentage, + cur_trip_level); instance->cdev->updated = false; thermal_cdev_update(cdev); diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c new file mode 100644 index 0000000..d5dd357 --- /dev/null +++ b/drivers/thermal/hisi_thermal.c @@ -0,0 +1,421 @@ +/* + * Hisilicon thermal sensor driver + * + * Copyright (c) 2014-2015 Hisilicon Limited. + * Copyright (c) 2014-2015 Linaro Limited. + * + * Xinwei Kong <kong.kongxinwei@hisilicon.com> + * Leo Yan <leo.yan@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/cpufreq.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/io.h> + +#include "thermal_core.h" + +#define TEMP0_TH (0x4) +#define TEMP0_RST_TH (0x8) +#define TEMP0_CFG (0xC) +#define TEMP0_EN (0x10) +#define TEMP0_INT_EN (0x14) +#define TEMP0_INT_CLR (0x18) +#define TEMP0_RST_MSK (0x1C) +#define TEMP0_VALUE (0x28) + +#define HISI_TEMP_BASE (-60) +#define HISI_TEMP_RESET (100000) + +#define HISI_MAX_SENSORS 4 + +struct hisi_thermal_sensor { + struct hisi_thermal_data *thermal; + struct thermal_zone_device *tzd; + + long sensor_temp; + uint32_t id; + uint32_t thres_temp; +}; + +struct hisi_thermal_data { + struct mutex thermal_lock; /* protects register data */ + struct platform_device *pdev; + struct clk *clk; + struct hisi_thermal_sensor sensors[HISI_MAX_SENSORS]; + + int irq, irq_bind_sensor; + bool irq_enabled; + + void __iomem *regs; +}; + +/* in millicelsius */ +static inline int _step_to_temp(int step) +{ + /* + * Every step equals (1 * 200) / 255 celsius, and finally + * need convert to millicelsius. + */ + return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000; +} + +static inline long _temp_to_step(long temp) +{ + return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200); +} + +static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, + struct hisi_thermal_sensor *sensor) +{ + long val; + + mutex_lock(&data->thermal_lock); + + /* disable interrupt */ + writel(0x0, data->regs + TEMP0_INT_EN); + writel(0x1, data->regs + TEMP0_INT_CLR); + + /* disable module firstly */ + writel(0x0, data->regs + TEMP0_EN); + + /* select sensor id */ + writel((sensor->id << 12), data->regs + TEMP0_CFG); + + /* enable module */ + writel(0x1, data->regs + TEMP0_EN); + + usleep_range(3000, 5000); + + val = readl(data->regs + TEMP0_VALUE); + val = _step_to_temp(val); + + mutex_unlock(&data->thermal_lock); + + return val; +} + +static void hisi_thermal_enable_bind_irq_sensor + (struct hisi_thermal_data *data) +{ + struct hisi_thermal_sensor *sensor; + + mutex_lock(&data->thermal_lock); + + sensor = &data->sensors[data->irq_bind_sensor]; + + /* setting the hdak time */ + writel(0x0, data->regs + TEMP0_CFG); + + /* disable module firstly */ + writel(0x0, data->regs + TEMP0_RST_MSK); + writel(0x0, data->regs + TEMP0_EN); + + /* select sensor id */ + writel((sensor->id << 12), data->regs + TEMP0_CFG); + + /* enable for interrupt */ + writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00, + data->regs + TEMP0_TH); + + writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH); + + /* enable module */ + writel(0x1, data->regs + TEMP0_RST_MSK); + writel(0x1, data->regs + TEMP0_EN); + + writel(0x0, data->regs + TEMP0_INT_CLR); + writel(0x1, data->regs + TEMP0_INT_EN); + + usleep_range(3000, 5000); + + mutex_unlock(&data->thermal_lock); +} + +static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data) +{ + mutex_lock(&data->thermal_lock); + + /* disable sensor module */ + writel(0x0, data->regs + TEMP0_INT_EN); + writel(0x0, data->regs + TEMP0_RST_MSK); + writel(0x0, data->regs + TEMP0_EN); + + mutex_unlock(&data->thermal_lock); +} + +static int hisi_thermal_get_temp(void *_sensor, long *temp) +{ + struct hisi_thermal_sensor *sensor = _sensor; + struct hisi_thermal_data *data = sensor->thermal; + + int sensor_id = 0, i; + long max_temp = 0; + + *temp = hisi_thermal_get_sensor_temp(data, sensor); + + sensor->sensor_temp = *temp; + + for (i = 0; i < HISI_MAX_SENSORS; i++) { + if 
(data->sensors[i].sensor_temp >= max_temp) { + max_temp = data->sensors[i].sensor_temp; + sensor_id = i; + } + } + + mutex_lock(&data->thermal_lock); + data->irq_bind_sensor = sensor_id; + mutex_unlock(&data->thermal_lock); + + dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%ld, thres=%d\n", + sensor->id, data->irq_enabled, *temp, sensor->thres_temp); + /* + * Bind irq to sensor for two cases: + * Reenable alarm IRQ if temperature below threshold; + * if irq has been enabled, always set it; + */ + if (data->irq_enabled) { + hisi_thermal_enable_bind_irq_sensor(data); + return 0; + } + + if (max_temp < sensor->thres_temp) { + data->irq_enabled = true; + hisi_thermal_enable_bind_irq_sensor(data); + enable_irq(data->irq); + } + + return 0; +} + +static struct thermal_zone_of_device_ops hisi_of_thermal_ops = { + .get_temp = hisi_thermal_get_temp, +}; + +static irqreturn_t hisi_thermal_alarm_irq(int irq, void *dev) +{ + struct hisi_thermal_data *data = dev; + + disable_irq_nosync(irq); + data->irq_enabled = false; + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev) +{ + struct hisi_thermal_data *data = dev; + struct hisi_thermal_sensor *sensor; + int i; + + mutex_lock(&data->thermal_lock); + sensor = &data->sensors[data->irq_bind_sensor]; + + dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n", + sensor->thres_temp / 1000); + mutex_unlock(&data->thermal_lock); + + for (i = 0; i < HISI_MAX_SENSORS; i++) + thermal_zone_device_update(data->sensors[i].tzd); + + return IRQ_HANDLED; +} + +static int hisi_thermal_register_sensor(struct platform_device *pdev, + struct hisi_thermal_data *data, + struct hisi_thermal_sensor *sensor, + int index) +{ + int ret, i; + const struct thermal_trip *trip; + + sensor->id = index; + sensor->thermal = data; + + sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, sensor->id, + sensor, &hisi_of_thermal_ops); + if (IS_ERR(sensor->tzd)) { + ret = PTR_ERR(sensor->tzd); + dev_err(&pdev->dev, "failed to register sensor id %d: %d\n", + sensor->id, ret); + return ret; + } + + trip = of_thermal_get_trip_points(sensor->tzd); + + for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) { + if (trip[i].type == THERMAL_TRIP_PASSIVE) { + sensor->thres_temp = trip[i].temperature; + break; + } + } + + return 0; +} + +static const struct of_device_id of_hisi_thermal_match[] = { + { .compatible = "hisilicon,tsensor" }, + { /* end */ } +}; +MODULE_DEVICE_TABLE(of, of_hisi_thermal_match); + +static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor, + bool on) +{ + struct thermal_zone_device *tzd = sensor->tzd; + + tzd->ops->set_mode(tzd, + on ? 
THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED); +} + +static int hisi_thermal_probe(struct platform_device *pdev) +{ + struct hisi_thermal_data *data; + struct resource *res; + int i; + int ret; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_init(&data->thermal_lock); + data->pdev = pdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->regs)) { + dev_err(&pdev->dev, "failed to get io address\n"); + return PTR_ERR(data->regs); + } + + data->irq = platform_get_irq(pdev, 0); + if (data->irq < 0) + return data->irq; + + ret = devm_request_threaded_irq(&pdev->dev, data->irq, + hisi_thermal_alarm_irq, + hisi_thermal_alarm_irq_thread, + 0, "hisi_thermal", data); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, data); + + data->clk = devm_clk_get(&pdev->dev, "thermal_clk"); + if (IS_ERR(data->clk)) { + ret = PTR_ERR(data->clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to get thermal clk: %d\n", ret); + return ret; + } + + /* enable clock for thermal */ + ret = clk_prepare_enable(data->clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); + return ret; + } + + for (i = 0; i < HISI_MAX_SENSORS; ++i) { + ret = hisi_thermal_register_sensor(pdev, data, + &data->sensors[i], i); + if (ret) { + dev_err(&pdev->dev, + "failed to register thermal sensor: %d\n", ret); + goto err_get_sensor_data; + } + } + + hisi_thermal_enable_bind_irq_sensor(data); + data->irq_enabled = true; + + for (i = 0; i < HISI_MAX_SENSORS; i++) + hisi_thermal_toggle_sensor(&data->sensors[i], true); + + return 0; + +err_get_sensor_data: + clk_disable_unprepare(data->clk); + + return ret; +} + +static int hisi_thermal_remove(struct platform_device *pdev) +{ + struct hisi_thermal_data *data = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < HISI_MAX_SENSORS; i++) { + struct hisi_thermal_sensor *sensor = &data->sensors[i]; + + hisi_thermal_toggle_sensor(sensor, false); + thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd); + } + + hisi_thermal_disable_sensor(data); + clk_disable_unprepare(data->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int hisi_thermal_suspend(struct device *dev) +{ + struct hisi_thermal_data *data = dev_get_drvdata(dev); + + hisi_thermal_disable_sensor(data); + data->irq_enabled = false; + + clk_disable_unprepare(data->clk); + + return 0; +} + +static int hisi_thermal_resume(struct device *dev) +{ + struct hisi_thermal_data *data = dev_get_drvdata(dev); + + clk_prepare_enable(data->clk); + + data->irq_enabled = true; + hisi_thermal_enable_bind_irq_sensor(data); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops, + hisi_thermal_suspend, hisi_thermal_resume); + +static struct platform_driver hisi_thermal_driver = { + .driver = { + .name = "hisi_thermal", + .owner = THIS_MODULE, + .pm = &hisi_thermal_pm_ops, + .of_match_table = of_hisi_thermal_match, + }, + .probe = hisi_thermal_probe, + .remove = hisi_thermal_remove, +}; + +module_platform_driver(hisi_thermal_driver); + +MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>"); +MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>"); +MODULE_DESCRIPTION("Hisilicon thermal driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 2ccbc07..fde4c28 100644 --- a/drivers/thermal/imx_thermal.c +++ 
b/drivers/thermal/imx_thermal.c @@ -306,7 +306,8 @@ static int imx_bind(struct thermal_zone_device *tz, ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev, THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); if (ret) { dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n", diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index e34ccd5..2e67161 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -206,51 +206,57 @@ static void find_target_mwait(void) } +struct pkg_cstate_info { + bool skip; + int msr_index; + int cstate_id; +}; + +#define PKG_CSTATE_INIT(id) { \ + .msr_index = MSR_PKG_C##id##_RESIDENCY, \ + .cstate_id = id \ + } + +static struct pkg_cstate_info pkg_cstates[] = { + PKG_CSTATE_INIT(2), + PKG_CSTATE_INIT(3), + PKG_CSTATE_INIT(6), + PKG_CSTATE_INIT(7), + PKG_CSTATE_INIT(8), + PKG_CSTATE_INIT(9), + PKG_CSTATE_INIT(10), + {NULL}, +}; + static bool has_pkg_state_counter(void) { - u64 tmp; - return !rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &tmp) || - !rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &tmp) || - !rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &tmp) || - !rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &tmp); + u64 val; + struct pkg_cstate_info *info = pkg_cstates; + + /* check if any one of the counter msrs exists */ + while (info->msr_index) { + if (!rdmsrl_safe(info->msr_index, &val)) + return true; + info++; + } + + return false; } static u64 pkg_state_counter(void) { u64 val; u64 count = 0; - - static bool skip_c2; - static bool skip_c3; - static bool skip_c6; - static bool skip_c7; - - if (!skip_c2) { - if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val)) - count += val; - else - skip_c2 = true; - } - - if (!skip_c3) { - if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val)) - count += val; - else - skip_c3 = true; - } - - if (!skip_c6) { - if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val)) - count += val; - else - skip_c6 = true; - } - - if (!skip_c7) { - if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val)) - count += val; - else - skip_c7 = true; + struct pkg_cstate_info *info = pkg_cstates; + + while (info->msr_index) { + if (!info->skip) { + if (!rdmsrl_safe(info->msr_index, &val)) + count += val; + else + info->skip = true; + } + info++; } return count; @@ -667,7 +673,7 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { }; /* runs on Nehalem and later */ -static const struct x86_cpu_id intel_powerclamp_ids[] = { +static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = { { X86_VENDOR_INTEL, 6, 0x1a}, { X86_VENDOR_INTEL, 6, 0x1c}, { X86_VENDOR_INTEL, 6, 0x1e}, @@ -689,13 +695,14 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = { { X86_VENDOR_INTEL, 6, 0x46}, { X86_VENDOR_INTEL, 6, 0x4c}, { X86_VENDOR_INTEL, 6, 0x4d}, + { X86_VENDOR_INTEL, 6, 0x4f}, { X86_VENDOR_INTEL, 6, 0x56}, { X86_VENDOR_INTEL, 6, 0x57}, {} }; MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); -static int powerclamp_probe(void) +static int __init powerclamp_probe(void) { if (!x86_match_cpu(intel_powerclamp_ids)) { pr_err("Intel powerclamp does not run on family %d model %d\n", @@ -761,7 +768,7 @@ file_error: debugfs_remove_recursive(debug_dir); } -static int powerclamp_init(void) +static int __init powerclamp_init(void) { int retval; int bitmap_size; @@ -810,7 +817,7 @@ exit_free: } module_init(powerclamp_init); -static void powerclamp_exit(void) +static void __exit powerclamp_exit(void) { unregister_hotcpu_notifier(&powerclamp_cpu_notifier); end_power_clamp(); diff --git 
a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 668fb1b..b295b2b 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -58,6 +58,8 @@ struct __thermal_bind_params { * @mode: current thermal zone device mode (enabled/disabled) * @passive_delay: polling interval while passive cooling is activated * @polling_delay: zone polling interval + * @slope: slope of the temperature adjustment curve + * @offset: offset of the temperature adjustment curve * @ntrips: number of trip points * @trips: an array of trip points (0..ntrips - 1) * @num_tbps: number of thermal bind params @@ -70,6 +72,8 @@ struct __thermal_zone { enum thermal_device_mode mode; int passive_delay; int polling_delay; + int slope; + int offset; /* trip data */ int ntrips; @@ -227,7 +231,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal, ret = thermal_zone_bind_cooling_device(thermal, tbp->trip_id, cdev, tbp->max, - tbp->min); + tbp->min, + tbp->usage); if (ret) return ret; } @@ -581,7 +586,7 @@ static int thermal_of_populate_bind_params(struct device_node *np, u32 prop; /* Default weight. Usage is optional */ - __tbp->usage = 0; + __tbp->usage = THERMAL_WEIGHT_DEFAULT; ret = of_property_read_u32(np, "contribution", &prop); if (ret == 0) __tbp->usage = prop; @@ -715,7 +720,7 @@ static int thermal_of_populate_trip(struct device_node *np, * @np parameter and fills the read data into a __thermal_zone data structure * and return this pointer. * - * TODO: Missing properties to parse: thermal-sensor-names and coefficients + * TODO: Missing properties to parse: thermal-sensor-names * * Return: On success returns a valid struct __thermal_zone, * otherwise, it returns a corresponding ERR_PTR(). Caller must @@ -727,7 +732,7 @@ thermal_of_build_thermal_zone(struct device_node *np) struct device_node *child = NULL, *gchild; struct __thermal_zone *tz; int ret, i; - u32 prop; + u32 prop, coef[2]; if (!np) { pr_err("no thermal zone np\n"); @@ -752,6 +757,20 @@ thermal_of_build_thermal_zone(struct device_node *np) } tz->polling_delay = prop; + /* + * REVIST: for now, the thermal framework supports only + * one sensor per thermal zone. Thus, we are considering + * only the first two values as slope and offset. 
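The two values are only parsed here and handed on through tzp->slope and tzp->offset (see the of_parse_thermal_zones() hunk below); what a sensor driver does with them is driver-specific, but a plausible linear correction would be along these lines (purely illustrative):

    /* Hypothetical driver-side use of the parsed coefficients. */
    adjusted_temp = raw_temp * tzp->slope + tzp->offset;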
+ */ + ret = of_property_read_u32_array(np, "coefficients", coef, 2); + if (ret == 0) { + tz->slope = coef[0]; + tz->offset = coef[1]; + } else { + tz->slope = 1; + tz->offset = 0; + } + /* trips */ child = of_get_child_by_name(np, "trips"); @@ -865,6 +884,8 @@ int __init of_parse_thermal_zones(void) for_each_child_of_node(np, child) { struct thermal_zone_device *zone; struct thermal_zone_params *tzp; + int i, mask = 0; + u32 prop; /* Check whether child is enabled or not */ if (!of_device_is_available(child)) @@ -891,8 +912,18 @@ int __init of_parse_thermal_zones(void) /* No hwmon because there might be hwmon drivers registering */ tzp->no_hwmon = true; + if (!of_property_read_u32(child, "sustainable-power", &prop)) + tzp->sustainable_power = prop; + + for (i = 0; i < tz->ntrips; i++) + mask |= 1 << i; + + /* these two are left for temperature drivers to use */ + tzp->slope = tz->slope; + tzp->offset = tz->offset; + zone = thermal_zone_device_register(child->name, tz->ntrips, - 0, tz, + mask, tz, ops, tzp, tz->passive_delay, tz->polling_delay); diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c new file mode 100644 index 0000000..4672250 --- /dev/null +++ b/drivers/thermal/power_allocator.c @@ -0,0 +1,539 @@ +/* + * A power allocator to manage temperature + * + * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "Power allocator: " fmt + +#include <linux/rculist.h> +#include <linux/slab.h> +#include <linux/thermal.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/thermal_power_allocator.h> + +#include "thermal_core.h" + +#define FRAC_BITS 10 +#define int_to_frac(x) ((x) << FRAC_BITS) +#define frac_to_int(x) ((x) >> FRAC_BITS) + +/** + * mul_frac() - multiply two fixed-point numbers + * @x: first multiplicand + * @y: second multiplicand + * + * Return: the result of multiplying two fixed-point numbers. The + * result is also a fixed-point number. + */ +static inline s64 mul_frac(s64 x, s64 y) +{ + return (x * y) >> FRAC_BITS; +} + +/** + * div_frac() - divide two fixed-point numbers + * @x: the dividend + * @y: the divisor + * + * Return: the result of dividing two fixed-point numbers. The + * result is also a fixed-point number. + */ +static inline s64 div_frac(s64 x, s64 y) +{ + return div_s64(x << FRAC_BITS, y); +} + +/** + * struct power_allocator_params - parameters for the power allocator governor + * @err_integral: accumulated error in the PID controller. + * @prev_err: error in the previous iteration of the PID controller. + * Used to calculate the derivative term. + * @trip_switch_on: first passive trip point of the thermal zone. The + * governor switches on when this trip point is crossed. + * @trip_max_desired_temperature: last passive trip point of the thermal + * zone. The temperature we are + * controlling for. 
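To make the fixed-point helpers above concrete (FRAC_BITS = 10, so one integer unit is 1024 fractional units; the numbers are just examples):

    int_to_frac(3)       = 3 << 10             = 3072
    mul_frac(1536, 2048) = (1536 * 2048) >> 10 = 3072   /* 1.5 * 2.0 = 3.0 */
    div_frac(3072, 2048) = (3072 << 10) / 2048 = 1536   /* 3.0 / 2.0 = 1.5 */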
+ */ +struct power_allocator_params { + s64 err_integral; + s32 prev_err; + int trip_switch_on; + int trip_max_desired_temperature; +}; + +/** + * pid_controller() - PID controller + * @tz: thermal zone we are operating in + * @current_temp: the current temperature in millicelsius + * @control_temp: the target temperature in millicelsius + * @max_allocatable_power: maximum allocatable power for this thermal zone + * + * This PID controller increases the available power budget so that the + * temperature of the thermal zone gets as close as possible to + * @control_temp and limits the power if it exceeds it. k_po is the + * proportional term when we are overshooting, k_pu is the + * proportional term when we are undershooting. integral_cutoff is a + * threshold below which we stop accumulating the error. The + * accumulated error is only valid if the requested power will make + * the system warmer. If the system is mostly idle, there's no point + * in accumulating positive error. + * + * Return: The power budget for the next period. + */ +static u32 pid_controller(struct thermal_zone_device *tz, + unsigned long current_temp, + unsigned long control_temp, + u32 max_allocatable_power) +{ + s64 p, i, d, power_range; + s32 err, max_power_frac; + struct power_allocator_params *params = tz->governor_data; + + max_power_frac = int_to_frac(max_allocatable_power); + + err = ((s32)control_temp - (s32)current_temp); + err = int_to_frac(err); + + /* Calculate the proportional term */ + p = mul_frac(err < 0 ? tz->tzp->k_po : tz->tzp->k_pu, err); + + /* + * Calculate the integral term + * + * if the error is less than cut off allow integration (but + * the integral is limited to max power) + */ + i = mul_frac(tz->tzp->k_i, params->err_integral); + + if (err < int_to_frac(tz->tzp->integral_cutoff)) { + s64 i_next = i + mul_frac(tz->tzp->k_i, err); + + if (abs64(i_next) < max_power_frac) { + i = i_next; + params->err_integral += err; + } + } + + /* + * Calculate the derivative term + * + * We do err - prev_err, so with a positive k_d, a decreasing + * error (i.e. driving closer to the line) results in less + * power being applied, slowing down the controller) + */ + d = mul_frac(tz->tzp->k_d, err - params->prev_err); + d = div_frac(d, tz->passive_delay); + params->prev_err = err; + + power_range = p + i + d; + + /* feed-forward the known sustainable dissipatable power */ + power_range = tz->tzp->sustainable_power + frac_to_int(power_range); + + power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power); + + trace_thermal_power_allocator_pid(tz, frac_to_int(err), + frac_to_int(params->err_integral), + frac_to_int(p), frac_to_int(i), + frac_to_int(d), power_range); + + return power_range; +} + +/** + * divvy_up_power() - divvy the allocated power between the actors + * @req_power: each actor's requested power + * @max_power: each actor's maximum available power + * @num_actors: size of the @req_power, @max_power and @granted_power's array + * @total_req_power: sum of @req_power + * @power_range: total allocated power + * @granted_power: output array: each actor's granted power + * @extra_actor_power: an appropriately sized array to be used in the + * function as temporary storage of the extra power given + * to the actors + * + * This function divides the total allocated power (@power_range) + * fairly between the actors. It first tries to give each actor a + * share of the @power_range according to how much power it requested + * compared to the rest of the actors. 
For example, if only one actor + * requests power, then it receives all the @power_range. If + * three actors each requests 1mW, each receives a third of the + * @power_range. + * + * If any actor received more than their maximum power, then that + * surplus is re-divvied among the actors based on how far they are + * from their respective maximums. + * + * Granted power for each actor is written to @granted_power, which + * should've been allocated by the calling function. + */ +static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors, + u32 total_req_power, u32 power_range, + u32 *granted_power, u32 *extra_actor_power) +{ + u32 extra_power, capped_extra_power; + int i; + + /* + * Prevent division by 0 if none of the actors request power. + */ + if (!total_req_power) + total_req_power = 1; + + capped_extra_power = 0; + extra_power = 0; + for (i = 0; i < num_actors; i++) { + u64 req_range = req_power[i] * power_range; + + granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range, + total_req_power); + + if (granted_power[i] > max_power[i]) { + extra_power += granted_power[i] - max_power[i]; + granted_power[i] = max_power[i]; + } + + extra_actor_power[i] = max_power[i] - granted_power[i]; + capped_extra_power += extra_actor_power[i]; + } + + if (!extra_power) + return; + + /* + * Re-divvy the reclaimed extra among actors based on + * how far they are from the max + */ + extra_power = min(extra_power, capped_extra_power); + if (capped_extra_power > 0) + for (i = 0; i < num_actors; i++) + granted_power[i] += (extra_actor_power[i] * + extra_power) / capped_extra_power; +} + +static int allocate_power(struct thermal_zone_device *tz, + unsigned long current_temp, + unsigned long control_temp) +{ + struct thermal_instance *instance; + struct power_allocator_params *params = tz->governor_data; + u32 *req_power, *max_power, *granted_power, *extra_actor_power; + u32 total_req_power, max_allocatable_power; + u32 total_granted_power, power_range; + int i, num_actors, total_weight, ret = 0; + int trip_max_desired_temperature = params->trip_max_desired_temperature; + + mutex_lock(&tz->lock); + + num_actors = 0; + total_weight = 0; + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + if ((instance->trip == trip_max_desired_temperature) && + cdev_is_power_actor(instance->cdev)) { + num_actors++; + total_weight += instance->weight; + } + } + + /* + * We need to allocate three arrays of the same size: + * req_power, max_power and granted_power. They are going to + * be needed until this function returns. Allocate them all + * in one go to simplify the allocation and deallocation + * logic. 
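As a worked illustration of the divvy_up_power() behaviour documented above (all numbers invented): with power_range = 3000 mW, requests of {1000, 2000, 3000} mW (total 6000) and maximums of {2000, 700, 2000} mW, the proportional pass grants {500, 1000, 1500}; the second actor is capped at its 700 mW maximum, and the freed 300 mW is re-divvied according to the remaining headroom (1500 and 500 mW respectively), giving final grants of {725, 700, 1575} mW.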
+ */ + BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power)); + BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); + BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); + req_power = devm_kcalloc(&tz->device, num_actors * 4, + sizeof(*req_power), GFP_KERNEL); + if (!req_power) { + ret = -ENOMEM; + goto unlock; + } + + max_power = &req_power[num_actors]; + granted_power = &req_power[2 * num_actors]; + extra_actor_power = &req_power[3 * num_actors]; + + i = 0; + total_req_power = 0; + max_allocatable_power = 0; + + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + int weight; + struct thermal_cooling_device *cdev = instance->cdev; + + if (instance->trip != trip_max_desired_temperature) + continue; + + if (!cdev_is_power_actor(cdev)) + continue; + + if (cdev->ops->get_requested_power(cdev, tz, &req_power[i])) + continue; + + if (!total_weight) + weight = 1 << FRAC_BITS; + else + weight = instance->weight; + + req_power[i] = frac_to_int(weight * req_power[i]); + + if (power_actor_get_max_power(cdev, tz, &max_power[i])) + continue; + + total_req_power += req_power[i]; + max_allocatable_power += max_power[i]; + + i++; + } + + power_range = pid_controller(tz, current_temp, control_temp, + max_allocatable_power); + + divvy_up_power(req_power, max_power, num_actors, total_req_power, + power_range, granted_power, extra_actor_power); + + total_granted_power = 0; + i = 0; + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + if (instance->trip != trip_max_desired_temperature) + continue; + + if (!cdev_is_power_actor(instance->cdev)) + continue; + + power_actor_set_power(instance->cdev, instance, + granted_power[i]); + total_granted_power += granted_power[i]; + + i++; + } + + trace_thermal_power_allocator(tz, req_power, total_req_power, + granted_power, total_granted_power, + num_actors, power_range, + max_allocatable_power, current_temp, + (s32)control_temp - (s32)current_temp); + + devm_kfree(&tz->device, req_power); +unlock: + mutex_unlock(&tz->lock); + + return ret; +} + +static int get_governor_trips(struct thermal_zone_device *tz, + struct power_allocator_params *params) +{ + int i, ret, last_passive; + bool found_first_passive; + + found_first_passive = false; + last_passive = -1; + ret = -EINVAL; + + for (i = 0; i < tz->trips; i++) { + enum thermal_trip_type type; + + ret = tz->ops->get_trip_type(tz, i, &type); + if (ret) + return ret; + + if (!found_first_passive) { + if (type == THERMAL_TRIP_PASSIVE) { + params->trip_switch_on = i; + found_first_passive = true; + } + } else if (type == THERMAL_TRIP_PASSIVE) { + last_passive = i; + } else { + break; + } + } + + if (last_passive != -1) { + params->trip_max_desired_temperature = last_passive; + ret = 0; + } else { + ret = -EINVAL; + } + + return ret; +} + +static void reset_pid_controller(struct power_allocator_params *params) +{ + params->err_integral = 0; + params->prev_err = 0; +} + +static void allow_maximum_power(struct thermal_zone_device *tz) +{ + struct thermal_instance *instance; + struct power_allocator_params *params = tz->governor_data; + + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + if ((instance->trip != params->trip_max_desired_temperature) || + (!cdev_is_power_actor(instance->cdev))) + continue; + + instance->target = 0; + instance->cdev->updated = false; + thermal_cdev_update(instance->cdev); + } +} + +/** + * power_allocator_bind() - bind the power_allocator governor to a thermal zone + * @tz: thermal zone to bind it to + * + * Check that the thermal 
zone is valid for this governor, that is, it + * has two thermal trips. If so, initialize the PID controller + * parameters and bind it to the thermal zone. + * + * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM + * if we ran out of memory. + */ +static int power_allocator_bind(struct thermal_zone_device *tz) +{ + int ret; + struct power_allocator_params *params; + unsigned long switch_on_temp, control_temp; + u32 temperature_threshold; + + if (!tz->tzp || !tz->tzp->sustainable_power) { + dev_err(&tz->device, + "power_allocator: missing sustainable_power\n"); + return -EINVAL; + } + + params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + ret = get_governor_trips(tz, params); + if (ret) { + dev_err(&tz->device, + "thermal zone %s has wrong trip setup for power allocator\n", + tz->type); + goto free; + } + + ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, + &switch_on_temp); + if (ret) + goto free; + + ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature, + &control_temp); + if (ret) + goto free; + + temperature_threshold = control_temp - switch_on_temp; + + tz->tzp->k_po = tz->tzp->k_po ?: + int_to_frac(tz->tzp->sustainable_power) / temperature_threshold; + tz->tzp->k_pu = tz->tzp->k_pu ?: + int_to_frac(2 * tz->tzp->sustainable_power) / + temperature_threshold; + tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000; + /* + * The default for k_d and integral_cutoff is 0, so we can + * leave them as they are. + */ + + reset_pid_controller(params); + + tz->governor_data = params; + + return 0; + +free: + devm_kfree(&tz->device, params); + return ret; +} + +static void power_allocator_unbind(struct thermal_zone_device *tz) +{ + dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); + devm_kfree(&tz->device, tz->governor_data); + tz->governor_data = NULL; +} + +static int power_allocator_throttle(struct thermal_zone_device *tz, int trip) +{ + int ret; + unsigned long switch_on_temp, control_temp, current_temp; + struct power_allocator_params *params = tz->governor_data; + + /* + * We get called for every trip point but we only need to do + * our calculations once + */ + if (trip != params->trip_max_desired_temperature) + return 0; + + ret = thermal_zone_get_temp(tz, &current_temp); + if (ret) { + dev_warn(&tz->device, "Failed to get temperature: %d\n", ret); + return ret; + } + + ret = tz->ops->get_trip_temp(tz, params->trip_switch_on, + &switch_on_temp); + if (ret) { + dev_warn(&tz->device, + "Failed to get switch on temperature: %d\n", ret); + return ret; + } + + if (current_temp < switch_on_temp) { + tz->passive = 0; + reset_pid_controller(params); + allow_maximum_power(tz); + return 0; + } + + tz->passive = 1; + + ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature, + &control_temp); + if (ret) { + dev_warn(&tz->device, + "Failed to get the maximum desired temperature: %d\n", + ret); + return ret; + } + + return allocate_power(tz, current_temp, control_temp); +} + +static struct thermal_governor thermal_gov_power_allocator = { + .name = "power_allocator", + .bind_to_tz = power_allocator_bind, + .unbind_from_tz = power_allocator_unbind, + .throttle = power_allocator_throttle, +}; + +int thermal_gov_power_allocator_register(void) +{ + return thermal_register_governor(&thermal_gov_power_allocator); +} + +void thermal_gov_power_allocator_unregister(void) +{ + thermal_unregister_governor(&thermal_gov_power_allocator); +} diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c 
b/drivers/thermal/qcom-spmi-temp-alarm.c new file mode 100644 index 0000000..c8d27b8 --- /dev/null +++ b/drivers/thermal/qcom-spmi-temp-alarm.c @@ -0,0 +1,309 @@ +/* + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/iio/consumer.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/thermal.h> + +#define QPNP_TM_REG_TYPE 0x04 +#define QPNP_TM_REG_SUBTYPE 0x05 +#define QPNP_TM_REG_STATUS 0x08 +#define QPNP_TM_REG_SHUTDOWN_CTRL1 0x40 +#define QPNP_TM_REG_ALARM_CTRL 0x46 + +#define QPNP_TM_TYPE 0x09 +#define QPNP_TM_SUBTYPE 0x08 + +#define STATUS_STAGE_MASK 0x03 + +#define SHUTDOWN_CTRL1_THRESHOLD_MASK 0x03 + +#define ALARM_CTRL_FORCE_ENABLE 0x80 + +/* + * Trip point values based on threshold control + * 0 = {105 C, 125 C, 145 C} + * 1 = {110 C, 130 C, 150 C} + * 2 = {115 C, 135 C, 155 C} + * 3 = {120 C, 140 C, 160 C} +*/ +#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */ +#define TEMP_STAGE_HYSTERESIS 2000 + +#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */ +#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */ + +#define THRESH_MIN 0 + +/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */ +#define DEFAULT_TEMP 37000 + +struct qpnp_tm_chip { + struct regmap *map; + struct thermal_zone_device *tz_dev; + long temp; + unsigned int thresh; + unsigned int stage; + unsigned int prev_stage; + unsigned int base; + struct iio_channel *adc; +}; + +static int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *data) +{ + unsigned int val; + int ret; + + ret = regmap_read(chip->map, chip->base + addr, &val); + if (ret < 0) + return ret; + + *data = val; + return 0; +} + +static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data) +{ + return regmap_write(chip->map, chip->base + addr, data); +} + +/* + * This function updates the internal temp value based on the + * current thermal stage and threshold as well as the previous stage + */ +static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip) +{ + unsigned int stage; + int ret; + u8 reg = 0; + + ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg); + if (ret < 0) + return ret; + + stage = reg & STATUS_STAGE_MASK; + + if (stage > chip->stage) { + /* increasing stage, use lower bound */ + chip->temp = (stage - 1) * TEMP_STAGE_STEP + + chip->thresh * TEMP_THRESH_STEP + + TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN; + } else if (stage < chip->stage) { + /* decreasing stage, use upper bound */ + chip->temp = stage * TEMP_STAGE_STEP + + chip->thresh * TEMP_THRESH_STEP - + TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN; + } + + chip->stage = stage; + + return 0; +} + +static int qpnp_tm_get_temp(void *data, long *temp) +{ + struct qpnp_tm_chip *chip = data; + int ret, mili_celsius; + + if (!temp) + return -EINVAL; + + if (IS_ERR(chip->adc)) { + ret = qpnp_tm_update_temp_no_adc(chip); + if (ret < 0) + return ret; + } else { 
ret = iio_read_channel_processed(chip->adc, &mili_celsius); + if (ret < 0) + return ret; + + chip->temp = mili_celsius; + } + + *temp = chip->temp < 0 ? 0 : chip->temp; + + return 0; +} + +static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = { + .get_temp = qpnp_tm_get_temp, +}; + +static irqreturn_t qpnp_tm_isr(int irq, void *data) +{ + struct qpnp_tm_chip *chip = data; + + thermal_zone_device_update(chip->tz_dev); + + return IRQ_HANDLED; +} + +/* + * This function initializes the internal temp value based on only the + * current thermal stage and threshold. Setup threshold control and + * disable shutdown override. + */ +static int qpnp_tm_init(struct qpnp_tm_chip *chip) +{ + int ret; + u8 reg; + + chip->thresh = THRESH_MIN; + chip->temp = DEFAULT_TEMP; + + ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg); + if (ret < 0) + return ret; + + chip->stage = reg & STATUS_STAGE_MASK; + + if (chip->stage) + chip->temp = chip->thresh * TEMP_THRESH_STEP + + (chip->stage - 1) * TEMP_STAGE_STEP + + TEMP_THRESH_MIN; + + /* + * Set threshold and disable software override of stage 2 and 3 + * shutdowns. + */ + reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK; + ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg); + if (ret < 0) + return ret; + + /* Enable the thermal alarm PMIC module in always-on mode. */ + reg = ALARM_CTRL_FORCE_ENABLE; + ret = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, reg); + + return ret; +} + +static int qpnp_tm_probe(struct platform_device *pdev) +{ + struct qpnp_tm_chip *chip; + struct device_node *node; + u8 type, subtype; + u32 res[2]; + int ret, irq; + + node = pdev->dev.of_node; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, chip); + + chip->map = dev_get_regmap(pdev->dev.parent, NULL); + if (!chip->map) + return -ENXIO; + + ret = of_property_read_u32_array(node, "reg", res, 2); + if (ret < 0) + return ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + /* ADC based measurements are optional */ + chip->adc = iio_channel_get(&pdev->dev, "thermal"); + if (PTR_ERR(chip->adc) == -EPROBE_DEFER) + return PTR_ERR(chip->adc); + + chip->base = res[0]; + + ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type); + if (ret < 0) { + dev_err(&pdev->dev, "could not read type\n"); + goto fail; + } + + ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype); + if (ret < 0) { + dev_err(&pdev->dev, "could not read subtype\n"); + goto fail; + } + + if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) { + dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n", + type, subtype); + ret = -ENODEV; + goto fail; + } + + ret = qpnp_tm_init(chip); + if (ret < 0) { + dev_err(&pdev->dev, "init failed\n"); + goto fail; + } + + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr, + IRQF_ONESHOT, node->name, chip); + if (ret < 0) + goto fail; + + chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip, + &qpnp_tm_sensor_ops); + if (IS_ERR(chip->tz_dev)) { + dev_err(&pdev->dev, "failed to register sensor\n"); + ret = PTR_ERR(chip->tz_dev); + goto fail; + } + + return 0; + +fail: + if (!IS_ERR(chip->adc)) + iio_channel_release(chip->adc); + + return ret; +} + +static int qpnp_tm_remove(struct platform_device *pdev) +{ + struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev); + + thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev); + if (!IS_ERR(chip->adc)) + iio_channel_release(chip->adc); + + return 0; +} + +static const struct of_device_id 
qpnp_tm_match_table[] = { + { .compatible = "qcom,spmi-temp-alarm" }, + { } +}; +MODULE_DEVICE_TABLE(of, qpnp_tm_match_table); + +static struct platform_driver qpnp_tm_driver = { + .driver = { + .name = "spmi-temp-alarm", + .of_match_table = qpnp_tm_match_table, + }, + .probe = qpnp_tm_probe, + .remove = qpnp_tm_remove, +}; +module_platform_driver(qpnp_tm_driver); + +MODULE_ALIAS("platform:spmi-temp-alarm"); +MODULE_DESCRIPTION("QPNP PMIC Temperature Alarm driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 3aa46ac..cd8f5f93 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -529,7 +529,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev) thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(thermal->pclk)) { - error = PTR_ERR(thermal->clk); + error = PTR_ERR(thermal->pclk); dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n", error); return error; diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 1d30b09..531f4b17 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -97,6 +97,32 @@ #define EXYNOS4412_MUX_ADDR_VALUE 6 #define EXYNOS4412_MUX_ADDR_SHIFT 20 +/* Exynos5433 specific registers */ +#define EXYNOS5433_TMU_REG_CONTROL1 0x024 +#define EXYNOS5433_TMU_SAMPLING_INTERVAL 0x02c +#define EXYNOS5433_TMU_COUNTER_VALUE0 0x030 +#define EXYNOS5433_TMU_COUNTER_VALUE1 0x034 +#define EXYNOS5433_TMU_REG_CURRENT_TEMP1 0x044 +#define EXYNOS5433_THD_TEMP_RISE3_0 0x050 +#define EXYNOS5433_THD_TEMP_RISE7_4 0x054 +#define EXYNOS5433_THD_TEMP_FALL3_0 0x060 +#define EXYNOS5433_THD_TEMP_FALL7_4 0x064 +#define EXYNOS5433_TMU_REG_INTEN 0x0c0 +#define EXYNOS5433_TMU_REG_INTPEND 0x0c8 +#define EXYNOS5433_TMU_EMUL_CON 0x110 +#define EXYNOS5433_TMU_PD_DET_EN 0x130 + +#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT 16 +#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT 23 +#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK \ + (0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT) +#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK BIT(23) + +#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING 0 +#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING 1 + +#define EXYNOS5433_PD_DET_EN 1 + /*exynos5440 specific registers*/ #define EXYNOS5440_TMU_S0_7_TRIM 0x000 #define EXYNOS5440_TMU_S0_7_CTRL 0x020 @@ -484,6 +510,101 @@ out: return ret; } +static int exynos5433_tmu_initialize(struct platform_device *pdev) +{ + struct exynos_tmu_data *data = platform_get_drvdata(pdev); + struct exynos_tmu_platform_data *pdata = data->pdata; + struct thermal_zone_device *tz = data->tzd; + unsigned int status, trim_info; + unsigned int rising_threshold = 0, falling_threshold = 0; + unsigned long temp, temp_hist; + int ret = 0, threshold_code, i, sensor_id, cal_type; + + status = readb(data->base + EXYNOS_TMU_REG_STATUS); + if (!status) { + ret = -EBUSY; + goto out; + } + + trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO); + sanitize_temp_error(data, trim_info); + + /* Read the temperature sensor id */ + sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK) + >> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT; + dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id); + + /* Read the calibration mode */ + writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO); + cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK) + >> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT; + + switch (cal_type) { + case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING: + 
pdata->cal_type = TYPE_ONE_POINT_TRIMMING; + break; + case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING: + pdata->cal_type = TYPE_TWO_POINT_TRIMMING; + break; + default: + pdata->cal_type = TYPE_ONE_POINT_TRIMMING; + break; + }; + + dev_info(&pdev->dev, "Calibration type is %d-point calibration\n", + cal_type ? 2 : 1); + + /* Write temperature code for rising and falling threshold */ + for (i = 0; i < of_thermal_get_ntrips(tz); i++) { + int rising_reg_offset, falling_reg_offset; + int j = 0; + + switch (i) { + case 0: + case 1: + case 2: + case 3: + rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0; + falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0; + j = i; + break; + case 4: + case 5: + case 6: + case 7: + rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4; + falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4; + j = i - 4; + break; + default: + continue; + } + + /* Write temperature code for rising threshold */ + tz->ops->get_trip_temp(tz, i, &temp); + temp /= MCELSIUS; + threshold_code = temp_to_code(data, temp); + + rising_threshold = readl(data->base + rising_reg_offset); + rising_threshold |= (threshold_code << j * 8); + writel(rising_threshold, data->base + rising_reg_offset); + + /* Write temperature code for falling threshold */ + tz->ops->get_trip_hyst(tz, i, &temp_hist); + temp_hist = temp - (temp_hist / MCELSIUS); + threshold_code = temp_to_code(data, temp_hist); + + falling_threshold = readl(data->base + falling_reg_offset); + falling_threshold &= ~(0xff << j * 8); + falling_threshold |= (threshold_code << j * 8); + writel(falling_threshold, data->base + falling_reg_offset); + } + + data->tmu_clear_irqs(data); +out: + return ret; +} + static int exynos5440_tmu_initialize(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); @@ -643,6 +764,48 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on) writel(con, data->base + EXYNOS_TMU_REG_CONTROL); } +static void exynos5433_tmu_control(struct platform_device *pdev, bool on) +{ + struct exynos_tmu_data *data = platform_get_drvdata(pdev); + struct thermal_zone_device *tz = data->tzd; + unsigned int con, interrupt_en, pd_det_en; + + con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); + + if (on) { + con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); + interrupt_en = + (of_thermal_is_trip_valid(tz, 7) + << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | + (of_thermal_is_trip_valid(tz, 6) + << EXYNOS7_TMU_INTEN_RISE6_SHIFT) | + (of_thermal_is_trip_valid(tz, 5) + << EXYNOS7_TMU_INTEN_RISE5_SHIFT) | + (of_thermal_is_trip_valid(tz, 4) + << EXYNOS7_TMU_INTEN_RISE4_SHIFT) | + (of_thermal_is_trip_valid(tz, 3) + << EXYNOS7_TMU_INTEN_RISE3_SHIFT) | + (of_thermal_is_trip_valid(tz, 2) + << EXYNOS7_TMU_INTEN_RISE2_SHIFT) | + (of_thermal_is_trip_valid(tz, 1) + << EXYNOS7_TMU_INTEN_RISE1_SHIFT) | + (of_thermal_is_trip_valid(tz, 0) + << EXYNOS7_TMU_INTEN_RISE0_SHIFT); + + interrupt_en |= + interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; + } else { + con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); + interrupt_en = 0; /* Disable all interrupts */ + } + + pd_det_en = on ? 
EXYNOS5433_PD_DET_EN : 0; + + writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN); + writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN); + writel(con, data->base + EXYNOS_TMU_REG_CONTROL); +} + static void exynos5440_tmu_control(struct platform_device *pdev, bool on) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); @@ -770,6 +933,8 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data, if (data->soc == SOC_ARCH_EXYNOS5260) emul_con = EXYNOS5260_EMUL_CON; + if (data->soc == SOC_ARCH_EXYNOS5433) + emul_con = EXYNOS5433_TMU_EMUL_CON; else if (data->soc == SOC_ARCH_EXYNOS7) emul_con = EXYNOS7_TMU_REG_EMUL_CON; else @@ -882,6 +1047,9 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data) } else if (data->soc == SOC_ARCH_EXYNOS7) { tmu_intstat = EXYNOS7_TMU_REG_INTPEND; tmu_intclear = EXYNOS7_TMU_REG_INTPEND; + } else if (data->soc == SOC_ARCH_EXYNOS5433) { + tmu_intstat = EXYNOS5433_TMU_REG_INTPEND; + tmu_intclear = EXYNOS5433_TMU_REG_INTPEND; } else { tmu_intstat = EXYNOS_TMU_REG_INTSTAT; tmu_intclear = EXYNOS_TMU_REG_INTCLEAR; @@ -926,6 +1094,7 @@ static const struct of_device_id exynos_tmu_match[] = { { .compatible = "samsung,exynos5260-tmu", }, { .compatible = "samsung,exynos5420-tmu", }, { .compatible = "samsung,exynos5420-tmu-ext-triminfo", }, + { .compatible = "samsung,exynos5433-tmu", }, { .compatible = "samsung,exynos5440-tmu", }, { .compatible = "samsung,exynos7-tmu", }, { /* sentinel */ }, @@ -949,6 +1118,8 @@ static int exynos_of_get_soc_type(struct device_node *np) else if (of_device_is_compatible(np, "samsung,exynos5420-tmu-ext-triminfo")) return SOC_ARCH_EXYNOS5420_TRIMINFO; + else if (of_device_is_compatible(np, "samsung,exynos5433-tmu")) + return SOC_ARCH_EXYNOS5433; else if (of_device_is_compatible(np, "samsung,exynos5440-tmu")) return SOC_ARCH_EXYNOS5440; else if (of_device_is_compatible(np, "samsung,exynos7-tmu")) @@ -1069,6 +1240,13 @@ static int exynos_map_dt_data(struct platform_device *pdev) data->tmu_set_emulation = exynos4412_tmu_set_emulation; data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; break; + case SOC_ARCH_EXYNOS5433: + data->tmu_initialize = exynos5433_tmu_initialize; + data->tmu_control = exynos5433_tmu_control; + data->tmu_read = exynos4412_tmu_read; + data->tmu_set_emulation = exynos4412_tmu_set_emulation; + data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; + break; case SOC_ARCH_EXYNOS5440: data->tmu_initialize = exynos5440_tmu_initialize; data->tmu_control = exynos5440_tmu_control; @@ -1172,7 +1350,9 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_clk_sec; } - if (data->soc == SOC_ARCH_EXYNOS7) { + switch (data->soc) { + case SOC_ARCH_EXYNOS5433: + case SOC_ARCH_EXYNOS7: data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk"); if (IS_ERR(data->sclk)) { dev_err(&pdev->dev, "Failed to get sclk\n"); @@ -1184,7 +1364,10 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_clk; } } - } + break; + default: + break; + }; ret = exynos_tmu_initialize(pdev); if (ret) { diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index 4d71ec6..440c714 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -33,6 +33,7 @@ enum soc_type { SOC_ARCH_EXYNOS5260, SOC_ARCH_EXYNOS5420, SOC_ARCH_EXYNOS5420_TRIMINFO, + SOC_ARCH_EXYNOS5433, SOC_ARCH_EXYNOS5440, SOC_ARCH_EXYNOS7, }; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 4108db7..04659bf 100644 --- 
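Note: the exynos5433_tmu_initialize() hunk above spreads the eight trip points across two 32-bit threshold registers, one byte per trip. A standalone sketch of that packing, assuming the same byte layout (register names copied from the patch; the helper itself is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define EXYNOS5433_THD_TEMP_RISE3_0 0x050	/* trips 0..3 */
#define EXYNOS5433_THD_TEMP_RISE7_4 0x054	/* trips 4..7 */

/*
 * Pack an 8-bit threshold code for trip 'i' into the pair of RISE
 * registers, mirroring the j = i (trips 0..3) / j = i - 4 (trips 4..7)
 * byte selection in exynos5433_tmu_initialize().
 */
static void pack_rising_code(uint32_t rise[2], int i, uint8_t code)
{
	int reg = (i < 4) ? 0 : 1;	/* RISE3_0 or RISE7_4 */
	int j = (i < 4) ? i : i - 4;	/* byte lane inside the register */

	rise[reg] &= ~(0xffu << (j * 8));
	rise[reg] |= (uint32_t)code << (j * 8);
}

int main(void)
{
	uint32_t rise[2] = { 0, 0 };

	pack_rising_code(rise, 2, 0x5a);	/* byte 2 of RISE3_0 */
	pack_rising_code(rise, 5, 0x6b);	/* byte 1 of RISE7_4 */
	printf("RISE3_0=0x%08x RISE7_4=0x%08x\n", rise[0], rise[1]);
	/* prints RISE3_0=0x005a0000 RISE7_4=0x00006b00 */
	return 0;
}

One detail the sketch glosses over: the patch clears the old byte only on the falling-threshold path and simply ORs the new code in on the rising path; the mask-and-or form above is used in both cases for clarity.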
a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -75,6 +75,58 @@ static struct thermal_governor *__find_governor(const char *name) return NULL; } +/** + * bind_previous_governor() - bind the previous governor of the thermal zone + * @tz: a valid pointer to a struct thermal_zone_device + * @failed_gov_name: the name of the governor that failed to register + * + * Register the previous governor of the thermal zone after a new + * governor has failed to be bound. + */ +static void bind_previous_governor(struct thermal_zone_device *tz, + const char *failed_gov_name) +{ + if (tz->governor && tz->governor->bind_to_tz) { + if (tz->governor->bind_to_tz(tz)) { + dev_err(&tz->device, + "governor %s failed to bind and the previous one (%s) failed to bind again, thermal zone %s has no governor\n", + failed_gov_name, tz->governor->name, tz->type); + tz->governor = NULL; + } + } +} + +/** + * thermal_set_governor() - Switch to another governor + * @tz: a valid pointer to a struct thermal_zone_device + * @new_gov: pointer to the new governor + * + * Change the governor of thermal zone @tz. + * + * Return: 0 on success, an error if the new governor's bind_to_tz() failed. + */ +static int thermal_set_governor(struct thermal_zone_device *tz, + struct thermal_governor *new_gov) +{ + int ret = 0; + + if (tz->governor && tz->governor->unbind_from_tz) + tz->governor->unbind_from_tz(tz); + + if (new_gov && new_gov->bind_to_tz) { + ret = new_gov->bind_to_tz(tz); + if (ret) { + bind_previous_governor(tz, new_gov->name); + + return ret; + } + } + + tz->governor = new_gov; + + return ret; +} + int thermal_register_governor(struct thermal_governor *governor) { int err; @@ -107,8 +159,15 @@ int thermal_register_governor(struct thermal_governor *governor) name = pos->tzp->governor_name; - if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) - pos->governor = governor; + if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) { + int ret; + + ret = thermal_set_governor(pos, governor); + if (ret) + dev_err(&pos->device, + "Failed to set governor %s for thermal zone %s: %d\n", + governor->name, pos->type, ret); + } } mutex_unlock(&thermal_list_lock); @@ -134,7 +193,7 @@ void thermal_unregister_governor(struct thermal_governor *governor) list_for_each_entry(pos, &thermal_tz_list, node) { if (!strncasecmp(pos->governor->name, governor->name, THERMAL_NAME_LENGTH)) - pos->governor = NULL; + thermal_set_governor(pos, NULL); } mutex_unlock(&thermal_list_lock); @@ -218,7 +277,8 @@ static void print_bind_err_msg(struct thermal_zone_device *tz, static void __bind(struct thermal_zone_device *tz, int mask, struct thermal_cooling_device *cdev, - unsigned long *limits) + unsigned long *limits, + unsigned int weight) { int i, ret; @@ -233,7 +293,8 @@ static void __bind(struct thermal_zone_device *tz, int mask, upper = limits[i * 2 + 1]; } ret = thermal_zone_bind_cooling_device(tz, i, cdev, - upper, lower); + upper, lower, + weight); if (ret) print_bind_err_msg(tz, cdev, ret); } @@ -280,7 +341,8 @@ static void bind_cdev(struct thermal_cooling_device *cdev) continue; tzp->tbp[i].cdev = cdev; __bind(pos, tzp->tbp[i].trip_mask, cdev, - tzp->tbp[i].binding_limits); + tzp->tbp[i].binding_limits, + tzp->tbp[i].weight); } } @@ -319,7 +381,8 @@ static void bind_tz(struct thermal_zone_device *tz) continue; tzp->tbp[i].cdev = pos; __bind(tz, tzp->tbp[i].trip_mask, pos, - tzp->tbp[i].binding_limits); + tzp->tbp[i].binding_limits, + tzp->tbp[i].weight); } } exit: @@ -713,7 +776,8 @@ passive_store(struct device 
*dev, struct device_attribute *attr, thermal_zone_bind_cooling_device(tz, THERMAL_TRIPS_NONE, cdev, THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); } mutex_unlock(&thermal_list_lock); if (!tz->passive_delay) @@ -765,8 +829,9 @@ policy_store(struct device *dev, struct device_attribute *attr, if (!gov) goto exit; - tz->governor = gov; - ret = count; + ret = thermal_set_governor(tz, gov); + if (!ret) + ret = count; exit: mutex_unlock(&tz->lock); @@ -810,6 +875,158 @@ emul_temp_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); #endif/*CONFIG_THERMAL_EMULATION*/ +static ssize_t +sustainable_power_show(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + + if (tz->tzp) + return sprintf(buf, "%u\n", tz->tzp->sustainable_power); + else + return -EIO; +} + +static ssize_t +sustainable_power_store(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct thermal_zone_device *tz = to_thermal_zone(dev); + u32 sustainable_power; + + if (!tz->tzp) + return -EIO; + + if (kstrtou32(buf, 10, &sustainable_power)) + return -EINVAL; + + tz->tzp->sustainable_power = sustainable_power; + + return count; +} +static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show, + sustainable_power_store); + +#define create_s32_tzp_attr(name) \ + static ssize_t \ + name##_show(struct device *dev, struct device_attribute *devattr, \ + char *buf) \ + { \ + struct thermal_zone_device *tz = to_thermal_zone(dev); \ + \ + if (tz->tzp) \ + return sprintf(buf, "%u\n", tz->tzp->name); \ + else \ + return -EIO; \ + } \ + \ + static ssize_t \ + name##_store(struct device *dev, struct device_attribute *devattr, \ + const char *buf, size_t count) \ + { \ + struct thermal_zone_device *tz = to_thermal_zone(dev); \ + s32 value; \ + \ + if (!tz->tzp) \ + return -EIO; \ + \ + if (kstrtos32(buf, 10, &value)) \ + return -EINVAL; \ + \ + tz->tzp->name = value; \ + \ + return count; \ + } \ + static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store) + +create_s32_tzp_attr(k_po); +create_s32_tzp_attr(k_pu); +create_s32_tzp_attr(k_i); +create_s32_tzp_attr(k_d); +create_s32_tzp_attr(integral_cutoff); +create_s32_tzp_attr(slope); +create_s32_tzp_attr(offset); +#undef create_s32_tzp_attr + +static struct device_attribute *dev_tzp_attrs[] = { + &dev_attr_sustainable_power, + &dev_attr_k_po, + &dev_attr_k_pu, + &dev_attr_k_i, + &dev_attr_k_d, + &dev_attr_integral_cutoff, + &dev_attr_slope, + &dev_attr_offset, +}; + +static int create_tzp_attrs(struct device *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dev_tzp_attrs); i++) { + int ret; + struct device_attribute *dev_attr = dev_tzp_attrs[i]; + + ret = device_create_file(dev, dev_attr); + if (ret) + return ret; + } + + return 0; +} + +/** + * power_actor_get_max_power() - get the maximum power that a cdev can consume + * @cdev: pointer to &thermal_cooling_device + * @tz: a valid thermal zone device pointer + * @max_power: pointer in which to store the maximum power + * + * Calculate the maximum power consumption in milliwats that the + * cooling device can currently consume and store it in @max_power. + * + * Return: 0 on success, -EINVAL if @cdev doesn't support the + * power_actor API or -E* on other error. 
+ */ +int power_actor_get_max_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, u32 *max_power) +{ + if (!cdev_is_power_actor(cdev)) + return -EINVAL; + + return cdev->ops->state2power(cdev, tz, 0, max_power); +} + +/** + * power_actor_set_power() - limit the maximum power that a cooling device can consume + * @cdev: pointer to &thermal_cooling_device + * @instance: thermal instance to update + * @power: the power in milliwatts + * + * Set the cooling device to consume at most @power milliwatts. + * + * Return: 0 on success, -EINVAL if the cooling device does not + * implement the power actor API or -E* for other failures. + */ +int power_actor_set_power(struct thermal_cooling_device *cdev, + struct thermal_instance *instance, u32 power) +{ + unsigned long state; + int ret; + + if (!cdev_is_power_actor(cdev)) + return -EINVAL; + + ret = cdev->ops->power2state(cdev, instance->tz, power, &state); + if (ret) + return ret; + + instance->target = state; + cdev->updated = false; + thermal_cdev_update(cdev); + + return 0; +} + static DEVICE_ATTR(type, 0444, type_show, NULL); static DEVICE_ATTR(temp, 0444, temp_show, NULL); static DEVICE_ATTR(mode, 0644, mode_show, mode_store); @@ -917,6 +1134,34 @@ static const struct attribute_group *cooling_device_attr_groups[] = { NULL, }; +static ssize_t +thermal_cooling_device_weight_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct thermal_instance *instance; + + instance = container_of(attr, struct thermal_instance, weight_attr); + + return sprintf(buf, "%d\n", instance->weight); +} + +static ssize_t +thermal_cooling_device_weight_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_instance *instance; + int ret, weight; + + ret = kstrtoint(buf, 0, &weight); + if (ret) + return ret; + + instance = container_of(attr, struct thermal_instance, weight_attr); + instance->weight = weight; + + return count; +} /* Device management */ /** @@ -931,6 +1176,9 @@ static const struct attribute_group *cooling_device_attr_groups[] = { * @lower: the Minimum cooling state can be used for this trip point. * THERMAL_NO_LIMIT means no lower limit, * and the cooling device can be in cooling state 0. + * @weight: The weight of the cooling device to be bound to the + * thermal zone. Use THERMAL_WEIGHT_DEFAULT for the + * default value * * This interface function bind a thermal cooling device to the certain trip * point of a thermal zone device. 
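Note: the two power_actor_*() helpers above give governors a power-based view of a cooling device via the state2power()/power2state() callbacks. A hedged, governor-side sketch of how they might be combined (illustrative only; assumes access to thermal_core.h and that the caller serializes against the zone as the core governors do):

#include <linux/thermal.h>
#include "thermal_core.h"	/* struct thermal_instance */

/*
 * Illustrative helper: cap a power budget to what the bound power
 * actors can actually draw, then hand each an equal share.  A real
 * governor would weight the split (see the PID-based power allocator
 * this series registers).
 */
static void demo_split_power_budget(struct thermal_zone_device *tz,
				    u32 budget_mw)
{
	struct thermal_instance *instance;
	u32 total_max = 0;
	int actors = 0;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		u32 max_power;

		if (power_actor_get_max_power(instance->cdev, tz, &max_power))
			continue;	/* not a power actor, skip it */
		total_max += max_power;
		actors++;
	}

	if (!actors)
		return;

	if (budget_mw > total_max)
		budget_mw = total_max;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node)
		if (cdev_is_power_actor(instance->cdev))
			power_actor_set_power(instance->cdev, instance,
					      budget_mw / actors);
}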
@@ -941,7 +1189,8 @@ static const struct attribute_group *cooling_device_attr_groups[] = { int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, int trip, struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower) + unsigned long upper, unsigned long lower, + unsigned int weight) { struct thermal_instance *dev; struct thermal_instance *pos; @@ -986,6 +1235,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, dev->upper = upper; dev->lower = lower; dev->target = THERMAL_NO_TARGET; + dev->weight = weight; result = get_idr(&tz->idr, &tz->lock, &dev->id); if (result) @@ -1006,6 +1256,16 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (result) goto remove_symbol_link; + sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id); + sysfs_attr_init(&dev->weight_attr.attr); + dev->weight_attr.attr.name = dev->weight_attr_name; + dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO; + dev->weight_attr.show = thermal_cooling_device_weight_show; + dev->weight_attr.store = thermal_cooling_device_weight_store; + result = device_create_file(&tz->device, &dev->weight_attr); + if (result) + goto remove_trip_file; + mutex_lock(&tz->lock); mutex_lock(&cdev->lock); list_for_each_entry(pos, &tz->thermal_instances, tz_node) @@ -1023,6 +1283,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (!result) return 0; + device_remove_file(&tz->device, &dev->weight_attr); +remove_trip_file: device_remove_file(&tz->device, &dev->attr); remove_symbol_link: sysfs_remove_link(&tz->device.kobj, dev->name); @@ -1377,7 +1639,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) tz->trip_temp_attrs[indx].name; tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; - if (mask & (1 << indx)) { + if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) && + mask & (1 << indx)) { tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; tz->trip_temp_attrs[indx].attr.store = trip_point_temp_store; @@ -1454,7 +1717,7 @@ static void remove_trip_attrs(struct thermal_zone_device *tz) struct thermal_zone_device *thermal_zone_device_register(const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, - const struct thermal_zone_params *tzp, + struct thermal_zone_params *tzp, int passive_delay, int polling_delay) { struct thermal_zone_device *tz; @@ -1462,6 +1725,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, int result; int count; int passive = 0; + struct thermal_governor *governor; if (type && strlen(type) >= THERMAL_NAME_LENGTH) return ERR_PTR(-EINVAL); @@ -1548,13 +1812,24 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, if (result) goto unregister; + /* Add thermal zone params */ + result = create_tzp_attrs(&tz->device); + if (result) + goto unregister; + /* Update 'this' zone's governor information */ mutex_lock(&thermal_governor_lock); if (tz->tzp) - tz->governor = __find_governor(tz->tzp->governor_name); + governor = __find_governor(tz->tzp->governor_name); else - tz->governor = def_governor; + governor = def_governor; + + result = thermal_set_governor(tz, governor); + if (result) { + mutex_unlock(&thermal_governor_lock); + goto unregister; + } mutex_unlock(&thermal_governor_lock); @@ -1643,7 +1918,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) device_remove_file(&tz->device, &dev_attr_mode); device_remove_file(&tz->device, &dev_attr_policy); 
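Note: thermal_zone_bind_cooling_device() now takes a per-binding weight, and each binding additionally exposes a writable cdev<N>_weight attribute in sysfs. A minimal sketch of a zone .bind() callback using the extended signature (the callback itself is illustrative; THERMAL_WEIGHT_DEFAULT is what the converted callers in this series pass to keep the old behaviour, as the ti-soc-thermal change below does):

#include <linux/thermal.h>

/*
 * Illustrative .bind() callback: tie @cdev to trip 0 over its full
 * state range.  The final argument is the new per-binding weight;
 * THERMAL_WEIGHT_DEFAULT preserves the previous behaviour and the
 * value can later be tuned through cdev<N>_weight in sysfs.
 */
static int demo_tz_bind(struct thermal_zone_device *tz,
			struct thermal_cooling_device *cdev)
{
	return thermal_zone_bind_cooling_device(tz, 0, cdev,
						THERMAL_NO_LIMIT,
						THERMAL_NO_LIMIT,
						THERMAL_WEIGHT_DEFAULT);
}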
remove_trip_attrs(tz); - tz->governor = NULL; + thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); @@ -1799,7 +2074,11 @@ static int __init thermal_register_governors(void) if (result) return result; - return thermal_gov_user_space_register(); + result = thermal_gov_user_space_register(); + if (result) + return result; + + return thermal_gov_power_allocator_register(); } static void thermal_unregister_governors(void) @@ -1808,6 +2087,7 @@ static void thermal_unregister_governors(void) thermal_gov_fair_share_unregister(); thermal_gov_bang_bang_unregister(); thermal_gov_user_space_unregister(); + thermal_gov_power_allocator_unregister(); } static int __init thermal_init(void) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 0531c75..d7ac1fc 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -46,8 +46,11 @@ struct thermal_instance { unsigned long target; /* expected cooling state */ char attr_name[THERMAL_NAME_LENGTH]; struct device_attribute attr; + char weight_attr_name[THERMAL_NAME_LENGTH]; + struct device_attribute weight_attr; struct list_head tz_node; /* node in tz->thermal_instances */ struct list_head cdev_node; /* node in cdev->thermal_instances */ + unsigned int weight; /* The weight of the cooling device */ }; int thermal_register_governor(struct thermal_governor *); @@ -85,6 +88,14 @@ static inline int thermal_gov_user_space_register(void) { return 0; } static inline void thermal_gov_user_space_unregister(void) {} #endif /* CONFIG_THERMAL_GOV_USER_SPACE */ +#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR +int thermal_gov_power_allocator_register(void); +void thermal_gov_power_allocator_unregister(void); +#else +static inline int thermal_gov_power_allocator_register(void) { return 0; } +static inline void thermal_gov_power_allocator_unregister(void) {} +#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */ + /* device tree support */ #ifdef CONFIG_THERMAL_OF int of_parse_thermal_zones(void); @@ -103,7 +114,7 @@ static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz) static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, int trip) { - return 0; + return false; } static inline const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *tz) diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index 62a5d44..63e2f5c 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -43,6 +43,8 @@ #include "ti-bandgap.h" +static int ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id); + /*** Helper functions to access registers and their bitfields ***/ /** @@ -103,19 +105,15 @@ do { \ */ static int ti_bandgap_power(struct ti_bandgap *bgp, bool on) { - int i, ret = 0; + int i; - if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH)) { - ret = -ENOTSUPP; - goto exit; - } + if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH)) + return -ENOTSUPP; for (i = 0; i < bgp->conf->sensor_count; i++) /* active on 0 */ RMW_BITS(bgp, i, temp_sensor_ctrl, bgap_tempsoff_mask, !on); - -exit: - return ret; + return 0; } /** @@ -263,18 +261,13 @@ static int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t) { const struct ti_bandgap_data *conf = bgp->conf; - int ret = 0; /* look up for temperature in the table and return the temperature */ - if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val) { - ret = -ERANGE; - goto 
exit; - } + if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val) + return -ERANGE; *t = bgp->conf->conv_table[adc_val - conf->adc_start_val]; - -exit: - return ret; + return 0; } /** @@ -295,16 +288,14 @@ int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc) { const struct ti_bandgap_data *conf = bgp->conf; const int *conv_table = bgp->conf->conv_table; - int high, low, mid, ret = 0; + int high, low, mid; low = 0; high = conf->adc_end_val - conf->adc_start_val; mid = (high + low) / 2; - if (temp < conv_table[low] || temp > conv_table[high]) { - ret = -ERANGE; - goto exit; - } + if (temp < conv_table[low] || temp > conv_table[high]) + return -ERANGE; while (low < high) { if (temp < conv_table[mid]) @@ -315,9 +306,7 @@ int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc) } *adc = conf->adc_start_val + low; - -exit: - return ret; + return 0; } /** @@ -343,13 +332,11 @@ int ti_bandgap_add_hyst(struct ti_bandgap *bgp, int adc_val, int hyst_val, */ ret = ti_bandgap_adc_to_mcelsius(bgp, adc_val, &temp); if (ret < 0) - goto exit; + return ret; temp += hyst_val; ret = ti_bandgap_mcelsius_to_adc(bgp, temp, sum); - -exit: return ret; } @@ -468,22 +455,18 @@ exit: */ static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id) { - int ret = 0; - if (!bgp || IS_ERR(bgp)) { pr_err("%s: invalid bandgap pointer\n", __func__); - ret = -EINVAL; - goto exit; + return -EINVAL; } if ((id < 0) || (id >= bgp->conf->sensor_count)) { dev_err(bgp->dev, "%s: sensor id out of range (%d)\n", __func__, id); - ret = -ERANGE; + return -ERANGE; } -exit: - return ret; + return 0; } /** @@ -511,12 +494,10 @@ static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val, ret = ti_bandgap_validate(bgp, id); if (ret) - goto exit; + return ret; - if (!TI_BANDGAP_HAS(bgp, TALERT)) { - ret = -ENOTSUPP; - goto exit; - } + if (!TI_BANDGAP_HAS(bgp, TALERT)) + return -ENOTSUPP; ts_data = bgp->conf->sensors[id].ts_data; tsr = bgp->conf->sensors[id].registers; @@ -529,17 +510,15 @@ static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val, } if (ret) - goto exit; + return ret; ret = ti_bandgap_mcelsius_to_adc(bgp, val, &adc_val); if (ret < 0) - goto exit; + return ret; spin_lock(&bgp->lock); ret = ti_bandgap_update_alert_threshold(bgp, id, adc_val, hot); spin_unlock(&bgp->lock); - -exit: return ret; } @@ -582,7 +561,7 @@ static int _ti_bandgap_read_threshold(struct ti_bandgap *bgp, int id, temp = ti_bandgap_readl(bgp, tsr->bgap_threshold); temp = (temp & mask) >> __ffs(mask); - ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); + ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); if (ret) { dev_err(bgp->dev, "failed to read thot\n"); ret = -EIO; @@ -852,11 +831,17 @@ int ti_bandgap_read_temperature(struct ti_bandgap *bgp, int id, if (ret) return ret; + if (!TI_BANDGAP_HAS(bgp, MODE_CONFIG)) { + ret = ti_bandgap_force_single_read(bgp, id); + if (ret) + return ret; + } + spin_lock(&bgp->lock); temp = ti_bandgap_read_temp(bgp, id); spin_unlock(&bgp->lock); - ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); + ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); if (ret) return -EIO; @@ -917,7 +902,8 @@ void *ti_bandgap_get_sensor_data(struct ti_bandgap *bgp, int id) static int ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id) { - u32 temp = 0, counter = 1000; + u32 counter = 1000; + struct temp_sensor_registers *tsr; /* Select single conversion mode */ if (TI_BANDGAP_HAS(bgp, MODE_CONFIG)) @@ -925,16 +911,27 @@ 
ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id) /* Start of Conversion = 1 */ RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1); - /* Wait until DTEMP is updated */ - temp = ti_bandgap_read_temp(bgp, id); - while ((temp == 0) && --counter) - temp = ti_bandgap_read_temp(bgp, id); - /* REVISIT: Check correct condition for end of conversion */ + /* Wait for EOCZ going up */ + tsr = bgp->conf->sensors[id].registers; + + while (--counter) { + if (ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) & + tsr->bgap_eocz_mask) + break; + } /* Start of Conversion = 0 */ RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0); + /* Wait for EOCZ going down */ + counter = 1000; + while (--counter) { + if (!(ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) & + tsr->bgap_eocz_mask)) + break; + } + return 0; } @@ -1220,11 +1217,10 @@ int ti_bandgap_probe(struct platform_device *pdev) goto free_irqs; } - bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name); + bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name); ret = IS_ERR(bgp->div_clk); if (ret) { - dev_err(&pdev->dev, - "failed to request div_ts_ck clock ref\n"); + dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n"); ret = PTR_ERR(bgp->div_clk); goto free_irqs; } diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index a38c175..c7c5b37 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -75,7 +75,7 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c) } /* thermal zone ops */ -/* Get temperature callback function for thermal zone*/ +/* Get temperature callback function for thermal zone */ static inline int __ti_thermal_get_temp(void *devdata, long *temp) { struct thermal_zone_device *pcb_tz = NULL; @@ -146,7 +146,8 @@ static int ti_thermal_bind(struct thermal_zone_device *thermal, return thermal_zone_bind_cooling_device(thermal, 0, cdev, /* bind with min and max states defined by cpu_cooling */ THERMAL_NO_LIMIT, - THERMAL_NO_LIMIT); + THERMAL_NO_LIMIT, + THERMAL_WEIGHT_DEFAULT); } /* Unbind callback functions for thermal zone */ diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 9ea3d9d..50d1d2c 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c @@ -68,7 +68,7 @@ struct phy_dev_entry { struct thermal_zone_device *tzone; }; -static const struct thermal_zone_params pkg_temp_tz_params = { +static struct thermal_zone_params pkg_temp_tz_params = { .no_hwmon = true, }; diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index f1e5742..5bab1c6 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void) return 0; } +static void xen_console_update_evtchn(struct xencons_info *info) +{ + if (xen_hvm_domain()) { + uint64_t v; + int err; + + err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); + if (!err && v) + info->evtchn = v; + } else + info->evtchn = xen_start_info->console.domU.evtchn; +} + void xen_console_resume(void) { struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); - if (info != NULL && info->irq) + if (info != NULL && info->irq) { + if (!xen_initial_domain()) + xen_console_update_evtchn(info); rebind_evtchn_irq(info->evtchn, info->irq); + } } static void xencons_disconnect_backend(struct xencons_info *info) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 91abc00..2c34c32 
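Note: ti_bandgap_force_single_read() above now polls the EOCZ status bit with a bounded counter (up, then down) instead of waiting for a non-zero temperature reading. A standalone sketch of that bounded-poll pattern, with the register access stubbed out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EOCZ_MASK (1u << 7)	/* illustrative bit position only */

/* Stand-in for ti_bandgap_readl(); a real driver reads the sensor ctrl reg. */
static uint32_t read_ctrl(void)
{
	static int calls;

	return (++calls > 3) ? EOCZ_MASK : 0;	/* goes high on the 4th poll */
}

/* Poll until (reg & mask) == want, giving up after 'budget' reads. */
static bool poll_bit(uint32_t mask, uint32_t want, unsigned int budget)
{
	while (budget--) {
		if ((read_ctrl() & mask) == want)
			return true;
	}
	return false;
}

int main(void)
{
	/* wait for EOCZ to go high, as the start-of-conversion path does */
	printf("eocz high: %s\n",
	       poll_bit(EOCZ_MASK, EOCZ_MASK, 1000) ? "ok" : "timed out");
	return 0;
}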
100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -3170,7 +3170,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state) return gsmtty_modem_update(dlci, encode); } -static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty) +static void gsmtty_cleanup(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; struct gsm_mux *gsm = dlci->gsm; @@ -3178,7 +3178,6 @@ static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty) dlci_put(dlci); dlci_put(gsm->dlci[0]); mux_put(gsm); - driver->ttys[tty->index] = NULL; } /* Virtual ttys for the demux */ @@ -3199,7 +3198,7 @@ static const struct tty_operations gsmtty_ops = { .tiocmget = gsmtty_tiocmget, .tiocmset = gsmtty_tiocmset, .break_ctl = gsmtty_break_ctl, - .remove = gsmtty_remove, + .cleanup = gsmtty_cleanup, }; diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 644ddb8..bbc4ce6 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, add_wait_queue(&tty->read_wait, &wait); for (;;) { - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { + if (test_bit(TTY_OTHER_DONE, &tty->flags)) { ret = -EIO; break; } @@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, /* set bits for operations that won't block */ if (n_hdlc->rx_buf_list.head) mask |= POLLIN | POLLRDNORM; /* readable */ - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) + if (test_bit(TTY_OTHER_DONE, &tty->flags)) mask |= POLLHUP; if (tty_hung_up_p(filp)) mask |= POLLHUP; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cf6e0f2..cc57a3a 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1949,6 +1949,18 @@ static inline int input_available_p(struct tty_struct *tty, int poll) return ldata->commit_head - ldata->read_tail >= amt; } +static inline int check_other_done(struct tty_struct *tty) +{ + int done = test_bit(TTY_OTHER_DONE, &tty->flags); + if (done) { + /* paired with cmpxchg() in check_other_closed(); ensures + * read buffer head index is not stale + */ + smp_mb__after_atomic(); + } + return done; +} + /** * copy_from_read_buf - copy read data directly * @tty: terminal device @@ -2167,7 +2179,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, struct n_tty_data *ldata = tty->disc_data; unsigned char __user *b = buf; DEFINE_WAIT_FUNC(wait, woken_wake_function); - int c; + int c, done; int minimum, time; ssize_t retval = 0; long timeout; @@ -2235,8 +2247,10 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, ((minimum - (b - buf)) >= 1)) ldata->minimum_to_wake = (minimum - (b - buf)); + done = check_other_done(tty); + if (!input_available_p(tty, 0)) { - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { + if (done) { retval = -EIO; break; } @@ -2443,12 +2457,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file, poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); + if (check_other_done(tty)) + mask |= POLLHUP; if (input_available_p(tty, 1)) mask |= POLLIN | POLLRDNORM; if (tty->packet && tty->link->ctrl_status) mask |= POLLPRI | POLLIN | POLLRDNORM; - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) - mask |= POLLHUP; if (tty_hung_up_p(file)) mask |= POLLHUP; if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index e72ee62..4d5e840 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -53,9 +53,8 @@ static void 
pty_close(struct tty_struct *tty, struct file *filp) /* Review - krefs on tty_link ?? */ if (!tty->link) return; - tty_flush_to_ldisc(tty->link); set_bit(TTY_OTHER_CLOSED, &tty->link->flags); - wake_up_interruptible(&tty->link->read_wait); + tty_flip_buffer_push(tty->link->port); wake_up_interruptible(&tty->link->write_wait); if (tty->driver->subtype == PTY_TYPE_MASTER) { set_bit(TTY_OTHER_CLOSED, &tty->flags); @@ -243,7 +242,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp) goto out; clear_bit(TTY_IO_ERROR, &tty->flags); + /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */ clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); + clear_bit(TTY_OTHER_DONE, &tty->link->flags); set_bit(TTY_THROTTLED, &tty->flags); return 0; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 08da4d3..46bcebb 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1998,6 +1998,8 @@ pci_wch_ch38x_setup(struct serial_private *priv, #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 +#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 + /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 @@ -2520,6 +2522,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .subdevice = PCI_ANY_ID, .setup = pci_xr17v35x_setup, }, + { + .vendor = PCI_VENDOR_ID_EXAR, + .device = PCI_DEVICE_ID_EXAR_XR17V8358, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_xr17v35x_setup, + }, /* * Xircom cards */ @@ -2999,6 +3008,7 @@ enum pci_board_num_t { pbn_exar_XR17V352, pbn_exar_XR17V354, pbn_exar_XR17V358, + pbn_exar_XR17V8358, pbn_exar_ibm_saturn, pbn_pasemi_1682M, pbn_ni8430_2, @@ -3685,6 +3695,14 @@ static struct pciserial_board pci_boards[] = { .reg_shift = 0, .first_offset = 0, }, + [pbn_exar_XR17V8358] = { + .flags = FL_BASE0, + .num_ports = 16, + .base_baud = 7812500, + .uart_offset = 0x400, + .reg_shift = 0, + .first_offset = 0, + }, [pbn_exar_ibm_saturn] = { .flags = FL_BASE0, .num_ports = 1, @@ -5080,7 +5098,7 @@ static struct pci_device_id serial_pci_tbl[] = { 0, 0, pbn_exar_XR17C158 }, /* - * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs + * Exar Corp. 
XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs */ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352, PCI_ANY_ID, PCI_ANY_ID, @@ -5094,7 +5112,10 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_exar_XR17V358 }, - + { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358, + PCI_ANY_ID, PCI_ANY_ID, + 0, + 0, pbn_exar_XR17V8358 }, /* * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) */ diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 5a4e9d5..6f5a072 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1639,6 +1639,9 @@ static int pl011_startup(struct uart_port *port) writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS); + /* Assume that TX IRQ doesn't work until we see one: */ + uap->tx_irq_seen = 0; + spin_lock_irq(&uap->port.lock); /* restore RTS and DTR */ @@ -1702,7 +1705,7 @@ static void pl011_shutdown(struct uart_port *port) spin_lock_irq(&uap->port.lock); uap->im = 0; writew(uap->im, uap->port.membase + UART011_IMSC); - writew(0xffff & ~UART011_TXIS, uap->port.membase + UART011_ICR); + writew(0xffff, uap->port.membase + UART011_ICR); spin_unlock_irq(&uap->port.lock); pl011_dma_shutdown(uap); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index d58fe47..27dade2 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port) config.direction = DMA_MEM_TO_DEV; config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; config.dst_addr = port->mapbase + ATMEL_US_THR; + config.dst_maxburst = 1; ret = dmaengine_slave_config(atmel_port->chan_tx, &config); @@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port) config.direction = DMA_DEV_TO_MEM; config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; config.src_addr = port->mapbase + ATMEL_US_RHR; + config.src_maxburst = 1; ret = dmaengine_slave_config(atmel_port->chan_rx, &config); diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index 5fdc9f3..6dc471e 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -187,13 +187,8 @@ static int __init param_setup_earlycon(char *buf) return 0; err = setup_earlycon(buf); - if (err == -ENOENT) { - pr_warn("no match for %s\n", buf); - err = 0; - } else if (err == -EALREADY) { - pr_warn("already registered\n"); - err = 0; - } + if (err == -ENOENT || err == -EALREADY) + return 0; return err; } early_param("earlycon", param_setup_earlycon); diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 5b73afb..137381e 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -346,7 +346,6 @@ static const struct of_device_id of_platform_serial_table[] = { { .compatible = "ibm,qpace-nwp-serial", .data = (void *)PORT_NWPSERIAL, }, #endif - { .type = "serial", .data = (void *)PORT_UNKNOWN, }, { /* end of list */ }, }; diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 211479a..7f49172 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1735,6 +1735,8 @@ static int serial_omap_probe(struct platform_device *pdev) err_add_port: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_qos_remove_request(&up->pm_qos_request); + device_init_wakeup(up->dev, false); err_rs485: err_port_line: return ret; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c 
index cf08876..a0ae942 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1068,8 +1068,9 @@ static int s3c64xx_serial_startup(struct uart_port *port) spin_lock_irqsave(&port->lock, flags); ufcon = rd_regl(port, S3C2410_UFCON); - ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX | - S5PV210_UFCON_RXTRIG8; + ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8; + if (!uart_console(port)) + ufcon |= S3C2410_UFCON_RESETTX; wr_regl(port, S3C2410_UFCON, ufcon); enable_rx_pio(ourport); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index eb5b03b..0b7bb12 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1770,7 +1770,7 @@ static const struct file_operations uart_proc_fops = { * @port: the port to write the message * @s: array of characters * @count: number of characters in string to write - * @write: function to write character to port + * @putchar: function to write character to port */ void uart_console_write(struct uart_port *port, const char *s, unsigned int count, diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 708eead..b1c6bd3 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match); static int ulite_probe(struct platform_device *pdev) { - struct resource *res, *res2; + struct resource *res; + int irq; int id = pdev->id; #ifdef CONFIG_OF const __be32 *prop; @@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev) if (!res) return -ENODEV; - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res2) - return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -ENXIO; - return ulite_assign(&pdev->dev, id, res->start, res2->start); + return ulite_assign(&pdev->dev, id, res->start, irq); } static int ulite_remove(struct platform_device *pdev) diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index f218ec6..3ddbac7 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1331,9 +1331,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend, */ static int cdns_uart_probe(struct platform_device *pdev) { - int rc, id; + int rc, id, irq; struct uart_port *port; - struct resource *res, *res2; + struct resource *res; struct cdns_uart *cdns_uart_data; cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), @@ -1380,9 +1380,9 @@ static int cdns_uart_probe(struct platform_device *pdev) goto err_out_clk_disable; } - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res2) { - rc = -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + rc = -ENXIO; goto err_out_clk_disable; } @@ -1411,7 +1411,7 @@ static int cdns_uart_probe(struct platform_device *pdev) * and triggers invocation of the config_port() entry point. */ port->mapbase = res->start; - port->irq = res2->start; + port->irq = irq; port->dev = &pdev->dev; port->uartclk = clk_get_rate(cdns_uart_data->uartclk); port->private_data = cdns_uart_data; diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 7566164..2f78b77 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -37,6 +37,28 @@ #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) +/* + * If all tty flip buffers have been processed by flush_to_ldisc() or + * dropped by tty_buffer_flush(), check if the linked pty has been closed. 
+ * If so, wake the reader/poll to process + */ +static inline void check_other_closed(struct tty_struct *tty) +{ + unsigned long flags, old; + + /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */ + for (flags = ACCESS_ONCE(tty->flags); + test_bit(TTY_OTHER_CLOSED, &flags); + ) { + old = flags; + __set_bit(TTY_OTHER_DONE, &flags); + flags = cmpxchg(&tty->flags, old, flags); + if (old == flags) { + wake_up_interruptible(&tty->read_wait); + break; + } + } +} /** * tty_buffer_lock_exclusive - gain exclusive access to buffer @@ -229,6 +251,8 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) if (ld && ld->ops->flush_buffer) ld->ops->flush_buffer(tty); + check_other_closed(tty); + atomic_dec(&buf->priority); mutex_unlock(&buf->lock); } @@ -471,8 +495,10 @@ static void flush_to_ldisc(struct work_struct *work) smp_rmb(); count = head->commit - head->read; if (!count) { - if (next == NULL) + if (next == NULL) { + check_other_closed(tty); break; + } buf->head = next; tty_buffer_free(port, head); continue; @@ -489,19 +515,6 @@ static void flush_to_ldisc(struct work_struct *work) } /** - * tty_flush_to_ldisc - * @tty: tty to push - * - * Push the terminal flip buffers to the line discipline. - * - * Must not be called from IRQ context. - */ -void tty_flush_to_ldisc(struct tty_struct *tty) -{ - flush_work(&tty->port->buf.work); -} - -/** * tty_flip_buffer_push - terminal * @port: tty port to push * diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 632fc81..8e53fe4 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -536,7 +536,7 @@ EXPORT_SYMBOL(tty_termios_hw_change); * Locking: termios_rwsem */ -static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) +int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) { struct ktermios old_termios; struct tty_ldisc *ld; @@ -569,6 +569,7 @@ static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) up_write(&tty->termios_rwsem); return 0; } +EXPORT_SYMBOL_GPL(tty_set_termios); /** * set_termios - set termios values for a tty diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c index dfb05ed..5b7061a 100644 --- a/drivers/usb/chipidea/debug.c +++ b/drivers/usb/chipidea/debug.c @@ -88,9 +88,13 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf, char buf[32]; int ret; - if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + count = min_t(size_t, sizeof(buf) - 1, count); + if (copy_from_user(buf, ubuf, count)) return -EFAULT; + /* sscanf requires a zero terminated string */ + buf[count] = '\0'; + if (sscanf(buf, "%u", &mode) != 1) return -EINVAL; diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index 083acf4..19d655a 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c @@ -520,7 +520,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on) { struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); - mutex_unlock(&fsm->lock); if (on) { ci_role_stop(ci); ci_role_start(ci, CI_ROLE_HOST); @@ -529,7 +528,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on) hw_device_reset(ci); ci_role_start(ci, CI_ROLE_GADGET); } - mutex_lock(&fsm->lock); return 0; } @@ -537,12 +535,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on) { struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); - mutex_unlock(&fsm->lock); if (on) usb_gadget_vbus_connect(&ci->gadget); else 
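Note: check_other_closed() above promotes TTY_OTHER_CLOSED to TTY_OTHER_DONE with a cmpxchg retry loop so the flag update cannot race with other writers of tty->flags. A standalone sketch of the same compare-and-swap retry pattern in C11 atomics (this variant additionally guarantees only one caller reports the transition):

#include <stdatomic.h>
#include <stdio.h>

#define OTHER_CLOSED (1u << 0)	/* stand-ins for TTY_OTHER_CLOSED/_DONE */
#define OTHER_DONE   (1u << 1)

/*
 * Promote CLOSED to DONE exactly once.  Returns 1 for the caller that
 * wins the race (and would issue the wakeup), 0 otherwise.
 */
static int promote_closed_to_done(_Atomic unsigned int *flags)
{
	unsigned int old = atomic_load(flags);

	while ((old & OTHER_CLOSED) && !(old & OTHER_DONE)) {
		if (atomic_compare_exchange_weak(flags, &old,
						 old | OTHER_DONE))
			return 1;
		/* 'old' was reloaded by the failed CAS; retry */
	}
	return 0;
}

int main(void)
{
	_Atomic unsigned int flags = OTHER_CLOSED;

	printf("first caller wins: %d\n", promote_closed_to_done(&flags));
	printf("second caller:     %d\n", promote_closed_to_done(&flags));
	return 0;
}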
usb_gadget_vbus_disconnect(&ci->gadget); - mutex_lock(&fsm->lock); return 0; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 3e15add..5c8f581 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1142,11 +1142,16 @@ static int acm_probe(struct usb_interface *intf, } while (buflen > 0) { + elength = buffer[0]; + if (!elength) { + dev_err(&intf->dev, "skipping garbage byte\n"); + elength = 1; + goto next_desc; + } if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } - elength = buffer[0]; switch (buffer[2]) { case USB_CDC_UNION_TYPE: /* we've found it */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 41e510a..d85abfe 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -106,6 +106,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04f3, 0x010c), .driver_info = USB_QUIRK_DEVICE_QUALIFIER }, + { USB_DEVICE(0x04f3, 0x0125), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + { USB_DEVICE(0x04f3, 0x016f), .driver_info = USB_QUIRK_DEVICE_QUALIFIER }, diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index edba534..6b486a3 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -65,8 +65,8 @@ #define USBOTGSS_IRQENABLE_SET_MISC 0x003c #define USBOTGSS_IRQENABLE_CLR_MISC 0x0040 #define USBOTGSS_IRQMISC_OFFSET 0x03fc -#define USBOTGSS_UTMI_OTG_CTRL 0x0080 -#define USBOTGSS_UTMI_OTG_STATUS 0x0084 +#define USBOTGSS_UTMI_OTG_STATUS 0x0080 +#define USBOTGSS_UTMI_OTG_CTRL 0x0084 #define USBOTGSS_UTMI_OTG_OFFSET 0x0480 #define USBOTGSS_TXFIFO_DEPTH 0x0508 #define USBOTGSS_RXFIFO_DEPTH 0x050c @@ -98,20 +98,20 @@ #define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL (1 << 3) #define USBOTGSS_IRQMISC_IDPULLUP_FALL (1 << 0) -/* UTMI_OTG_CTRL REGISTER */ -#define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS (1 << 5) -#define USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS (1 << 4) -#define USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS (1 << 3) -#define USBOTGSS_UTMI_OTG_CTRL_IDPULLUP (1 << 0) - /* UTMI_OTG_STATUS REGISTER */ -#define USBOTGSS_UTMI_OTG_STATUS_SW_MODE (1 << 31) -#define USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT (1 << 9) -#define USBOTGSS_UTMI_OTG_STATUS_TXBITSTUFFENABLE (1 << 8) -#define USBOTGSS_UTMI_OTG_STATUS_IDDIG (1 << 4) -#define USBOTGSS_UTMI_OTG_STATUS_SESSEND (1 << 3) -#define USBOTGSS_UTMI_OTG_STATUS_SESSVALID (1 << 2) -#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID (1 << 1) +#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS (1 << 5) +#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS (1 << 4) +#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS (1 << 3) +#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP (1 << 0) + +/* UTMI_OTG_CTRL REGISTER */ +#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE (1 << 31) +#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT (1 << 9) +#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE (1 << 8) +#define USBOTGSS_UTMI_OTG_CTRL_IDDIG (1 << 4) +#define USBOTGSS_UTMI_OTG_CTRL_SESSEND (1 << 3) +#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID (1 << 2) +#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID (1 << 1) struct dwc3_omap { struct device *dev; @@ -119,7 +119,7 @@ struct dwc3_omap { int irq; void __iomem *base; - u32 utmi_otg_status; + u32 utmi_otg_ctrl; u32 utmi_otg_offset; u32 irqmisc_offset; u32 irq_eoi_offset; @@ -153,15 +153,15 @@ static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value) writel(value, base + offset); } -static u32 dwc3_omap_read_utmi_status(struct dwc3_omap *omap) +static u32 dwc3_omap_read_utmi_ctrl(struct dwc3_omap *omap) { - 
return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS + + return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_CTRL + omap->utmi_otg_offset); } -static void dwc3_omap_write_utmi_status(struct dwc3_omap *omap, u32 value) +static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value) { - dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS + + dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_CTRL + omap->utmi_otg_offset, value); } @@ -235,25 +235,25 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, } } - val = dwc3_omap_read_utmi_status(omap); - val &= ~(USBOTGSS_UTMI_OTG_STATUS_IDDIG - | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID - | USBOTGSS_UTMI_OTG_STATUS_SESSEND); - val |= USBOTGSS_UTMI_OTG_STATUS_SESSVALID - | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT; - dwc3_omap_write_utmi_status(omap, val); + val = dwc3_omap_read_utmi_ctrl(omap); + val &= ~(USBOTGSS_UTMI_OTG_CTRL_IDDIG + | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID + | USBOTGSS_UTMI_OTG_CTRL_SESSEND); + val |= USBOTGSS_UTMI_OTG_CTRL_SESSVALID + | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT; + dwc3_omap_write_utmi_ctrl(omap, val); break; case OMAP_DWC3_VBUS_VALID: dev_dbg(omap->dev, "VBUS Connect\n"); - val = dwc3_omap_read_utmi_status(omap); - val &= ~USBOTGSS_UTMI_OTG_STATUS_SESSEND; - val |= USBOTGSS_UTMI_OTG_STATUS_IDDIG - | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID - | USBOTGSS_UTMI_OTG_STATUS_SESSVALID - | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT; - dwc3_omap_write_utmi_status(omap, val); + val = dwc3_omap_read_utmi_ctrl(omap); + val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND; + val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG + | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID + | USBOTGSS_UTMI_OTG_CTRL_SESSVALID + | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT; + dwc3_omap_write_utmi_ctrl(omap, val); break; case OMAP_DWC3_ID_FLOAT: @@ -263,13 +263,13 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, case OMAP_DWC3_VBUS_OFF: dev_dbg(omap->dev, "VBUS Disconnect\n"); - val = dwc3_omap_read_utmi_status(omap); - val &= ~(USBOTGSS_UTMI_OTG_STATUS_SESSVALID - | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID - | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT); - val |= USBOTGSS_UTMI_OTG_STATUS_SESSEND - | USBOTGSS_UTMI_OTG_STATUS_IDDIG; - dwc3_omap_write_utmi_status(omap, val); + val = dwc3_omap_read_utmi_ctrl(omap); + val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID + | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID + | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT); + val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND + | USBOTGSS_UTMI_OTG_CTRL_IDDIG; + dwc3_omap_write_utmi_ctrl(omap, val); break; default: @@ -422,22 +422,22 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap) struct device_node *node = omap->dev->of_node; int utmi_mode = 0; - reg = dwc3_omap_read_utmi_status(omap); + reg = dwc3_omap_read_utmi_ctrl(omap); of_property_read_u32(node, "utmi-mode", &utmi_mode); switch (utmi_mode) { case DWC3_OMAP_UTMI_MODE_SW: - reg |= USBOTGSS_UTMI_OTG_STATUS_SW_MODE; + reg |= USBOTGSS_UTMI_OTG_CTRL_SW_MODE; break; case DWC3_OMAP_UTMI_MODE_HW: - reg &= ~USBOTGSS_UTMI_OTG_STATUS_SW_MODE; + reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE; break; default: dev_dbg(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode); } - dwc3_omap_write_utmi_status(omap, reg); + dwc3_omap_write_utmi_ctrl(omap, reg); } static int dwc3_omap_extcon_register(struct dwc3_omap *omap) @@ -614,7 +614,7 @@ static int dwc3_omap_suspend(struct device *dev) { struct dwc3_omap *omap = dev_get_drvdata(dev); - omap->utmi_otg_status = dwc3_omap_read_utmi_status(omap); + omap->utmi_otg_ctrl = dwc3_omap_read_utmi_ctrl(omap); dwc3_omap_disable_irqs(omap); return 0; @@ -624,7 +624,7 @@ 
static int dwc3_omap_resume(struct device *dev) { struct dwc3_omap *omap = dev_get_drvdata(dev); - dwc3_omap_write_utmi_status(omap, omap->utmi_otg_status); + dwc3_omap_write_utmi_ctrl(omap, omap->utmi_otg_ctrl); dwc3_omap_enable_irqs(omap); pm_runtime_disable(dev); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index c42765b..0495c94 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1295,6 +1295,7 @@ static void purge_configs_funcs(struct gadget_info *gi) } } c->next_interface_id = 0; + memset(c->interface, 0, sizeof(c->interface)); c->superspeed = 0; c->highspeed = 0; c->fullspeed = 0; diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 13dfc99..f7f35a3 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -437,12 +437,20 @@ static int hidg_setup(struct usb_function *f, | USB_REQ_GET_DESCRIPTOR): switch (value >> 8) { case HID_DT_HID: + { + struct hid_descriptor hidg_desc_copy = hidg_desc; + VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n"); + hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT; + hidg_desc_copy.desc[0].wDescriptorLength = + cpu_to_le16(hidg->report_desc_length); + length = min_t(unsigned short, length, - hidg_desc.bLength); - memcpy(req->buf, &hidg_desc, length); + hidg_desc_copy.bLength); + memcpy(req->buf, &hidg_desc_copy, length); goto respond; break; + } case HID_DT_REPORT: VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); length = min_t(unsigned short, length, @@ -632,6 +640,10 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + /* + * We can use hidg_desc struct here but we should not relay + * that its content won't change after returning from this function. + */ hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT; hidg_desc.desc[0].wDescriptorLength = cpu_to_le16(hidg->report_desc_length); diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 89179ab..7ee0579 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -113,6 +113,7 @@ struct gs_port { int write_allocated; struct gs_buf port_write_buf; wait_queue_head_t drain_wait; /* wait while writes drain */ + bool write_busy; /* REVISIT this state ... */ struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ @@ -363,7 +364,7 @@ __acquires(&port->port_lock) int status = 0; bool do_tty_wake = false; - while (!list_empty(pool)) { + while (!port->write_busy && !list_empty(pool)) { struct usb_request *req; int len; @@ -393,9 +394,11 @@ __acquires(&port->port_lock) * NOTE that we may keep sending data for a while after * the TTY closed (dev->ioport->port_tty is NULL). */ + port->write_busy = true; spin_unlock(&port->port_lock); status = usb_ep_queue(in, req, GFP_ATOMIC); spin_lock(&port->port_lock); + port->write_busy = false; if (status) { pr_debug("%s: %s %s err %d\n", diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c index c30b7b5..1194b09 100644 --- a/drivers/usb/gadget/legacy/acm_ms.c +++ b/drivers/usb/gadget/legacy/acm_ms.c @@ -121,7 +121,7 @@ static struct usb_function *f_msg; /* * We _always_ have both ACM and mass storage functions. 
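The u_serial change above guards gs_start_tx() against re-entry: the port lock is dropped around usb_ep_queue(), so a write completion can run in that window and call gs_start_tx() again; with write_busy set, the nested call returns immediately instead of pulling from the same request pool. A minimal stand-alone sketch of that guard, assuming an immediate-completion callback; names are illustrative, not the driver code:

#include <stdbool.h>
#include <stdio.h>

static bool write_busy;            /* mirrors port->write_busy */
static int pool = 3;               /* pretend free-request pool */

static void start_tx(void);

/* Stand-in for usb_ep_queue(): completes at once and, like the real
 * completion handler, tries to kick the transmit path again. */
static void queue_request(void)
{
    start_tx();
}

static void start_tx(void)
{
    while (!write_busy && pool > 0) {
        pool--;
        printf("queueing request, %d left\n", pool);
        write_busy = true;         /* the driver drops its lock here */
        queue_request();
        write_busy = false;
    }
}

int main(void)
{
    start_tx();
    return 0;
}

Without the flag, the nested call would drain the same pool while the outer loop is still walking it.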
*/ -static int __init acm_ms_do_config(struct usb_configuration *c) +static int acm_ms_do_config(struct usb_configuration *c) { struct fsg_opts *opts; int status; @@ -174,7 +174,7 @@ static struct usb_configuration acm_ms_config_driver = { /*-------------------------------------------------------------------------*/ -static int __init acm_ms_bind(struct usb_composite_dev *cdev) +static int acm_ms_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; struct fsg_opts *opts; @@ -249,7 +249,7 @@ fail_get_msg: return status; } -static int __exit acm_ms_unbind(struct usb_composite_dev *cdev) +static int acm_ms_unbind(struct usb_composite_dev *cdev) { usb_put_function(f_msg); usb_put_function_instance(fi_msg); @@ -258,13 +258,13 @@ static int __exit acm_ms_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver acm_ms_driver = { +static struct usb_composite_driver acm_ms_driver = { .name = "g_acm_ms", .dev = &device_desc, .max_speed = USB_SPEED_SUPER, .strings = dev_strings, .bind = acm_ms_bind, - .unbind = __exit_p(acm_ms_unbind), + .unbind = acm_ms_unbind, }; module_usb_composite_driver(acm_ms_driver); diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c index f46a395..f289caf 100644 --- a/drivers/usb/gadget/legacy/audio.c +++ b/drivers/usb/gadget/legacy/audio.c @@ -167,7 +167,7 @@ static const struct usb_descriptor_header *otg_desc[] = { /*-------------------------------------------------------------------------*/ -static int __init audio_do_config(struct usb_configuration *c) +static int audio_do_config(struct usb_configuration *c) { int status; @@ -216,7 +216,7 @@ static struct usb_configuration audio_config_driver = { /*-------------------------------------------------------------------------*/ -static int __init audio_bind(struct usb_composite_dev *cdev) +static int audio_bind(struct usb_composite_dev *cdev) { #ifndef CONFIG_GADGET_UAC1 struct f_uac2_opts *uac2_opts; @@ -276,7 +276,7 @@ fail: return status; } -static int __exit audio_unbind(struct usb_composite_dev *cdev) +static int audio_unbind(struct usb_composite_dev *cdev) { #ifdef CONFIG_GADGET_UAC1 if (!IS_ERR_OR_NULL(f_uac1)) @@ -292,13 +292,13 @@ static int __exit audio_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver audio_driver = { +static struct usb_composite_driver audio_driver = { .name = "g_audio", .dev = &device_desc, .strings = audio_strings, .max_speed = USB_SPEED_HIGH, .bind = audio_bind, - .unbind = __exit_p(audio_unbind), + .unbind = audio_unbind, }; module_usb_composite_driver(audio_driver); diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c index 2e85d94..afd3e37 100644 --- a/drivers/usb/gadget/legacy/cdc2.c +++ b/drivers/usb/gadget/legacy/cdc2.c @@ -104,7 +104,7 @@ static struct usb_function_instance *fi_ecm; /* * We _always_ have both CDC ECM and CDC ACM functions. 
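A pattern repeated across the legacy gadget drivers above and below: bind/unbind callbacks and the composite driver structs lose their __init/__exit/__refdata annotations and the __exit_p() wrapper, since a gadget can now be bound and unbound long after module init, so none of this code may live in discardable sections. A small stand-alone sketch of why __exit_p() in particular is a problem here, assuming the usual definition from include/linux/init.h; the struct and function names are made up for illustration:

#include <stdio.h>

/* Same shape as the kernel macro: keep the pointer only for modular
 * builds, otherwise substitute NULL because __exit code is discarded. */
#ifdef MODULE
#define __exit_p(x) x
#else
#define __exit_p(x) NULL
#endif

struct composite_driver_sketch {               /* illustrative only */
    int (*unbind)(void);
};

static int sketch_unbind(void) { return 0; }   /* illustrative only */

int main(void)
{
    struct composite_driver_sketch drv = {
        .unbind = __exit_p(sketch_unbind),
    };

    /* Built in (no -DMODULE): unbind is NULL, yet a gadget bound at
     * runtime still needs to be unbindable, hence the removal. */
    printf("unbind is %s\n", drv.unbind ? "set" : "NULL");
    return 0;
}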
*/ -static int __init cdc_do_config(struct usb_configuration *c) +static int cdc_do_config(struct usb_configuration *c) { int status; @@ -153,7 +153,7 @@ static struct usb_configuration cdc_config_driver = { /*-------------------------------------------------------------------------*/ -static int __init cdc_bind(struct usb_composite_dev *cdev) +static int cdc_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; struct f_ecm_opts *ecm_opts; @@ -211,7 +211,7 @@ fail: return status; } -static int __exit cdc_unbind(struct usb_composite_dev *cdev) +static int cdc_unbind(struct usb_composite_dev *cdev) { usb_put_function(f_acm); usb_put_function_instance(fi_serial); @@ -222,13 +222,13 @@ static int __exit cdc_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver cdc_driver = { +static struct usb_composite_driver cdc_driver = { .name = "g_cdc", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = cdc_bind, - .unbind = __exit_p(cdc_unbind), + .unbind = cdc_unbind, }; module_usb_composite_driver(cdc_driver); diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c index 633683a..204b10b 100644 --- a/drivers/usb/gadget/legacy/dbgp.c +++ b/drivers/usb/gadget/legacy/dbgp.c @@ -284,7 +284,7 @@ fail_1: return -ENODEV; } -static int __init dbgp_bind(struct usb_gadget *gadget, +static int dbgp_bind(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { int err, stp; @@ -406,7 +406,7 @@ fail: return err; } -static __refdata struct usb_gadget_driver dbgp_driver = { +static struct usb_gadget_driver dbgp_driver = { .function = "dbgp", .max_speed = USB_SPEED_HIGH, .bind = dbgp_bind, diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c index c5fdc61..a3323dc 100644 --- a/drivers/usb/gadget/legacy/ether.c +++ b/drivers/usb/gadget/legacy/ether.c @@ -222,7 +222,7 @@ static struct usb_function *f_rndis; * the first one present. That's to make Microsoft's drivers happy, * and to follow DOCSIS 1.0 (cable modem standard). */ -static int __init rndis_do_config(struct usb_configuration *c) +static int rndis_do_config(struct usb_configuration *c) { int status; @@ -264,7 +264,7 @@ MODULE_PARM_DESC(use_eem, "use CDC EEM mode"); /* * We _always_ have an ECM, CDC Subset, or EEM configuration. 
*/ -static int __init eth_do_config(struct usb_configuration *c) +static int eth_do_config(struct usb_configuration *c) { int status = 0; @@ -318,7 +318,7 @@ static struct usb_configuration eth_config_driver = { /*-------------------------------------------------------------------------*/ -static int __init eth_bind(struct usb_composite_dev *cdev) +static int eth_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; struct f_eem_opts *eem_opts = NULL; @@ -447,7 +447,7 @@ fail: return status; } -static int __exit eth_unbind(struct usb_composite_dev *cdev) +static int eth_unbind(struct usb_composite_dev *cdev) { if (has_rndis()) { usb_put_function(f_rndis); @@ -466,13 +466,13 @@ static int __exit eth_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver eth_driver = { +static struct usb_composite_driver eth_driver = { .name = "g_ether", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_SUPER, .bind = eth_bind, - .unbind = __exit_p(eth_unbind), + .unbind = eth_unbind, }; module_usb_composite_driver(eth_driver); diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c index b01b88e..7b9ef7e 100644 --- a/drivers/usb/gadget/legacy/g_ffs.c +++ b/drivers/usb/gadget/legacy/g_ffs.c @@ -163,7 +163,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev); static int gfs_do_config(struct usb_configuration *c); -static __refdata struct usb_composite_driver gfs_driver = { +static struct usb_composite_driver gfs_driver = { .name = DRIVER_NAME, .dev = &gfs_dev_desc, .strings = gfs_dev_strings, diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c index e02a095..da19c48 100644 --- a/drivers/usb/gadget/legacy/gmidi.c +++ b/drivers/usb/gadget/legacy/gmidi.c @@ -118,7 +118,7 @@ static struct usb_gadget_strings *dev_strings[] = { static struct usb_function_instance *fi_midi; static struct usb_function *f_midi; -static int __exit midi_unbind(struct usb_composite_dev *dev) +static int midi_unbind(struct usb_composite_dev *dev) { usb_put_function(f_midi); usb_put_function_instance(fi_midi); @@ -133,7 +133,7 @@ static struct usb_configuration midi_config = { .MaxPower = CONFIG_USB_GADGET_VBUS_DRAW, }; -static int __init midi_bind_config(struct usb_configuration *c) +static int midi_bind_config(struct usb_configuration *c) { int status; @@ -150,7 +150,7 @@ static int __init midi_bind_config(struct usb_configuration *c) return 0; } -static int __init midi_bind(struct usb_composite_dev *cdev) +static int midi_bind(struct usb_composite_dev *cdev) { struct f_midi_opts *midi_opts; int status; @@ -185,13 +185,13 @@ put: return status; } -static __refdata struct usb_composite_driver midi_driver = { +static struct usb_composite_driver midi_driver = { .name = (char *) longname, .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = midi_bind, - .unbind = __exit_p(midi_unbind), + .unbind = midi_unbind, }; module_usb_composite_driver(midi_driver); diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c index 614b06d..2baa572 100644 --- a/drivers/usb/gadget/legacy/hid.c +++ b/drivers/usb/gadget/legacy/hid.c @@ -106,7 +106,7 @@ static struct usb_gadget_strings *dev_strings[] = { /****************************** Configurations ******************************/ -static int __init do_config(struct usb_configuration *c) +static int do_config(struct usb_configuration *c) { struct hidg_func_node *e, *n; int status = 0; @@ -147,7 +147,7 @@ static 
struct usb_configuration config_driver = { /****************************** Gadget Bind ******************************/ -static int __init hid_bind(struct usb_composite_dev *cdev) +static int hid_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; struct list_head *tmp; @@ -205,7 +205,7 @@ put: return status; } -static int __exit hid_unbind(struct usb_composite_dev *cdev) +static int hid_unbind(struct usb_composite_dev *cdev) { struct hidg_func_node *n; @@ -216,7 +216,7 @@ static int __exit hid_unbind(struct usb_composite_dev *cdev) return 0; } -static int __init hidg_plat_driver_probe(struct platform_device *pdev) +static int hidg_plat_driver_probe(struct platform_device *pdev) { struct hidg_func_descriptor *func = dev_get_platdata(&pdev->dev); struct hidg_func_node *entry; @@ -252,13 +252,13 @@ static int hidg_plat_driver_remove(struct platform_device *pdev) /****************************** Some noise ******************************/ -static __refdata struct usb_composite_driver hidg_driver = { +static struct usb_composite_driver hidg_driver = { .name = "g_hid", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = hid_bind, - .unbind = __exit_p(hid_unbind), + .unbind = hid_unbind, }; static struct platform_driver hidg_plat_driver = { diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c index 8e27a8c..e7bfb08 100644 --- a/drivers/usb/gadget/legacy/mass_storage.c +++ b/drivers/usb/gadget/legacy/mass_storage.c @@ -130,7 +130,7 @@ static int msg_thread_exits(struct fsg_common *common) return 0; } -static int __init msg_do_config(struct usb_configuration *c) +static int msg_do_config(struct usb_configuration *c) { struct fsg_opts *opts; int ret; @@ -170,7 +170,7 @@ static struct usb_configuration msg_config_driver = { /****************************** Gadget Bind ******************************/ -static int __init msg_bind(struct usb_composite_dev *cdev) +static int msg_bind(struct usb_composite_dev *cdev) { static const struct fsg_operations ops = { .thread_exits = msg_thread_exits, @@ -248,7 +248,7 @@ static int msg_unbind(struct usb_composite_dev *cdev) /****************************** Some noise ******************************/ -static __refdata struct usb_composite_driver msg_driver = { +static struct usb_composite_driver msg_driver = { .name = "g_mass_storage", .dev = &msg_device_desc, .max_speed = USB_SPEED_SUPER, diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c index 39d27bb..b21b51f 100644 --- a/drivers/usb/gadget/legacy/multi.c +++ b/drivers/usb/gadget/legacy/multi.c @@ -149,7 +149,7 @@ static struct usb_function *f_acm_rndis; static struct usb_function *f_rndis; static struct usb_function *f_msg_rndis; -static __init int rndis_do_config(struct usb_configuration *c) +static int rndis_do_config(struct usb_configuration *c) { struct fsg_opts *fsg_opts; int ret; @@ -237,7 +237,7 @@ static struct usb_function *f_acm_multi; static struct usb_function *f_ecm; static struct usb_function *f_msg_multi; -static __init int cdc_do_config(struct usb_configuration *c) +static int cdc_do_config(struct usb_configuration *c) { struct fsg_opts *fsg_opts; int ret; @@ -466,7 +466,7 @@ fail: return status; } -static int __exit multi_unbind(struct usb_composite_dev *cdev) +static int multi_unbind(struct usb_composite_dev *cdev) { #ifdef CONFIG_USB_G_MULTI_CDC usb_put_function(f_msg_multi); @@ -497,13 +497,13 @@ static int __exit multi_unbind(struct usb_composite_dev *cdev) 
/****************************** Some noise ******************************/ -static __refdata struct usb_composite_driver multi_driver = { +static struct usb_composite_driver multi_driver = { .name = "g_multi", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = multi_bind, - .unbind = __exit_p(multi_unbind), + .unbind = multi_unbind, .needs_serial = 1, }; diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c index e90e23d..6ce7421 100644 --- a/drivers/usb/gadget/legacy/ncm.c +++ b/drivers/usb/gadget/legacy/ncm.c @@ -107,7 +107,7 @@ static struct usb_function *f_ncm; /*-------------------------------------------------------------------------*/ -static int __init ncm_do_config(struct usb_configuration *c) +static int ncm_do_config(struct usb_configuration *c) { int status; @@ -143,7 +143,7 @@ static struct usb_configuration ncm_config_driver = { /*-------------------------------------------------------------------------*/ -static int __init gncm_bind(struct usb_composite_dev *cdev) +static int gncm_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; struct f_ncm_opts *ncm_opts; @@ -186,7 +186,7 @@ fail: return status; } -static int __exit gncm_unbind(struct usb_composite_dev *cdev) +static int gncm_unbind(struct usb_composite_dev *cdev) { if (!IS_ERR_OR_NULL(f_ncm)) usb_put_function(f_ncm); @@ -195,13 +195,13 @@ static int __exit gncm_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver ncm_driver = { +static struct usb_composite_driver ncm_driver = { .name = "g_ncm", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = gncm_bind, - .unbind = __exit_p(gncm_unbind), + .unbind = gncm_unbind, }; module_usb_composite_driver(ncm_driver); diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c index 9b8fd70..4bb498a 100644 --- a/drivers/usb/gadget/legacy/nokia.c +++ b/drivers/usb/gadget/legacy/nokia.c @@ -118,7 +118,7 @@ static struct usb_function_instance *fi_obex1; static struct usb_function_instance *fi_obex2; static struct usb_function_instance *fi_phonet; -static int __init nokia_bind_config(struct usb_configuration *c) +static int nokia_bind_config(struct usb_configuration *c) { struct usb_function *f_acm; struct usb_function *f_phonet = NULL; @@ -224,7 +224,7 @@ err_get_acm: return status; } -static int __init nokia_bind(struct usb_composite_dev *cdev) +static int nokia_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; int status; @@ -307,7 +307,7 @@ err_usb: return status; } -static int __exit nokia_unbind(struct usb_composite_dev *cdev) +static int nokia_unbind(struct usb_composite_dev *cdev) { if (!IS_ERR_OR_NULL(f_obex1_cfg2)) usb_put_function(f_obex1_cfg2); @@ -338,13 +338,13 @@ static int __exit nokia_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver nokia_driver = { +static struct usb_composite_driver nokia_driver = { .name = "g_nokia", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = nokia_bind, - .unbind = __exit_p(nokia_unbind), + .unbind = nokia_unbind, }; module_usb_composite_driver(nokia_driver); diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c index d5b6ee7..1ce7df1 100644 --- a/drivers/usb/gadget/legacy/printer.c +++ b/drivers/usb/gadget/legacy/printer.c @@ -126,7 +126,7 @@ static struct usb_configuration printer_cfg_driver = { .bmAttributes = 
USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, }; -static int __init printer_do_config(struct usb_configuration *c) +static int printer_do_config(struct usb_configuration *c) { struct usb_gadget *gadget = c->cdev->gadget; int status = 0; @@ -152,7 +152,7 @@ static int __init printer_do_config(struct usb_configuration *c) return status; } -static int __init printer_bind(struct usb_composite_dev *cdev) +static int printer_bind(struct usb_composite_dev *cdev) { struct f_printer_opts *opts; int ret, len; @@ -191,7 +191,7 @@ static int __init printer_bind(struct usb_composite_dev *cdev) return ret; } -static int __exit printer_unbind(struct usb_composite_dev *cdev) +static int printer_unbind(struct usb_composite_dev *cdev) { usb_put_function(f_printer); usb_put_function_instance(fi_printer); @@ -199,7 +199,7 @@ static int __exit printer_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver printer_driver = { +static struct usb_composite_driver printer_driver = { .name = shortname, .dev = &device_desc, .strings = dev_strings, diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c index 1f5f978..8b7528f 100644 --- a/drivers/usb/gadget/legacy/serial.c +++ b/drivers/usb/gadget/legacy/serial.c @@ -174,7 +174,7 @@ out: return ret; } -static int __init gs_bind(struct usb_composite_dev *cdev) +static int gs_bind(struct usb_composite_dev *cdev) { int status; @@ -230,7 +230,7 @@ static int gs_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver gserial_driver = { +static struct usb_composite_driver gserial_driver = { .name = "g_serial", .dev = &device_desc, .strings = dev_strings, diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 8b80add..f9b4882 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c @@ -2397,7 +2397,7 @@ static int usb_target_bind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver usbg_driver = { +static struct usb_composite_driver usbg_driver = { .name = "g_target", .dev = &usbg_device_desc, .strings = usbg_strings, diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c index 04a3da2..72c976b 100644 --- a/drivers/usb/gadget/legacy/webcam.c +++ b/drivers/usb/gadget/legacy/webcam.c @@ -334,7 +334,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = { * USB configuration */ -static int __init +static int webcam_config_bind(struct usb_configuration *c) { int status = 0; @@ -358,7 +358,7 @@ static struct usb_configuration webcam_config_driver = { .MaxPower = CONFIG_USB_GADGET_VBUS_DRAW, }; -static int /* __init_or_exit */ +static int webcam_unbind(struct usb_composite_dev *cdev) { if (!IS_ERR_OR_NULL(f_uvc)) @@ -368,7 +368,7 @@ webcam_unbind(struct usb_composite_dev *cdev) return 0; } -static int __init +static int webcam_bind(struct usb_composite_dev *cdev) { struct f_uvc_opts *uvc_opts; @@ -422,7 +422,7 @@ error: * Driver */ -static __refdata struct usb_composite_driver webcam_driver = { +static struct usb_composite_driver webcam_driver = { .name = "g_webcam", .dev = &webcam_device_descriptor, .strings = webcam_device_strings, diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c index 5ee9515..c986e8a 100644 --- a/drivers/usb/gadget/legacy/zero.c +++ b/drivers/usb/gadget/legacy/zero.c @@ -272,7 +272,7 @@ static struct usb_function_instance 
*func_inst_lb; module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(qlen, "depth of loopback queue"); -static int __init zero_bind(struct usb_composite_dev *cdev) +static int zero_bind(struct usb_composite_dev *cdev) { struct f_ss_opts *ss_opts; struct f_lb_opts *lb_opts; @@ -400,7 +400,7 @@ static int zero_unbind(struct usb_composite_dev *cdev) return 0; } -static __refdata struct usb_composite_driver zero_driver = { +static struct usb_composite_driver zero_driver = { .name = "zero", .dev = &device_desc, .strings = dev_strings, diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c index 2fbedca..fc42264 100644 --- a/drivers/usb/gadget/udc/at91_udc.c +++ b/drivers/usb/gadget/udc/at91_udc.c @@ -1942,7 +1942,7 @@ err_unprepare_fclk: return retval; } -static int __exit at91udc_remove(struct platform_device *pdev) +static int at91udc_remove(struct platform_device *pdev) { struct at91_udc *udc = platform_get_drvdata(pdev); unsigned long flags; @@ -2018,7 +2018,7 @@ static int at91udc_resume(struct platform_device *pdev) #endif static struct platform_driver at91_udc_driver = { - .remove = __exit_p(at91udc_remove), + .remove = at91udc_remove, .shutdown = at91udc_shutdown, .suspend = at91udc_suspend, .resume = at91udc_resume, diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 4c01953..351d485 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -2186,7 +2186,7 @@ static int usba_udc_probe(struct platform_device *pdev) return 0; } -static int __exit usba_udc_remove(struct platform_device *pdev) +static int usba_udc_remove(struct platform_device *pdev) { struct usba_udc *udc; int i; @@ -2258,7 +2258,7 @@ static int usba_udc_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume); static struct platform_driver udc_driver = { - .remove = __exit_p(usba_udc_remove), + .remove = usba_udc_remove, .driver = { .name = "atmel_usba_udc", .pm = &usba_udc_pm_ops, diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index 55fcb93..c60022b 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -2525,7 +2525,7 @@ err_kfree: /* Driver removal function * Free resources and finish pending transactions */ -static int __exit fsl_udc_remove(struct platform_device *pdev) +static int fsl_udc_remove(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -2663,7 +2663,7 @@ static const struct platform_device_id fsl_udc_devtype[] = { }; MODULE_DEVICE_TABLE(platform, fsl_udc_devtype); static struct platform_driver udc_driver = { - .remove = __exit_p(fsl_udc_remove), + .remove = fsl_udc_remove, /* Just for FSL i.mx SoC currently */ .id_table = fsl_udc_devtype, /* these suspend and resume are not usb suspend and resume */ diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c index fb4df15..3970f45 100644 --- a/drivers/usb/gadget/udc/fusb300_udc.c +++ b/drivers/usb/gadget/udc/fusb300_udc.c @@ -1342,7 +1342,7 @@ static const struct usb_gadget_ops fusb300_gadget_ops = { .udc_stop = fusb300_udc_stop, }; -static int __exit fusb300_remove(struct platform_device *pdev) +static int fusb300_remove(struct platform_device *pdev) { struct fusb300 *fusb300 = platform_get_drvdata(pdev); @@ -1492,7 +1492,7 
@@ clean_up: } static struct platform_driver fusb300_driver = { - .remove = __exit_p(fusb300_remove), + .remove = fusb300_remove, .driver = { .name = (char *) udc_name, }, diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c index 8c7c83c..309706f 100644 --- a/drivers/usb/gadget/udc/m66592-udc.c +++ b/drivers/usb/gadget/udc/m66592-udc.c @@ -1528,7 +1528,7 @@ static const struct usb_gadget_ops m66592_gadget_ops = { .pullup = m66592_pullup, }; -static int __exit m66592_remove(struct platform_device *pdev) +static int m66592_remove(struct platform_device *pdev) { struct m66592 *m66592 = platform_get_drvdata(pdev); @@ -1695,7 +1695,7 @@ clean_up: /*-------------------------------------------------------------------------*/ static struct platform_driver m66592_driver = { - .remove = __exit_p(m66592_remove), + .remove = m66592_remove, .driver = { .name = (char *) udc_name, }, diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c index 2495fe9..0293f71 100644 --- a/drivers/usb/gadget/udc/r8a66597-udc.c +++ b/drivers/usb/gadget/udc/r8a66597-udc.c @@ -1820,7 +1820,7 @@ static const struct usb_gadget_ops r8a66597_gadget_ops = { .set_selfpowered = r8a66597_set_selfpowered, }; -static int __exit r8a66597_remove(struct platform_device *pdev) +static int r8a66597_remove(struct platform_device *pdev) { struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); @@ -1974,7 +1974,7 @@ clean_up2: /*-------------------------------------------------------------------------*/ static struct platform_driver r8a66597_driver = { - .remove = __exit_p(r8a66597_remove), + .remove = r8a66597_remove, .driver = { .name = (char *) udc_name, }, diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index dd3e9fd..1f24274 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -2071,8 +2071,8 @@ static int xudc_probe(struct platform_device *pdev) /* Map the registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); udc->addr = devm_ioremap_resource(&pdev->dev, res); - if (!udc->addr) - return -ENOMEM; + if (IS_ERR(udc->addr)) + return PTR_ERR(udc->addr); irq = platform_get_irq(pdev, 0); if (irq < 0) { diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c index 9db74ca..275c92e 100644 --- a/drivers/usb/host/ehci-msm.c +++ b/drivers/usb/host/ehci-msm.c @@ -88,13 +88,20 @@ static int ehci_msm_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hcd->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(hcd->regs)) { - ret = PTR_ERR(hcd->regs); + if (!res) { + dev_err(&pdev->dev, "Unable to get memory resource\n"); + ret = -ENODEV; goto put_hcd; } + hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); + hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + dev_err(&pdev->dev, "ioremap failed\n"); + ret = -ENOMEM; + goto put_hcd; + } /* * OTG driver takes care of PHY initialization, clock management, diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index f5397a5..7d34cbf 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2026,8 +2026,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, break; case COMP_DEV_ERR: case COMP_STALL: + frame->status = -EPROTO; + skip_td = true; + break; case COMP_TX_ERR: frame->status = -EPROTO; + if (event_trb != td->last_trb) + return 0; skip_td = true; break; case 
COMP_STOP: @@ -2640,7 +2645,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) xhci_halt(xhci); hw_died: spin_unlock(&xhci->lock); - return -ESHUTDOWN; + return IRQ_HANDLED; } /* diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8e421b8..ea75e8c 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1267,7 +1267,7 @@ union xhci_trb { * since the command ring is 64-byte aligned. * It must also be greater than 16. */ -#define TRBS_PER_SEGMENT 64 +#define TRBS_PER_SEGMENT 256 /* Allow two commands + a link TRB, along with any reserved command TRBs */ #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c index 1e0e10d..3af263c 100644 --- a/drivers/usb/phy/phy-isp1301-omap.c +++ b/drivers/usb/phy/phy-isp1301-omap.c @@ -94,7 +94,7 @@ struct isp1301 { #if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3) -#if defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE) +#if defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE)) #include <linux/i2c/tps65010.h> diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 84ce2d7..9031750 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -127,6 +127,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 829604d..f5257af 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -61,7 +61,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) }, { USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) }, - { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) }, { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1), .driver_info = PL2303_QUIRK_UART_STATE_IDX0 }, { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65), diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 71fd9da..e3b7af8 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -62,10 +62,6 @@ #define ALCATEL_VENDOR_ID 0x11f7 #define ALCATEL_PRODUCT_ID 0x02df -/* Samsung I330 phone cradle */ -#define SAMSUNG_VENDOR_ID 0x04e8 -#define SAMSUNG_PRODUCT_ID 0x8001 - #define SIEMENS_VENDOR_ID 0x11f5 #define SIEMENS_PRODUCT_ID_SX1 0x0001 #define SIEMENS_PRODUCT_ID_X65 0x0003 diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index bf2bd40..60afb39 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -95,7 +95,7 @@ static const struct usb_device_id id_table[] = { .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, - { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID), + { USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SAMSUNG_VENDOR_ID, 
SAMSUNG_SPH_I500_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 9893d69..f58caa9 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt, } static int uas_use_uas_driver(struct usb_interface *intf, - const struct usb_device_id *id) + const struct usb_device_id *id, + unsigned long *flags_ret) { struct usb_host_endpoint *eps[4] = { }; struct usb_device *udev = interface_to_usbdev(intf); @@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf, * this writing the following versions exist: * ASM1051 - no uas support version * ASM1051 - with broken (*) uas support - * ASM1053 - with working uas support + * ASM1053 - with working uas support, but problems with large xfers * ASM1153 - with working uas support * * Devices with these chips re-use a number of device-ids over the @@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf, } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { /* Possibly an ASM1051, disable uas */ flags |= US_FL_IGNORE_UAS; + } else { + /* ASM1053, these have issues with large transfers */ + flags |= US_FL_MAX_SECTORS_240; } } @@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf, return 0; } + if (flags_ret) + *flags_ret = flags; + return 1; } diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 6cdabdc..6d3122a 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd) static int uas_slave_alloc(struct scsi_device *sdev) { - sdev->hostdata = (void *)sdev->host->hostdata; + struct uas_dev_info *devinfo = + (struct uas_dev_info *)sdev->host->hostdata; + + sdev->hostdata = devinfo; /* USB has unusual DMA-alignment requirements: Although the * starting address of each scatter-gather element doesn't matter, @@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev) */ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); + if (devinfo->flags & US_FL_MAX_SECTORS_64) + blk_queue_max_hw_sectors(sdev->request_queue, 64); + else if (devinfo->flags & US_FL_MAX_SECTORS_240) + blk_queue_max_hw_sectors(sdev->request_queue, 240); + return 0; } @@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) struct Scsi_Host *shost = NULL; struct uas_dev_info *devinfo; struct usb_device *udev = interface_to_usbdev(intf); + unsigned long dev_flags; - if (!uas_use_uas_driver(intf, id)) + if (!uas_use_uas_driver(intf, id, &dev_flags)) return -ENODEV; if (uas_switch_interface(udev, intf)) @@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) devinfo->udev = udev; devinfo->resetting = 0; devinfo->shutdown = 0; - devinfo->flags = id->driver_info; - usb_stor_adjust_quirks(udev, &devinfo->flags); + devinfo->flags = dev_flags; init_usb_anchor(&devinfo->cmd_urbs); init_usb_anchor(&devinfo->sense_urbs); init_usb_anchor(&devinfo->data_urbs); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index d684b4b..caf1888 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -766,6 +766,13 @@ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_GO_SLOW ), +/* Reported by Christian Schaller <cschalle@redhat.com> */ 
+UNUSUAL_DEV( 0x059f, 0x0651, 0x0000, 0x0000, + "LaCie", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_WP_DETECT ), + /* Submitted by Joel Bourquard <numlock@freesurf.ch> * Some versions of this device need the SubClass and Protocol overrides * while others don't. diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 5600c33..6c10c88 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags) US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT | US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 | US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE | - US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES); + US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES | + US_FL_MAX_SECTORS_240); p = quirks; while (*p) { @@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags) case 'f': f |= US_FL_NO_REPORT_OPCODES; break; + case 'g': + f |= US_FL_MAX_SECTORS_240; + break; case 'h': f |= US_FL_CAPACITY_HEURISTICS; break; @@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf, /* If uas is enabled and this device can do uas then ignore it. */ #if IS_ENABLED(CONFIG_USB_UAS) - if (uas_use_uas_driver(intf, id)) + if (uas_use_uas_driver(intf, id, NULL)) return -ENXIO; #endif diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 69fab0f..e9851ad 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -907,8 +907,14 @@ static void vfio_pci_request(void *device_data, unsigned int count) mutex_lock(&vdev->igate); if (vdev->req_trigger) { - dev_dbg(&vdev->pdev->dev, "Requesting device from user\n"); + if (!(count % 10)) + dev_notice_ratelimited(&vdev->pdev->dev, + "Relaying device request to user (#%u)\n", + count); eventfd_signal(vdev->req_trigger, 1); + } else if (count == 0) { + dev_warn(&vdev->pdev->dev, + "No device request channel registered, blocked until released by user\n"); } mutex_unlock(&vdev->igate); diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 0d33662..e1278fe 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -710,6 +710,8 @@ void *vfio_del_group_dev(struct device *dev) void *device_data = device->device_data; struct vfio_unbound_dev *unbound; unsigned int i = 0; + long ret; + bool interrupted = false; /* * The group exists so long as we have a device reference. 
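The usb-storage/uas changes above introduce US_FL_MAX_SECTORS_240: uas_use_uas_driver() now hands back the quirk flags it computed (setting the new flag for ASM1053-style bridges), uas_slave_alloc() turns the flag into a request-queue limit, and usb_stor_adjust_quirks() accepts it as the new 'g' quirk letter. A stand-alone sketch of that letter-to-flag-to-limit path, with illustrative flag values (the real definitions live in the kernel's usb_usual.h):

#include <stdio.h>

/* Illustrative bit values only. */
#define US_FL_MAX_SECTORS_64   (1u << 0)
#define US_FL_MAX_SECTORS_240  (1u << 1)

/* Mirrors the new 'g' case added to usb_stor_adjust_quirks(). */
static unsigned int quirk_letter_to_flag(char c)
{
    switch (c) {
    case 'g':
        return US_FL_MAX_SECTORS_240;
    default:
        return 0;
    }
}

/* Mirrors the flag handling added to uas_slave_alloc(). */
static unsigned int max_hw_sectors(unsigned int flags)
{
    if (flags & US_FL_MAX_SECTORS_64)
        return 64;
    if (flags & US_FL_MAX_SECTORS_240)
        return 240;
    return 0;   /* leave the block layer default alone */
}

int main(void)
{
    unsigned int flags = quirk_letter_to_flag('g');

    printf("max_hw_sectors limit: %u\n", max_hw_sectors(flags));
    return 0;
}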
Get @@ -755,9 +757,22 @@ void *vfio_del_group_dev(struct device *dev) vfio_device_put(device); - } while (wait_event_interruptible_timeout(vfio.release_q, - !vfio_dev_present(group, dev), - HZ * 10) <= 0); + if (interrupted) { + ret = wait_event_timeout(vfio.release_q, + !vfio_dev_present(group, dev), HZ * 10); + } else { + ret = wait_event_interruptible_timeout(vfio.release_q, + !vfio_dev_present(group, dev), HZ * 10); + if (ret == -ERESTARTSYS) { + interrupted = true; + dev_warn(dev, + "Device is currently in use, task" + " \"%s\" (%d) " + "blocked until device is released", + current->comm, task_pid_nr(current)); + } + } + } while (ret <= 0); vfio_group_put(group); diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index 5db43fc..7dd4631 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c @@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static void evtchn_2l_resume(void) +{ + int i; + + for_each_online_cpu(i) + memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); +} + static const struct evtchn_ops evtchn_ops_2l = { .max_channels = evtchn_2l_max_channels, .nr_channels = evtchn_2l_max_channels, @@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = { .mask = evtchn_2l_mask, .unmask = evtchn_2l_unmask, .handle_events = evtchn_2l_handle_events, + .resume = evtchn_2l_resume, }; void __init xen_evtchn_2l_init(void) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 70fba97..2b8553b 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq) if (rc) goto err; - bind_evtchn_to_cpu(evtchn, 0); info->evtchn = evtchn; + bind_evtchn_to_cpu(evtchn, 0); rc = xen_evtchn_port_setup(info); if (rc) @@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq) mutex_unlock(&irq_mapping_update_lock); - /* new event channels are always bound to cpu 0 */ - irq_set_affinity(irq, cpumask_of(0)); + bind_evtchn_to_cpu(evtchn, info->cpu); + /* This will be deferred until interrupt is processed */ + irq_set_affinity(irq, cpumask_of(info->cpu)); /* Unmask the event channel. 
*/ enable_irq(irq); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index d5bb1a3..8927485 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -327,30 +327,10 @@ static int map_grant_pages(struct grant_map *map) return err; } -struct unmap_grant_pages_callback_data -{ - struct completion completion; - int result; -}; - -static void unmap_grant_callback(int result, - struct gntab_unmap_queue_data *data) -{ - struct unmap_grant_pages_callback_data* d = data->data; - - d->result = result; - complete(&d->completion); -} - static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) { int i, err = 0; struct gntab_unmap_queue_data unmap_data; - struct unmap_grant_pages_callback_data data; - - init_completion(&data.completion); - unmap_data.data = &data; - unmap_data.done= &unmap_grant_callback; if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { int pgno = (map->notify.addr >> PAGE_SHIFT); @@ -367,11 +347,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) unmap_data.pages = map->pages + offset; unmap_data.count = pages; - gnttab_unmap_refs_async(&unmap_data); - - wait_for_completion(&data.completion); - if (data.result) - return data.result; + err = gnttab_unmap_refs_sync(&unmap_data); + if (err) + return err; for (i = 0; i < pages; i++) { if (map->unmap_ops[offset+i].status) diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 17972fb..b1c7170 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -123,6 +123,11 @@ struct gnttab_ops { int (*query_foreign_access)(grant_ref_t ref); }; +struct unmap_refs_callback_data { + struct completion completion; + int result; +}; + static struct gnttab_ops *gnttab_interface; static int grant_table_version; @@ -863,6 +868,29 @@ void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item) } EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async); +static void unmap_refs_callback(int result, + struct gntab_unmap_queue_data *data) +{ + struct unmap_refs_callback_data *d = data->data; + + d->result = result; + complete(&d->completion); +} + +int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item) +{ + struct unmap_refs_callback_data data; + + init_completion(&data.completion); + item->data = &data; + item->done = &unmap_refs_callback; + gnttab_unmap_refs_async(item); + wait_for_completion(&data.completion); + + return data.result; +} +EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync); + static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) { int rc; diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index bf19407..9e6a851 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -131,6 +131,8 @@ static void do_suspend(void) goto out_resume; } + xen_arch_suspend(); + si.cancelled = 1; err = stop_machine(xen_suspend, &si, cpumask_of(0)); @@ -148,11 +150,12 @@ static void do_suspend(void) si.cancelled = 1; } + xen_arch_resume(); + out_resume: - if (!si.cancelled) { - xen_arch_resume(); + if (!si.cancelled) xs_resume(); - } else + else xs_suspend_cancel(); dpm_resume_end(si.cancelled ? 
PMSG_THAW : PMSG_RESTORE); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 810ad41..4c54932 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -235,7 +235,7 @@ retry: #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { - xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order); + xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order); if (xen_io_tlb_start) break; order--; diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 75fe3d4..9c23420 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ -16,8 +16,8 @@ #include "conf_space.h" #include "conf_space_quirks.h" -bool permissive; -module_param(permissive, bool, 0644); +bool xen_pcibk_permissive; +module_param_named(permissive, xen_pcibk_permissive, bool, 0644); /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */ @@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) * This means that some fields may still be read-only because * they have entries in the config_field list that intercept * the write and do nothing. */ - if (dev_data->permissive || permissive) { + if (dev_data->permissive || xen_pcibk_permissive) { switch (size) { case 1: err = pci_write_config_byte(dev, offset, diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index 2e1d73d..62461a8 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h @@ -64,7 +64,7 @@ struct config_field_entry { void *data; }; -extern bool permissive; +extern bool xen_pcibk_permissive; #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index c2260a0..ad3d17d 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -118,7 +118,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) cmd->val = value; - if (!permissive && (!dev_data || !dev_data->permissive)) + if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive)) return 0; /* Only allow the guest to control certain bits. 
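gnttab_unmap_refs_sync(), added in the grant-table hunk above, is the familiar async-to-sync wrapper: hand the asynchronous API a callback that records the result and signals a completion, then block until it fires. A stand-alone analogue of the same pattern using pthreads instead of kernel completions; all names are illustrative, not Xen APIs:

#include <pthread.h>
#include <stdio.h>

struct completion_example {        /* plays the role of struct completion */
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int done;
    int result;
};

struct async_request {             /* plays the role of the unmap queue data */
    void (*done)(int result, struct async_request *req);
    void *data;
};

/* The "async" primitive: here it completes immediately, but it could just
 * as well fire later from another thread. */
static void submit_async(struct async_request *req)
{
    req->done(0, req);             /* report success */
}

static void sync_callback(int result, struct async_request *req)
{
    struct completion_example *c = req->data;

    pthread_mutex_lock(&c->lock);
    c->result = result;
    c->done = 1;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

/* The synchronous wrapper, shaped like gnttab_unmap_refs_sync(). */
static int submit_sync(struct async_request *req)
{
    struct completion_example c = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
        .done = 0,
        .result = 0,
    };

    req->data = &c;
    req->done = sync_callback;
    submit_async(req);

    pthread_mutex_lock(&c.lock);
    while (!c.done)
        pthread_cond_wait(&c.cond, &c.lock);
    pthread_mutex_unlock(&c.lock);

    return c.result;
}

int main(void)
{
    struct async_request req = { 0 };

    printf("sync result: %d\n", submit_sync(&req));
    return 0;
}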
*/ diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 564b315..5390a67 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -57,6 +57,7 @@ #include <xen/xen.h> #include <xen/xenbus.h> #include <xen/events.h> +#include <xen/xen-ops.h> #include <xen/page.h> #include <xen/hvm.h> @@ -735,6 +736,30 @@ static int __init xenstored_local_init(void) return err; } +static int xenbus_resume_cb(struct notifier_block *nb, + unsigned long action, void *data) +{ + int err = 0; + + if (xen_hvm_domain()) { + uint64_t v; + + err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); + if (!err && v) + xen_store_evtchn = v; + else + pr_warn("Cannot update xenstore event channel: %d\n", + err); + } else + xen_store_evtchn = xen_start_info->store_evtchn; + + return err; +} + +static struct notifier_block xenbus_resume_nb = { + .notifier_call = xenbus_resume_cb, +}; + static int __init xenbus_init(void) { int err = 0; @@ -793,6 +818,10 @@ static int __init xenbus_init(void) goto out_error; } + if ((xen_store_domain_type != XS_LOCAL) && + (xen_store_domain_type != XS_UNKNOWN)) + xen_resume_notifier_register(&xenbus_resume_nb); + #ifdef CONFIG_XEN_COMPAT_XENFS /* * Create xenfs mountpoint in /proc for compatibility with diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index cde698a..a2ae427 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1802,6 +1802,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); + BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item); + inode->i_version = btrfs_stack_inode_sequence(inode_item); inode->i_rdev = 0; *rdev = btrfs_stack_inode_rdev(inode_item); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1eef4ee..7effed6 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3178,10 +3178,8 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, bi = btrfs_item_ptr_offset(leaf, path->slots[0]); write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(path); fail: - if (ret) - btrfs_abort_transaction(trans, root, ret); + btrfs_release_path(path); return ret; } @@ -3305,8 +3303,7 @@ again: spin_lock(&block_group->lock); if (block_group->cached != BTRFS_CACHE_FINISHED || - !btrfs_test_opt(root, SPACE_CACHE) || - block_group->delalloc_bytes) { + !btrfs_test_opt(root, SPACE_CACHE)) { /* * don't bother trying to write stuff out _if_ * a) we're not cached, @@ -3408,17 +3405,14 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans, int loops = 0; spin_lock(&cur_trans->dirty_bgs_lock); - if (!list_empty(&cur_trans->dirty_bgs)) { - list_splice_init(&cur_trans->dirty_bgs, &dirty); + if (list_empty(&cur_trans->dirty_bgs)) { + spin_unlock(&cur_trans->dirty_bgs_lock); + return 0; } + list_splice_init(&cur_trans->dirty_bgs, &dirty); spin_unlock(&cur_trans->dirty_bgs_lock); again: - if (list_empty(&dirty)) { - btrfs_free_path(path); - return 0; - } - /* * make sure all the block groups on our dirty list actually * exist @@ -3431,18 +3425,16 @@ again: return -ENOMEM; } + /* + * cache_write_mutex is here only to save us from balance or automatic + * removal of empty block groups deleting this block group while we are + * writing out the cache + */ + 
mutex_lock(&trans->transaction->cache_write_mutex); while (!list_empty(&dirty)) { cache = list_first_entry(&dirty, struct btrfs_block_group_cache, dirty_list); - - /* - * cache_write_mutex is here only to save us from balance - * deleting this block group while we are writing out the - * cache - */ - mutex_lock(&trans->transaction->cache_write_mutex); - /* * this can happen if something re-dirties a block * group that is already under IO. Just wait for it to @@ -3493,9 +3485,30 @@ again: ret = 0; } } - if (!ret) + if (!ret) { ret = write_one_cache_group(trans, root, path, cache); - mutex_unlock(&trans->transaction->cache_write_mutex); + /* + * Our block group might still be attached to the list + * of new block groups in the transaction handle of some + * other task (struct btrfs_trans_handle->new_bgs). This + * means its block group item isn't yet in the extent + * tree. If this happens ignore the error, as we will + * try again later in the critical section of the + * transaction commit. + */ + if (ret == -ENOENT) { + ret = 0; + spin_lock(&cur_trans->dirty_bgs_lock); + if (list_empty(&cache->dirty_list)) { + list_add_tail(&cache->dirty_list, + &cur_trans->dirty_bgs); + btrfs_get_block_group(cache); + } + spin_unlock(&cur_trans->dirty_bgs_lock); + } else if (ret) { + btrfs_abort_transaction(trans, root, ret); + } + } /* if its not on the io list, we need to put the block group */ if (should_put) @@ -3503,7 +3516,16 @@ again: if (ret) break; + + /* + * Avoid blocking other tasks for too long. It might even save + * us from writing caches for block groups that are going to be + * removed. + */ + mutex_unlock(&trans->transaction->cache_write_mutex); + mutex_lock(&trans->transaction->cache_write_mutex); } + mutex_unlock(&trans->transaction->cache_write_mutex); /* * go through delayed refs for all the stuff we've just kicked off @@ -3514,8 +3536,15 @@ again: loops++; spin_lock(&cur_trans->dirty_bgs_lock); list_splice_init(&cur_trans->dirty_bgs, &dirty); + /* + * dirty_bgs_lock protects us from concurrent block group + * deletes too (not just cache_write_mutex). + */ + if (!list_empty(&dirty)) { + spin_unlock(&cur_trans->dirty_bgs_lock); + goto again; + } spin_unlock(&cur_trans->dirty_bgs_lock); - goto again; } btrfs_free_path(path); @@ -3588,8 +3617,11 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, ret = 0; } } - if (!ret) + if (!ret) { ret = write_one_cache_group(trans, root, path, cache); + if (ret) + btrfs_abort_transaction(trans, root, ret); + } /* if its not on the io list, we need to put the block group */ if (should_put) @@ -7537,7 +7569,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info, * returns the key for the extent through ins, and a tree buffer for * the first block of the extent through buf. * - * returns the tree buffer or NULL. + * returns the tree buffer or an ERR_PTR on error. 
*/ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -7548,6 +7580,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, struct btrfs_key ins; struct btrfs_block_rsv *block_rsv; struct extent_buffer *buf; + struct btrfs_delayed_extent_op *extent_op; u64 flags = 0; int ret; u32 blocksize = root->nodesize; @@ -7568,13 +7601,14 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, ret = btrfs_reserve_extent(root, blocksize, blocksize, empty_size, hint, &ins, 0, 0); - if (ret) { - unuse_block_rsv(root->fs_info, block_rsv, blocksize); - return ERR_PTR(ret); - } + if (ret) + goto out_unuse; buf = btrfs_init_new_buffer(trans, root, ins.objectid, level); - BUG_ON(IS_ERR(buf)); /* -ENOMEM */ + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_free_reserved; + } if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { if (parent == 0) @@ -7584,9 +7618,11 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, BUG_ON(parent > 0); if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { - struct btrfs_delayed_extent_op *extent_op; extent_op = btrfs_alloc_delayed_extent_op(); - BUG_ON(!extent_op); /* -ENOMEM */ + if (!extent_op) { + ret = -ENOMEM; + goto out_free_buf; + } if (key) memcpy(&extent_op->key, key, sizeof(extent_op->key)); else @@ -7601,13 +7637,24 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, extent_op->level = level; ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, - ins.objectid, - ins.offset, parent, root_objectid, - level, BTRFS_ADD_DELAYED_EXTENT, - extent_op, 0); - BUG_ON(ret); /* -ENOMEM */ + ins.objectid, ins.offset, + parent, root_objectid, level, + BTRFS_ADD_DELAYED_EXTENT, + extent_op, 0); + if (ret) + goto out_free_delayed; } return buf; + +out_free_delayed: + btrfs_free_delayed_extent_op(extent_op); +out_free_buf: + free_extent_buffer(buf); +out_free_reserved: + btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0); +out_unuse: + unuse_block_rsv(root->fs_info, block_rsv, blocksize); + return ERR_PTR(ret); } struct walk_control { diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 782f3bc..c32d226 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4560,36 +4560,37 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb) do { index--; page = eb->pages[index]; - if (page && mapped) { + if (!page) + continue; + if (mapped) spin_lock(&page->mapping->private_lock); + /* + * We do this since we'll remove the pages after we've + * removed the eb from the radix tree, so we could race + * and have this page now attached to the new eb. So + * only clear page_private if it's still connected to + * this eb. + */ + if (PagePrivate(page) && + page->private == (unsigned long)eb) { + BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); + BUG_ON(PageDirty(page)); + BUG_ON(PageWriteback(page)); /* - * We do this since we'll remove the pages after we've - * removed the eb from the radix tree, so we could race - * and have this page now attached to the new eb. So - * only clear page_private if it's still connected to - * this eb. + * We need to make sure we haven't be attached + * to a new eb. */ - if (PagePrivate(page) && - page->private == (unsigned long)eb) { - BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); - BUG_ON(PageDirty(page)); - BUG_ON(PageWriteback(page)); - /* - * We need to make sure we haven't be attached - * to a new eb. 
- */ - ClearPagePrivate(page); - set_page_private(page, 0); - /* One for the page private */ - page_cache_release(page); - } - spin_unlock(&page->mapping->private_lock); - - } - if (page) { - /* One for when we alloced the page */ + ClearPagePrivate(page); + set_page_private(page, 0); + /* One for the page private */ page_cache_release(page); } + + if (mapped) + spin_unlock(&page->mapping->private_lock); + + /* One for when we alloced the page */ + page_cache_release(page); } while (index != 0); } @@ -4771,6 +4772,25 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, start >> PAGE_CACHE_SHIFT); if (eb && atomic_inc_not_zero(&eb->refs)) { rcu_read_unlock(); + /* + * Lock our eb's refs_lock to avoid races with + * free_extent_buffer. When we get our eb it might be flagged + * with EXTENT_BUFFER_STALE and another task running + * free_extent_buffer might have seen that flag set, + * eb->refs == 2, that the buffer isn't under IO (dirty and + * writeback flags not set) and it's still in the tree (flag + * EXTENT_BUFFER_TREE_REF set), therefore being in the process + * of decrementing the extent buffer's reference count twice. + * So here we could race and increment the eb's reference count, + * clear its stale flag, mark it as dirty and drop our reference + * before the other task finishes executing free_extent_buffer, + * which would later result in an attempt to free an extent + * buffer that is dirty. + */ + if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { + spin_lock(&eb->refs_lock); + spin_unlock(&eb->refs_lock); + } mark_extent_buffer_accessed(eb, NULL); return eb; } @@ -4870,6 +4890,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, mark_extent_buffer_accessed(exists, p); goto free_eb; } + exists = NULL; /* * Do this so attach doesn't complain and we need to @@ -4933,12 +4954,12 @@ again: return eb; free_eb: + WARN_ON(!atomic_dec_and_test(&eb->refs)); for (i = 0; i < num_pages; i++) { if (eb->pages[i]) unlock_page(eb->pages[i]); } - WARN_ON(!atomic_dec_and_test(&eb->refs)); btrfs_release_extent_buffer(eb); return exists; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 81fa75a..9dbe5b5 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -86,7 +86,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, mapping_set_gfp_mask(inode->i_mapping, mapping_gfp_mask(inode->i_mapping) & - ~(GFP_NOFS & ~__GFP_HIGHMEM)); + ~(__GFP_FS | __GFP_HIGHMEM)); return inode; } @@ -1218,7 +1218,7 @@ out: * * This function writes out a free space cache struct to disk for quick recovery * on mount. This will return 0 if it was successfull in writing the cache out, - * and -1 if it was not. + * or an errno if it was not. */ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, struct btrfs_free_space_ctl *ctl, @@ -1235,12 +1235,12 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, int must_iput = 0; if (!i_size_read(inode)) - return -1; + return -EIO; WARN_ON(io_ctl->pages); ret = io_ctl_init(io_ctl, inode, root, 1); if (ret) - return -1; + return ret; if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { down_write(&block_group->data_rwsem); @@ -1258,7 +1258,9 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, } /* Lock all pages first so we can lock the extent safely. 
*/ - io_ctl_prepare_pages(io_ctl, inode, 0); + ret = io_ctl_prepare_pages(io_ctl, inode, 0); + if (ret) + goto out; lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 0, &cached_state); @@ -3464,6 +3466,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; int ret; struct btrfs_io_ctl io_ctl; + bool release_metadata = true; if (!btrfs_test_opt(root, INODE_MAP_CACHE)) return 0; @@ -3471,11 +3474,20 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, memset(&io_ctl, 0, sizeof(io_ctl)); ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans, path, 0); - if (!ret) + if (!ret) { + /* + * At this point writepages() didn't error out, so our metadata + * reservation is released when the writeback finishes, at + * inode.c:btrfs_finish_ordered_io(), regardless of it finishing + * with or without an error. + */ + release_metadata = false; ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0); + } if (ret) { - btrfs_delalloc_release_metadata(inode, inode->i_size); + if (release_metadata) + btrfs_delalloc_release_metadata(inode, inode->i_size); #ifdef DEBUG btrfs_err(root->fs_info, "failed to write free ino cache for root %llu", diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ada4d24..8bb01367 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3632,25 +3632,28 @@ static void btrfs_read_locked_inode(struct inode *inode) BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); + inode->i_version = btrfs_inode_sequence(leaf, inode_item); + inode->i_generation = BTRFS_I(inode)->generation; + inode->i_rdev = 0; + rdev = btrfs_inode_rdev(leaf, inode_item); + + BTRFS_I(inode)->index_cnt = (u64)-1; + BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); + +cache_index: /* * If we were modified in the current generation and evicted from memory * and then re-read we need to do a full sync since we don't have any * idea about which extents were modified before we were evicted from * cache. + * + * This is required for both inode re-read from disk and delayed inode + * in delayed_nodes_tree. 
*/ if (BTRFS_I(inode)->last_trans == root->fs_info->generation) set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); - inode->i_version = btrfs_inode_sequence(leaf, inode_item); - inode->i_generation = BTRFS_I(inode)->generation; - inode->i_rdev = 0; - rdev = btrfs_inode_rdev(leaf, inode_item); - - BTRFS_I(inode)->index_cnt = (u64)-1; - BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); - -cache_index: path->slots[0]++; if (inode->i_nlink != 1 || path->slots[0] >= btrfs_header_nritems(leaf)) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b05653f..1c22c65 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2410,7 +2410,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, "Attempt to delete subvolume %llu during send", dest->root_key.objectid); err = -EPERM; - goto out_dput; + goto out_unlock_inode; } d_invalidate(dentry); @@ -2505,6 +2505,7 @@ out_up_write: root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); spin_unlock(&dest->root_item_lock); } +out_unlock_inode: mutex_unlock(&inode->i_mutex); if (!err) { shrink_dcache_sb(root->fs_info->sb); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 157cc54..760c4a5 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -722,6 +722,7 @@ void btrfs_start_ordered_extent(struct inode *inode, int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) { int ret = 0; + int ret_wb = 0; u64 end; u64 orig_end; struct btrfs_ordered_extent *ordered; @@ -741,9 +742,14 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) if (ret) return ret; - ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end); - if (ret) - return ret; + /* + * If we have a writeback error don't return immediately. Wait first + * for any ordered extents that haven't completed yet. This is to make + * sure no one can dirty the same page ranges and call writepages() + * before the ordered extents complete - to avoid failures (-EEXIST) + * when adding the new ordered extents to the ordered tree. + */ + ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end); end = orig_end; while (1) { @@ -767,7 +773,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) break; end--; } - return ret; + return ret_wb ? 
ret_wb : ret; } /* diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8bcd2a0..96aebf3 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1058,6 +1058,7 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans, struct extent_map *em; struct list_head *search_list = &trans->transaction->pending_chunks; int ret = 0; + u64 physical_start = *start; again: list_for_each_entry(em, search_list, list) { @@ -1068,9 +1069,9 @@ again: for (i = 0; i < map->num_stripes; i++) { if (map->stripes[i].dev != device) continue; - if (map->stripes[i].physical >= *start + len || + if (map->stripes[i].physical >= physical_start + len || map->stripes[i].physical + em->orig_block_len <= - *start) + physical_start) continue; *start = map->stripes[i].physical + em->orig_block_len; @@ -1193,8 +1194,14 @@ again: */ if (contains_pending_extent(trans, device, &search_start, - hole_size)) - hole_size = 0; + hole_size)) { + if (key.offset >= search_start) { + hole_size = key.offset - search_start; + } else { + WARN_ON_ONCE(1); + hole_size = 0; + } + } if (hole_size > max_hole_size) { max_hole_start = search_start; diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index da94e41..5373567 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -173,5 +173,5 @@ MODULE_LICENSE("GPL"); MODULE_VERSION("0.0.2"); MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration."); -module_init(configfs_init); +core_initcall(configfs_init); module_exit(configfs_exit); diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 59fedbc..86a2121 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -121,7 +121,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, int len, i; int err = -ENOMEM; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); + entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return err; @@ -659,6 +659,9 @@ int setup_arg_pages(struct linux_binprm *bprm, if (stack_base > STACK_SIZE_MAX) stack_base = STACK_SIZE_MAX; + /* Add space for stack randomization. */ + stack_base += (STACK_RND_MASK << PAGE_SHIFT); + /* Make sure we didn't let the argument array grow too large. */ if (vma->vm_end - vma->vm_start > stack_base) return -ENOMEM; diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 18228c2..024f228 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -64,8 +64,8 @@ config EXT4_FS_SECURITY If you are not using a security module that requires using extended attributes for file security labels, say N. -config EXT4_FS_ENCRYPTION - bool "Ext4 Encryption" +config EXT4_ENCRYPTION + tristate "Ext4 Encryption" depends on EXT4_FS select CRYPTO_AES select CRYPTO_CBC @@ -81,6 +81,11 @@ config EXT4_FS_ENCRYPTION efficient since it avoids caching the encrypted and decrypted pages in the page cache. +config EXT4_FS_ENCRYPTION + bool + default y + depends on EXT4_ENCRYPTION + config EXT4_DEBUG bool "EXT4 debugging support" depends on EXT4_FS diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c index ca2f594..fded02f 100644 --- a/fs/ext4/crypto_fname.c +++ b/fs/ext4/crypto_fname.c @@ -66,6 +66,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx, int res = 0; char iv[EXT4_CRYPTO_BLOCK_SIZE]; struct scatterlist sg[1]; + int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); char *workbuf; if (iname->len <= 0 || iname->len > ctx->lim) @@ -73,6 +74,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx, ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ? 
EXT4_CRYPTO_BLOCK_SIZE : iname->len; + ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding); ciphertext_len = (ciphertext_len > ctx->lim) ? ctx->lim : ciphertext_len; @@ -101,7 +103,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx, /* Create encryption request */ sg_init_table(sg, 1); sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); - ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv); + ablkcipher_request_set_crypt(req, sg, sg, ciphertext_len, iv); res = crypto_ablkcipher_encrypt(req); if (res == -EINPROGRESS || res == -EBUSY) { BUG_ON(req->base.data != &ecr); @@ -198,106 +200,57 @@ static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx, return oname->len; } +static const char *lookup_table = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,"; + /** * ext4_fname_encode_digest() - * * Encodes the input digest using characters from the set [a-zA-Z0-9_+]. * The encoded string is roughly 4/3 times the size of the input string. */ -int ext4_fname_encode_digest(char *dst, char *src, u32 len) +static int digest_encode(const char *src, int len, char *dst) { - static const char *lookup_table = - "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+"; - u32 current_chunk, num_chunks, i; - char tmp_buf[3]; - u32 c0, c1, c2, c3; - - current_chunk = 0; - num_chunks = len/3; - for (i = 0; i < num_chunks; i++) { - c0 = src[3*i] & 0x3f; - c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f; - c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f; - c3 = (src[3*i+2]>>2) & 0x3f; - dst[4*i] = lookup_table[c0]; - dst[4*i+1] = lookup_table[c1]; - dst[4*i+2] = lookup_table[c2]; - dst[4*i+3] = lookup_table[c3]; - } - if (i*3 < len) { - memset(tmp_buf, 0, 3); - memcpy(tmp_buf, &src[3*i], len-3*i); - c0 = tmp_buf[0] & 0x3f; - c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f; - c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f; - c3 = (tmp_buf[2]>>2) & 0x3f; - dst[4*i] = lookup_table[c0]; - dst[4*i+1] = lookup_table[c1]; - dst[4*i+2] = lookup_table[c2]; - dst[4*i+3] = lookup_table[c3]; + int i = 0, bits = 0, ac = 0; + char *cp = dst; + + while (i < len) { + ac += (((unsigned char) src[i]) << bits); + bits += 8; + do { + *cp++ = lookup_table[ac & 0x3f]; + ac >>= 6; + bits -= 6; + } while (bits >= 6); i++; } - return (i * 4); + if (bits) + *cp++ = lookup_table[ac & 0x3f]; + return cp - dst; } -/** - * ext4_fname_hash() - - * - * This function computes the hash of the input filename, and sets the output - * buffer to the *encoded* digest. It returns the length of the digest as its - * return value. Errors are returned as negative numbers. We trust the caller - * to allocate sufficient memory to oname string. 
- */ -static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx, - const struct ext4_str *iname, - struct ext4_str *oname) +static int digest_decode(const char *src, int len, char *dst) { - struct scatterlist sg; - struct hash_desc desc = { - .tfm = (struct crypto_hash *)ctx->htfm, - .flags = CRYPTO_TFM_REQ_MAY_SLEEP - }; - int res = 0; - - if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) { - res = ext4_fname_encode_digest(oname->name, iname->name, - iname->len); - oname->len = res; - return res; - } - - sg_init_one(&sg, iname->name, iname->len); - res = crypto_hash_init(&desc); - if (res) { - printk(KERN_ERR - "%s: Error initializing crypto hash; res = [%d]\n", - __func__, res); - goto out; - } - res = crypto_hash_update(&desc, &sg, iname->len); - if (res) { - printk(KERN_ERR - "%s: Error updating crypto hash; res = [%d]\n", - __func__, res); - goto out; - } - res = crypto_hash_final(&desc, - &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]); - if (res) { - printk(KERN_ERR - "%s: Error finalizing crypto hash; res = [%d]\n", - __func__, res); - goto out; + int i = 0, bits = 0, ac = 0; + const char *p; + char *cp = dst; + + while (i < len) { + p = strchr(lookup_table, src[i]); + if (p == NULL || src[i] == 0) + return -2; + ac += (p - lookup_table) << bits; + bits += 6; + if (bits >= 8) { + *cp++ = ac & 0xff; + ac >>= 8; + bits -= 8; + } + i++; } - /* Encode the digest as a printable string--this will increase the - * size of the digest */ - oname->name[0] = 'I'; - res = ext4_fname_encode_digest(oname->name+1, - &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE], - EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1; - oname->len = res; -out: - return res; + if (ac) + return -1; + return cp - dst; } /** @@ -405,6 +358,7 @@ struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx( if (IS_ERR(ctx)) return ctx; + ctx->flags = ei->i_crypt_policy_flags; if (ctx->has_valid_key) { if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) { printk_once(KERN_WARNING @@ -517,6 +471,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, u32 namelen) { u32 ciphertext_len; + int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); if (ctx == NULL) return -EIO; @@ -524,6 +479,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, return -EACCES; ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ? EXT4_CRYPTO_BLOCK_SIZE : namelen; + ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding); ciphertext_len = (ciphertext_len > ctx->lim) ? 
ctx->lim : ciphertext_len; return (int) ciphertext_len; @@ -539,10 +495,13 @@ int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, u32 ilen, struct ext4_str *crypto_str) { unsigned int olen; + int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); if (!ctx) return -EIO; - olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE); + if (padding < EXT4_CRYPTO_BLOCK_SIZE) + padding = EXT4_CRYPTO_BLOCK_SIZE; + olen = ext4_fname_crypto_round_up(ilen, padding); crypto_str->len = olen; if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2; @@ -571,9 +530,13 @@ void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str) * ext4_fname_disk_to_usr() - converts a filename from disk space to user space */ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, - const struct ext4_str *iname, - struct ext4_str *oname) + struct dx_hash_info *hinfo, + const struct ext4_str *iname, + struct ext4_str *oname) { + char buf[24]; + int ret; + if (ctx == NULL) return -EIO; if (iname->len < 3) { @@ -587,18 +550,33 @@ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, } if (ctx->has_valid_key) return ext4_fname_decrypt(ctx, iname, oname); - else - return ext4_fname_hash(ctx, iname, oname); + + if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) { + ret = digest_encode(iname->name, iname->len, oname->name); + oname->len = ret; + return ret; + } + if (hinfo) { + memcpy(buf, &hinfo->hash, 4); + memcpy(buf+4, &hinfo->minor_hash, 4); + } else + memset(buf, 0, 8); + memcpy(buf + 8, iname->name + iname->len - 16, 16); + oname->name[0] = '_'; + ret = digest_encode(buf, 24, oname->name+1); + oname->len = ret + 1; + return ret + 1; } int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, + struct dx_hash_info *hinfo, const struct ext4_dir_entry_2 *de, struct ext4_str *oname) { struct ext4_str iname = {.name = (unsigned char *) de->name, .len = de->name_len }; - return _ext4_fname_disk_to_usr(ctx, &iname, oname); + return _ext4_fname_disk_to_usr(ctx, hinfo, &iname, oname); } @@ -640,10 +618,11 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, const struct qstr *iname, struct dx_hash_info *hinfo) { - struct ext4_str tmp, tmp2; + struct ext4_str tmp; int ret = 0; + char buf[EXT4_FNAME_CRYPTO_DIGEST_SIZE+1]; - if (!ctx || !ctx->has_valid_key || + if (!ctx || ((iname->name[0] == '.') && ((iname->len == 1) || ((iname->name[1] == '.') && (iname->len == 2))))) { @@ -651,59 +630,90 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, return 0; } + if (!ctx->has_valid_key && iname->name[0] == '_') { + if (iname->len != 33) + return -ENOENT; + ret = digest_decode(iname->name+1, iname->len, buf); + if (ret != 24) + return -ENOENT; + memcpy(&hinfo->hash, buf, 4); + memcpy(&hinfo->minor_hash, buf + 4, 4); + return 0; + } + + if (!ctx->has_valid_key && iname->name[0] != '_') { + if (iname->len > 43) + return -ENOENT; + ret = digest_decode(iname->name, iname->len, buf); + ext4fs_dirhash(buf, ret, hinfo); + return 0; + } + /* First encrypt the plaintext name */ ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp); if (ret < 0) return ret; ret = ext4_fname_encrypt(ctx, iname, &tmp); - if (ret < 0) - goto out; - - tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1; - tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL); - if (tmp2.name == NULL) { - ret = -ENOMEM; - goto out; + if (ret >= 0) { + ext4fs_dirhash(tmp.name, tmp.len, hinfo); + ret = 0; } - ret = ext4_fname_hash(ctx, &tmp, &tmp2); - if (ret > 0) - 
ext4fs_dirhash(tmp2.name, tmp2.len, hinfo); - ext4_fname_crypto_free_buffer(&tmp2); -out: ext4_fname_crypto_free_buffer(&tmp); return ret; } -/** - * ext4_fname_disk_to_htree() - converts a filename from disk space to htree-access string - */ -int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx, - const struct ext4_dir_entry_2 *de, - struct dx_hash_info *hinfo) +int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr, + int len, const char * const name, + struct ext4_dir_entry_2 *de) { - struct ext4_str iname = {.name = (unsigned char *) de->name, - .len = de->name_len}; - struct ext4_str tmp; - int ret; + int ret = -ENOENT; + int bigname = (*name == '_'); - if (!ctx || - ((iname.name[0] == '.') && - ((iname.len == 1) || - ((iname.name[1] == '.') && (iname.len == 2))))) { - ext4fs_dirhash(iname.name, iname.len, hinfo); - return 0; + if (ctx->has_valid_key) { + if (cstr->name == NULL) { + struct qstr istr; + + ret = ext4_fname_crypto_alloc_buffer(ctx, len, cstr); + if (ret < 0) + goto errout; + istr.name = name; + istr.len = len; + ret = ext4_fname_encrypt(ctx, &istr, cstr); + if (ret < 0) + goto errout; + } + } else { + if (cstr->name == NULL) { + cstr->name = kmalloc(32, GFP_KERNEL); + if (cstr->name == NULL) + return -ENOMEM; + if ((bigname && (len != 33)) || + (!bigname && (len > 43))) + goto errout; + ret = digest_decode(name+bigname, len-bigname, + cstr->name); + if (ret < 0) { + ret = -ENOENT; + goto errout; + } + cstr->len = ret; + } + if (bigname) { + if (de->name_len < 16) + return 0; + ret = memcmp(de->name + de->name_len - 16, + cstr->name + 8, 16); + return (ret == 0) ? 1 : 0; + } } - - tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1; - tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL); - if (tmp.name == NULL) - return -ENOMEM; - - ret = ext4_fname_hash(ctx, &iname, &tmp); - if (ret > 0) - ext4fs_dirhash(tmp.name, tmp.len, hinfo); - ext4_fname_crypto_free_buffer(&tmp); + if (de->name_len != cstr->len) + return 0; + ret = memcmp(de->name, cstr->name, cstr->len); + return (ret == 0) ? 
1 : 0; +errout: + kfree(cstr->name); + cstr->name = NULL; return ret; } diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c index c8392af..52170d0 100644 --- a/fs/ext4/crypto_key.c +++ b/fs/ext4/crypto_key.c @@ -110,6 +110,7 @@ int ext4_generate_encryption_key(struct inode *inode) } res = 0; + ei->i_crypt_policy_flags = ctx.flags; if (S_ISREG(inode->i_mode)) crypt_key->mode = ctx.contents_encryption_mode; else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c index 30eaf9e..a6d6291 100644 --- a/fs/ext4/crypto_policy.c +++ b/fs/ext4/crypto_policy.c @@ -37,6 +37,8 @@ static int ext4_is_encryption_context_consistent_with_policy( return 0; return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, EXT4_KEY_DESCRIPTOR_SIZE) == 0 && + (ctx.flags == + policy->flags) && (ctx.contents_encryption_mode == policy->contents_encryption_mode) && (ctx.filenames_encryption_mode == @@ -56,25 +58,25 @@ static int ext4_create_encryption_context_from_policy( printk(KERN_WARNING "%s: Invalid contents encryption mode %d\n", __func__, policy->contents_encryption_mode); - res = -EINVAL; - goto out; + return -EINVAL; } if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { printk(KERN_WARNING "%s: Invalid filenames encryption mode %d\n", __func__, policy->filenames_encryption_mode); - res = -EINVAL; - goto out; + return -EINVAL; } + if (policy->flags & ~EXT4_POLICY_FLAGS_VALID) + return -EINVAL; ctx.contents_encryption_mode = policy->contents_encryption_mode; ctx.filenames_encryption_mode = policy->filenames_encryption_mode; + ctx.flags = policy->flags; BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, sizeof(ctx), 0); -out: if (!res) ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); return res; @@ -115,6 +117,7 @@ int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy) policy->version = 0; policy->contents_encryption_mode = ctx.contents_encryption_mode; policy->filenames_encryption_mode = ctx.filenames_encryption_mode; + policy->flags = ctx.flags; memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, EXT4_KEY_DESCRIPTOR_SIZE); return 0; @@ -176,6 +179,7 @@ int ext4_inherit_context(struct inode *parent, struct inode *child) EXT4_ENCRYPTION_MODE_AES_256_XTS; ctx.filenames_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_CTS; + ctx.flags = 0; memset(ctx.master_key_descriptor, 0x42, EXT4_KEY_DESCRIPTOR_SIZE); res = 0; diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 61db51a5..5665d82 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -249,7 +249,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) } else { /* Directory is encrypted */ err = ext4_fname_disk_to_usr(enc_ctx, - de, &fname_crypto_str); + NULL, de, &fname_crypto_str); if (err < 0) goto errout; if (!dir_emit(ctx, diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ef267ad..9a83f14 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -911,6 +911,7 @@ struct ext4_inode_info { /* on-disk additional length */ __u16 i_extra_isize; + char i_crypt_policy_flags; /* Indicate the inline data space. 
*/ u16 i_inline_off; @@ -1066,12 +1067,6 @@ extern void ext4_set_bits(void *bm, int cur, int len); /* Metadata checksum algorithm codes */ #define EXT4_CRC32C_CHKSUM 1 -/* Encryption algorithms */ -#define EXT4_ENCRYPTION_MODE_INVALID 0 -#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1 -#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2 -#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3 - /* * Structure of the super block */ @@ -2093,9 +2088,11 @@ u32 ext4_fname_crypto_round_up(u32 size, u32 blksize); int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, u32 ilen, struct ext4_str *crypto_str); int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, + struct dx_hash_info *hinfo, const struct ext4_str *iname, struct ext4_str *oname); int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, + struct dx_hash_info *hinfo, const struct ext4_dir_entry_2 *de, struct ext4_str *oname); int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, @@ -2104,11 +2101,12 @@ int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, const struct qstr *iname, struct dx_hash_info *hinfo); -int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx, - const struct ext4_dir_entry_2 *de, - struct dx_hash_info *hinfo); int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, u32 namelen); +int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr, + int len, const char * const name, + struct ext4_dir_entry_2 *de); + #ifdef CONFIG_EXT4_FS_ENCRYPTION void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx); @@ -2891,7 +2889,6 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags); extern int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblocks); -extern int ext4_extent_tree_init(handle_t *, struct inode *); extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int num, struct ext4_ext_path *path); diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h index c2ba35a..d75159c 100644 --- a/fs/ext4/ext4_crypto.h +++ b/fs/ext4/ext4_crypto.h @@ -20,12 +20,20 @@ struct ext4_encryption_policy { char version; char contents_encryption_mode; char filenames_encryption_mode; + char flags; char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; } __attribute__((__packed__)); #define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1 #define EXT4_KEY_DERIVATION_NONCE_SIZE 16 +#define EXT4_POLICY_FLAGS_PAD_4 0x00 +#define EXT4_POLICY_FLAGS_PAD_8 0x01 +#define EXT4_POLICY_FLAGS_PAD_16 0x02 +#define EXT4_POLICY_FLAGS_PAD_32 0x03 +#define EXT4_POLICY_FLAGS_PAD_MASK 0x03 +#define EXT4_POLICY_FLAGS_VALID 0x03 + /** * Encryption context for inode * @@ -41,7 +49,7 @@ struct ext4_encryption_context { char format; char contents_encryption_mode; char filenames_encryption_mode; - char reserved; + char flags; char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; } __attribute__((__packed__)); @@ -120,6 +128,7 @@ struct ext4_fname_crypto_ctx { struct crypto_hash *htfm; struct page *workpage; struct ext4_encryption_key key; + unsigned flags : 8; unsigned has_valid_key : 1; unsigned ctfm_key_is_ready : 1; }; diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 3445035..d418431 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -87,6 +87,12 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) ext4_put_nojournal(handle); return 0; } + + if 
(!handle->h_transaction) { + err = jbd2_journal_stop(handle); + return handle->h_err ? handle->h_err : err; + } + sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = jbd2_journal_stop(handle); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 973816b..e003a1e 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -377,7 +377,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); ext4_lblk_t last = lblock + len - 1; - if (lblock > last) + if (len == 0 || lblock > last) return 0; return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); } @@ -4927,13 +4927,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (ret) return ret; - /* - * currently supporting (pre)allocate mode for extent-based - * files _only_ - */ - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - return -EOPNOTSUPP; - if (mode & FALLOC_FL_COLLAPSE_RANGE) return ext4_collapse_range(inode, offset, len); @@ -4955,6 +4948,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) mutex_lock(&inode->i_mutex); + /* + * We only support preallocation for extent-based files only + */ + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { + ret = -EOPNOTSUPP; + goto out; + } + if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { new_size = offset + len; @@ -5395,6 +5396,14 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) loff_t new_size, ioffset; int ret; + /* + * We need to test this early because xfstests assumes that a + * collapse range of (0, 1) will return EOPNOTSUPP if the file + * system does not support collapse range. + */ + if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) + return -EOPNOTSUPP; + /* Collapse range works only on fs block size aligned offsets. */ if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) || len & (EXT4_CLUSTER_SIZE(sb) - 1)) diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index d33d5a6..26724ae 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -703,6 +703,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, BUG_ON(end < lblk); + if ((status & EXTENT_STATUS_DELAYED) && + (status & EXTENT_STATUS_WRITTEN)) { + ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as " + " delayed and written which can potentially " + " cause data loss.\n", lblk, len); + WARN_ON(1); + } + newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, pblk, status); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index cbd0654..0554b0b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -531,6 +531,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && + !(status & EXTENT_STATUS_WRITTEN) && ext4_find_delalloc_range(inode, map->m_lblk, map->m_lblk + map->m_len - 1)) status |= EXTENT_STATUS_DELAYED; @@ -635,6 +636,7 @@ found: status = map->m_flags & EXT4_MAP_UNWRITTEN ? 
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && + !(status & EXTENT_STATUS_WRITTEN) && ext4_find_delalloc_range(inode, map->m_lblk, map->m_lblk + map->m_len - 1)) status |= EXTENT_STATUS_DELAYED; @@ -4343,7 +4345,7 @@ static void ext4_update_other_inodes_time(struct super_block *sb, int inode_size = EXT4_INODE_SIZE(sb); oi.orig_ino = orig_ino; - ino = orig_ino & ~(inodes_per_block - 1); + ino = (orig_ino & ~(inodes_per_block - 1)) + 1; for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { if (ino == orig_ino) continue; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 7223b0b..814f3be 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -640,7 +640,7 @@ static struct stats dx_show_leaf(struct inode *dir, ext4_put_fname_crypto_ctx(&ctx); ctx = NULL; } - res = ext4_fname_disk_to_usr(ctx, de, + res = ext4_fname_disk_to_usr(ctx, NULL, de, &fname_crypto_str); if (res < 0) { printk(KERN_WARNING "Error " @@ -653,15 +653,8 @@ static struct stats dx_show_leaf(struct inode *dir, name = fname_crypto_str.name; len = fname_crypto_str.len; } - res = ext4_fname_disk_to_hash(ctx, de, - &h); - if (res < 0) { - printk(KERN_WARNING "Error " - "converting filename " - "from disk to htree" - "\n"); - h.hash = 0xDEADBEEF; - } + ext4fs_dirhash(de->name, de->name_len, + &h); printk("%*.s:(E)%x.%u ", len, name, h.hash, (unsigned) ((char *) de - base)); @@ -1008,15 +1001,7 @@ static int htree_dirblock_to_tree(struct file *dir_file, /* silently ignore the rest of the block */ break; } -#ifdef CONFIG_EXT4_FS_ENCRYPTION - err = ext4_fname_disk_to_hash(ctx, de, hinfo); - if (err < 0) { - count = err; - goto errout; - } -#else ext4fs_dirhash(de->name, de->name_len, hinfo); -#endif if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) @@ -1032,7 +1017,7 @@ static int htree_dirblock_to_tree(struct file *dir_file, &tmp_str); } else { /* Directory is encrypted */ - err = ext4_fname_disk_to_usr(ctx, de, + err = ext4_fname_disk_to_usr(ctx, hinfo, de, &fname_crypto_str); if (err < 0) { count = err; @@ -1193,26 +1178,10 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, int count = 0; char *base = (char *) de; struct dx_hash_info h = *hinfo; -#ifdef CONFIG_EXT4_FS_ENCRYPTION - struct ext4_fname_crypto_ctx *ctx = NULL; - int err; - - ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); -#endif while ((char *) de < base + blocksize) { if (de->name_len && de->inode) { -#ifdef CONFIG_EXT4_FS_ENCRYPTION - err = ext4_fname_disk_to_hash(ctx, de, &h); - if (err < 0) { - ext4_put_fname_crypto_ctx(&ctx); - return err; - } -#else ext4fs_dirhash(de->name, de->name_len, &h); -#endif map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; @@ -1223,9 +1192,6 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, /* XXX: do we need to check rec_len == 0 case? -Chris */ de = ext4_next_entry(de, blocksize); } -#ifdef CONFIG_EXT4_FS_ENCRYPTION - ext4_put_fname_crypto_ctx(&ctx); -#endif return count; } @@ -1287,16 +1253,8 @@ static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx, return 0; #ifdef CONFIG_EXT4_FS_ENCRYPTION - if (ctx) { - /* Directory is encrypted */ - res = ext4_fname_disk_to_usr(ctx, de, fname_crypto_str); - if (res < 0) - return res; - if (len != res) - return 0; - res = memcmp(name, fname_crypto_str->name, len); - return (res == 0) ? 
1 : 0; - } + if (ctx) + return ext4_fname_match(ctx, fname_crypto_str, len, name, de); #endif if (len != de->name_len) return 0; @@ -1324,16 +1282,6 @@ int search_dir(struct buffer_head *bh, char *search_buf, int buf_size, if (IS_ERR(ctx)) return -1; - if (ctx != NULL) { - /* Allocate buffer to hold maximum name length */ - res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN, - &fname_crypto_str); - if (res < 0) { - ext4_put_fname_crypto_ctx(&ctx); - return -1; - } - } - de = (struct ext4_dir_entry_2 *)search_buf; dlimit = search_buf + buf_size; while ((char *) de < dlimit) { @@ -1872,14 +1820,6 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode, return res; } reclen = EXT4_DIR_REC_LEN(res); - - /* Allocate buffer to hold maximum name length */ - res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN, - &fname_crypto_str); - if (res < 0) { - ext4_put_fname_crypto_ctx(&ctx); - return -1; - } } de = (struct ext4_dir_entry_2 *)buf; diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 8a8ec62..cf0c472 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1432,12 +1432,15 @@ static int ext4_flex_group_add(struct super_block *sb, goto exit; /* * We will always be modifying at least the superblock and GDT - * block. If we are adding a group past the last current GDT block, + * blocks. If we are adding a group past the last current GDT block, * we will also modify the inode and the dindirect block. If we * are adding a group with superblock/GDT backups we will also * modify each of the reserved GDT dindirect blocks. */ - credit = flex_gd->count * 4 + reserved_gdb; + credit = 3; /* sb, resize inode, resize inode dindirect */ + /* GDT blocks */ + credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb)); + credit += reserved_gdb; /* Reserved GDT dindirect blocks */ handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); if (IS_ERR(handle)) { err = PTR_ERR(handle); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f06d058..ca9d4a2 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -294,6 +294,8 @@ static void __save_error_info(struct super_block *sb, const char *func, struct ext4_super_block *es = EXT4_SB(sb)->s_es; EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; + if (bdev_read_only(sb->s_bdev)) + return; es->s_state |= cpu_to_le16(EXT4_ERROR_FS); es->s_last_error_time = cpu_to_le32(get_seconds()); strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func)); diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index 19f78f2..187b789 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c @@ -74,7 +74,7 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd) goto errout; } pstr.name = paddr; - res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr); + res = _ext4_fname_disk_to_usr(ctx, NULL, &cstr, &pstr); if (res < 0) goto errout; /* Null-terminate the name */ diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index b91b0e1..1e1aae6 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1513,6 +1513,7 @@ static int f2fs_write_data_pages(struct address_space *mapping, { struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + bool locked = false; int ret; long diff; @@ -1533,7 +1534,13 @@ static int f2fs_write_data_pages(struct address_space *mapping, diff = nr_pages_to_write(sbi, DATA, wbc); + if (!S_ISDIR(inode->i_mode)) { + mutex_lock(&sbi->writepages); + locked = true; + } ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); + if (locked) + mutex_unlock(&sbi->writepages); f2fs_submit_merged_bio(sbi, DATA, 
WRITE); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d8921cf..8de34ab 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -625,6 +625,7 @@ struct f2fs_sb_info { struct mutex cp_mutex; /* checkpoint procedure lock */ struct rw_semaphore cp_rwsem; /* blocking FS operations */ struct rw_semaphore node_write; /* locking node writes */ + struct mutex writepages; /* mutex for writepages() */ wait_queue_head_t cp_wait; struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 7e3794e..658e807 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -298,16 +298,14 @@ fail: static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct page *page; + struct page *page = page_follow_link_light(dentry, nd); - page = page_follow_link_light(dentry, nd); - if (IS_ERR(page)) + if (IS_ERR_OR_NULL(page)) return page; /* this is broken symlink case */ if (*nd_get_link(nd) == 0) { - kunmap(page); - page_cache_release(page); + page_put_link(dentry, nd, page); return ERR_PTR(-ENOENT); } return page; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 160b883..b2dd1b0 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1035,6 +1035,7 @@ try_onemore: sbi->raw_super = raw_super; sbi->raw_super_buf = raw_super_buf; mutex_init(&sbi->gc_mutex); + mutex_init(&sbi->writepages); mutex_init(&sbi->cp_mutex); init_rwsem(&sbi->node_write); clear_sbi_flag(sbi, SBI_POR_DOING); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index ef26317..07d8d8f 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -581,7 +581,7 @@ static int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, if (name == NULL) goto out_put; - fd = file_create(name, mode & S_IFMT); + fd = file_create(name, mode & 0777); if (fd < 0) error = fd; else diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index b5128c6..a9079d0 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -842,15 +842,23 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, { jbd2_journal_revoke_header_t *header; int offset, max; + int csum_size = 0; + __u32 rcount; int record_len = 4; header = (jbd2_journal_revoke_header_t *) bh->b_data; offset = sizeof(jbd2_journal_revoke_header_t); - max = be32_to_cpu(header->r_count); + rcount = be32_to_cpu(header->r_count); if (!jbd2_revoke_block_csum_verify(journal, header)) return -EINVAL; + if (jbd2_journal_has_csum_v2or3(journal)) + csum_size = sizeof(struct jbd2_journal_revoke_tail); + if (rcount > journal->j_blocksize - csum_size) + return -EINVAL; + max = rcount; + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) record_len = 8; diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index c6cbaef..14214da 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -577,7 +577,7 @@ static void write_one_revoke_record(journal_t *journal, { int csum_size = 0; struct buffer_head *descriptor; - int offset; + int sz, offset; journal_header_t *header; /* If we are already aborting, this all becomes a noop. 
We @@ -594,9 +594,14 @@ static void write_one_revoke_record(journal_t *journal, if (jbd2_journal_has_csum_v2or3(journal)) csum_size = sizeof(struct jbd2_journal_revoke_tail); + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) + sz = 8; + else + sz = 4; + /* Make sure we have a descriptor with space left for the record */ if (descriptor) { - if (offset >= journal->j_blocksize - csum_size) { + if (offset + sz > journal->j_blocksize - csum_size) { flush_descriptor(journal, descriptor, offset, write_op); descriptor = NULL; } @@ -619,16 +624,13 @@ static void write_one_revoke_record(journal_t *journal, *descriptorp = descriptor; } - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) { + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) * ((__be64 *)(&descriptor->b_data[offset])) = cpu_to_be64(record->blocknr); - offset += 8; - - } else { + else * ((__be32 *)(&descriptor->b_data[offset])) = cpu_to_be32(record->blocknr); - offset += 4; - } + offset += sz; *offsetp = offset; } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 5f09370..ff2f2e6 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -551,7 +551,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks) int result; int wanted; - WARN_ON(!transaction); if (is_handle_aborted(handle)) return -EROFS; journal = transaction->t_journal; @@ -627,7 +626,6 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) tid_t tid; int need_to_start, ret; - WARN_ON(!transaction); /* If we've had an abort of any type, don't even think about * actually doing the restart! */ if (is_handle_aborted(handle)) @@ -785,7 +783,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, int need_copy = 0; unsigned long start_lock, time_lock; - WARN_ON(!transaction); if (is_handle_aborted(handle)) return -EROFS; journal = transaction->t_journal; @@ -1051,7 +1048,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) int err; jbd_debug(5, "journal_head %p\n", jh); - WARN_ON(!transaction); err = -EROFS; if (is_handle_aborted(handle)) goto out; @@ -1266,7 +1262,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) struct journal_head *jh; int ret = 0; - WARN_ON(!transaction); if (is_handle_aborted(handle)) return -EROFS; journal = transaction->t_journal; @@ -1397,7 +1392,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) int err = 0; int was_modified = 0; - WARN_ON(!transaction); if (is_handle_aborted(handle)) return -EROFS; journal = transaction->t_journal; @@ -1530,8 +1524,22 @@ int jbd2_journal_stop(handle_t *handle) tid_t tid; pid_t pid; - if (!transaction) - goto free_and_exit; + if (!transaction) { + /* + * Handle is already detached from the transaction so + * there is nothing to do other than decrease a refcount, + * or free the handle if refcount drops to zero + */ + if (--handle->h_ref > 0) { + jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, + handle->h_ref); + return err; + } else { + if (handle->h_rsv_handle) + jbd2_free_handle(handle->h_rsv_handle); + goto free_and_exit; + } + } journal = transaction->t_journal; J_ASSERT(journal_current_handle() == handle); @@ -2373,7 +2381,6 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode) transaction_t *transaction = handle->h_transaction; journal_t *journal; - WARN_ON(!transaction); if (is_handle_aborted(handle)) return -EROFS; journal = transaction->t_journal; diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c 
index f131fc2..fffca95 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -518,7 +518,14 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, if (!kn) goto err_out1; - ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL); + /* + * If the ino of the sysfs entry created for a kmem cache gets + * allocated from an ida layer, which is accounted to the memcg that + * owns the cache, the memcg will get pinned forever. So do not account + * ino ida allocations. + */ + ret = ida_simple_get(&root->ino_ida, 1, 0, + GFP_KERNEL | __GFP_NOACCOUNT); if (ret < 0) goto err_out2; kn->ino = ret; @@ -1415,6 +1415,7 @@ static int lookup_fast(struct nameidata *nd, */ if (nd->flags & LOOKUP_RCU) { unsigned seq; + bool negative; dentry = __d_lookup_rcu(parent, &nd->last, &seq); if (!dentry) goto unlazy; @@ -1424,8 +1425,11 @@ static int lookup_fast(struct nameidata *nd, * the dentry name information from lookup. */ *inode = dentry->d_inode; + negative = d_is_negative(dentry); if (read_seqcount_retry(&dentry->d_seq, seq)) return -ECHILD; + if (negative) + return -ENOENT; /* * This sequence count validates that the parent had no @@ -1472,6 +1476,10 @@ unlazy: goto need_lookup; } + if (unlikely(d_is_negative(dentry))) { + dput(dentry); + return -ENOENT; + } path->mnt = mnt; path->dentry = dentry; err = follow_managed(path, nd->flags); @@ -1583,10 +1591,10 @@ static inline int walk_component(struct nameidata *nd, struct path *path, goto out_err; inode = path->dentry->d_inode; + err = -ENOENT; + if (d_is_negative(path->dentry)) + goto out_path_put; } - err = -ENOENT; - if (d_is_negative(path->dentry)) - goto out_path_put; if (should_follow_link(path->dentry, follow)) { if (nd->flags & LOOKUP_RCU) { @@ -3036,14 +3044,13 @@ retry_lookup: BUG_ON(nd->flags & LOOKUP_RCU); inode = path->dentry->d_inode; -finish_lookup: - /* we _can_ be in RCU mode here */ error = -ENOENT; if (d_is_negative(path->dentry)) { path_to_nameidata(path, nd); goto out; } - +finish_lookup: + /* we _can_ be in RCU mode here */ if (should_follow_link(path->dentry, !symlink_ok)) { if (nd->flags & LOOKUP_RCU) { if (unlikely(nd->path.mnt != path->mnt || @@ -3226,7 +3233,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, if (unlikely(file->f_flags & __O_TMPFILE)) { error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened); - goto out; + goto out2; } error = path_init(dfd, pathname, flags, nd); @@ -3256,6 +3263,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, } out: path_cleanup(nd); +out2: if (!(opened & FILE_OPENED)) { BUG_ON(!error); put_filp(file); diff --git a/fs/namespace.c b/fs/namespace.c index 1f4f9da..1b9e111 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3179,6 +3179,12 @@ bool fs_fully_visible(struct file_system_type *type) if (mnt->mnt.mnt_sb->s_type != type) continue; + /* This mount is not fully visible if it's root directory + * is not the root directory of the filesystem. + */ + if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) + continue; + /* This mount is not fully visible if there are any child mounts * that cover anything except for empty directories. */ diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 03d647b..cdefaa3 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode, } const struct nfsd4_layout_ops bl_layout_ops = { + /* + * Pretend that we send notification to the client. 
This is a blatant + * lie to force recent Linux clients to cache our device IDs. + * We rarely ever change the device ID, so the harm of leaking deviceids + * for a while isn't too bad. Unfortunately RFC5661 is a complete mess + * in this regard, but I filed errata 4119 for this a while ago, and + * hopefully the Linux client will eventually start caching deviceids + * without this again. + */ + .notify_types = + NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE, .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo, .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo, .proc_layoutget = nfsd4_block_proc_layoutget, diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 5827785..5694cfb 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -224,7 +224,7 @@ static int nfs_cb_stat_to_errno(int status) } static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected, - enum nfsstat4 *status) + int *status) { __be32 *p; u32 op; @@ -235,7 +235,7 @@ static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected, op = be32_to_cpup(p++); if (unlikely(op != expected)) goto out_unexpected; - *status = be32_to_cpup(p); + *status = nfs_cb_stat_to_errno(be32_to_cpup(p)); return 0; out_overflow: print_overflow_msg(__func__, xdr); @@ -446,22 +446,16 @@ out_overflow: static int decode_cb_sequence4res(struct xdr_stream *xdr, struct nfsd4_callback *cb) { - enum nfsstat4 nfserr; int status; if (cb->cb_minorversion == 0) return 0; - status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr); - if (unlikely(status)) - goto out; - if (unlikely(nfserr != NFS4_OK)) - goto out_default; - status = decode_cb_sequence4resok(xdr, cb); -out: - return status; -out_default: - return nfs_cb_stat_to_errno(nfserr); + status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_status); + if (unlikely(status || cb->cb_status)) + return status; + + return decode_cb_sequence4resok(xdr, cb); } /* @@ -524,26 +518,19 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, struct nfsd4_callback *cb) { struct nfs4_cb_compound_hdr hdr; - enum nfsstat4 nfserr; int status; status = decode_cb_compound4res(xdr, &hdr); if (unlikely(status)) - goto out; + return status; if (cb != NULL) { status = decode_cb_sequence4res(xdr, cb); - if (unlikely(status)) - goto out; + if (unlikely(status || cb->cb_status)) + return status; } - status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr); - if (unlikely(status)) - goto out; - if (unlikely(nfserr != NFS4_OK)) - status = nfs_cb_stat_to_errno(nfserr); -out: - return status; + return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status); } #ifdef CONFIG_NFSD_PNFS @@ -621,24 +608,18 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp, struct nfsd4_callback *cb) { struct nfs4_cb_compound_hdr hdr; - enum nfsstat4 nfserr; int status; status = decode_cb_compound4res(xdr, &hdr); if (unlikely(status)) - goto out; + return status; + if (cb) { status = decode_cb_sequence4res(xdr, cb); - if (unlikely(status)) - goto out; + if (unlikely(status || cb->cb_status)) + return status; } - status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr); - if (unlikely(status)) - goto out; - if (unlikely(nfserr != NFS4_OK)) - status = nfs_cb_stat_to_errno(nfserr); -out: - return status; + return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status); } #endif /* CONFIG_NFSD_PNFS */ @@ -898,13 +879,6 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) if (!nfsd41_cb_get_slot(clp, task)) return; } - spin_lock(&clp->cl_lock); 
- if (list_empty(&cb->cb_per_client)) { - /* This is the first call, not a restart */ - cb->cb_done = false; - list_add(&cb->cb_per_client, &clp->cl_callbacks); - } - spin_unlock(&clp->cl_lock); rpc_call_start(task); } @@ -918,22 +892,33 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata) if (clp->cl_minorversion) { /* No need for lock, access serialized in nfsd4_cb_prepare */ - ++clp->cl_cb_session->se_cb_seq_nr; + if (!task->tk_status) + ++clp->cl_cb_session->se_cb_seq_nr; clear_bit(0, &clp->cl_cb_slot_busy); rpc_wake_up_next(&clp->cl_cb_waitq); dprintk("%s: freed slot, new seqid=%d\n", __func__, clp->cl_cb_session->se_cb_seq_nr); } - if (clp->cl_cb_client != task->tk_client) { - /* We're shutting down or changing cl_cb_client; leave - * it to nfsd4_process_cb_update to restart the call if - * necessary. */ + /* + * If the backchannel connection was shut down while this + * task was queued, we need to resubmit it after setting up + * a new backchannel connection. + * + * Note that if we lost our callback connection permanently + * the submission code will error out, so we don't need to + * handle that case here. + */ + if (task->tk_flags & RPC_TASK_KILLED) { + task->tk_status = 0; + cb->cb_need_restart = true; return; } - if (cb->cb_done) - return; + if (cb->cb_status) { + WARN_ON_ONCE(task->tk_status); + task->tk_status = cb->cb_status; + } switch (cb->cb_ops->done(cb, task)) { case 0: @@ -949,21 +934,17 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata) default: BUG(); } - cb->cb_done = true; } static void nfsd4_cb_release(void *calldata) { struct nfsd4_callback *cb = calldata; - struct nfs4_client *clp = cb->cb_clp; - - if (cb->cb_done) { - spin_lock(&clp->cl_lock); - list_del(&cb->cb_per_client); - spin_unlock(&clp->cl_lock); + if (cb->cb_need_restart) + nfsd4_run_cb(cb); + else cb->cb_ops->release(cb); - } + } static const struct rpc_call_ops nfsd4_cb_ops = { @@ -1058,9 +1039,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) nfsd4_mark_cb_down(clp, err); return; } - /* Yay, the callback channel's back! Restart any callbacks: */ - list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client) - queue_work(callback_wq, &cb->cb_work); } static void @@ -1071,8 +1049,12 @@ nfsd4_run_cb_work(struct work_struct *work) struct nfs4_client *clp = cb->cb_clp; struct rpc_clnt *clnt; - if (cb->cb_ops && cb->cb_ops->prepare) - cb->cb_ops->prepare(cb); + if (cb->cb_need_restart) { + cb->cb_need_restart = false; + } else { + if (cb->cb_ops && cb->cb_ops->prepare) + cb->cb_ops->prepare(cb); + } if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK) nfsd4_process_cb_update(cb); @@ -1084,6 +1066,15 @@ nfsd4_run_cb_work(struct work_struct *work) cb->cb_ops->release(cb); return; } + + /* + * Don't send probe messages for 4.1 or later. + */ + if (!cb->cb_ops && clp->cl_minorversion) { + clp->cl_cb_state = NFSD4_CB_UP; + return; + } + cb->cb_msg.rpc_cred = clp->cl_cb_cred; rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN, cb->cb_ops ? 
&nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb); @@ -1098,8 +1089,8 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp, cb->cb_msg.rpc_resp = cb; cb->cb_ops = ops; INIT_WORK(&cb->cb_work, nfsd4_run_cb_work); - INIT_LIST_HEAD(&cb->cb_per_client); - cb->cb_done = true; + cb->cb_status = 0; + cb->cb_need_restart = false; } void nfsd4_run_cb(struct nfsd4_callback *cb) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 38f2d7a..039f9c8a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -94,6 +94,7 @@ static struct kmem_cache *lockowner_slab; static struct kmem_cache *file_slab; static struct kmem_cache *stateid_slab; static struct kmem_cache *deleg_slab; +static struct kmem_cache *odstate_slab; static void free_session(struct nfsd4_session *); @@ -281,6 +282,7 @@ put_nfs4_file(struct nfs4_file *fi) if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { hlist_del_rcu(&fi->fi_hash); spin_unlock(&state_lock); + WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); } @@ -471,6 +473,86 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) __nfs4_file_put_access(fp, O_RDONLY); } +/* + * Allocate a new open/delegation state counter. This is needed for + * pNFS for proper return on close semantics. + * + * Note that we only allocate it for pNFS-enabled exports, otherwise + * all pointers to struct nfs4_clnt_odstate are always NULL. + */ +static struct nfs4_clnt_odstate * +alloc_clnt_odstate(struct nfs4_client *clp) +{ + struct nfs4_clnt_odstate *co; + + co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); + if (co) { + co->co_client = clp; + atomic_set(&co->co_odcount, 1); + } + return co; +} + +static void +hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co) +{ + struct nfs4_file *fp = co->co_file; + + lockdep_assert_held(&fp->fi_lock); + list_add(&co->co_perfile, &fp->fi_clnt_odstate); +} + +static inline void +get_clnt_odstate(struct nfs4_clnt_odstate *co) +{ + if (co) + atomic_inc(&co->co_odcount); +} + +static void +put_clnt_odstate(struct nfs4_clnt_odstate *co) +{ + struct nfs4_file *fp; + + if (!co) + return; + + fp = co->co_file; + if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { + list_del(&co->co_perfile); + spin_unlock(&fp->fi_lock); + + nfsd4_return_all_file_layouts(co->co_client, fp); + kmem_cache_free(odstate_slab, co); + } +} + +static struct nfs4_clnt_odstate * +find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new) +{ + struct nfs4_clnt_odstate *co; + struct nfs4_client *cl; + + if (!new) + return NULL; + + cl = new->co_client; + + spin_lock(&fp->fi_lock); + list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { + if (co->co_client == cl) { + get_clnt_odstate(co); + goto out; + } + } + co = new; + co->co_file = fp; + hash_clnt_odstate_locked(new); +out: + spin_unlock(&fp->fi_lock); + return co; +} + struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab) { @@ -606,7 +688,8 @@ static void block_delegations(struct knfsd_fh *fh) } static struct nfs4_delegation * -alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh) +alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh, + struct nfs4_clnt_odstate *odstate) { struct nfs4_delegation *dp; long n; @@ -631,6 +714,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh) INIT_LIST_HEAD(&dp->dl_perfile); INIT_LIST_HEAD(&dp->dl_perclnt); INIT_LIST_HEAD(&dp->dl_recall_lru); + dp->dl_clnt_odstate = 
odstate; + get_clnt_odstate(odstate); dp->dl_type = NFS4_OPEN_DELEGATE_READ; dp->dl_retries = 1; nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, @@ -714,6 +799,7 @@ static void destroy_delegation(struct nfs4_delegation *dp) spin_lock(&state_lock); unhash_delegation_locked(dp); spin_unlock(&state_lock); + put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_deleg_lease(dp->dl_stid.sc_file); nfs4_put_stid(&dp->dl_stid); } @@ -724,6 +810,7 @@ static void revoke_delegation(struct nfs4_delegation *dp) WARN_ON(!list_empty(&dp->dl_recall_lru)); + put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_deleg_lease(dp->dl_stid.sc_file); if (clp->cl_minorversion == 0) @@ -933,6 +1020,7 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid) { struct nfs4_ol_stateid *stp = openlockstateid(stid); + put_clnt_odstate(stp->st_clnt_odstate); release_all_access(stp); if (stp->st_stateowner) nfs4_put_stateowner(stp->st_stateowner); @@ -1538,7 +1626,6 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) INIT_LIST_HEAD(&clp->cl_openowners); INIT_LIST_HEAD(&clp->cl_delegations); INIT_LIST_HEAD(&clp->cl_lru); - INIT_LIST_HEAD(&clp->cl_callbacks); INIT_LIST_HEAD(&clp->cl_revoked); #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&clp->cl_lo_states); @@ -1634,6 +1721,7 @@ __destroy_client(struct nfs4_client *clp) while (!list_empty(&reaplist)) { dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); + put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_deleg_lease(dp->dl_stid.sc_file); nfs4_put_stid(&dp->dl_stid); } @@ -3057,6 +3145,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, spin_lock_init(&fp->fi_lock); INIT_LIST_HEAD(&fp->fi_stateids); INIT_LIST_HEAD(&fp->fi_delegations); + INIT_LIST_HEAD(&fp->fi_clnt_odstate); fh_copy_shallow(&fp->fi_fhandle, fh); fp->fi_deleg_file = NULL; fp->fi_had_conflict = false; @@ -3073,6 +3162,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, void nfsd4_free_slabs(void) { + kmem_cache_destroy(odstate_slab); kmem_cache_destroy(openowner_slab); kmem_cache_destroy(lockowner_slab); kmem_cache_destroy(file_slab); @@ -3103,8 +3193,14 @@ nfsd4_init_slabs(void) sizeof(struct nfs4_delegation), 0, 0, NULL); if (deleg_slab == NULL) goto out_free_stateid_slab; + odstate_slab = kmem_cache_create("nfsd4_odstate", + sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); + if (odstate_slab == NULL) + goto out_free_deleg_slab; return 0; +out_free_deleg_slab: + kmem_cache_destroy(deleg_slab); out_free_stateid_slab: kmem_cache_destroy(stateid_slab); out_free_file_slab: @@ -3581,6 +3677,14 @@ alloc_stateid: open->op_stp = nfs4_alloc_open_stateid(clp); if (!open->op_stp) return nfserr_jukebox; + + if (nfsd4_has_session(cstate) && + (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { + open->op_odstate = alloc_clnt_odstate(clp); + if (!open->op_odstate) + return nfserr_jukebox; + } + return nfs_ok; } @@ -3869,7 +3973,7 @@ out_fput: static struct nfs4_delegation * nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, - struct nfs4_file *fp) + struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) { int status; struct nfs4_delegation *dp; @@ -3877,7 +3981,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, if (fp->fi_had_conflict) return ERR_PTR(-EAGAIN); - dp = alloc_init_deleg(clp, fh); + dp = alloc_init_deleg(clp, fh, odstate); if (!dp) return ERR_PTR(-ENOMEM); @@ -3903,6 +4007,7 @@ out_unlock: spin_unlock(&state_lock); out: if (status) { + 
put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_stid(&dp->dl_stid); return ERR_PTR(status); } @@ -3980,7 +4085,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, default: goto out_no_deleg; } - dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file); + dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate); if (IS_ERR(dp)) goto out_no_deleg; @@ -4069,6 +4174,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf release_open_stateid(stp); goto out; } + + stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, + open->op_odstate); + if (stp->st_clnt_odstate == open->op_odstate) + open->op_odstate = NULL; } update_stateid(&stp->st_stid.sc_stateid); memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); @@ -4129,6 +4239,8 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, kmem_cache_free(file_slab, open->op_file); if (open->op_stp) nfs4_put_stid(&open->op_stp->st_stid); + if (open->op_odstate) + kmem_cache_free(odstate_slab, open->op_odstate); } __be32 @@ -4385,10 +4497,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s return nfserr_old_stateid; } +static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) +{ + if (ols->st_stateowner->so_is_open_owner && + !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) + return nfserr_bad_stateid; + return nfs_ok; +} + static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) { struct nfs4_stid *s; - struct nfs4_ol_stateid *ols; __be32 status = nfserr_bad_stateid; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) @@ -4418,13 +4537,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) break; case NFS4_OPEN_STID: case NFS4_LOCK_STID: - ols = openlockstateid(s); - if (ols->st_stateowner->so_is_open_owner - && !(openowner(ols->st_stateowner)->oo_flags - & NFS4_OO_CONFIRMED)) - status = nfserr_bad_stateid; - else - status = nfs_ok; + status = nfsd4_check_openowner_confirmed(openlockstateid(s)); break; default: printk("unknown stateid type %x\n", s->sc_type); @@ -4516,8 +4629,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, status = nfs4_check_fh(current_fh, stp); if (status) goto out; - if (stp->st_stateowner->so_is_open_owner - && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) + status = nfsd4_check_openowner_confirmed(stp); + if (status) goto out; status = nfs4_check_openmode(stp, flags); if (status) @@ -4852,9 +4965,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, update_stateid(&stp->st_stid.sc_stateid); memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); - nfsd4_return_all_file_layouts(stp->st_stateowner->so_client, - stp->st_stid.sc_file); - nfsd4_close_open_stateid(stp); /* put reference from nfs4_preprocess_seqid_op */ @@ -6488,6 +6598,7 @@ nfs4_state_shutdown_net(struct net *net) list_for_each_safe(pos, next, &reaplist) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); + put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_deleg_lease(dp->dl_stid.sc_file); nfs4_put_stid(&dp->dl_stid); } diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 4f3bfeb..dbc4f85 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -63,12 +63,12 @@ typedef struct { struct nfsd4_callback { struct nfs4_client *cb_clp; - struct list_head cb_per_client; u32 cb_minorversion; struct rpc_message cb_msg; struct 
nfsd4_callback_ops *cb_ops; struct work_struct cb_work; - bool cb_done; + int cb_status; + bool cb_need_restart; }; struct nfsd4_callback_ops { @@ -126,6 +126,7 @@ struct nfs4_delegation { struct list_head dl_perfile; struct list_head dl_perclnt; struct list_head dl_recall_lru; /* delegation recalled */ + struct nfs4_clnt_odstate *dl_clnt_odstate; u32 dl_type; time_t dl_time; /* For recall: */ @@ -332,7 +333,6 @@ struct nfs4_client { int cl_cb_state; struct nfsd4_callback cl_cb_null; struct nfsd4_session *cl_cb_session; - struct list_head cl_callbacks; /* list of in-progress callbacks */ /* for all client information that callback code might need: */ spinlock_t cl_lock; @@ -465,6 +465,17 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so) } /* + * Per-client state indicating no. of opens and outstanding delegations + * on a file from a particular client.'od' stands for 'open & delegation' + */ +struct nfs4_clnt_odstate { + struct nfs4_client *co_client; + struct nfs4_file *co_file; + struct list_head co_perfile; + atomic_t co_odcount; +}; + +/* * nfs4_file: a file opened by some number of (open) nfs4_stateowners. * * These objects are global. nfsd keeps one instance of a nfs4_file per @@ -485,6 +496,7 @@ struct nfs4_file { struct list_head fi_delegations; struct rcu_head fi_rcu; }; + struct list_head fi_clnt_odstate; /* One each for O_RDONLY, O_WRONLY, O_RDWR: */ struct file * fi_fds[3]; /* @@ -526,6 +538,7 @@ struct nfs4_ol_stateid { struct list_head st_perstateowner; struct list_head st_locks; struct nfs4_stateowner * st_stateowner; + struct nfs4_clnt_odstate * st_clnt_odstate; unsigned char st_access_bmap; unsigned char st_deny_bmap; struct nfs4_ol_stateid * st_openstp; diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index f982ae8..2f8c092 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -247,6 +247,7 @@ struct nfsd4_open { struct nfs4_openowner *op_openowner; /* used during processing */ struct nfs4_file *op_file; /* used during processing */ struct nfs4_ol_stateid *op_stp; /* used during processing */ + struct nfs4_clnt_odstate *op_odstate; /* used during processing */ struct nfs4_acl *op_acl; struct xdr_netobj op_label; }; diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 059f371..919fd5b 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, nchildren = nilfs_btree_node_get_nchildren(node); if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || - level > NILFS_BTREE_LEVEL_MAX || + level >= NILFS_BTREE_LEVEL_MAX || nchildren < 0 || nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index a6944b2..fdf4b41 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -757,6 +757,19 @@ lookup: if (tmpres) { spin_unlock(&dlm->spinlock); spin_lock(&tmpres->spinlock); + + /* + * Right after dlm spinlock was released, dlm_thread could have + * purged the lockres. Check if lockres got unhashed. If so + * start over. 
+ */ + if (hlist_unhashed(&tmpres->hash_node)) { + spin_unlock(&tmpres->spinlock); + dlm_lockres_put(tmpres); + tmpres = NULL; + goto lookup; + } + /* Wait on the thread that is mastering the resource */ if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { __dlm_wait_on_lockres(tmpres); diff --git a/fs/splice.c b/fs/splice.c index 476024b..bfe62ae 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1161,7 +1161,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, long ret, bytes; umode_t i_mode; size_t len; - int i, flags; + int i, flags, more; /* * We require the input being a regular file, as we don't want to @@ -1204,6 +1204,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, * Don't block on output, we have to drain the direct pipe. */ sd->flags &= ~SPLICE_F_NONBLOCK; + more = sd->flags & SPLICE_F_MORE; while (len) { size_t read_len; @@ -1217,6 +1218,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, sd->total_len = read_len; /* + * If more data is pending, set SPLICE_F_MORE. + * If this is the last data and SPLICE_F_MORE was not set + * initially, clear it. + */ + if (read_len < len) + sd->flags |= SPLICE_F_MORE; + else if (!more) + sd->flags &= ~SPLICE_F_MORE; + /* * NOTE: nonblocking mode only applies to the input. We * must not do the output in nonblocking mode as then we * could get stuck data in the internal pipe: diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index f5ca0e9..1c3002e 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -124,7 +124,6 @@ #ifndef ACPI_USE_SYSTEM_INTTYPES typedef unsigned char u8; -typedef unsigned char u8; typedef unsigned short u16; typedef short s16; typedef COMPILER_DEPENDENT_UINT64 u64; diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 2dd405c..45c39a3 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -186,6 +186,7 @@ {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index a1b25e3..b7299fe 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -220,7 +220,7 @@ enum rq_flag_bits { /* This mask is used for both bio and request merge checking */ #define REQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index cdf13ca..371e560 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -9,10 +9,24 @@ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) - /* Optimization barrier */ + /* The "volatile" is due to gcc bugs */ #define barrier() __asm__ __volatile__("": : :"memory") +/* + * This version is used, e.g.,
to prevent dead store elimination on @ptr + * where gcc and llvm may behave differently when otherwise using + * normal barrier(): while gcc behavior gets along with a normal + * barrier(), llvm needs an explicit input variable to be assumed + * clobbered. The issue is as follows: while the inline asm might + * access any memory it wants, the compiler could have fit all of + * @ptr into memory registers instead, and since @ptr never escaped + * from that, it proved that the inline asm wasn't touching any of + * it. This version works well with both compilers, i.e. we're telling + * the compiler that the inline asm absolutely may see the contents + * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + */ +#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") /* * This macro obfuscates arithmetic on a variable address so that gcc diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index ba147a1..0c9a2f2 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -13,9 +13,12 @@ /* Intel ECC compiler doesn't support gcc specific asm stmts. * It uses intrinsics to do the equivalent things. */ +#undef barrier_data #undef RELOC_HIDE #undef OPTIMIZER_HIDE_VAR +#define barrier_data(ptr) barrier() + #define RELOC_HIDE(ptr, off) \ ({ unsigned long __ptr; \ __ptr = (unsigned long) (ptr); \ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 0e41ca0..8677225 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define barrier() __memory_barrier() #endif +#ifndef barrier_data +# define barrier_data(ptr) barrier() +#endif + /* Unreachable code */ #ifndef unreachable # define unreachable() do { } while (1) diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index bd95527..c156f50 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -28,6 +28,9 @@ #include <linux/thermal.h> #include <linux/cpumask.h> +typedef int (*get_static_t)(cpumask_t *cpumask, int interval, + unsigned long voltage, u32 *power); + #ifdef CONFIG_CPU_THERMAL /** * cpufreq_cooling_register - function to create cpufreq cooling device. @@ -36,6 +39,10 @@ struct thermal_cooling_device * cpufreq_cooling_register(const struct cpumask *clip_cpus); +struct thermal_cooling_device * +cpufreq_power_cooling_register(const struct cpumask *clip_cpus, + u32 capacitance, get_static_t plat_static_func); + /** * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. * @np: a valid struct device_node to the cooling device device tree node.
@@ -45,6 +52,12 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus); struct thermal_cooling_device * of_cpufreq_cooling_register(struct device_node *np, const struct cpumask *clip_cpus); + +struct thermal_cooling_device * +of_cpufreq_power_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus, + u32 capacitance, + get_static_t plat_static_func); #else static inline struct thermal_cooling_device * of_cpufreq_cooling_register(struct device_node *np, @@ -52,6 +65,15 @@ of_cpufreq_cooling_register(struct device_node *np, { return ERR_PTR(-ENOSYS); } + +static inline struct thermal_cooling_device * +of_cpufreq_power_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus, + u32 capacitance, + get_static_t plat_static_func) +{ + return NULL; +} #endif /** @@ -68,11 +90,28 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus) return ERR_PTR(-ENOSYS); } static inline struct thermal_cooling_device * +cpufreq_power_cooling_register(const struct cpumask *clip_cpus, + u32 capacitance, get_static_t plat_static_func) +{ + return NULL; +} + +static inline struct thermal_cooling_device * of_cpufreq_cooling_register(struct device_node *np, const struct cpumask *clip_cpus) { return ERR_PTR(-ENOSYS); } + +static inline struct thermal_cooling_device * +of_cpufreq_power_cooling_register(struct device_node *np, + const struct cpumask *clip_cpus, + u32 capacitance, + get_static_t plat_static_func) +{ + return NULL; +} + static inline void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 46e83c2..f9ecf63 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -46,7 +46,7 @@ const char *ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int len); const char *ftrace_print_array_seq(struct trace_seq *p, - const void *buf, int buf_len, + const void *buf, int count, size_t el_size); struct trace_iterator; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 97a9373..15928f0 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -30,6 +30,7 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u #define ___GFP_RECLAIMABLE 0x80000u +#define ___GFP_NOACCOUNT 0x100000u #define ___GFP_NOTRACK 0x200000u #define ___GFP_NO_KSWAPD 0x400000u #define ___GFP_OTHER_NODE 0x800000u @@ -87,6 +88,7 @@ struct vm_area_struct; #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ +#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */ #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 36ec4ae..9de976b 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -95,8 +95,6 @@ struct device_node; -extern struct irq_chip gic_arch_extn; - void gic_set_irqchip_flags(unsigned long flags); void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, u32 offset, struct device_node *); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index e60a745..e804306 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -40,6 +40,10 @@ 
#error KEXEC_CONTROL_MEMORY_LIMIT not defined #endif +#ifndef KEXEC_CONTROL_MEMORY_GFP +#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL +#endif + #ifndef KEXEC_CONTROL_PAGE_SIZE #error KEXEC_CONTROL_PAGE_SIZE not defined #endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 8dad4a3..28aeae4 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -205,6 +205,7 @@ enum { ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ + ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */ /* struct ata_port flags */ ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ @@ -309,6 +310,12 @@ enum { */ ATA_TMOUT_PMP_SRST_WAIT = 5000, + /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might + * be a spurious PHY event, so ignore the first PHY event that + * occurs within 10s after the policy change. + */ + ATA_TMOUT_SPURIOUS_PHY = 10000, + /* ATA bus states */ BUS_UNKNOWN = 0, BUS_DMA = 1, @@ -788,6 +795,8 @@ struct ata_link { struct ata_eh_context eh_context; struct ata_device device[ATA_MAX_DEVICES]; + + unsigned long last_lpm_change; /* when last LPM change happened */ }; #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) @@ -1201,6 +1210,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); +extern bool sata_lpm_ignore_phy_events(struct ata_link *link); extern int ata_cable_40wire(struct ata_port *ap); extern int ata_cable_80wire(struct ata_port *ap); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 72dff5f..6c89181 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) if (!memcg_kmem_enabled()) return true; + if (gfp & __GFP_NOACCOUNT) + return true; /* * __GFP_NOFAIL allocations will move on even if charging is not * possible. 
Therefore we don't even try, and have this allocation @@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { if (!memcg_kmem_enabled()) return cachep; + if (gfp & __GFP_NOACCOUNT) + return cachep; if (gfp & __GFP_NOFAIL) return cachep; if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index bcbde79..05b9a69 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -25,7 +25,6 @@ #ifndef _LINUX_NETDEVICE_H #define _LINUX_NETDEVICE_H -#include <linux/pm_qos.h> #include <linux/timer.h> #include <linux/bug.h> #include <linux/delay.h> @@ -60,6 +59,7 @@ struct phy_device; struct wireless_dev; /* 802.15.4 specific */ struct wpan_dev; +struct mpls_dev; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -976,7 +976,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, * u16 flags) * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, - * struct net_device *dev, u32 filter_mask) + * struct net_device *dev, u32 filter_mask, + * int nlflags) * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, * u16 flags); * @@ -1172,7 +1173,8 @@ struct net_device_ops { int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask); + u32 filter_mask, + int nlflags); int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, u16 flags); @@ -1496,8 +1498,6 @@ enum netdev_priv_flags { * * @qdisc_tx_busylock: XXX: need comments on this one * - * @pm_qos_req: Power Management QoS object - * * FIXME: cleanup struct net_device such that network protocol info * moves out. */ @@ -1627,6 +1627,9 @@ struct net_device { void *ax25_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; +#if IS_ENABLED(CONFIG_MPLS_ROUTING) + struct mpls_dev __rcu *mpls_ptr; +#endif /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -2021,10 +2024,10 @@ struct pcpu_sw_netstats { ({ \ typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ if (pcpu_stats) { \ - int i; \ - for_each_possible_cpu(i) { \ + int __cpu; \ + for_each_possible_cpu(__cpu) { \ typeof(type) *stat; \ - stat = per_cpu_ptr(pcpu_stats, i); \ + stat = per_cpu_ptr(pcpu_stats, __cpu); \ u64_stats_init(&stat->syncp); \ } \ } \ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index ab8f76d..f2fdb5a 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -39,12 +39,24 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb) static inline int nf_bridge_get_physinif(const struct sk_buff *skb) { - return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0; + struct nf_bridge_info *nf_bridge; + + if (skb->nf_bridge == NULL) + return 0; + + nf_bridge = skb->nf_bridge; + return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0; } static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) { - return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0; + struct nf_bridge_info *nf_bridge; + + if (skb->nf_bridge == NULL) + return 0; + + nf_bridge = skb->nf_bridge; + return nf_bridge->physoutdev ? 
nf_bridge->physoutdev->ifindex : 0; } static inline struct net_device * diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index ff3fea3..9abb763 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -460,7 +460,7 @@ struct nilfs_btree_node { /* level */ #define NILFS_BTREE_LEVEL_DATA 0 #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) -#define NILFS_BTREE_LEVEL_MAX 14 +#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */ /** * struct nilfs_palloc_group_desc - block group descriptor diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 38cff8f..2f7b9a4 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2541,10 +2541,6 @@ #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_EESSC 0x0008 -#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 -#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 -#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150 -#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 #define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 #define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 #define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index e23d242..dbcbcc5 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -282,7 +282,8 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht, static inline bool rht_grow_above_100(const struct rhashtable *ht, const struct bucket_table *tbl) { - return atomic_read(&ht->nelems) > tbl->size; + return atomic_read(&ht->nelems) > tbl->size && + (!ht->p.max_size || tbl->size < ht->p.max_size); } /* The bucket lock is selected based on the hash and protects mutations diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 2da5d10..7b8e260 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -122,5 +122,5 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm, extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u16 mode, - u32 flags, u32 mask); + u32 flags, u32 mask, int nlflags); #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 8222ae4..26a2e61 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -175,14 +175,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); extern void calc_global_load(unsigned long ticks); extern void update_cpu_load_nohz(void); -/* Notifier for when a task gets migrated to a new CPU */ -struct task_migration_notifier { - struct task_struct *task; - int from_cpu; - int to_cpu; -}; -extern void register_task_migration_notifier(struct notifier_block *n); - extern unsigned long get_parent_ip(unsigned long addr); extern void dump_cpu_task(int cpu); diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index 6341f5b..a30b172 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p) #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); -extern int rt_mutex_check_prio(struct task_struct *task, int newprio); +extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio); extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); extern void rt_mutex_adjust_pi(struct task_struct *p); static inline bool tsk_is_pi_blocked(struct task_struct *tsk) @@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p) 
return p->normal_prio; } -static inline int rt_mutex_check_prio(struct task_struct *task, int newprio) +static inline int rt_mutex_get_effective_prio(struct task_struct *task, + int newprio) { - return 0; + return newprio; } static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 06793b5..66e374d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -773,6 +773,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, int node); +struct sk_buff *__build_skb(void *data, unsigned int frag_size); struct sk_buff *build_skb(void *data, unsigned int frag_size); static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 0caa3a2..3b29115 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -145,11 +145,19 @@ struct tcp_sock { * read the code and the spec side by side (and laugh ...) * See RFC793 and RFC1122. The RFC writes these in capitals. */ + u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived + * sum(delta(rcv_nxt)), or how many bytes + * were acked. + */ u32 rcv_nxt; /* What we want to receive next */ u32 copied_seq; /* Head of yet unread data */ u32 rcv_wup; /* rcv_nxt on last window update sent */ u32 snd_nxt; /* Next sequence we send */ + u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked + * sum(delta(snd_una)), or how many bytes + * were acked. + */ u32 snd_una; /* First byte we want an ack for */ u32 snd_sml; /* Last byte of the most recently transmitted small packet */ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 5eac316..037e9df 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -40,6 +40,9 @@ /* No upper/lower limit requirement */ #define THERMAL_NO_LIMIT ((u32)~0) +/* Default weight of a bound cooling device */ +#define THERMAL_WEIGHT_DEFAULT 0 + /* Unit conversion macros */ #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ ((long)t-2732+5)/10 : ((long)t-2732-5)/10) @@ -56,10 +59,13 @@ #define DEFAULT_THERMAL_GOVERNOR "fair_share" #elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) #define DEFAULT_THERMAL_GOVERNOR "user_space" +#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) +#define DEFAULT_THERMAL_GOVERNOR "power_allocator" #endif struct thermal_zone_device; struct thermal_cooling_device; +struct thermal_instance; enum thermal_device_mode { THERMAL_DEVICE_DISABLED = 0, @@ -113,6 +119,12 @@ struct thermal_cooling_device_ops { int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); + int (*get_requested_power)(struct thermal_cooling_device *, + struct thermal_zone_device *, u32 *); + int (*state2power)(struct thermal_cooling_device *, + struct thermal_zone_device *, unsigned long, u32 *); + int (*power2state)(struct thermal_cooling_device *, + struct thermal_zone_device *, u32, unsigned long *); }; struct thermal_cooling_device { @@ -144,8 +156,7 @@ struct thermal_attr { * @devdata: private pointer for device private data * @trips: number of trip points the thermal zone supports * @passive_delay: number of milliseconds to wait between polls when - * performing passive cooling. 
Currenty only used by the - * step-wise governor + * performing passive cooling. * @polling_delay: number of milliseconds to wait between polls when * checking whether trip points have been crossed (0 for * interrupt driven systems) @@ -155,13 +166,13 @@ struct thermal_attr { * @last_temperature: previous temperature read * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION * @passive: 1 if you've crossed a passive trip point, 0 otherwise. - * Currenty only used by the step-wise governor. * @forced_passive: If > 0, temperature at which to switch on all ACPI * processor cooling devices. Currently only used by the * step-wise governor. * @ops: operations this &thermal_zone_device supports * @tzp: thermal zone parameters * @governor: pointer to the governor for this thermal zone + * @governor_data: private pointer for governor data * @thermal_instances: list of &struct thermal_instance of this thermal zone * @idr: &struct idr to generate unique id for this zone's cooling * devices @@ -186,8 +197,9 @@ struct thermal_zone_device { int passive; unsigned int forced_passive; struct thermal_zone_device_ops *ops; - const struct thermal_zone_params *tzp; + struct thermal_zone_params *tzp; struct thermal_governor *governor; + void *governor_data; struct list_head thermal_instances; struct idr idr; struct mutex lock; @@ -198,12 +210,19 @@ struct thermal_zone_device { /** * struct thermal_governor - structure that holds thermal governor information * @name: name of the governor + * @bind_to_tz: callback called when binding to a thermal zone. If it + * returns 0, the governor is bound to the thermal zone, + * otherwise it fails. + * @unbind_from_tz: callback called when a governor is unbound from a + * thermal zone. * @throttle: callback called for every trip point even if temperature is * below the trip point temperature * @governor_list: node in thermal_governor_list (in thermal_core.c) */ struct thermal_governor { char name[THERMAL_NAME_LENGTH]; + int (*bind_to_tz)(struct thermal_zone_device *tz); + void (*unbind_from_tz)(struct thermal_zone_device *tz); int (*throttle)(struct thermal_zone_device *tz, int trip); struct list_head governor_list; }; @@ -214,9 +233,12 @@ struct thermal_bind_params { /* * This is a measure of 'how effectively these devices can - * cool 'this' thermal zone. The shall be determined by platform - * characterization. This is on a 'percentage' scale. - * See Documentation/thermal/sysfs-api.txt for more information. + * cool 'this' thermal zone. It shall be determined by + * platform characterization. This value is relative to the + * rest of the weights so a cooling device whose weight is + * double that of another cooling device is twice as + * effective. See Documentation/thermal/sysfs-api.txt for more + * information. 
*/ int weight; @@ -253,6 +275,44 @@ struct thermal_zone_params { int num_tbps; /* Number of tbp entries */ struct thermal_bind_params *tbp; + + /* + * Sustainable power (heat) that this thermal zone can dissipate in + * mW + */ + u32 sustainable_power; + + /* + * Proportional parameter of the PID controller when + * overshooting (i.e., when temperature is below the target) + */ + s32 k_po; + + /* + * Proportional parameter of the PID controller when + * undershooting + */ + s32 k_pu; + + /* Integral parameter of the PID controller */ + s32 k_i; + + /* Derivative parameter of the PID controller */ + s32 k_d; + + /* threshold below which the error is no longer accumulated */ + s32 integral_cutoff; + + /* + * @slope: slope of a linear temperature adjustment curve. + * Used by thermal zone drivers. + */ + int slope; + /* + * @offset: offset of a linear temperature adjustment curve. + * Used by thermal zone drivers (default 0). + */ + int offset; }; struct thermal_genl_event { @@ -316,14 +376,25 @@ void thermal_zone_of_sensor_unregister(struct device *dev, #endif #if IS_ENABLED(CONFIG_THERMAL) +static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) +{ + return cdev->ops->get_requested_power && cdev->ops->state2power && + cdev->ops->power2state; +} + +int power_actor_get_max_power(struct thermal_cooling_device *, + struct thermal_zone_device *tz, u32 *max_power); +int power_actor_set_power(struct thermal_cooling_device *, + struct thermal_instance *, u32); struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, - const struct thermal_zone_params *, int, int); + struct thermal_zone_params *, int, int); void thermal_zone_device_unregister(struct thermal_zone_device *); int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *, - unsigned long, unsigned long); + unsigned long, unsigned long, + unsigned int); int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *); @@ -343,6 +414,14 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); #else +static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) +{ return false; } +static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, u32 *max_power) +{ return 0; } +static inline int power_actor_set_power(struct thermal_cooling_device *cdev, + struct thermal_instance *tz, u32 power) +{ return 0; } static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, diff --git a/include/linux/tty.h b/include/linux/tty.h index 358a337..d76631f 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -339,6 +339,7 @@ struct tty_file_private { #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ #define TTY_DEBUG 4 /* Debugging */ #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ +#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */ #define TTY_LDISC_OPEN 11 /* Line discipline is open */ #define TTY_PTY_LOCK 16 /* pty private */ #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ @@ -462,7 +463,6 @@ extern int tty_hung_up_p(struct file *filp); 
extern void do_SAK(struct tty_struct *tty); extern void __do_SAK(struct tty_struct *tty); extern void no_tty(void); -extern void tty_flush_to_ldisc(struct tty_struct *tty); extern void tty_buffer_free_all(struct tty_port *port); extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); extern void tty_buffer_init(struct tty_port *port); @@ -491,6 +491,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty) extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); +extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); extern void tty_ldisc_deref(struct tty_ldisc *); diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h index 0ee05da..0383552 100644 --- a/include/linux/uidgid.h +++ b/include/linux/uidgid.h @@ -109,12 +109,12 @@ static inline bool gid_lte(kgid_t left, kgid_t right) static inline bool uid_valid(kuid_t uid) { - return !uid_eq(uid, INVALID_UID); + return __kuid_val(uid) != (uid_t) -1; } static inline bool gid_valid(kgid_t gid) { - return !gid_eq(gid, INVALID_GID); + return __kgid_val(gid) != (gid_t) -1; } #ifdef CONFIG_USER_NS diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index a7f2604..7f5f78b 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -77,6 +77,8 @@ /* Cannot handle ATA_12 or ATA_16 CDBs */ \ US_FLAG(NO_REPORT_OPCODES, 0x04000000) \ /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \ + US_FLAG(MAX_SECTORS_240, 0x08000000) \ + /* Sets max_sectors to 240 */ \ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h index d5f4fb6..f9b2ce5 100644 --- a/include/linux/util_macros.h +++ b/include/linux/util_macros.h @@ -5,7 +5,7 @@ ({ \ typeof(as) __fc_i, __fc_as = (as) - 1; \ typeof(x) __fc_x = (x); \ - typeof(*a) *__fc_a = (a); \ + typeof(*a) const *__fc_a = (a); \ for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ __fc_a[__fc_i + 1], 2)) \ diff --git a/include/net/bonding.h b/include/net/bonding.h index fda6fee..78ed135 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -30,13 +30,6 @@ #include <net/bond_alb.h> #include <net/bond_options.h> -#define DRV_VERSION "3.7.1" -#define DRV_RELDATE "April 27, 2011" -#define DRV_NAME "bonding" -#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" - -#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" - #define BOND_MAX_ARP_TARGETS 16 #define BOND_DEFAULT_MIIMON 100 diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index eeda676..6ea16c8 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -30,11 +30,13 @@ struct wpan_phy_cca; struct cfg802154_ops { struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, const char *name, + unsigned char name_assign_type, int type); void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, struct net_device *dev); int (*add_virtual_intf)(struct wpan_phy *wpan_phy, const char *name, + unsigned char name_assign_type, enum nl802154_iftype type, __le64 extended_addr); int (*del_virtual_intf)(struct wpan_phy *wpan_phy, diff --git a/include/net/codel.h b/include/net/codel.h index aeee280..1e18005 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -120,11 +120,13 @@ static inline u32 
codel_time_to_us(codel_time_t val) * struct codel_params - contains codel parameters * @target: target queue size (in time units) * @interval: width of moving time window + * @mtu: device mtu, or minimal queue backlog in bytes. * @ecn: is Explicit Congestion Notification enabled */ struct codel_params { codel_time_t target; codel_time_t interval; + u32 mtu; bool ecn; }; @@ -166,10 +168,12 @@ struct codel_stats { u32 ecn_mark; }; -static void codel_params_init(struct codel_params *params) +static void codel_params_init(struct codel_params *params, + const struct Qdisc *sch) { params->interval = MS2TIME(100); params->target = MS2TIME(5); + params->mtu = psched_mtu(qdisc_dev(sch)); params->ecn = false; } @@ -180,7 +184,7 @@ static void codel_vars_init(struct codel_vars *vars) static void codel_stats_init(struct codel_stats *stats) { - stats->maxpacket = 256; + stats->maxpacket = 0; } /* @@ -234,7 +238,7 @@ static bool codel_should_drop(const struct sk_buff *skb, stats->maxpacket = qdisc_pkt_len(skb); if (codel_time_before(vars->ldelay, params->target) || - sch->qstats.backlog <= stats->maxpacket) { + sch->qstats.backlog <= params->mtu) { /* went below - stay below for at least interval */ vars->first_above_time = 0; return false; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 7b5887c..48a81582 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -279,12 +279,6 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk, void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, unsigned long timeout); -static inline void inet_csk_reqsk_queue_removed(struct sock *sk, - struct request_sock *req) -{ - reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); -} - static inline void inet_csk_reqsk_queue_added(struct sock *sk, const unsigned long timeout) { @@ -306,19 +300,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); } -static inline void inet_csk_reqsk_queue_unlink(struct sock *sk, - struct request_sock *req) -{ - reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req); -} - -static inline void inet_csk_reqsk_queue_drop(struct sock *sk, - struct request_sock *req) -{ - inet_csk_reqsk_queue_unlink(sk, req); - inet_csk_reqsk_queue_removed(sk, req); - reqsk_put(req); -} +void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); void inet_csk_destroy_sock(struct sock *sk); void inet_csk_prepare_forced_close(struct sock *sk); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b4bef11..8e3668b 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1666,6 +1666,8 @@ struct ieee80211_tx_control { * @sta: station table entry, %NULL for per-vif queue * @tid: the TID for this queue (unused for per-vif queue) * @ac: the AC for this queue + * @drv_priv: data area for driver use, will always be aligned to + * sizeof(void *). * * The driver can obtain packets from this queue by calling * ieee80211_tx_dequeue(). 
diff --git a/include/net/mac802154.h b/include/net/mac802154.h index e18e7fd..7df28a4 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -247,19 +247,109 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) __put_unaligned_memmove64(swab64p(le64_src), be64_dst); } -/* Basic interface to register ieee802154 device */ +/** + * ieee802154_alloc_hw - Allocate a new hardware device + * + * This must be called once for each hardware device. The returned pointer + * must be used to refer to this device when calling other functions. + * mac802154 allocates a private data area for the driver pointed to by + * @priv in &struct ieee802154_hw, the size of this area is given as + * @priv_data_len. + * + * @priv_data_len: length of private data + * @ops: callbacks for this device + * + * Return: A pointer to the new hardware device, or %NULL on error. + */ struct ieee802154_hw * ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops); + +/** + * ieee802154_free_hw - free hardware descriptor + * + * This function frees everything that was allocated, including the + * private data for the driver. You must call ieee802154_unregister_hw() + * before calling this function. + * + * @hw: the hardware to free + */ void ieee802154_free_hw(struct ieee802154_hw *hw); + +/** + * ieee802154_register_hw - Register hardware device + * + * You must call this function before any other functions in + * mac802154. Note that before a hardware can be registered, you + * need to fill the contained wpan_phy's information. + * + * @hw: the device to register as returned by ieee802154_alloc_hw() + * + * Return: 0 on success. An error code otherwise. + */ int ieee802154_register_hw(struct ieee802154_hw *hw); + +/** + * ieee802154_unregister_hw - Unregister a hardware device + * + * This function instructs mac802154 to free allocated resources + * and unregister netdevices from the networking subsystem. + * + * @hw: the hardware to unregister + */ void ieee802154_unregister_hw(struct ieee802154_hw *hw); +/** + * ieee802154_rx - receive frame + * + * Use this function to hand received frames to mac802154. The receive + * buffer in @skb must start with an IEEE 802.15.4 header. If a + * paged @skb is used, the driver is recommended to put the ieee802154 + * header of the frame on the linear part of the @skb to avoid memory + * allocation and/or memcpy by the stack. + * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other. + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac802154 after this call + */ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb); + +/** + * ieee802154_rx_irqsafe - receive frame + * + * Like ieee802154_rx() but can be called in IRQ context + * (internally defers to a tasklet.) + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac802154 after this call + * @lqi: link quality indicator + */ void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi); - +/** + * ieee802154_wake_queue - wake ieee802154 queue + * @hw: pointer as obtained from ieee802154_alloc_hw(). + * + * Drivers should use this function instead of netif_wake_queue. + */ void ieee802154_wake_queue(struct ieee802154_hw *hw); + +/** + * ieee802154_stop_queue - stop ieee802154 queue + * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * + * Drivers should use this function instead of netif_stop_queue. + */ void ieee802154_stop_queue(struct ieee802154_hw *hw); + +/** + * ieee802154_xmit_complete - frame transmission complete + * + * @hw: pointer as obtained from ieee802154_alloc_hw(). + * @skb: buffer for transmission + * @ifs_handling: indicate interframe space handling + */ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, bool ifs_handling); diff --git a/include/net/request_sock.h b/include/net/request_sock.h index fe41f3c..9f4265c 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -212,24 +212,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue) return queue->rskq_accept_head == NULL; } -static inline void reqsk_queue_unlink(struct request_sock_queue *queue, - struct request_sock *req) -{ - struct listen_sock *lopt = queue->listen_opt; - struct request_sock **prev; - - spin_lock(&queue->syn_wait_lock); - - prev = &lopt->syn_table[req->rsk_hash]; - while (*prev != req) - prev = &(*prev)->dl_next; - *prev = req->dl_next; - - spin_unlock(&queue->syn_wait_lock); - if (del_timer(&req->rsk_timer)) - reqsk_put(req); -} - static inline void reqsk_queue_add(struct request_sock_queue *queue, struct request_sock *req, struct sock *parent, diff --git a/include/net/tcp.h b/include/net/tcp.h index 051dc5c2..6d204f3 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -576,7 +576,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) } /* tcp.c */ -void tcp_get_info(const struct sock *, struct tcp_info *); +void tcp_get_info(struct sock *, struct tcp_info *); /* Read 'sendfile()'-style from a TCP socket */ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, @@ -804,6 +804,8 @@ enum tcp_ca_ack_event_flags { /* Requires ECN/ECT set on all packets */ #define TCP_CONG_NEEDS_ECN 0x2 +union tcp_cc_info; + struct tcp_congestion_ops { struct list_head list; u32 key; @@ -829,7 +831,8 @@ struct tcp_congestion_ops { /* hook for packet ack accounting (optional) */ void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us); /* get info for inet_diag (optional) */ - int (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb); + size_t (*get_info)(struct sock *sk, u32 ext, int *attr, + union tcp_cc_info *info); char name[TCP_CA_NAME_MAX]; struct module *owner; diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index ce55906..ac54c27 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid) } /* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ -static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) +static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) { if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { struct sockaddr_in *out_in = (struct sockaddr_in *)out; @@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) out_in->sin6_family = AF_INET6; memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16); } - return 0; } static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 0e3ff30..39ed2d2 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -105,7 +105,8 @@ enum ib_cm_data_size { IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, IB_CM_SIDR_REP_INFO_LENGTH = 72, - IB_CM_COMPARE_SIZE = 64 + /* compare done u32 at a 
time */ + IB_CM_COMPARE_SIZE = (64 / sizeof(u32)) }; struct ib_cm_id; @@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id); #define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) struct ib_cm_compare_data { - u8 data[IB_CM_COMPARE_SIZE]; - u8 mask[IB_CM_COMPARE_SIZE]; + u32 data[IB_CM_COMPARE_SIZE]; + u32 mask[IB_CM_COMPARE_SIZE]; }; /** diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h index 928b277..fda3167 100644 --- a/include/rdma/iw_portmap.h +++ b/include/rdma/iw_portmap.h @@ -148,6 +148,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *); int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *); /** + * iwpm_remote_info_cb - Process remote connecting peer address info, which + * the port mapper has received from the connecting peer + * + * @cb: Contains the received message (payload and netlink header) + * + * Stores the IPv4/IPv6 address info in a hash table + */ +int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *); + +/** * iwpm_mapping_error_cb - Process port mapper notification for error * * @skb: @@ -175,6 +185,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *); int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *); /** + * iwpm_get_remote_info - Get the remote connecting peer address info + * + * @mapped_loc_addr: Mapped local address of the listening peer + * @mapped_rem_addr: Mapped remote address of the connecting peer + * @remote_addr: To store the remote address of the connecting peer + * @nl_client: The index of the netlink client + * + * The remote address info is retrieved and provided to the client in + * the remote_addr. After that it is removed from the hash table + */ +int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr, + struct sockaddr_storage *mapped_rem_addr, + struct sockaddr_storage *remote_addr, u8 nl_client); + +/** * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address * info in a hash table * @local_addr: Local ip/tcp address diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 183eaab..96e3f56 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h @@ -36,5 +36,6 @@ for sequential scan */ #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ +#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ #endif diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h index 26f406e..3a8fca9 100644 --- a/include/sound/designware_i2s.h +++ b/include/sound/designware_i2s.h @@ -1,5 +1,5 @@ /* - * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com) + * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 0de95cc..5bd1346 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h @@ -41,7 +41,8 @@ #define EMUPAGESIZE 4096 #define MAXREQVOICES 8 -#define MAXPAGES 8192 +#define MAXPAGES0 4096 /* 32 bit mode */ +#define MAXPAGES1 8192 /* 31 bit mode */ #define RESERVED 0 #define NUM_MIDI 16 #define NUM_G 64 /* use all channels */ @@ -50,8 +51,7 @@ /* FIXME? 
- according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */ #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */ -#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */ - /* See ALSA bug #1276 - rlrevell */ +#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */ #define TMEMSIZE 256*1024 #define TMEMSIZEREG 4 @@ -466,8 +466,11 @@ #define MAPB 0x0d /* Cache map B */ -#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */ -#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */ +#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */ +#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */ + +#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */ +#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */ /* 0x0e, 0x0f: Not used */ @@ -1704,6 +1707,7 @@ struct snd_emu10k1 { unsigned short model; /* subsystem id */ unsigned int card_type; /* EMU10K1_CARD_* */ unsigned int ecard_ctrl; /* ecard control bits */ + unsigned int address_mode; /* address mode */ unsigned long dma_mask; /* PCI DMA mask */ unsigned int delay_pcm_irq; /* in samples */ int max_cache_pages; /* max memory size / PAGE_SIZE */ diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 0bc8364..1065095 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -287,7 +287,7 @@ struct device; .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\ .tlv.p = (tlv_array), \ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ - .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) } + .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) } #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \ SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array) #define SOC_DAPM_ENUM(xname, xenum) \ diff --git a/include/sound/soc.h b/include/sound/soc.h index fcb312b..f6226914 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -387,8 +387,20 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source, int snd_soc_register_card(struct snd_soc_card *card); int snd_soc_unregister_card(struct snd_soc_card *card); int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card); +#ifdef CONFIG_PM_SLEEP int snd_soc_suspend(struct device *dev); int snd_soc_resume(struct device *dev); +#else +static inline int snd_soc_suspend(struct device *dev) +{ + return 0; +} + +static inline int snd_soc_resume(struct device *dev) +{ + return 0; +} +#endif int snd_soc_poweroff(struct device *dev); int snd_soc_register_platform(struct device *dev, const struct snd_soc_platform_driver *platform_drv); diff --git a/include/sound/spear_dma.h b/include/sound/spear_dma.h index 65aca51..e290de4 100644 --- a/include/sound/spear_dma.h +++ b/include/sound/spear_dma.h @@ -1,7 +1,7 @@ /* * linux/spear_dma.h * -* Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com) +* Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h index 0f4f95d..8b1f806 100644 --- a/include/trace/events/thermal.h +++ b/include/trace/events/thermal.h @@ -77,6 +77,64 @@ TRACE_EVENT(thermal_zone_trip, __entry->trip_type) ); +TRACE_EVENT(thermal_power_cpu_get_power, + 
TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load, + size_t load_len, u32 dynamic_power, u32 static_power), + + TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power), + + TP_STRUCT__entry( + __bitmask(cpumask, num_possible_cpus()) + __field(unsigned long, freq ) + __dynamic_array(u32, load, load_len) + __field(size_t, load_len ) + __field(u32, dynamic_power ) + __field(u32, static_power ) + ), + + TP_fast_assign( + __assign_bitmask(cpumask, cpumask_bits(cpus), + num_possible_cpus()); + __entry->freq = freq; + memcpy(__get_dynamic_array(load), load, + load_len * sizeof(*load)); + __entry->load_len = load_len; + __entry->dynamic_power = dynamic_power; + __entry->static_power = static_power; + ), + + TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d", + __get_bitmask(cpumask), __entry->freq, + __print_array(__get_dynamic_array(load), __entry->load_len, 4), + __entry->dynamic_power, __entry->static_power) +); + +TRACE_EVENT(thermal_power_cpu_limit, + TP_PROTO(const struct cpumask *cpus, unsigned int freq, + unsigned long cdev_state, u32 power), + + TP_ARGS(cpus, freq, cdev_state, power), + + TP_STRUCT__entry( + __bitmask(cpumask, num_possible_cpus()) + __field(unsigned int, freq ) + __field(unsigned long, cdev_state) + __field(u32, power ) + ), + + TP_fast_assign( + __assign_bitmask(cpumask, cpumask_bits(cpus), + num_possible_cpus()); + __entry->freq = freq; + __entry->cdev_state = cdev_state; + __entry->power = power; + ), + + TP_printk("cpus=%s freq=%u cdev_state=%lu power=%u", + __get_bitmask(cpumask), __entry->freq, __entry->cdev_state, + __entry->power) +); + #endif /* _TRACE_THERMAL_H */ /* This part must be outside protection */ diff --git a/include/trace/events/thermal_power_allocator.h b/include/trace/events/thermal_power_allocator.h new file mode 100644 index 0000000..12e1321 --- /dev/null +++ b/include/trace/events/thermal_power_allocator.h @@ -0,0 +1,87 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM thermal_power_allocator + +#if !defined(_TRACE_THERMAL_POWER_ALLOCATOR_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_THERMAL_POWER_ALLOCATOR_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(thermal_power_allocator, + TP_PROTO(struct thermal_zone_device *tz, u32 *req_power, + u32 total_req_power, u32 *granted_power, + u32 total_granted_power, size_t num_actors, + u32 power_range, u32 max_allocatable_power, + unsigned long current_temp, s32 delta_temp), + TP_ARGS(tz, req_power, total_req_power, granted_power, + total_granted_power, num_actors, power_range, + max_allocatable_power, current_temp, delta_temp), + TP_STRUCT__entry( + __field(int, tz_id ) + __dynamic_array(u32, req_power, num_actors ) + __field(u32, total_req_power ) + __dynamic_array(u32, granted_power, num_actors) + __field(u32, total_granted_power ) + __field(size_t, num_actors ) + __field(u32, power_range ) + __field(u32, max_allocatable_power ) + __field(unsigned long, current_temp ) + __field(s32, delta_temp ) + ), + TP_fast_assign( + __entry->tz_id = tz->id; + memcpy(__get_dynamic_array(req_power), req_power, + num_actors * sizeof(*req_power)); + __entry->total_req_power = total_req_power; + memcpy(__get_dynamic_array(granted_power), granted_power, + num_actors * sizeof(*granted_power)); + __entry->total_granted_power = total_granted_power; + __entry->num_actors = num_actors; + __entry->power_range = power_range; + __entry->max_allocatable_power = max_allocatable_power; + __entry->current_temp = current_temp; + __entry->delta_temp = delta_temp; + ), + + 
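Taken together, the two CPU power tracepoints above report both directions of a CPU cooling device's power model: thermal_power_cpu_get_power carries the per-CPU load samples with the estimated dynamic/static power, while thermal_power_cpu_limit records the frequency cap chosen for a given cooling-device state. A minimal call-site sketch is below; only the tracepoint signatures come from the patch, the surrounding function and its argument sources are hypothetical.

#include <linux/cpumask.h>
#include <trace/events/thermal.h>

/* Hypothetical reporting helper in a CPU cooling driver (sketch only). */
static void example_report_power(const struct cpumask *cpus,
				 unsigned long freq, u32 *load,
				 size_t load_len, u32 dyn_power,
				 u32 static_power)
{
	/* load points at load_len u32 entries, one per CPU in @cpus. */
	trace_thermal_power_cpu_get_power(cpus, freq, load, load_len,
					  dyn_power, static_power);
}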
TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%lu delta_temperature=%d", + __entry->tz_id, + __print_array(__get_dynamic_array(req_power), + __entry->num_actors, 4), + __entry->total_req_power, + __print_array(__get_dynamic_array(granted_power), + __entry->num_actors, 4), + __entry->total_granted_power, __entry->power_range, + __entry->max_allocatable_power, __entry->current_temp, + __entry->delta_temp) +); + +TRACE_EVENT(thermal_power_allocator_pid, + TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral, + s64 p, s64 i, s64 d, s32 output), + TP_ARGS(tz, err, err_integral, p, i, d, output), + TP_STRUCT__entry( + __field(int, tz_id ) + __field(s32, err ) + __field(s32, err_integral) + __field(s64, p ) + __field(s64, i ) + __field(s64, d ) + __field(s32, output ) + ), + TP_fast_assign( + __entry->tz_id = tz->id; + __entry->err = err; + __entry->err_integral = err_integral; + __entry->p = p; + __entry->i = i; + __entry->d = d; + __entry->output = output; + ), + + TP_printk("thermal_zone_id=%d err=%d err_integral=%d p=%lld i=%lld d=%lld output=%d", + __entry->tz_id, __entry->err, __entry->err_integral, + __entry->p, __entry->i, __entry->d, __entry->output) +); +#endif /* _TRACE_THERMAL_POWER_ALLOCATOR_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index d65c0a0..c7093c7 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -143,4 +143,8 @@ struct tcp_dctcp_info { __u32 dctcp_ab_tot; }; +union tcp_cc_info { + struct tcpvegas_info vegas; + struct tcp_dctcp_info dctcp; +}; #endif /* _UAPI_INET_DIAG_H_ */ diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h index bc9abfe..139d4dd 100644 --- a/include/uapi/linux/mpls.h +++ b/include/uapi/linux/mpls.h @@ -31,4 +31,14 @@ struct mpls_label { #define MPLS_LS_TTL_MASK 0x000000FF #define MPLS_LS_TTL_SHIFT 0 +/* Reserved labels */ +#define MPLS_LABEL_IPV4NULL 0 /* RFC3032 */ +#define MPLS_LABEL_RTALERT 1 /* RFC3032 */ +#define MPLS_LABEL_IPV6NULL 2 /* RFC3032 */ +#define MPLS_LABEL_IMPLNULL 3 /* RFC3032 */ +#define MPLS_LABEL_ENTROPY 7 /* RFC6790 */ +#define MPLS_LABEL_GAL 13 /* RFC5586 */ +#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */ +#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */ + #endif /* _UAPI_MPLS_H */ diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 3b97183..faa72f4 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -112,6 +112,7 @@ enum { #define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */ #define TCP_TIMESTAMP 24 #define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */ +#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */ struct tcp_repair_opt { __u32 opt_code; @@ -189,6 +190,8 @@ struct tcp_info { __u64 tcpi_pacing_rate; __u64 tcpi_max_pacing_rate; + __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ + __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ }; /* for TCP_MD5SIG socket option */ diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index a3318f3..915980a 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align) } /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ 
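The new TCP_CC_INFO socket option and union tcp_cc_info above can be exercised from user space with a plain getsockopt() call. The sketch below assumes headers and a kernel that carry this patch (the fallback #define of 26 matches the hunk above); if the socket's congestion-control module does not implement get_info(), the returned length is simply 0.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/inet_diag.h>	/* union tcp_cc_info (patched uapi header) */

#ifndef TCP_CC_INFO
#define TCP_CC_INFO 26		/* value added by the hunk above */
#endif

int main(void)
{
	union tcp_cc_info info;
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&info, 0, sizeof(info));
	if (fd >= 0 &&
	    getsockopt(fd, IPPROTO_TCP, TCP_CC_INFO, &info, &len) == 0)
		printf("congestion-control info: %u bytes\n", (unsigned)len);
	else
		perror("TCP_CC_INFO");
	if (fd >= 0)
		close(fd);
	return 0;
}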
-/* Assuming a given event_idx value from the other size, if +/* Assuming a given event_idx value from the other side, if * we have just incremented index from old to new_idx, * should we trigger an event? */ static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index de69170..6e4bb42 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -37,6 +37,7 @@ enum { RDMA_NL_IWPM_ADD_MAPPING, RDMA_NL_IWPM_QUERY_MAPPING, RDMA_NL_IWPM_REMOVE_MAPPING, + RDMA_NL_IWPM_REMOTE_INFO, RDMA_NL_IWPM_HANDLE_ERR, RDMA_NL_IWPM_MAPINFO, RDMA_NL_IWPM_MAPINFO_NUM, diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 143ca5f..4478f4b 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -191,6 +191,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops, struct page **pages, unsigned int count); void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item); +int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item); /* Perform a batch of grant map/copy operations. Retry every batch slot diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index c643e6a..0ce4f32 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled); void xen_timer_resume(void); void xen_arch_resume(void); +void xen_arch_suspend(void); void xen_resume_notifier_register(struct notifier_block *nb); void xen_resume_notifier_unregister(struct notifier_block *nb); diff --git a/init/do_mounts.c b/init/do_mounts.c index 8369ffa..a95bbdb 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -225,10 +225,11 @@ dev_t name_to_dev_t(const char *name) #endif if (strncmp(name, "/dev/", 5) != 0) { - unsigned maj, min; + unsigned maj, min, offset; char dummy; - if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) { + if ((sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) || + (sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset, &dummy) == 3)) { res = MKDEV(maj, min); if (maj != MAJOR(res) || min != MINOR(res)) goto fail; diff --git a/kernel/Makefile b/kernel/Makefile index 0f8f8b0..60c302c 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -197,9 +197,9 @@ x509.genkey: @echo >>x509.genkey "x509_extensions = myexts" @echo >>x509.genkey @echo >>x509.genkey "[ req_distinguished_name ]" - @echo >>x509.genkey "O = Magrathea" - @echo >>x509.genkey "CN = Glacier signing key" - @echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2" + @echo >>x509.genkey "#O = Unspecified company" + @echo >>x509.genkey "CN = Build time autogenerated kernel key" + @echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company" @echo >>x509.genkey @echo >>x509.genkey "[ myexts ]" @echo >>x509.genkey "basicConstraints=critical,CA:FALSE" diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 4139a0f..54f0e7f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -357,8 +357,8 @@ select_insn: ALU64_MOD_X: if (unlikely(SRC == 0)) return 0; - tmp = DST; - DST = do_div(tmp, SRC); + div64_u64_rem(DST, SRC, &tmp); + DST = tmp; CONT; ALU_MOD_X: if (unlikely(SRC == 0)) @@ -367,8 +367,8 @@ select_insn: DST = do_div(tmp, (u32) SRC); CONT; ALU64_MOD_K: - tmp = DST; - DST = do_div(tmp, IMM); + div64_u64_rem(DST, IMM, &tmp); + DST = tmp; CONT; ALU_MOD_K: tmp = (u32) DST; @@ -377,7 +377,7 @@ select_insn: ALU64_DIV_X: if (unlikely(SRC == 0)) return 0; - 
do_div(DST, SRC); + DST = div64_u64(DST, SRC); CONT; ALU_DIV_X: if (unlikely(SRC == 0)) @@ -387,7 +387,7 @@ select_insn: DST = (u32) tmp; CONT; ALU64_DIV_K: - do_div(DST, IMM); + DST = div64_u64(DST, IMM); CONT; ALU_DIV_K: tmp = (u32) DST; diff --git a/kernel/events/core.c b/kernel/events/core.c index 81aa3a4..1a3bf48 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -913,10 +913,30 @@ static void put_ctx(struct perf_event_context *ctx) * Those places that change perf_event::ctx will hold both * perf_event_ctx::mutex of the 'old' and 'new' ctx value. * - * Lock ordering is by mutex address. There is one other site where - * perf_event_context::mutex nests and that is put_event(). But remember that - * that is a parent<->child context relation, and migration does not affect - * children, therefore these two orderings should not interact. + * Lock ordering is by mutex address. There are two other sites where + * perf_event_context::mutex nests and those are: + * + * - perf_event_exit_task_context() [ child , 0 ] + * __perf_event_exit_task() + * sync_child_event() + * put_event() [ parent, 1 ] + * + * - perf_event_init_context() [ parent, 0 ] + * inherit_task_group() + * inherit_group() + * inherit_event() + * perf_event_alloc() + * perf_init_event() + * perf_try_init_event() [ child , 1 ] + * + * While it appears there is an obvious deadlock here -- the parent and child + * nesting levels are inverted between the two. This is in fact safe because + * life-time rules separate them. That is an exiting task cannot fork, and a + * spawning task cannot (yet) exit. + * + * But remember that that these are parent<->child context relations, and + * migration does not affect children, therefore these two orderings should not + * interact. * * The change in perf_event::ctx does not affect children (as claimed above) * because the sys_perf_event_open() case will install a new event and break @@ -3657,9 +3677,6 @@ static void perf_remove_from_owner(struct perf_event *event) } } -/* - * Called when the last reference to the file is gone. - */ static void put_event(struct perf_event *event) { struct perf_event_context *ctx; @@ -3697,6 +3714,9 @@ int perf_event_release_kernel(struct perf_event *event) } EXPORT_SYMBOL_GPL(perf_event_release_kernel); +/* + * Called when the last reference to the file is gone. + */ static int perf_release(struct inode *inode, struct file *file) { put_event(file->private_data); @@ -7364,7 +7384,12 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) return -ENODEV; if (event->group_leader != event) { - ctx = perf_event_ctx_lock(event->group_leader); + /* + * This ctx->mutex can nest when we're called through + * inheritance. See the perf_event_ctx_lock_nested() comment. 
+ */ + ctx = perf_event_ctx_lock_nested(event->group_leader, + SINGLE_DEPTH_NESTING); BUG_ON(!ctx); } diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c index 988dc58..2feb6fe 100644 --- a/kernel/irq/dummychip.c +++ b/kernel/irq/dummychip.c @@ -57,5 +57,6 @@ struct irq_chip dummy_irq_chip = { .irq_ack = noop, .irq_mask = noop, .irq_unmask = noop, + .flags = IRQCHIP_SKIP_SET_WAKE, }; EXPORT_SYMBOL_GPL(dummy_irq_chip); diff --git a/kernel/kexec.c b/kernel/kexec.c index 38c25b1..7a36fdc 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image, do { unsigned long pfn, epfn, addr, eaddr; - pages = kimage_alloc_pages(GFP_KERNEL, order); + pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); if (!pages) break; pfn = page_to_pfn(pages); diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b732793..b025295 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task) } /* - * Called by sched_setscheduler() to check whether the priority change - * is overruled by a possible priority boosting. + * Called by sched_setscheduler() to get the priority which will be + * effective after the change. */ -int rt_mutex_check_prio(struct task_struct *task, int newprio) +int rt_mutex_get_effective_prio(struct task_struct *task, int newprio) { if (!task_has_pi_waiters(task)) - return 0; + return newprio; - return task_top_pi_waiter(task)->task->prio <= newprio; + if (task_top_pi_waiter(task)->task->prio <= newprio) + return task_top_pi_waiter(task)->task->prio; + return newprio; } /* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 233165d..8cf7304 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -162,11 +162,14 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; module_param(kthread_prio, int, 0644); -/* Delay in jiffies for grace-period initialization delays. */ -static int gp_init_delay = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) - ? CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY - : 0; +/* Delay in jiffies for grace-period initialization delays, debug only. */ +#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT +static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY; module_param(gp_init_delay, int, 0644); +#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ +static const int gp_init_delay; +#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ +#define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. 
*/ /* * Track the rcutorture test sequence number and the update version @@ -1843,9 +1846,8 @@ static int rcu_gp_init(struct rcu_state *rsp) raw_spin_unlock_irq(&rnp->lock); cond_resched_rcu_qs(); ACCESS_ONCE(rsp->gp_activity) = jiffies; - if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) && - gp_init_delay > 0 && - !(rsp->gpnum % (rcu_num_nodes * 10))) + if (gp_init_delay > 0 && + !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD))) schedule_timeout_uninterruptible(gp_init_delay); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f9123a8..57bd333 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) rq_clock_skip_update(rq, true); } -static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); - -void register_task_migration_notifier(struct notifier_block *n) -{ - atomic_notifier_chain_register(&task_migration_notifier, n); -} - #ifdef CONFIG_SMP void set_task_cpu(struct task_struct *p, unsigned int new_cpu) { @@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) trace_sched_migrate_task(p, new_cpu); if (task_cpu(p) != new_cpu) { - struct task_migration_notifier tmn; - if (p->sched_class->migrate_task_rq) p->sched_class->migrate_task_rq(p, new_cpu); p->se.nr_migrations++; perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); - - tmn.task = p; - tmn.from_cpu = task_cpu(p); - tmn.to_cpu = new_cpu; - - atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn); } __set_task_cpu(p, new_cpu); @@ -3315,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p, /* Actually do priority change: must hold pi & rq lock. */ static void __setscheduler(struct rq *rq, struct task_struct *p, - const struct sched_attr *attr) + const struct sched_attr *attr, bool keep_boost) { __setscheduler_params(p, attr); /* - * If we get here, there was no pi waiters boosting the - * task. It is safe to use the normal prio. + * Keep a potential priority boosting if called from + * sched_setscheduler(). */ - p->prio = normal_prio(p); + if (keep_boost) + p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); + else + p->prio = normal_prio(p); if (dl_prio(p->prio)) p->sched_class = &dl_sched_class; @@ -3423,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p, int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : MAX_RT_PRIO - 1 - attr->sched_priority; int retval, oldprio, oldpolicy = -1, queued, running; - int policy = attr->sched_policy; + int new_effective_prio, policy = attr->sched_policy; unsigned long flags; const struct sched_class *prev_class; struct rq *rq; @@ -3605,15 +3593,14 @@ change: oldprio = p->prio; /* - * Special case for priority boosted tasks. - * - * If the new priority is lower or equal (user space view) - * than the current (boosted) priority, we just store the new + * Take priority boosted tasks into account. If the new + * effective priority is unchanged, we just store the new * normal parameters and do not touch the scheduler class and * the runqueue. This will be done when the task deboost * itself. 
*/ - if (rt_mutex_check_prio(p, newprio)) { + new_effective_prio = rt_mutex_get_effective_prio(p, newprio); + if (new_effective_prio == oldprio) { __setscheduler_params(p, attr); task_rq_unlock(rq, p, &flags); return 0; @@ -3627,7 +3614,7 @@ change: put_prev_task(rq, p); prev_class = p->sched_class; - __setscheduler(rq, p, attr); + __setscheduler(rq, p, attr, true); if (running) p->sched_class->set_curr_task(rq); @@ -7012,27 +6999,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, unsigned long flags; long cpu = (long)hcpu; struct dl_bw *dl_b; + bool overflow; + int cpus; - switch (action & ~CPU_TASKS_FROZEN) { + switch (action) { case CPU_DOWN_PREPARE: - /* explicitly allow suspend */ - if (!(action & CPU_TASKS_FROZEN)) { - bool overflow; - int cpus; - - rcu_read_lock_sched(); - dl_b = dl_bw_of(cpu); + rcu_read_lock_sched(); + dl_b = dl_bw_of(cpu); - raw_spin_lock_irqsave(&dl_b->lock, flags); - cpus = dl_bw_cpus(cpu); - overflow = __dl_overflow(dl_b, cpus, 0, 0); - raw_spin_unlock_irqrestore(&dl_b->lock, flags); + raw_spin_lock_irqsave(&dl_b->lock, flags); + cpus = dl_bw_cpus(cpu); + overflow = __dl_overflow(dl_b, cpus, 0, 0); + raw_spin_unlock_irqrestore(&dl_b->lock, flags); - rcu_read_unlock_sched(); + rcu_read_unlock_sched(); - if (overflow) - return notifier_from_errno(-EBUSY); - } + if (overflow) + return notifier_from_errno(-EBUSY); cpuset_update_active_cpus(false); break; case CPU_DOWN_PREPARE_FROZEN: @@ -7361,7 +7344,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p) queued = task_on_rq_queued(p); if (queued) dequeue_task(rq, p, 0); - __setscheduler(rq, p, &attr); + __setscheduler(rq, p, &attr, false); if (queued) { enqueue_task(rq, p, 0); resched_curr(rq); diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index deef1ca..fefcb1f 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -81,7 +81,6 @@ static void cpuidle_idle_call(void) struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); int next_state, entered_state; - unsigned int broadcast; bool reflect; /* @@ -150,17 +149,6 @@ static void cpuidle_idle_call(void) goto exit_idle; } - broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP; - - /* - * Tell the time framework to switch to a broadcast timer - * because our local timer will be shutdown. If a local timer - * is used from another cpu as a broadcast timer, this call may - * fail if it is not available - */ - if (broadcast && tick_broadcast_enter()) - goto use_default; - /* Take note of the planned idle state. */ idle_set_state(this_rq(), &drv->states[next_state]); @@ -174,8 +162,8 @@ static void cpuidle_idle_call(void) /* The cpu is no longer idle or about to enter idle. */ idle_set_state(this_rq(), NULL); - if (broadcast) - tick_broadcast_exit(); + if (entered_state == -EBUSY) + goto use_default; /* * Give the governor an opportunity to reflect on the outcome diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 11dc22a..637a094 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -117,11 +117,7 @@ static int __clockevents_set_state(struct clock_event_device *dev, /* Transition with new state-specific callbacks */ switch (state) { case CLOCK_EVT_STATE_DETACHED: - /* - * This is an internal state, which is guaranteed to go from - * SHUTDOWN to DETACHED. No driver interaction required. - */ - return 0; + /* The clockevent device is getting replaced. Shut it down. 
*/ case CLOCK_EVT_STATE_SHUTDOWN: return dev->set_state_shutdown(dev); diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 692bf71..25a086b 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -178,12 +178,13 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) EXPORT_SYMBOL(ftrace_print_hex_seq); const char * -ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len, +ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count, size_t el_size) { const char *ret = trace_seq_buffer_ptr(p); const char *prefix = ""; void *ptr = (void *)buf; + size_t buf_len = count * el_size; trace_seq_putc(p, '{'); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 2316f50..506edcc5 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -41,6 +41,8 @@ #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) +static DEFINE_MUTEX(watchdog_proc_mutex); + #ifdef CONFIG_HARDLOCKUP_DETECTOR static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; #else @@ -608,26 +610,36 @@ void watchdog_nmi_enable_all(void) { int cpu; - if (!watchdog_user_enabled) - return; + mutex_lock(&watchdog_proc_mutex); + + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) + goto unlock; get_online_cpus(); for_each_online_cpu(cpu) watchdog_nmi_enable(cpu); put_online_cpus(); + +unlock: + mutex_lock(&watchdog_proc_mutex); } void watchdog_nmi_disable_all(void) { int cpu; + mutex_lock(&watchdog_proc_mutex); + if (!watchdog_running) - return; + goto unlock; get_online_cpus(); for_each_online_cpu(cpu) watchdog_nmi_disable(cpu); put_online_cpus(); + +unlock: + mutex_unlock(&watchdog_proc_mutex); } #else static int watchdog_nmi_enable(unsigned int cpu) { return 0; } @@ -744,8 +756,6 @@ static int proc_watchdog_update(void) } -static DEFINE_MUTEX(watchdog_proc_mutex); - /* * common function for watchdog, nmi_watchdog and soft_watchdog parameter * diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1767057..ba2b0c8 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1281,6 +1281,7 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY int "How much to slow down RCU grace-period initialization" range 0 5 default 3 + depends on RCU_TORTURE_TEST_SLOW_INIT help This option specifies the number of jiffies to wait between each rcu_node structure initialization. diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 4fecaedc..777eda7 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -10,8 +10,11 @@ config KASAN help Enables kernel address sanitizer - runtime memory debugger, designed to find out-of-bounds accesses and use-after-free bugs. - This is strictly debugging feature. It consumes about 1/8 - of available memory and brings about ~x3 performance slowdown. + This is strictly a debugging feature and it requires a gcc version + of 4.9.2 or later. Detection of out of bounds accesses to stack or + global variables requires gcc 5.0 or later. + This feature consumes about 1/8 of available memory and brings about + ~x3 performance slowdown. For better error detection enable CONFIG_STACKTRACE, and add slub_debug=U to boot cmdline. @@ -40,6 +43,7 @@ config KASAN_INLINE memory accesses. This is faster than outline (in some workloads it gives about x2 boost over outline instrumentation), but make kernel's .text size much bigger. + This requires a gcc version of 5.0 or later. 
endchoice diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c deleted file mode 100644 index 3e3be40..0000000 --- a/lib/find_last_bit.c +++ /dev/null @@ -1,41 +0,0 @@ -/* find_last_bit.c: fallback find next bit implementation - * - * Copyright (C) 2008 IBM Corporation - * Written by Rusty Russell <rusty@rustcorp.com.au> - * (Inspired by David Howell's find_next_bit implementation) - * - * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease - * size and improve performance, 2015. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/bitops.h> -#include <linux/bitmap.h> -#include <linux/export.h> -#include <linux/kernel.h> - -#ifndef find_last_bit - -unsigned long find_last_bit(const unsigned long *addr, unsigned long size) -{ - if (size) { - unsigned long val = BITMAP_LAST_WORD_MASK(size); - unsigned long idx = (size-1) / BITS_PER_LONG; - - do { - val &= addr[idx]; - if (val) - return idx * BITS_PER_LONG + __fls(val); - - val = ~0ul; - } while (idx--); - } - return size; -} -EXPORT_SYMBOL(find_last_bit); - -#endif diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 4898442..b28df40 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -405,13 +405,18 @@ int rhashtable_insert_rehash(struct rhashtable *ht) if (rht_grow_above_75(ht, tbl)) size *= 2; - /* More than two rehashes (not resizes) detected. */ - else if (WARN_ON(old_tbl != tbl && old_tbl->size == size)) + /* Do not schedule more than one rehash */ + else if (old_tbl != tbl) return -EBUSY; new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); - if (new_tbl == NULL) + if (new_tbl == NULL) { + /* Schedule async resize/rehash to try allocation + * non-atomic context. + */ + schedule_work(&ht->run_work); return -ENOMEM; + } err = rhashtable_rehash_attach(ht, tbl, new_tbl); if (err) { diff --git a/lib/string.c b/lib/string.c index a579201..bb3d4b6 100644 --- a/lib/string.c +++ b/lib/string.c @@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset); void memzero_explicit(void *s, size_t count) { memset(s, 0, count); - barrier(); + barrier_data(s); } EXPORT_SYMBOL(memzero_explicit); diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index 329caf5..4ca5fe0 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c @@ -34,13 +34,13 @@ static int hwpoison_inject(void *data, u64 val) if (!hwpoison_filter_enable) goto inject; - if (!PageLRU(p) && !PageHuge(p)) - shake_page(p, 0); + if (!PageLRU(hpage) && !PageHuge(p)) + shake_page(hpage, 0); /* * This implies unable to support non-LRU pages. 
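The memzero_explicit() change above swaps barrier() for barrier_data(s), which is essentially an empty asm statement that takes the buffer pointer as an input operand together with a memory clobber; the compiler must then assume the zeroed bytes are observed and cannot dead-store-eliminate the memset of a buffer that is about to go out of scope. A user-space rendering of the same idea, assuming GCC/Clang inline asm, is below (illustrative, not kernel code).

#include <string.h>
#include <stddef.h>

static void zero_explicit(void *s, size_t count)
{
	memset(s, 0, count);
	/* The pointer is "used" by the asm, so the memset of a dying
	 * stack buffer cannot be optimized away. */
	__asm__ __volatile__("" : : "r" (s) : "memory");
}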
*/ - if (!PageLRU(p) && !PageHuge(p)) - return 0; + if (!PageLRU(hpage) && !PageHuge(p)) + goto put_out; /* * do a racy check with elevated page count, to make sure PG_hwpoison @@ -52,11 +52,14 @@ static int hwpoison_inject(void *data, u64 val) err = hwpoison_filter(hpage); unlock_page(hpage); if (err) - return 0; + goto put_out; inject: pr_info("Injecting memory failure at pfn %#lx\n", pfn); return memory_failure(pfn, 18, MF_COUNT_INCREASED); +put_out: + put_page(hpage); + return 0; } static int hwpoison_unpoison(void *data, u64 val) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 5405aff..f0fe4f2 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -115,7 +115,8 @@ #define BYTES_PER_POINTER sizeof(void *) /* GFP bitmask for kmemleak internal allocations */ -#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ +#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \ + __GFP_NOACCOUNT)) | \ __GFP_NORETRY | __GFP_NOMEMALLOC | \ __GFP_NOWARN) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index d9359b7..501820c 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1187,10 +1187,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) * The check (unnecessarily) ignores LRU pages being isolated and * walked by the page reclaim code, however that's not a big loss. */ - if (!PageHuge(p) && !PageTransTail(p)) { - if (!PageLRU(p)) - shake_page(p, 0); - if (!PageLRU(p)) { + if (!PageHuge(p)) { + if (!PageLRU(hpage)) + shake_page(hpage, 0); + if (!PageLRU(hpage)) { /* * shake_page could have turned it free. */ @@ -1777,12 +1777,12 @@ int soft_offline_page(struct page *page, int flags) } else if (ret == 0) { /* for free pages */ if (PageHuge(page)) { set_page_hwpoison_huge_page(hpage); - dequeue_hwpoisoned_huge_page(hpage); - atomic_long_add(1 << compound_order(hpage), + if (!dequeue_hwpoisoned_huge_page(hpage)) + atomic_long_add(1 << compound_order(hpage), &num_poisoned_pages); } else { - SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + if (!TestSetPageHWPoison(page)) + atomic_long_inc(&num_poisoned_pages); } } unset_migratetype_isolate(page, MIGRATE_MOVABLE); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index ede2629..7477432 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2518,7 +2518,7 @@ static void __init check_numabalancing_enable(void) if (numabalancing_override) set_numabalancing_state(numabalancing_override == 1); - if (nr_node_ids > 1 && !numabalancing_override) { + if (num_online_nodes() > 1 && !numabalancing_override) { pr_info("%s automatic NUMA balancing. 
" "Configure with numa_balancing= or the " "kernel.numa_balancing sysctl", diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5daf556..eb59f7e 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint, long x; x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, - limit - setpoint + 1); + (limit - setpoint) | 1); pos_ratio = x; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; @@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, * scale global setpoint to bdi's: * bdi_setpoint = setpoint * bdi_thresh / thresh */ - x = div_u64((u64)bdi_thresh << 16, thresh + 1); + x = div_u64((u64)bdi_thresh << 16, thresh | 1); bdi_setpoint = setpoint * (u64)x >> 16; /* * Use span=(8*write_bw) in single bdi case as indicated by @@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, if (bdi_dirty < x_intercept - span / 4) { pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), - x_intercept - bdi_setpoint + 1); + (x_intercept - bdi_setpoint) | 1); } else pos_ratio /= 4; diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 755a42c..303c908 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -101,7 +101,8 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) buddy_idx = __find_buddy_index(page_idx, order); buddy = page + (buddy_idx - page_idx); - if (!is_migrate_isolate_page(buddy)) { + if (pfn_valid_within(page_to_pfn(buddy)) && + !is_migrate_isolate_page(buddy)) { __isolate_free_page(page, order); kernel_map_pages(page, (1 << order), 1); set_page_refcounted(page); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 476709b..4663c3d 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1557,7 +1557,8 @@ static int hci_dev_do_close(struct hci_dev *hdev) { BT_DBG("%s %p", hdev->name, hdev); - if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && + test_bit(HCI_UP, &hdev->flags)) { /* Execute vendor specific shutdown routine */ if (hdev->shutdown) hdev->shutdown(hdev); diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 4096089..e29ad70 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -170,7 +170,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, struct br_port_msg *bpm; struct nlattr *nest, *nest2; - nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI); + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0); if (!nlh) return -EMSGSIZE; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 0e4ddb8..4b5c236 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -394,7 +394,7 @@ errout: * Dump information about all ports, in response to GETLINK */ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u32 filter_mask) + struct net_device *dev, u32 filter_mask, int nlflags) { struct net_bridge_port *port = br_port_get_rtnl(dev); @@ -402,7 +402,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) return 0; - return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI, + return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags, filter_mask, dev); } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6ca0251..3362c29 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ 
-828,7 +828,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port); int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, - u32 filter_mask); + u32 filter_mask, int nlflags); #ifdef CONFIG_SYSFS /* br_sysfs_if.c */ diff --git a/net/core/dev.c b/net/core/dev.c index 1796cef5..2c1c67f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3079,7 +3079,7 @@ static struct rps_dev_flow * set_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow *rflow, u16 next_cpu) { - if (next_cpu != RPS_NO_CPU) { + if (next_cpu < nr_cpu_ids) { #ifdef CONFIG_RFS_ACCEL struct netdev_rx_queue *rxqueue; struct rps_dev_flow_table *flow_table; @@ -3184,7 +3184,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, * If the desired CPU (where last recvmsg was done) is * different from current CPU (one in the rx-queue flow * table entry), switch if one of the following holds: - * - Current CPU is unset (equal to RPS_NO_CPU). + * - Current CPU is unset (>= nr_cpu_ids). * - Current CPU is offline. * - The current CPU's queue tail has advanced beyond the * last packet that was enqueued using this table entry. @@ -3192,14 +3192,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, * have been dequeued, thus preserving in order delivery. */ if (unlikely(tcpu != next_cpu) && - (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || + (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - rflow->last_qtail)) >= 0)) { tcpu = next_cpu; rflow = set_rps_cpu(dev, skb, rflow, next_cpu); } - if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { + if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { *rflowp = rflow; cpu = tcpu; goto done; @@ -3240,14 +3240,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, struct rps_dev_flow_table *flow_table; struct rps_dev_flow *rflow; bool expire = true; - int cpu; + unsigned int cpu; rcu_read_lock(); flow_table = rcu_dereference(rxqueue->rps_flow_table); if (flow_table && flow_id <= flow_table->mask) { rflow = &flow_table->flows[flow_id]; cpu = ACCESS_ONCE(rflow->cpu); - if (rflow->filter == filter_id && cpu != RPS_NO_CPU && + if (rflow->filter == filter_id && cpu < nr_cpu_ids && ((int)(per_cpu(softnet_data, cpu).input_queue_head - rflow->last_qtail) < (int)(10 * flow_table->mask))) @@ -5209,7 +5209,7 @@ static int __netdev_upper_dev_link(struct net_device *dev, if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper)) return -EBUSY; - if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper)) + if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper)) return -EEXIST; if (master && netdev_master_upper_dev_get(dev)) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 78fc04a..572af00 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -601,7 +601,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh) } err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, - RTM_GETNSID, net, peer, -1); + RTM_NEWNSID, net, peer, -1); if (err < 0) goto err_out; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 358d52a..666e092 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2854,7 +2854,7 @@ static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, 
u32 seq, struct net_device *dev, u16 mode, - u32 flags, u32 mask) + u32 flags, u32 mask, int nlflags) { struct nlmsghdr *nlh; struct ifinfomsg *ifm; @@ -2863,7 +2863,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; struct net_device *br_dev = netdev_master_upper_dev_get(dev); - nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI); + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); if (nlh == NULL) return -EMSGSIZE; @@ -2969,7 +2969,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { if (idx >= cb->args[0] && br_dev->netdev_ops->ndo_bridge_getlink( - skb, portid, seq, dev, filter_mask) < 0) + skb, portid, seq, dev, filter_mask, + NLM_F_MULTI) < 0) break; idx++; } @@ -2977,7 +2978,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) if (ops->ndo_bridge_getlink) { if (idx >= cb->args[0] && ops->ndo_bridge_getlink(skb, portid, seq, dev, - filter_mask) < 0) + filter_mask, + NLM_F_MULTI) < 0) break; idx++; } @@ -3018,7 +3020,7 @@ static int rtnl_bridge_notify(struct net_device *dev) goto errout; } - err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0); + err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); if (err < 0) goto errout; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index d1967da..3cfff2a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -280,13 +280,14 @@ nodata: EXPORT_SYMBOL(__alloc_skb); /** - * build_skb - build a network buffer + * __build_skb - build a network buffer * @data: data buffer provided by caller - * @frag_size: size of fragment, or 0 if head was kmalloced + * @frag_size: size of data, or 0 if head was kmalloced * * Allocate a new &sk_buff. Caller provides space holding head and * skb_shared_info. @data must have been allocated by kmalloc() only if - * @frag_size is 0, otherwise data should come from the page allocator. + * @frag_size is 0, otherwise data should come from the page allocator + * or vmalloc() * The return is the new skb buffer. * On a failure the return is %NULL, and @data is not freed. * Notes : @@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb); * before giving packet to stack. * RX rings only contains data buffers, not full skbs. 
*/ -struct sk_buff *build_skb(void *data, unsigned int frag_size) +struct sk_buff *__build_skb(void *data, unsigned int frag_size) { struct skb_shared_info *shinfo; struct sk_buff *skb; @@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) memset(skb, 0, offsetof(struct sk_buff, tail)); skb->truesize = SKB_TRUESIZE(size); - skb->head_frag = frag_size != 0; atomic_set(&skb->users, 1); skb->head = data; skb->data = data; @@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) return skb; } + +/* build_skb() is wrapper over __build_skb(), that specifically + * takes care of skb->head and skb->pfmemalloc + * This means that if @frag_size is not zero, then @data must be backed + * by a page fragment, not kmalloc() or vmalloc() + */ +struct sk_buff *build_skb(void *data, unsigned int frag_size) +{ + struct sk_buff *skb = __build_skb(data, frag_size); + + if (skb && frag_size) { + skb->head_frag = 1; + if (virt_to_head_page(data)->pfmemalloc) + skb->pfmemalloc = 1; + } + return skb; +} EXPORT_SYMBOL(build_skb); struct netdev_alloc_cache { @@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc, gfp_t gfp = gfp_mask; if (order) { - gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY; + gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | + __GFP_NOMEMALLOC; page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); nc->frag.size = PAGE_SIZE << (page ? order : 0); } diff --git a/net/core/sock.c b/net/core/sock.c index e891bcf..292f422 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1474,8 +1474,8 @@ void sk_release_kernel(struct sock *sk) return; sock_hold(sk); - sock_net_set(sk, get_net(&init_net)); sock_release(sk->sk_socket); + sock_net_set(sk, get_net(&init_net)); sock_put(sk); } EXPORT_SYMBOL(sk_release_kernel); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 2b4f21d..ccf4c56 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -453,7 +453,8 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) iph->saddr, iph->daddr); if (req) { nsk = dccp_check_req(sk, skb, req); - reqsk_put(req); + if (!nsk) + reqsk_put(req); return nsk; } nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo, diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 9d05510..5165571 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -301,7 +301,8 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) &iph->daddr, inet6_iif(skb)); if (req) { nsk = dccp_check_req(sk, skb, req); - reqsk_put(req); + if (!nsk) + reqsk_put(req); return nsk; } nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 5f56666..30addee 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -186,8 +186,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, if (child == NULL) goto listen_overflow; - inet_csk_reqsk_queue_unlink(sk, req); - inet_csk_reqsk_queue_removed(sk, req); + inet_csk_reqsk_queue_drop(sk, req); inet_csk_reqsk_queue_add(sk, req, child); out: return child; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 079a224..e6f6cc3 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -633,7 +633,7 @@ static int dsa_of_probe(struct device *dev) if (cd->sw_addr > PHY_MAX_ADDR) continue; - if (!of_property_read_u32(np, "eeprom-length", &eeprom_len)) + if (!of_property_read_u32(child, "eeprom-length", &eeprom_len)) cd->eeprom_len = eeprom_len; for_each_available_child_of_node(child, port) { diff --git 
a/net/ieee802154/Makefile b/net/ieee802154/Makefile index 05dab29..4adfd4d 100644 --- a/net/ieee802154/Makefile +++ b/net/ieee802154/Makefile @@ -3,7 +3,9 @@ obj-$(CONFIG_IEEE802154_SOCKET) += ieee802154_socket.o obj-y += 6lowpan/ ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \ - header_ops.o sysfs.o nl802154.o + header_ops.o sysfs.o nl802154.o trace.o ieee802154_socket-y := socket.o +CFLAGS_trace.o := -I$(src) + ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 1b9d25f6..346c666 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -175,6 +175,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) int rc = -ENOBUFS; struct net_device *dev; int type = __IEEE802154_DEV_INVALID; + unsigned char name_assign_type; pr_debug("%s\n", __func__); @@ -190,8 +191,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0') return -EINVAL; /* phy name should be null-terminated */ + name_assign_type = NET_NAME_USER; } else { devname = "wpan%d"; + name_assign_type = NET_NAME_ENUM; } if (strlen(devname) >= IFNAMSIZ) @@ -221,7 +224,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) } dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname, - type); + name_assign_type, type); if (IS_ERR(dev)) { rc = PTR_ERR(dev); goto nla_put_failure; diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index a4daf91..f3c12f6 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -589,7 +589,7 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info) return rdev_add_virtual_intf(rdev, nla_data(info->attrs[NL802154_ATTR_IFNAME]), - type, extended_addr); + NET_NAME_USER, type, extended_addr); } static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info) diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h index 7c46732..7b5a9dd 100644 --- a/net/ieee802154/rdev-ops.h +++ b/net/ieee802154/rdev-ops.h @@ -4,13 +4,16 @@ #include <net/cfg802154.h> #include "core.h" +#include "trace.h" static inline struct net_device * rdev_add_virtual_intf_deprecated(struct cfg802154_registered_device *rdev, - const char *name, int type) + const char *name, + unsigned char name_assign_type, + int type) { return rdev->ops->add_virtual_intf_deprecated(&rdev->wpan_phy, name, - type); + name_assign_type, type); } static inline void @@ -22,75 +25,131 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev, static inline int rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name, + unsigned char name_assign_type, enum nl802154_iftype type, __le64 extended_addr) { - return rdev->ops->add_virtual_intf(&rdev->wpan_phy, name, type, + int ret; + + trace_802154_rdev_add_virtual_intf(&rdev->wpan_phy, name, type, extended_addr); + ret = rdev->ops->add_virtual_intf(&rdev->wpan_phy, name, + name_assign_type, type, + extended_addr); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int rdev_del_virtual_intf(struct cfg802154_registered_device *rdev, struct wpan_dev *wpan_dev) { - return rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev); + int ret; + + trace_802154_rdev_del_virtual_intf(&rdev->wpan_phy, wpan_dev); + ret = rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int 
rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel) { - return rdev->ops->set_channel(&rdev->wpan_phy, page, channel); + int ret; + + trace_802154_rdev_set_channel(&rdev->wpan_phy, page, channel); + ret = rdev->ops->set_channel(&rdev->wpan_phy, page, channel); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int rdev_set_cca_mode(struct cfg802154_registered_device *rdev, const struct wpan_phy_cca *cca) { - return rdev->ops->set_cca_mode(&rdev->wpan_phy, cca); + int ret; + + trace_802154_rdev_set_cca_mode(&rdev->wpan_phy, cca); + ret = rdev->ops->set_cca_mode(&rdev->wpan_phy, cca); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int rdev_set_pan_id(struct cfg802154_registered_device *rdev, struct wpan_dev *wpan_dev, __le16 pan_id) { - return rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id); + int ret; + + trace_802154_rdev_set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id); + ret = rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int rdev_set_short_addr(struct cfg802154_registered_device *rdev, struct wpan_dev *wpan_dev, __le16 short_addr) { - return rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr); + int ret; + + trace_802154_rdev_set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr); + ret = rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int rdev_set_backoff_exponent(struct cfg802154_registered_device *rdev, struct wpan_dev *wpan_dev, u8 min_be, u8 max_be) { - return rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev, + int ret; + + trace_802154_rdev_set_backoff_exponent(&rdev->wpan_phy, wpan_dev, min_be, max_be); + ret = rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev, + min_be, max_be); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int rdev_set_max_csma_backoffs(struct cfg802154_registered_device *rdev, struct wpan_dev *wpan_dev, u8 max_csma_backoffs) { - return rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev, - max_csma_backoffs); + int ret; + + trace_802154_rdev_set_csma_backoffs(&rdev->wpan_phy, wpan_dev, + max_csma_backoffs); + ret = rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev, + max_csma_backoffs); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int rdev_set_max_frame_retries(struct cfg802154_registered_device *rdev, struct wpan_dev *wpan_dev, s8 max_frame_retries) { - return rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev, + int ret; + + trace_802154_rdev_set_max_frame_retries(&rdev->wpan_phy, wpan_dev, max_frame_retries); + ret = rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev, + max_frame_retries); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } static inline int rdev_set_lbt_mode(struct cfg802154_registered_device *rdev, struct wpan_dev *wpan_dev, bool mode) { - return rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode); + int ret; + + trace_802154_rdev_set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode); + ret = rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; } #endif /* __CFG802154_RDEV_OPS */ diff --git a/net/ieee802154/trace.c b/net/ieee802154/trace.c new file mode 100644 index 0000000..95f997f --- /dev/null +++ 
b/net/ieee802154/trace.c @@ -0,0 +1,7 @@ +#include <linux/module.h> + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "trace.h" + +#endif diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h new file mode 100644 index 0000000..5ac25eb --- /dev/null +++ b/net/ieee802154/trace.h @@ -0,0 +1,247 @@ +/* Based on net/wireless/tracing.h */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cfg802154 + +#if !defined(__RDEV_CFG802154_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __RDEV_CFG802154_OPS_TRACE + +#include <linux/tracepoint.h> + +#include <net/cfg802154.h> + +#define MAXNAME 32 +#define WPAN_PHY_ENTRY __array(char, wpan_phy_name, MAXNAME) +#define WPAN_PHY_ASSIGN strlcpy(__entry->wpan_phy_name, \ + wpan_phy_name(wpan_phy), \ + MAXNAME) +#define WPAN_PHY_PR_FMT "%s" +#define WPAN_PHY_PR_ARG __entry->wpan_phy_name + +#define WPAN_DEV_ENTRY __field(u32, identifier) +#define WPAN_DEV_ASSIGN (__entry->identifier) = (!IS_ERR_OR_NULL(wpan_dev) \ + ? wpan_dev->identifier : 0) +#define WPAN_DEV_PR_FMT "wpan_dev(%u)" +#define WPAN_DEV_PR_ARG (__entry->identifier) + +#define WPAN_CCA_ENTRY __field(enum nl802154_cca_modes, cca_mode) \ + __field(enum nl802154_cca_opts, cca_opt) +#define WPAN_CCA_ASSIGN \ + do { \ + (__entry->cca_mode) = cca->mode; \ + (__entry->cca_opt) = cca->opt; \ + } while (0) +#define WPAN_CCA_PR_FMT "cca_mode: %d, cca_opt: %d" +#define WPAN_CCA_PR_ARG __entry->cca_mode, __entry->cca_opt + +#define BOOL_TO_STR(bo) (bo) ? "true" : "false" + +/************************************************************* + * rdev->ops traces * + *************************************************************/ + +TRACE_EVENT(802154_rdev_add_virtual_intf, + TP_PROTO(struct wpan_phy *wpan_phy, char *name, + enum nl802154_iftype type, __le64 extended_addr), + TP_ARGS(wpan_phy, name, type, extended_addr), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + __string(vir_intf_name, name ? name : "<noname>") + __field(enum nl802154_iftype, type) + __field(__le64, extended_addr) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + __assign_str(vir_intf_name, name ? 
name : "<noname>"); + __entry->type = type; + __entry->extended_addr = extended_addr; + ), + TP_printk(WPAN_PHY_PR_FMT ", virtual intf name: %s, type: %d, ea %llx", + WPAN_PHY_PR_ARG, __get_str(vir_intf_name), __entry->type, + __le64_to_cpu(__entry->extended_addr)) +); + +TRACE_EVENT(802154_rdev_del_virtual_intf, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev), + TP_ARGS(wpan_phy, wpan_dev), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + WPAN_DEV_ENTRY + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + WPAN_DEV_ASSIGN; + ), + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT, WPAN_PHY_PR_ARG, + WPAN_DEV_PR_ARG) +); + +TRACE_EVENT(802154_rdev_set_channel, + TP_PROTO(struct wpan_phy *wpan_phy, u8 page, u8 channel), + TP_ARGS(wpan_phy, page, channel), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + __field(u8, page) + __field(u8, channel) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + __entry->page = page; + __entry->channel = channel; + ), + TP_printk(WPAN_PHY_PR_FMT ", page: %d, channel: %d", WPAN_PHY_PR_ARG, + __entry->page, __entry->channel) +); + +TRACE_EVENT(802154_rdev_set_cca_mode, + TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca), + TP_ARGS(wpan_phy, cca), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + WPAN_CCA_ENTRY + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + WPAN_CCA_ASSIGN; + ), + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_CCA_PR_FMT, WPAN_PHY_PR_ARG, + WPAN_CCA_PR_ARG) +); + +DECLARE_EVENT_CLASS(802154_le16_template, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + __le16 le16arg), + TP_ARGS(wpan_phy, wpan_dev, le16arg), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + WPAN_DEV_ENTRY + __field(__le16, le16arg) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + WPAN_DEV_ASSIGN; + __entry->le16arg = le16arg; + ), + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", pan id: 0x%04x", + WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG, + __le16_to_cpu(__entry->le16arg)) +); + +DEFINE_EVENT(802154_le16_template, 802154_rdev_set_pan_id, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + __le16 le16arg), + TP_ARGS(wpan_phy, wpan_dev, le16arg) +); + +DEFINE_EVENT_PRINT(802154_le16_template, 802154_rdev_set_short_addr, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + __le16 le16arg), + TP_ARGS(wpan_phy, wpan_dev, le16arg), + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", sa: 0x%04x", + WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG, + __le16_to_cpu(__entry->le16arg)) +); + +TRACE_EVENT(802154_rdev_set_backoff_exponent, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + u8 min_be, u8 max_be), + TP_ARGS(wpan_phy, wpan_dev, min_be, max_be), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + WPAN_DEV_ENTRY + __field(u8, min_be) + __field(u8, max_be) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + WPAN_DEV_ASSIGN; + __entry->min_be = min_be; + __entry->max_be = max_be; + ), + + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT + ", min be: %d, max_be: %d", WPAN_PHY_PR_ARG, + WPAN_DEV_PR_ARG, __entry->min_be, __entry->max_be) +); + +TRACE_EVENT(802154_rdev_set_csma_backoffs, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + u8 max_csma_backoffs), + TP_ARGS(wpan_phy, wpan_dev, max_csma_backoffs), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + WPAN_DEV_ENTRY + __field(u8, max_csma_backoffs) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + WPAN_DEV_ASSIGN; + __entry->max_csma_backoffs = max_csma_backoffs; + ), + + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT + ", max csma backoffs: %d", WPAN_PHY_PR_ARG, + WPAN_DEV_PR_ARG, __entry->max_csma_backoffs) +); + 
+TRACE_EVENT(802154_rdev_set_max_frame_retries, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + s8 max_frame_retries), + TP_ARGS(wpan_phy, wpan_dev, max_frame_retries), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + WPAN_DEV_ENTRY + __field(s8, max_frame_retries) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + WPAN_DEV_ASSIGN; + __entry->max_frame_retries = max_frame_retries; + ), + + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT + ", max frame retries: %d", WPAN_PHY_PR_ARG, + WPAN_DEV_PR_ARG, __entry->max_frame_retries) +); + +TRACE_EVENT(802154_rdev_set_lbt_mode, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + bool mode), + TP_ARGS(wpan_phy, wpan_dev, mode), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + WPAN_DEV_ENTRY + __field(bool, mode) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + WPAN_DEV_ASSIGN; + __entry->mode = mode; + ), + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT + ", lbt mode: %s", WPAN_PHY_PR_ARG, + WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode)) +); + +TRACE_EVENT(802154_rdev_return_int, + TP_PROTO(struct wpan_phy *wpan_phy, int ret), + TP_ARGS(wpan_phy, ret), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + __field(int, ret) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + __entry->ret = ret; + ), + TP_printk(WPAN_PHY_PR_FMT ", returned: %d", WPAN_PHY_PR_ARG, + __entry->ret) +); + +#endif /* !__RDEV_CFG802154_OPS_TRACE || TRACE_HEADER_MULTI_READ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace +#include <trace/define_trace.h> diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 5c3dd62..8976ca4 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -564,6 +564,40 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) } EXPORT_SYMBOL(inet_rtx_syn_ack); +/* return true if req was found in the syn_table[] */ +static bool reqsk_queue_unlink(struct request_sock_queue *queue, + struct request_sock *req) +{ + struct listen_sock *lopt = queue->listen_opt; + struct request_sock **prev; + bool found = false; + + spin_lock(&queue->syn_wait_lock); + + for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; + prev = &(*prev)->dl_next) { + if (*prev == req) { + *prev = req->dl_next; + found = true; + break; + } + } + + spin_unlock(&queue->syn_wait_lock); + if (del_timer(&req->rsk_timer)) + reqsk_put(req); + return found; +} + +void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) +{ + if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) { + reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); + reqsk_put(req); + } +} +EXPORT_SYMBOL(inet_csk_reqsk_queue_drop); + static void reqsk_timer_handler(unsigned long data) { struct request_sock *req = (struct request_sock *)data; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index bb77ebd..4d32262 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -224,14 +224,16 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, handler->idiag_get_info(sk, r, info); if (sk->sk_state < TCP_TIME_WAIT) { - int err = 0; + union tcp_cc_info info; + size_t sz = 0; + int attr; rcu_read_lock(); ca_ops = READ_ONCE(icsk->icsk_ca_ops); if (ca_ops && ca_ops->get_info) - err = ca_ops->get_info(sk, ext, skb); + sz = ca_ops->get_info(sk, ext, &attr, &info); rcu_read_unlock(); - if (err < 0) + if (sz && nla_put(skb, attr, sz, &info) < 0) goto errout; } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index a93f260..05ff44b 100644 --- 
a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk) if (sk_hashed(sk)) { write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); + sk_nulls_node_init(&sk->sk_nulls_node); sock_put(sk); isk->inet_num = 0; isk->inet_sport = 0; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a78540f..bff62fc 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -962,10 +962,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) if (dst_metric_locked(dst, RTAX_MTU)) return; - if (dst->dev->mtu < mtu) - return; - - if (rt->rt_pmtu && rt->rt_pmtu < mtu) + if (ipv4_mtu(dst) < mtu) return; if (mtu < ip_rt_min_pmtu) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8c5cd9e..46efa03 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -252,6 +252,7 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/poll.h> +#include <linux/inet_diag.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/skbuff.h> @@ -2592,7 +2593,7 @@ EXPORT_SYMBOL(compat_tcp_setsockopt); #endif /* Return information about state of tcp endpoint in API format. */ -void tcp_get_info(const struct sock *sk, struct tcp_info *info) +void tcp_get_info(struct sock *sk, struct tcp_info *info) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -2663,6 +2664,11 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info) rate = READ_ONCE(sk->sk_max_pacing_rate); info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL; + + spin_lock_bh(&sk->sk_lock.slock); + info->tcpi_bytes_acked = tp->bytes_acked; + info->tcpi_bytes_received = tp->bytes_received; + spin_unlock_bh(&sk->sk_lock.slock); } EXPORT_SYMBOL_GPL(tcp_get_info); @@ -2734,6 +2740,26 @@ static int do_tcp_getsockopt(struct sock *sk, int level, return -EFAULT; return 0; } + case TCP_CC_INFO: { + const struct tcp_congestion_ops *ca_ops; + union tcp_cc_info info; + size_t sz = 0; + int attr; + + if (get_user(len, optlen)) + return -EFAULT; + + ca_ops = icsk->icsk_ca_ops; + if (ca_ops && ca_ops->get_info) + sz = ca_ops->get_info(sk, ~0U, &attr, &info); + + len = min_t(unsigned int, len, sz); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &info, len)) + return -EFAULT; + return 0; + } case TCP_QUICKACK: val = !icsk->icsk_ack.pingpong; break; diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 4376016..4c41c12 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -277,7 +277,8 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) } } -static int dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) +static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, + union tcp_cc_info *info) { const struct dctcp *ca = inet_csk_ca(sk); @@ -286,18 +287,17 @@ static int dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) */ if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) || ext & (1 << (INET_DIAG_VEGASINFO - 1))) { - struct tcp_dctcp_info info; - - memset(&info, 0, sizeof(info)); + memset(info, 0, sizeof(struct tcp_dctcp_info)); if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) { - info.dctcp_enabled = 1; - info.dctcp_ce_state = (u16) ca->ce_state; - info.dctcp_alpha = ca->dctcp_alpha; - info.dctcp_ab_ecn = ca->acked_bytes_ecn; - info.dctcp_ab_tot = ca->acked_bytes_total; + info->dctcp.dctcp_enabled = 1; + info->dctcp.dctcp_ce_state = (u16) ca->ce_state; + info->dctcp.dctcp_alpha = ca->dctcp_alpha; + info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn; + 
info->dctcp.dctcp_ab_tot = ca->acked_bytes_total; } - return nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info); + *attr = INET_DIAG_DCTCPINFO; + return sizeof(*info); } return 0; } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index e3d87ac..3c673d5 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -206,6 +206,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, skb_set_owner_r(skb2, child); __skb_queue_tail(&child->sk_receive_queue, skb2); tp->syn_data_acked = 1; + tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1; } else { end_seq = TCP_SKB_CB(skb)->seq + 1; } diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 67476f0..f71002e 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -300,24 +300,25 @@ static u32 tcp_illinois_ssthresh(struct sock *sk) } /* Extract info for Tcp socket info provided via netlink. */ -static int tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb) +static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr, + union tcp_cc_info *info) { const struct illinois *ca = inet_csk_ca(sk); if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { - struct tcpvegas_info info = { - .tcpv_enabled = 1, - .tcpv_rttcnt = ca->cnt_rtt, - .tcpv_minrtt = ca->base_rtt, - }; + info->vegas.tcpv_enabled = 1; + info->vegas.tcpv_rttcnt = ca->cnt_rtt; + info->vegas.tcpv_minrtt = ca->base_rtt; + info->vegas.tcpv_rtt = 0; - if (info.tcpv_rttcnt > 0) { + if (info->vegas.tcpv_rttcnt > 0) { u64 t = ca->sum_rtt; - do_div(t, info.tcpv_rttcnt); - info.tcpv_rtt = t; + do_div(t, info->vegas.tcpv_rttcnt); + info->vegas.tcpv_rtt = t; } - return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); + *attr = INET_DIAG_VEGASINFO; + return sizeof(struct tcpvegas_info); } return 0; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3a4d9b34..bc790ea 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1820,14 +1820,12 @@ advance_sp: for (j = 0; j < used_sacks; j++) tp->recv_sack_cache[i++] = sp[j]; - tcp_mark_lost_retrans(sk); - - tcp_verify_left_out(tp); - if ((state.reord < tp->fackets_out) && ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); + tcp_mark_lost_retrans(sk); + tcp_verify_left_out(tp); out: #if FASTRETRANS_DEBUG > 0 @@ -3280,6 +3278,24 @@ static inline bool tcp_may_update_window(const struct tcp_sock *tp, (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); } +/* If we update tp->snd_una, also update tp->bytes_acked */ +static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) +{ + u32 delta = ack - tp->snd_una; + + tp->bytes_acked += delta; + tp->snd_una = ack; +} + +/* If we update tp->rcv_nxt, also update tp->bytes_received */ +static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) +{ + u32 delta = seq - tp->rcv_nxt; + + tp->bytes_received += delta; + tp->rcv_nxt = seq; +} + /* Update our send window. * * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 @@ -3315,7 +3331,7 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 } } - tp->snd_una = ack; + tcp_snd_una_update(tp, ack); return flag; } @@ -3497,7 +3513,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) * Note, we use the fact that SND.UNA>=SND.WL2. 
*/ tcp_update_wl(tp, ack_seq); - tp->snd_una = ack; + tcp_snd_una_update(tp, ack); flag |= FLAG_WIN_UPDATE; tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); @@ -4236,7 +4252,7 @@ static void tcp_ofo_queue(struct sock *sk) tail = skb_peek_tail(&sk->sk_receive_queue); eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); - tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (!eaten) __skb_queue_tail(&sk->sk_receive_queue, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) @@ -4404,7 +4420,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int __skb_pull(skb, hdrlen); eaten = (tail && tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; - tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); if (!eaten) { __skb_queue_tail(&sk->sk_receive_queue, skb); skb_set_owner_r(skb, sk); @@ -4497,7 +4513,7 @@ queue_and_out: eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); } - tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (skb->len) tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) @@ -5245,7 +5261,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_rcv_rtt_measure_ts(sk, skb); __skb_pull(skb, tcp_header_len); - tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; + tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); eaten = 1; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3571f2b..fc1c658 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1348,7 +1348,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); if (req) { nsk = tcp_check_req(sk, skb, req, false); - reqsk_put(req); + if (!nsk) + reqsk_put(req); return nsk; } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 63d6311..e5d7649 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -755,10 +755,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (!child) goto listen_overflow; - inet_csk_reqsk_queue_unlink(sk, req); - inet_csk_reqsk_queue_removed(sk, req); - + inet_csk_reqsk_queue_drop(sk, req); inet_csk_reqsk_queue_add(sk, req, child); + /* Warning: caller must not call reqsk_put(req); + * child stole last reference on it. + */ return child; listen_overflow: diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8c8d7e0..a369e8a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2812,39 +2812,65 @@ begin_fwd: } } -/* Send a fin. The caller locks the socket for us. This cannot be - * allowed to fail queueing a FIN frame under any circumstances. +/* We allow to exceed memory limits for FIN packets to expedite + * connection tear down and (memory) recovery. + * Otherwise tcp_send_fin() could be tempted to either delay FIN + * or even be forced to close flow without any FIN. + */ +static void sk_forced_wmem_schedule(struct sock *sk, int size) +{ + int amt, status; + + if (size <= sk->sk_forward_alloc) + return; + amt = sk_mem_pages(size); + sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; + sk_memory_allocated_add(sk, amt, &status); +} + +/* Send a FIN. The caller locks the socket for us. + * We should try to send a FIN packet really hard, but eventually give up. 
*/ void tcp_send_fin(struct sock *sk) { + struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb = tcp_write_queue_tail(sk); - int mss_now; - /* Optimization, tack on the FIN if we have a queue of - * unsent frames. But be careful about outgoing SACKS - * and IP options. + /* Optimization, tack on the FIN if we have one skb in write queue and + * this skb was not yet sent, or we are under memory pressure. + * Note: in the latter case, FIN packet will be sent after a timeout, + * as TCP stack thinks it has already been transmitted. */ - mss_now = tcp_current_mss(sk); - - if (tcp_send_head(sk)) { - TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; - TCP_SKB_CB(skb)->end_seq++; + if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) { +coalesce: + TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; + TCP_SKB_CB(tskb)->end_seq++; tp->write_seq++; + if (!tcp_send_head(sk)) { + /* This means tskb was already sent. + * Pretend we included the FIN on previous transmit. + * We need to set tp->snd_nxt to the value it would have + * if FIN had been sent. This is because retransmit path + * does not change tp->snd_nxt. + */ + tp->snd_nxt++; + return; + } } else { - /* Socket is locked, keep trying until memory is available. */ - for (;;) { - skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); - if (skb) - break; - yield(); + skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); + if (unlikely(!skb)) { + if (tskb) + goto coalesce; + return; } + skb_reserve(skb, MAX_TCP_HEADER); + sk_forced_wmem_schedule(sk, skb->truesize); /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ tcp_init_nondata_skb(skb, tp->write_seq, TCPHDR_ACK | TCPHDR_FIN); tcp_queue_skb(sk, skb); } - __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); + __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); } /* We get here when a process closes a file descriptor (either due to diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index c71a1b8..a6cea1d 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -286,18 +286,19 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) } /* Extract info for Tcp socket info provided via netlink. 
*/ -int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) +size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr, + union tcp_cc_info *info) { const struct vegas *ca = inet_csk_ca(sk); + if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { - struct tcpvegas_info info = { - .tcpv_enabled = ca->doing_vegas_now, - .tcpv_rttcnt = ca->cntRTT, - .tcpv_rtt = ca->baseRTT, - .tcpv_minrtt = ca->minRTT, - }; - - return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); + info->vegas.tcpv_enabled = ca->doing_vegas_now, + info->vegas.tcpv_rttcnt = ca->cntRTT, + info->vegas.tcpv_rtt = ca->baseRTT, + info->vegas.tcpv_minrtt = ca->minRTT, + + *attr = INET_DIAG_VEGASINFO; + return sizeof(struct tcpvegas_info); } return 0; } diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h index e8a6b33..ef9da53 100644 --- a/net/ipv4/tcp_vegas.h +++ b/net/ipv4/tcp_vegas.h @@ -19,6 +19,7 @@ void tcp_vegas_init(struct sock *sk); void tcp_vegas_state(struct sock *sk, u8 ca_state); void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us); void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event); -int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb); +size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr, + union tcp_cc_info *info); #endif /* __TCP_VEGAS_H */ diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index b3c57cc..c10732e 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -256,18 +256,19 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) } /* Extract info for Tcp socket info provided via netlink. */ -static int tcp_westwood_info(struct sock *sk, u32 ext, struct sk_buff *skb) +static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr, + union tcp_cc_info *info) { const struct westwood *ca = inet_csk_ca(sk); if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { - struct tcpvegas_info info = { - .tcpv_enabled = 1, - .tcpv_rtt = jiffies_to_usecs(ca->rtt), - .tcpv_minrtt = jiffies_to_usecs(ca->rtt_min), - }; + info->vegas.tcpv_enabled = 1; + info->vegas.tcpv_rttcnt = 0; + info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt), + info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min), - return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); + *attr = INET_DIAG_VEGASINFO; + return sizeof(struct tcpvegas_info); } return 0; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index b5e6cc1..a38d3ac 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1246,7 +1246,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev) static int ip6gre_tunnel_init(struct net_device *dev) { struct ip6_tnl *tunnel; - int i; tunnel = netdev_priv(dev); @@ -1260,16 +1259,10 @@ static int ip6gre_tunnel_init(struct net_device *dev) if (ipv6_addr_any(&tunnel->parms.raddr)) dev->header_ops = &ip6gre_header_ops; - dev->tstats = alloc_percpu(struct pcpu_sw_netstats); + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; - for_each_possible_cpu(i) { - struct pcpu_sw_netstats *ip6gre_tunnel_stats; - ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i); - u64_stats_init(&ip6gre_tunnel_stats->syncp); - } - return 0; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 7fde1f2..c217775 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -886,22 +886,45 @@ static int ip6_dst_lookup_tail(struct sock *sk, #endif int err; - if (!*dst) - *dst = ip6_route_output(net, sk, fl6); - - err = (*dst)->error; - if (err) - goto out_err_release; + /* The 
correct way to handle this would be to do + * ip6_route_get_saddr, and then ip6_route_output; however, + * the route-specific preferred source forces the + * ip6_route_output call _before_ ip6_route_get_saddr. + * + * In source specific routing (no src=any default route), + * ip6_route_output will fail given src=any saddr, though, so + * that's why we try it again later. + */ + if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) { + struct rt6_info *rt; + bool had_dst = *dst != NULL; - if (ipv6_addr_any(&fl6->saddr)) { - struct rt6_info *rt = (struct rt6_info *) *dst; + if (!had_dst) + *dst = ip6_route_output(net, sk, fl6); + rt = (*dst)->error ? NULL : (struct rt6_info *)*dst; err = ip6_route_get_saddr(net, rt, &fl6->daddr, sk ? inet6_sk(sk)->srcprefs : 0, &fl6->saddr); if (err) goto out_err_release; + + /* If we had an erroneous initial result, pretend it + * never existed and let the SA-enabled version take + * over. + */ + if (!had_dst && (*dst)->error) { + dst_release(*dst); + *dst = NULL; + } } + if (!*dst) + *dst = ip6_route_output(net, sk, fl6); + + err = (*dst)->error; + if (err) + goto out_err_release; + #ifdef CONFIG_IPV6_OPTIMISTIC_DAD /* * Here if the dst entry we've looked up diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 5c48293..d358888 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2245,9 +2245,10 @@ int ip6_route_get_saddr(struct net *net, unsigned int prefs, struct in6_addr *saddr) { - struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); + struct inet6_dev *idev = + rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL; int err = 0; - if (rt->rt6i_prefsrc.plen) + if (rt && rt->rt6i_prefsrc.plen) *saddr = rt->rt6i_prefsrc.addr; else err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index ad51df8..b6575d6 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -946,7 +946,8 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); if (req) { nsk = tcp_check_req(sk, skb, req, false); - reqsk_put(req); + if (!nsk) + reqsk_put(req); return nsk; } nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b4ac596..bab5c63 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -819,13 +819,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, * (because if we remove a STA after ops->remove_interface() * the driver will have removed the vif info already!) * - * This is relevant only in WDS mode, in all other modes we've - * already removed all stations when disconnecting or similar, - * so warn otherwise. + * In WDS mode a station must exist here and be flushed, for + * AP_VLANs stations may exist since there's nothing else that + * would have removed them, but in other modes there shouldn't + * be any stations. 
*/ flushed = sta_info_flush(sdata); - WARN_ON_ONCE((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) || - (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)); + WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) || + (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1))); /* don't count this interface for promisc/allmulti while it is down */ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 12971b7..2880f2a 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -66,6 +66,7 @@ static const struct rhashtable_params sta_rht_params = { .nelem_hint = 3, /* start small */ + .automatic_shrinking = true, .head_offset = offsetof(struct sta_info, hash_node), .key_offset = offsetof(struct sta_info, sta.addr), .key_len = ETH_ALEN, @@ -157,8 +158,24 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + struct rhash_head *tmp; + const struct bucket_table *tbl; + + rcu_read_lock(); + tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); - return rhashtable_lookup_fast(&local->sta_hash, addr, sta_rht_params); + for_each_sta_info(local, tbl, addr, sta, tmp) { + if (sta->sdata == sdata) { + rcu_read_unlock(); + /* this is safe as the caller must already hold + * another rcu read section or the mutex + */ + return sta; + } + } + rcu_read_unlock(); + return NULL; } /* diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index 5d9f68c..70be9c7 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -22,13 +22,14 @@ static struct net_device * ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy, - const char *name, int type) + const char *name, + unsigned char name_assign_type, int type) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); struct net_device *dev; rtnl_lock(); - dev = ieee802154_if_add(local, name, type, + dev = ieee802154_if_add(local, name, name_assign_type, type, cpu_to_le64(0x0000000000000000ULL)); rtnl_unlock(); @@ -45,12 +46,14 @@ static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy, static int ieee802154_add_iface(struct wpan_phy *phy, const char *name, + unsigned char name_assign_type, enum nl802154_iftype type, __le64 extended_addr) { struct ieee802154_local *local = wpan_phy_priv(phy); struct net_device *err; - err = ieee802154_if_add(local, name, type, extended_addr); + err = ieee802154_if_add(local, name, name_assign_type, type, + extended_addr); return PTR_ERR_OR_ZERO(err); } diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h index bebd70f..127ba18 100644 --- a/net/mac802154/ieee802154_i.h +++ b/net/mac802154/ieee802154_i.h @@ -182,7 +182,8 @@ void ieee802154_iface_exit(void); void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata); struct net_device * ieee802154_if_add(struct ieee802154_local *local, const char *name, - enum nl802154_iftype type, __le64 extended_addr); + unsigned char name_assign_type, enum nl802154_iftype type, + __le64 extended_addr); void ieee802154_remove_interfaces(struct ieee802154_local *local); #endif /* __IEEE802154_I_H */ diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 38b56f9..91b75ab 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -522,7 +522,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, struct net_device * ieee802154_if_add(struct ieee802154_local *local, const 
char *name, - enum nl802154_iftype type, __le64 extended_addr) + unsigned char name_assign_type, enum nl802154_iftype type, + __le64 extended_addr) { struct net_device *ndev = NULL; struct ieee802154_sub_if_data *sdata = NULL; @@ -531,7 +532,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name, ASSERT_RTNL(); ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, name, - NET_NAME_UNKNOWN, ieee802154_if_setup); + name_assign_type, ieee802154_if_setup); if (!ndev) return ERR_PTR(-ENOMEM); diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index dcf7395..5b2be12 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -134,7 +134,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template) for (i = 0; i < ARRAY_SIZE(key->tfm); i++) { key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC); - if (!key->tfm[i]) + if (IS_ERR(key->tfm[i])) goto err_tfm; if (crypto_aead_setkey(key->tfm[i], template->key, IEEE802154_LLSEC_KEY_SIZE)) @@ -144,7 +144,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template) } key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); - if (!key->tfm0) + if (IS_ERR(key->tfm0)) goto err_tfm; if (crypto_blkcipher_setkey(key->tfm0, template->key, diff --git a/net/mac802154/main.c b/net/mac802154/main.c index 8500378..08cb32d 100644 --- a/net/mac802154/main.c +++ b/net/mac802154/main.c @@ -161,18 +161,21 @@ int ieee802154_register_hw(struct ieee802154_hw *hw) rtnl_lock(); - dev = ieee802154_if_add(local, "wpan%d", NL802154_IFTYPE_NODE, + dev = ieee802154_if_add(local, "wpan%d", NET_NAME_ENUM, + NL802154_IFTYPE_NODE, cpu_to_le64(0x0000000000000000ULL)); if (IS_ERR(dev)) { rtnl_unlock(); rc = PTR_ERR(dev); - goto out_wq; + goto out_phy; } rtnl_unlock(); return 0; +out_phy: + wpan_phy_unregister(local->phy); out_wq: destroy_workqueue(local->workqueue); out: diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index db8a2ea..7b3f732 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -53,6 +53,11 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index) return rt; } +static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev) +{ + return rcu_dereference_rtnl(dev->mpls_ptr); +} + static bool mpls_output_possible(const struct net_device *dev) { return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev); @@ -136,6 +141,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, struct mpls_route *rt; struct mpls_entry_decoded dec; struct net_device *out_dev; + struct mpls_dev *mdev; unsigned int hh_len; unsigned int new_header_size; unsigned int mtu; @@ -143,6 +149,10 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, /* Careful this entire function runs inside of an rcu critical section */ + mdev = mpls_dev_get(dev); + if (!mdev || !mdev->input_enabled) + goto drop; + if (skb->pkt_type != PACKET_HOST) goto drop; @@ -352,9 +362,9 @@ static int mpls_route_add(struct mpls_route_config *cfg) if (!dev) goto errout; - /* For now just support ethernet devices */ + /* Ensure this is a supported device */ err = -EINVAL; - if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) + if (!mpls_dev_get(dev)) goto errout; err = -EINVAL; @@ -428,10 +438,89 @@ errout: return err; } +#define MPLS_PERDEV_SYSCTL_OFFSET(field) \ + (&((struct mpls_dev *)0)->field) + +static const struct ctl_table mpls_dev_table[] = { + { + .procname = "input", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .data = 
MPLS_PERDEV_SYSCTL_OFFSET(input_enabled), + }, + { } +}; + +static int mpls_dev_sysctl_register(struct net_device *dev, + struct mpls_dev *mdev) +{ + char path[sizeof("net/mpls/conf/") + IFNAMSIZ]; + struct ctl_table *table; + int i; + + table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL); + if (!table) + goto out; + + /* Table data contains only offsets relative to the base of + * the mdev at this point, so make them absolute. + */ + for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) + table[i].data = (char *)mdev + (uintptr_t)table[i].data; + + snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name); + + mdev->sysctl = register_net_sysctl(dev_net(dev), path, table); + if (!mdev->sysctl) + goto free; + + return 0; + +free: + kfree(table); +out: + return -ENOBUFS; +} + +static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev) +{ + struct ctl_table *table; + + table = mdev->sysctl->ctl_table_arg; + unregister_net_sysctl_table(mdev->sysctl); + kfree(table); +} + +static struct mpls_dev *mpls_add_dev(struct net_device *dev) +{ + struct mpls_dev *mdev; + int err = -ENOMEM; + + ASSERT_RTNL(); + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return ERR_PTR(err); + + err = mpls_dev_sysctl_register(dev, mdev); + if (err) + goto free; + + rcu_assign_pointer(dev->mpls_ptr, mdev); + + return mdev; + +free: + kfree(mdev); + return ERR_PTR(err); +} + static void mpls_ifdown(struct net_device *dev) { struct mpls_route __rcu **platform_label; struct net *net = dev_net(dev); + struct mpls_dev *mdev; unsigned index; platform_label = rtnl_dereference(net->mpls.platform_label); @@ -443,14 +532,35 @@ static void mpls_ifdown(struct net_device *dev) continue; rt->rt_dev = NULL; } + + mdev = mpls_dev_get(dev); + if (!mdev) + return; + + mpls_dev_sysctl_unregister(mdev); + + RCU_INIT_POINTER(dev->mpls_ptr, NULL); + + kfree(mdev); } static int mpls_dev_notify(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct mpls_dev *mdev; switch(event) { + case NETDEV_REGISTER: + /* For now just support ethernet devices */ + if ((dev->type == ARPHRD_ETHER) || + (dev->type == ARPHRD_LOOPBACK)) { + mdev = mpls_add_dev(dev); + if (IS_ERR(mdev)) + return notifier_from_errno(PTR_ERR(mdev)); + } + break; + case NETDEV_UNREGISTER: mpls_ifdown(dev); break; @@ -536,6 +646,15 @@ int nla_get_labels(const struct nlattr *nla, if ((dec.bos != bos) || dec.ttl || dec.tc) return -EINVAL; + switch (dec.label) { + case MPLS_LABEL_IMPLNULL: + /* RFC3032: This is a label that an LSR may + * assign and distribute, but which never + * actually appears in the encapsulation. 
+ */ + return -EINVAL; + } + label[i] = dec.label; } *labels = nla_labels; @@ -816,7 +935,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) } /* In case the predefined labels need to be populated */ - if (limit > LABEL_IPV4_EXPLICIT_NULL) { + if (limit > MPLS_LABEL_IPV4NULL) { struct net_device *lo = net->loopback_dev; rt0 = mpls_rt_alloc(lo->addr_len); if (!rt0) @@ -826,7 +945,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) rt0->rt_via_table = NEIGH_LINK_TABLE; memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len); } - if (limit > LABEL_IPV6_EXPLICIT_NULL) { + if (limit > MPLS_LABEL_IPV6NULL) { struct net_device *lo = net->loopback_dev; rt2 = mpls_rt_alloc(lo->addr_len); if (!rt2) @@ -854,15 +973,15 @@ static int resize_platform_label_table(struct net *net, size_t limit) memcpy(labels, old, cp_size); /* If needed set the predefined labels */ - if ((old_limit <= LABEL_IPV6_EXPLICIT_NULL) && - (limit > LABEL_IPV6_EXPLICIT_NULL)) { - RCU_INIT_POINTER(labels[LABEL_IPV6_EXPLICIT_NULL], rt2); + if ((old_limit <= MPLS_LABEL_IPV6NULL) && + (limit > MPLS_LABEL_IPV6NULL)) { + RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2); rt2 = NULL; } - if ((old_limit <= LABEL_IPV4_EXPLICIT_NULL) && - (limit > LABEL_IPV4_EXPLICIT_NULL)) { - RCU_INIT_POINTER(labels[LABEL_IPV4_EXPLICIT_NULL], rt0); + if ((old_limit <= MPLS_LABEL_IPV4NULL) && + (limit > MPLS_LABEL_IPV4NULL)) { + RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0); rt0 = NULL; } @@ -912,7 +1031,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write, return ret; } -static struct ctl_table mpls_table[] = { +static const struct ctl_table mpls_table[] = { { .procname = "platform_labels", .data = NULL, diff --git a/net/mpls/internal.h b/net/mpls/internal.h index fb6de92..b064c34 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -1,16 +1,6 @@ #ifndef MPLS_INTERNAL_H #define MPLS_INTERNAL_H -#define LABEL_IPV4_EXPLICIT_NULL 0 /* RFC3032 */ -#define LABEL_ROUTER_ALERT_LABEL 1 /* RFC3032 */ -#define LABEL_IPV6_EXPLICIT_NULL 2 /* RFC3032 */ -#define LABEL_IMPLICIT_NULL 3 /* RFC3032 */ -#define LABEL_ENTROPY_INDICATOR 7 /* RFC6790 */ -#define LABEL_GAL 13 /* RFC5586 */ -#define LABEL_OAM_ALERT 14 /* RFC3429 */ -#define LABEL_EXTENSION 15 /* RFC7274 */ - - struct mpls_shim_hdr { __be32 label_stack_entry; }; @@ -22,6 +12,12 @@ struct mpls_entry_decoded { u8 bos; }; +struct mpls_dev { + int input_enabled; + + struct ctl_table_header *sysctl; +}; + struct sk_buff; static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 78af83b..ad9d11f 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4340,7 +4340,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, case NFT_CONTINUE: case NFT_BREAK: case NFT_RETURN: - desc->len = sizeof(data->verdict); break; case NFT_JUMP: case NFT_GOTO: @@ -4355,10 +4354,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, chain->use++; data->verdict.chain = chain; - desc->len = sizeof(data); break; } + desc->len = sizeof(data->verdict); desc->type = NFT_DATA_VERDICT; return 0; } diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index 57d3e1a..0522fc9 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c @@ -63,6 +63,8 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, 
priv->icmp_code)) goto nla_put_failure; break; + default: + break; } return 0; diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index 62cabee..635dbba 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c @@ -108,6 +108,8 @@ static int nft_reject_inet_dump(struct sk_buff *skb, if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) goto nla_put_failure; break; + default: + break; } return 0; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 19909d0..daa0b81 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1629,13 +1629,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size, if (data == NULL) return NULL; - skb = build_skb(data, size); + skb = __build_skb(data, size); if (skb == NULL) vfree(data); - else { - skb->head_frag = 0; + else skb->destructor = netlink_skb_destructor; - } return skb; } @@ -3141,7 +3139,6 @@ static const struct rhashtable_params netlink_rhashtable_params = { .key_len = netlink_compare_arg_len, .obj_hashfn = netlink_hash, .obj_cmpfn = netlink_compare, - .max_size = 65536, .automatic_shrinking = true, }; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5102c3c..b5989c6 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2311,11 +2311,14 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) tlen = dev->needed_tailroom; skb = sock_alloc_send_skb(&po->sk, hlen + tlen + sizeof(struct sockaddr_ll), - 0, &err); + !need_wait, &err); - if (unlikely(skb == NULL)) + if (unlikely(skb == NULL)) { + /* we assume the socket was initially writeable ... */ + if (likely(len_sum > 0)) + err = len_sum; goto out_status; - + } tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, addr, hlen); if (tp_len > dev->mtu + dev->hard_header_len) { diff --git a/net/rds/connection.c b/net/rds/connection.c index 14f04139..da6da57 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -126,7 +126,10 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, struct rds_transport *loop_trans; unsigned long flags; int ret; + struct rds_transport *otrans = trans; + if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP) + goto new_conn; rcu_read_lock(); conn = rds_conn_lookup(head, laddr, faddr, trans); if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && @@ -142,6 +145,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, if (conn) goto out; +new_conn: conn = kmem_cache_zalloc(rds_conn_slab, gfp); if (!conn) { conn = ERR_PTR(-ENOMEM); @@ -230,13 +234,22 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, /* Creating normal conn */ struct rds_connection *found; - found = rds_conn_lookup(head, laddr, faddr, trans); + if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP) + found = NULL; + else + found = rds_conn_lookup(head, laddr, faddr, trans); if (found) { trans->conn_free(conn->c_transport_data); kmem_cache_free(rds_conn_slab, conn); conn = found; } else { - hlist_add_head_rcu(&conn->c_hash_node, head); + if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) || + (otrans->t_type != RDS_TRANS_TCP)) { + /* Only the active side should be added to + * reconnect list for TCP. 
+ */ + hlist_add_head_rcu(&conn->c_hash_node, head); + } rds_cong_add_conn(conn); rds_conn_count++; } diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 31b74f5..8a09ee7 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -183,8 +183,17 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even /* If the peer gave us the last packet it saw, process this as if * we had received a regular ACK. */ - if (dp && dp->dp_ack_seq) - rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL); + if (dp) { + /* dp structure start is not guaranteed to be 8 bytes aligned. + * Since dp_ack_seq is 64-bit extended load operations can be + * used so go through get_unaligned to avoid unaligned errors. + */ + __be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq); + + if (dp_ack_seq) + rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq), + NULL); + } rds_connect_complete(conn); } diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index f9f564a..973109c7 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -62,6 +62,7 @@ void rds_tcp_state_change(struct sock *sk) case TCP_ESTABLISHED: rds_connect_complete(conn); break; + case TCP_CLOSE_WAIT: case TCP_CLOSE: rds_conn_drop(conn); default: diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 23ab4dcd..0da49e3 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -45,12 +45,45 @@ static void rds_tcp_accept_worker(struct work_struct *work); static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker); static struct socket *rds_tcp_listen_sock; +static int rds_tcp_keepalive(struct socket *sock) +{ + /* values below based on xs_udp_default_timeout */ + int keepidle = 5; /* send a probe 'keepidle' secs after last data */ + int keepcnt = 5; /* number of unack'ed probes before declaring dead */ + int keepalive = 1; + int ret = 0; + + ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, + (char *)&keepalive, sizeof(keepalive)); + if (ret < 0) + goto bail; + + ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT, + (char *)&keepcnt, sizeof(keepcnt)); + if (ret < 0) + goto bail; + + ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, + (char *)&keepidle, sizeof(keepidle)); + if (ret < 0) + goto bail; + + /* KEEPINTVL is the interval between successive probes. We follow + * the model in xs_tcp_finish_connecting() and re-use keepidle. + */ + ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL, + (char *)&keepidle, sizeof(keepidle)); +bail: + return ret; +} + static int rds_tcp_accept_one(struct socket *sock) { struct socket *new_sock = NULL; struct rds_connection *conn; int ret; struct inet_sock *inet; + struct rds_tcp_connection *rs_tcp; ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, sock->sk->sk_protocol, &new_sock); @@ -63,6 +96,10 @@ static int rds_tcp_accept_one(struct socket *sock) if (ret < 0) goto out; + ret = rds_tcp_keepalive(new_sock); + if (ret < 0) + goto out; + rds_tcp_tune(new_sock); inet = inet_sk(new_sock->sk); @@ -77,6 +114,15 @@ static int rds_tcp_accept_one(struct socket *sock) ret = PTR_ERR(conn); goto out; } + /* An incoming SYN request came in, and TCP just accepted it. + * We always create a new conn for listen side of TCP, and do not + * add it to the c_hash_list. + * + * If the client reboots, this conn will need to be cleaned up. 
+ * rds_tcp_state_change() will do that cleanup + */ + rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data; + WARN_ON(!rs_tcp || rs_tcp->t_sock); /* * see the comment above rds_queue_delayed_reconnect() diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 8e47251..295d14b 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -63,7 +63,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a, skb->mark = c->mark; /* using overlimits stats to count how many packets marked */ ca->tcf_qstats.overlimits++; - nf_ct_put(c); goto out; } @@ -82,7 +81,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a, nf_ct_put(c); out: - skb->nfct = NULL; spin_unlock(&ca->tcf_lock); return ca->tcf_action; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 8b0470e..b6ef9a0 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -308,12 +308,11 @@ replay: case RTM_DELTFILTER: err = tp->ops->delete(tp, fh); if (err == 0) { - tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); - if (tcf_destroy(tp, false)) { - struct tcf_proto *next = rtnl_dereference(tp->next); + struct tcf_proto *next = rtnl_dereference(tp->next); + tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); + if (tcf_destroy(tp, false)) RCU_INIT_POINTER(*back, next); - } } goto errout; case RTM_GETTFILTER: diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index de28f8e..7a0bdb1 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -164,7 +164,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt) sch->limit = DEFAULT_CODEL_LIMIT; - codel_params_init(&q->params); + codel_params_init(&q->params, sch); codel_vars_init(&q->vars); codel_stats_init(&q->stats); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 1e52dec..c244c45b 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -391,7 +391,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) q->perturbation = prandom_u32(); INIT_LIST_HEAD(&q->new_flows); INIT_LIST_HEAD(&q->old_flows); - codel_params_init(&q->cparams); + codel_params_init(&q->cparams, sch); codel_stats_init(&q->cstats); q->cparams.ecn = true; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index a4ca451..634529e 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -229,7 +229,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) break; } - if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { + if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) { q->backlog += qdisc_pkt_len(skb); return qdisc_enqueue_tail(skb, sch); } @@ -553,7 +553,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) opt.limit = q->limit; opt.DP = q->DP; - opt.backlog = q->backlog; + opt.backlog = gred_backlog(table, q, sch); opt.prio = q->prio; opt.qth_min = q->parms.qth_min >> q->parms.Wlog; opt.qth_max = q->parms.qth_max >> q->parms.Wlog; diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index 1ec19f6..eeeba5a 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c @@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, { u32 value_follows; int err; + struct page *scratch; + + scratch = alloc_page(GFP_KERNEL); + if (!scratch) + return -ENOMEM; + xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE); /* res->status */ err = gssx_dec_status(xdr, &res->status); if (err) - return err; + goto out_free; /* res->context_handle */ err = 
gssx_dec_bool(xdr, &value_follows); if (err) - return err; + goto out_free; if (value_follows) { err = gssx_dec_ctx(xdr, res->context_handle); if (err) - return err; + goto out_free; } else { res->context_handle = NULL; } @@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, /* res->output_token */ err = gssx_dec_bool(xdr, &value_follows); if (err) - return err; + goto out_free; if (value_follows) { err = gssx_dec_buffer(xdr, res->output_token); if (err) - return err; + goto out_free; } else { res->output_token = NULL; } @@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, /* res->delegated_cred_handle */ err = gssx_dec_bool(xdr, &value_follows); if (err) - return err; + goto out_free; if (value_follows) { /* we do not support upcall servers sending this data. */ - return -EINVAL; + err = -EINVAL; + goto out_free; } /* res->options */ err = gssx_dec_option_array(xdr, &res->options); +out_free: + __free_page(scratch); return err; } diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 3613e72..70e3dac 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -591,14 +591,14 @@ void tipc_bearer_stop(struct net *net) /* Caller should hold rtnl_lock to protect the bearer */ static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, - struct tipc_bearer *bearer) + struct tipc_bearer *bearer, int nlflags) { void *hdr; struct nlattr *attrs; struct nlattr *prop; hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, - NLM_F_MULTI, TIPC_NL_BEARER_GET); + nlflags, TIPC_NL_BEARER_GET); if (!hdr) return -EMSGSIZE; @@ -657,7 +657,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) if (!bearer) continue; - err = __tipc_nl_add_bearer(&msg, bearer); + err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI); if (err) break; } @@ -705,7 +705,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) goto err_out; } - err = __tipc_nl_add_bearer(&msg, bearer); + err = __tipc_nl_add_bearer(&msg, bearer, 0); if (err) goto err_out; rtnl_unlock(); @@ -857,14 +857,14 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) } static int __tipc_nl_add_media(struct tipc_nl_msg *msg, - struct tipc_media *media) + struct tipc_media *media, int nlflags) { void *hdr; struct nlattr *attrs; struct nlattr *prop; hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, - NLM_F_MULTI, TIPC_NL_MEDIA_GET); + nlflags, TIPC_NL_MEDIA_GET); if (!hdr) return -EMSGSIZE; @@ -916,7 +916,8 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb) rtnl_lock(); for (; media_info_array[i] != NULL; i++) { - err = __tipc_nl_add_media(&msg, media_info_array[i]); + err = __tipc_nl_add_media(&msg, media_info_array[i], + NLM_F_MULTI); if (err) break; } @@ -963,7 +964,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) goto err_out; } - err = __tipc_nl_add_media(&msg, media); + err = __tipc_nl_add_media(&msg, media, 0); if (err) goto err_out; rtnl_unlock(); diff --git a/net/tipc/link.c b/net/tipc/link.c index a6b30df..43a515d 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1145,11 +1145,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) } /* Synchronize with parallel link if applicable */ if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) { - link_handle_out_of_seq_msg(l_ptr, skb); - if (link_synch(l_ptr)) - link_retrieve_defq(l_ptr, &head); - skb = NULL; - goto unlock; + if (!link_synch(l_ptr)) + goto unlock; } 
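
The TIPC netlink changes in this hunk all make the same point: a fill helper shared between a dumpit handler and a doit handler has to take its message flags from the caller, because only messages of a multipart dump reply may carry NLM_F_MULTI. A stripped-down sketch of that shape, with the attribute nesting of the real __tipc_nl_add_bearer() elided, is:

/* sketch only: the nla_put()/nla_nest_start() calls of the real helper are omitted */
static int example_fill(struct tipc_nl_msg *msg, int nlflags)
{
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
			  &tipc_genl_family, nlflags, TIPC_NL_BEARER_GET);
	if (!hdr)
		return -EMSGSIZE;

	/* attributes would be added here */

	genlmsg_end(msg->skb, hdr);
	return 0;
}

Dump handlers then pass NLM_F_MULTI so each message is marked as part of a multipart reply terminated by NLMSG_DONE, while the single-object GET handlers pass 0 and return one self-contained message.
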
l_ptr->next_in_no++; if (unlikely(!skb_queue_empty(&l_ptr->deferdq))) @@ -2013,7 +2010,7 @@ msg_full: /* Caller should hold appropriate locks to protect the link */ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, - struct tipc_link *link) + struct tipc_link *link, int nlflags) { int err; void *hdr; @@ -2022,7 +2019,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, struct tipc_net *tn = net_generic(net, tipc_net_id); hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, - NLM_F_MULTI, TIPC_NL_LINK_GET); + nlflags, TIPC_NL_LINK_GET); if (!hdr) return -EMSGSIZE; @@ -2095,7 +2092,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, if (!node->links[i]) continue; - err = __tipc_nl_add_link(net, msg, node->links[i]); + err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI); if (err) return err; } @@ -2143,7 +2140,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) err = __tipc_nl_add_node_links(net, &msg, node, &prev_link); tipc_node_unlock(node); - tipc_node_put(node); if (err) goto out; @@ -2210,7 +2206,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) goto err_out; } - err = __tipc_nl_add_link(net, &msg, link); + err = __tipc_nl_add_link(net, &msg, link, 0); if (err) goto err_out; diff --git a/net/tipc/server.c b/net/tipc/server.c index ab6183c..77ff03e 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -102,7 +102,7 @@ static void tipc_conn_kref_release(struct kref *kref) } saddr->scope = -TIPC_NODE_SCOPE; kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); - sk_release_kernel(sk); + sock_release(sock); con->sock = NULL; } @@ -321,12 +321,9 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) struct socket *sock = NULL; int ret; - ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock); + ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1); if (ret < 0) return NULL; - - sk_change_net(sock->sk, s->net); - ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, (char *)&s->imp, sizeof(s->imp)); if (ret < 0) @@ -376,7 +373,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) create_err: kernel_sock_shutdown(sock, SHUT_RDWR); - sk_release_kernel(sock->sk); + sock_release(sock); return NULL; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ee90d74..9074b5c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1764,13 +1764,14 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) { u32 dnode, dport = 0; - int err = -TIPC_ERR_NO_PORT; + int err; struct sk_buff *skb; struct tipc_sock *tsk; struct tipc_net *tn; struct sock *sk; while (skb_queue_len(inputq)) { + err = -TIPC_ERR_NO_PORT; skb = NULL; dport = tipc_skb_peek_port(inputq, dport); tsk = tipc_sk_lookup(net, dport); diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 99f7012..a73a226 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -95,39 +95,36 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); unsigned int unix_tot_inflight; - struct sock *unix_get_socket(struct file *filp) { struct sock *u_sock = NULL; struct inode *inode = file_inode(filp); - /* - * Socket ? - */ + /* Socket ? */ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { struct socket *sock = SOCKET_I(inode); struct sock *s = sock->sk; - /* - * PF_UNIX ? - */ + /* PF_UNIX ? 
*/ if (s && sock->ops && sock->ops->family == PF_UNIX) u_sock = s; } return u_sock; } -/* - * Keep the number of times in flight count for the file - * descriptor if it is for an AF_UNIX socket. +/* Keep the number of times in flight count for the file + * descriptor if it is for an AF_UNIX socket. */ void unix_inflight(struct file *fp) { struct sock *s = unix_get_socket(fp); + if (s) { struct unix_sock *u = unix_sk(s); + spin_lock(&unix_gc_lock); + if (atomic_long_inc_return(&u->inflight) == 1) { BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &gc_inflight_list); @@ -142,10 +139,13 @@ void unix_inflight(struct file *fp) void unix_notinflight(struct file *fp) { struct sock *s = unix_get_socket(fp); + if (s) { struct unix_sock *u = unix_sk(s); + spin_lock(&unix_gc_lock); BUG_ON(list_empty(&u->link)); + if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); unix_tot_inflight--; @@ -161,32 +161,27 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { - /* - * Do we have file descriptors ? - */ + /* Do we have file descriptors ? */ if (UNIXCB(skb).fp) { bool hit = false; - /* - * Process the descriptors of this socket - */ + /* Process the descriptors of this socket */ int nfd = UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; + while (nfd--) { - /* - * Get the socket the fd matches - * if it indeed does so - */ + /* Get the socket the fd matches if it indeed does so */ struct sock *sk = unix_get_socket(*fp++); + if (sk) { struct unix_sock *u = unix_sk(sk); - /* - * Ignore non-candidates, they could + /* Ignore non-candidates, they could * have been added to the queues after * starting the garbage collection */ if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { hit = true; + func(u); } } @@ -203,24 +198,22 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), static void scan_children(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { - if (x->sk_state != TCP_LISTEN) + if (x->sk_state != TCP_LISTEN) { scan_inflight(x, func, hitlist); - else { + } else { struct sk_buff *skb; struct sk_buff *next; struct unix_sock *u; LIST_HEAD(embryos); - /* - * For a listening socket collect the queued embryos + /* For a listening socket collect the queued embryos * and perform a scan on them as well. */ spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { u = unix_sk(skb->sk); - /* - * An embryo cannot be in-flight, so it's safe + /* An embryo cannot be in-flight, so it's safe * to use the list link. */ BUG_ON(!list_empty(&u->link)); @@ -249,8 +242,7 @@ static void inc_inflight(struct unix_sock *usk) static void inc_inflight_move_tail(struct unix_sock *u) { atomic_long_inc(&u->inflight); - /* - * If this still might be part of a cycle, move it to the end + /* If this still might be part of a cycle, move it to the end * of the list, so that it's checked even if it was already * passed over */ @@ -263,8 +255,7 @@ static bool gc_in_progress; void wait_for_unix_gc(void) { - /* - * If number of inflight sockets is insane, + /* If number of inflight sockets is insane, * force a garbage collect right now. */ if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) @@ -288,8 +279,7 @@ void unix_gc(void) goto out; gc_in_progress = true; - /* - * First, select candidates for garbage collection. Only + /* First, select candidates for garbage collection. 
Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. * @@ -320,15 +310,13 @@ void unix_gc(void) } } - /* - * Now remove all internal in-flight reference to children of + /* Now remove all internal in-flight reference to children of * the candidates. */ list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, dec_inflight, NULL); - /* - * Restore the references for children of all candidates, + /* Restore the references for children of all candidates, * which have remaining references. Do this recursively, so * only those remain, which form cyclic references. * @@ -350,8 +338,7 @@ void unix_gc(void) } list_del(&cursor); - /* - * not_cycle_list contains those sockets which do not make up a + /* not_cycle_list contains those sockets which do not make up a * cycle. Restore these to the inflight list. */ while (!list_empty(¬_cycle_list)) { @@ -360,8 +347,7 @@ void unix_gc(void) list_move_tail(&u->link, &gc_inflight_list); } - /* - * Now gc_candidates contains only garbage. Restore original + /* Now gc_candidates contains only garbage. Restore original * inflight counters for these as well, and remove the skbuffs * which are creating the cycle(s). */ diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c index 37d0220..db7a2e5 100644 --- a/sound/pci/emu10k1/emu10k1.c +++ b/sound/pci/emu10k1/emu10k1.c @@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci, } #endif - strcpy(card->driver, emu->card_capabilities->driver); - strcpy(card->shortname, emu->card_capabilities->name); + strlcpy(card->driver, emu->card_capabilities->driver, + sizeof(card->driver)); + strlcpy(card->shortname, emu->card_capabilities->name, + sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i", card->shortname, emu->revision, emu->serial, emu->port, emu->irq); diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c index 874cd76..d2c7ea3 100644 --- a/sound/pci/emu10k1/emu10k1_callback.c +++ b/sound/pci/emu10k1/emu10k1_callback.c @@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp) snd_emu10k1_ptr_write(hw, Z2, ch, 0); /* invalidate maps */ - temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK; + temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); snd_emu10k1_ptr_write(hw, MAPA, ch, temp); snd_emu10k1_ptr_write(hw, MAPB, ch, temp); #if 0 @@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp) snd_emu10k1_ptr_write(hw, CDF, ch, sample); /* invalidate maps */ - temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK; + temp = ((unsigned int)hw->silent_page.addr << hw_address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); snd_emu10k1_ptr_write(hw, MAPA, ch, temp); snd_emu10k1_ptr_write(hw, MAPB, ch, temp); diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index 54079f5..a454814 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c @@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume) snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */ snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */ - silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK; + silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? 
MAP_PTI_MASK1 : MAP_PTI_MASK0); for (ch = 0; ch < NUM_G; ch++) { snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page); snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page); @@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume) outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG); } + if (emu->address_mode == 0) { + /* use 16M in 4G */ + outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG); + } + return 0; } @@ -1446,7 +1451,7 @@ static struct snd_emu_chip_details emu_chip_details[] = { * */ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102, - .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]", + .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]", .id = "Audigy2", .emu10k2_chip = 1, .ca0108_chip = 1, @@ -1596,7 +1601,7 @@ static struct snd_emu_chip_details emu_chip_details[] = { .adc_1361t = 1, /* 24 bit capture instead of 16bit */ .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102, - .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]", + .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, @@ -1902,8 +1907,10 @@ int snd_emu10k1_create(struct snd_card *card, is_audigy = emu->audigy = c->emu10k2_chip; + /* set addressing mode */ + emu->address_mode = is_audigy ? 0 : 1; /* set the DMA transfer mask */ - emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK; + emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK; if (pci_set_dma_mask(pci, emu->dma_mask) < 0 || pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) { dev_err(card->dev, @@ -1928,7 +1935,7 @@ int snd_emu10k1_create(struct snd_card *card, emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT; if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), - 32 * 1024, &emu->ptb_pages) < 0) { + (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) { err = -ENOMEM; goto error; } @@ -2027,8 +2034,8 @@ int snd_emu10k1_create(struct snd_card *card, /* Clear silent pages and set up pointers */ memset(emu->silent_page.area, 0, PAGE_SIZE); - silent_page = emu->silent_page.addr << 1; - for (idx = 0; idx < MAXPAGES; idx++) + silent_page = emu->silent_page.addr << emu->address_mode; + for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++) ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx); /* set up voice indices */ diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c index 0dc0738..14a305b 100644 --- a/sound/pci/emu10k1/emupcm.c +++ b/sound/pci/emu10k1/emupcm.c @@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, snd_emu10k1_ptr_write(emu, Z1, voice, 0); snd_emu10k1_ptr_write(emu, Z2, voice, 0); /* invalidate maps */ - silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK; + silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? 
MAP_PTI_MASK1 : MAP_PTI_MASK0); snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page); snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page); /* modulation envelope */ diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c index c68e6dd..4f1f69b 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c @@ -34,10 +34,11 @@ * aligned pages in others */ #define __set_ptb_entry(emu,page,addr) \ - (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page))) + (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page))) #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE) -#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES) +#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES) +#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES) /* get aligned page from offset address */ #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT) /* get offset address from aligned page */ @@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis } page = blk->mapped_page + blk->pages; } - size = MAX_ALIGN_PAGES - page; + size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page; if (size >= max_size) { *nextp = pos; return page; @@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) q = get_emu10k1_memblk(p, mapped_link); end_page = q->mapped_page; } else - end_page = MAX_ALIGN_PAGES; + end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0); /* remove links */ list_del(&blk->mapped_link); @@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst if (snd_BUG_ON(!emu)) return NULL; if (snd_BUG_ON(runtime->dma_bytes <= 0 || - runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE)) + runtime->dma_bytes >= (emu->address_mode ? 
MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE)) return NULL; hdr = emu->memhdr; if (snd_BUG_ON(!hdr)) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 873ed1b..b49feff 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -873,14 +873,15 @@ struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec, struct hda_pcm *pcm; va_list args; - va_start(args, fmt); pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); if (!pcm) return NULL; pcm->codec = codec; kref_init(&pcm->kref); + va_start(args, fmt); pcm->name = kvasprintf(GFP_KERNEL, fmt, args); + va_end(args); if (!pcm->name) { kfree(pcm); return NULL; @@ -2082,6 +2083,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = { .put = vmaster_mute_mode_put, }; +/* meta hook to call each driver's vmaster hook */ +static void vmaster_hook(void *private_data, int enabled) +{ + struct hda_vmaster_mute_hook *hook = private_data; + + if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER) + enabled = hook->mute_mode; + hook->hook(hook->codec, enabled); +} + /** * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED * @codec: the HDA codec @@ -2100,9 +2111,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec, if (!hook->hook || !hook->sw_kctl) return 0; - snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec); hook->codec = codec; hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER; + snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook); if (!expose_enum_ctl) return 0; kctl = snd_ctl_new1(&vmaster_mute_mode, hook); @@ -2128,14 +2139,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook) */ if (hook->codec->bus->shutdown) return; - switch (hook->mute_mode) { - case HDA_VMUTE_FOLLOW_MASTER: - snd_ctl_sync_vmaster_hook(hook->sw_kctl); - break; - default: - hook->hook(hook->codec, hook->mute_mode); - break; - } + snd_ctl_sync_vmaster_hook(hook->sw_kctl); } EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook); diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 3d2597b..788f969 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3259,7 +3259,8 @@ static int create_input_ctls(struct hda_codec *codec) val = PIN_IN; if (cfg->inputs[i].type == AUTO_PIN_MIC) val |= snd_hda_get_default_vref(codec, pin); - if (pin != spec->hp_mic_pin) + if (pin != spec->hp_mic_pin && + !snd_hda_codec_get_pin_target(codec, pin)) set_pin_target(codec, pin, val, false); if (mixer) { diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 06199e4..e2afd53 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4190,11 +4190,18 @@ static void alc_shutup_dell_xps13(struct hda_codec *codec) static void alc_fixup_dell_xps13(struct hda_codec *codec, const struct hda_fixup *fix, int action) { - if (action == HDA_FIXUP_ACT_PROBE) { - struct alc_spec *spec = codec->spec; - struct hda_input_mux *imux = &spec->gen.input_mux; - int i; + struct alc_spec *spec = codec->spec; + struct hda_input_mux *imux = &spec->gen.input_mux; + int i; + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + /* mic pin 0x19 must be initialized with Vref Hi-Z, otherwise + * it causes a click noise at start up + */ + snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); + break; + case HDA_FIXUP_ACT_PROBE: spec->shutup = alc_shutup_dell_xps13; /* Make the internal mic the default input source. 
*/ @@ -4204,6 +4211,7 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec, break; } } + break; } } diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index 0a4ad5f..d51703e 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c @@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { old_vmaster_hook = spec->vmaster_mute.hook; spec->vmaster_mute.hook = update_tpacpi_mute_led; + spec->vmaster_mute_enum = 1; removefunc = false; } if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 69528ae..be4d741 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -18,6 +18,7 @@ #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/gpio.h> +#include <linux/acpi.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> @@ -2656,6 +2657,15 @@ static const struct i2c_device_id rt5645_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id); +#ifdef CONFIG_ACPI +static struct acpi_device_id rt5645_acpi_match[] = { + { "10EC5645", 0 }, + { "10EC5650", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match); +#endif + static int rt5645_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { @@ -2770,7 +2780,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c, case RT5645_DMIC_DATA_GPIO12: regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, - RT5645_DMIC_1_DP_MASK, RT5645_DMIC_2_DP_GPIO12); + RT5645_DMIC_2_DP_MASK, RT5645_DMIC_2_DP_GPIO12); regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, RT5645_GP12_PIN_MASK, RT5645_GP12_PIN_DMIC2_SDA); @@ -2872,6 +2882,7 @@ static struct i2c_driver rt5645_i2c_driver = { .driver = { .name = "rt5645", .owner = THIS_MODULE, + .acpi_match_table = ACPI_PTR(rt5645_acpi_match), }, .probe = rt5645_i2c_probe, .remove = rt5645_i2c_remove, diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index af18258..169aa47 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c @@ -62,6 +62,9 @@ static const struct reg_default init_list[] = { {RT5677_PR_BASE + 0x1e, 0x0000}, {RT5677_PR_BASE + 0x12, 0x0eaa}, {RT5677_PR_BASE + 0x14, 0x018a}, + {RT5677_PR_BASE + 0x15, 0x0490}, + {RT5677_PR_BASE + 0x38, 0x0f71}, + {RT5677_PR_BASE + 0x39, 0x0f71}, }; #define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list) @@ -914,7 +917,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w, { struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); - int idx = rl6231_calc_dmic_clk(rt5677->sysclk); + int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8); if (idx < 0) dev_err(codec->dev, "Failed to set DMIC clock\n"); diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c index 16f1b71..aab0af6 100644 --- a/sound/soc/codecs/tfa9879.c +++ b/sound/soc/codecs/tfa9879.c @@ -280,8 +280,8 @@ static int tfa9879_i2c_probe(struct i2c_client *i2c, int i; tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL); - if (IS_ERR(tfa9879)) - return PTR_ERR(tfa9879); + if (!tfa9879) + return -ENOMEM; i2c_set_clientdata(i2c, tfa9879); diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index e8bb8ee..0d48804 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -1357,7 +1357,7 @@ static int fsl_ssi_probe(struct platform_device *pdev) } ssi_private->irq = platform_get_irq(pdev, 0); - if 
(!ssi_private->irq) { + if (ssi_private->irq < 0) { dev_err(&pdev->dev, "no irq for node %s\n", pdev->name); return ssi_private->irq; } diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile index cd9aee9..3853ec2 100644 --- a/sound/soc/intel/Makefile +++ b/sound/soc/intel/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SST) += common/ # Platform Support obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/ obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/ -obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += atom/ +obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/ # Machine support obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c index 1efb33b..a839dbf 100644 --- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c +++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c @@ -759,7 +759,6 @@ fw_err: dsp_new_err: sst_ipc_fini(ipc); ipc_init_err: - kfree(byt); return err; } diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c index 344a1e9..324eceb 100644 --- a/sound/soc/intel/haswell/sst-haswell-ipc.c +++ b/sound/soc/intel/haswell/sst-haswell-ipc.c @@ -2201,7 +2201,6 @@ dma_err: dsp_new_err: sst_ipc_fini(ipc); ipc_init_err: - kfree(hsw); return ret; } EXPORT_SYMBOL_GPL(sst_hsw_dsp_init); diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index 6698d05..dc790ab 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c @@ -194,7 +194,7 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai); - int ret; + int ret = -EINVAL; switch (cmd) { case SNDRV_PCM_TRIGGER_START: diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c index 326d3c3..5bf7236 100644 --- a/sound/soc/samsung/s3c24xx-i2s.c +++ b/sound/soc/samsung/s3c24xx-i2s.c @@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev) return -ENOENT; } s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res); - if (s3c24xx_i2s.regs == NULL) - return -ENXIO; + if (IS_ERR(s3c24xx_i2s.regs)) + return PTR_ERR(s3c24xx_i2s.regs); s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO; s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO; diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index ac3756f..144308f 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c @@ -156,6 +156,7 @@ static int rsnd_dmaen_init(struct rsnd_priv *priv, struct rsnd_dma *dma, int id, (void *)id); } if (IS_ERR_OR_NULL(dmaen->chan)) { + dmaen->chan = NULL; dev_err(dev, "can't get dma channel\n"); goto rsnd_dma_channel_err; } diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c index ab37add..82e350e 100644 --- a/sound/synth/emux/emux_oss.c +++ b/sound/synth/emux/emux_oss.c @@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) if (snd_BUG_ON(!arg || !emu)) return -ENXIO; - mutex_lock(&emu->register_mutex); - - if (!snd_emux_inc_count(emu)) { - mutex_unlock(&emu->register_mutex); + if (!snd_emux_inc_count(emu)) return -EFAULT; - } memset(&callback, 0, sizeof(callback)); callback.owner = THIS_MODULE; @@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) if (p == NULL) { snd_printk(KERN_ERR "can't create port\n"); snd_emux_dec_count(emu); - mutex_unlock(&emu->register_mutex); return -ENOMEM; } @@ -148,8 +143,6 @@ 
snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) reset_port_mode(p, arg->seq_mode); snd_emux_reset_port(p); - - mutex_unlock(&emu->register_mutex); return 0; } @@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg) if (snd_BUG_ON(!emu)) return -ENXIO; - mutex_lock(&emu->register_mutex); snd_emux_sounds_off_all(p); snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port)); snd_seq_event_port_detach(p->chset.client, p->chset.port); snd_emux_dec_count(emu); - mutex_unlock(&emu->register_mutex); return 0; } diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c index 7778b8e..a020920 100644 --- a/sound/synth/emux/emux_seq.c +++ b/sound/synth/emux/emux_seq.c @@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu) if (emu->voices) snd_emux_terminate_all(emu); - mutex_lock(&emu->register_mutex); if (emu->client >= 0) { snd_seq_delete_kernel_client(emu->client); emu->client = -1; } - mutex_unlock(&emu->register_mutex); } @@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data, /* * increment usage count */ -int -snd_emux_inc_count(struct snd_emux *emu) +static int +__snd_emux_inc_count(struct snd_emux *emu) { emu->used++; if (!try_module_get(emu->ops.owner)) @@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu) return 1; } +int snd_emux_inc_count(struct snd_emux *emu) +{ + int ret; + + mutex_lock(&emu->register_mutex); + ret = __snd_emux_inc_count(emu); + mutex_unlock(&emu->register_mutex); + return ret; +} /* * decrease usage count */ -void -snd_emux_dec_count(struct snd_emux *emu) +static void +__snd_emux_dec_count(struct snd_emux *emu) { module_put(emu->card->module); emu->used--; @@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu) module_put(emu->ops.owner); } +void snd_emux_dec_count(struct snd_emux *emu) +{ + mutex_lock(&emu->register_mutex); + __snd_emux_dec_count(emu); + mutex_unlock(&emu->register_mutex); +} /* * Routine that is called upon a first use of a particular port @@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info) mutex_lock(&emu->register_mutex); snd_emux_init_port(p); - snd_emux_inc_count(emu); + __snd_emux_inc_count(emu); mutex_unlock(&emu->register_mutex); return 0; } @@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info) mutex_lock(&emu->register_mutex); snd_emux_sounds_off_all(p); - snd_emux_dec_count(emu); + __snd_emux_dec_count(emu); mutex_unlock(&emu->register_mutex); return 0; } diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile index d8fe29f..8bd9606 100644 --- a/tools/lib/api/Makefile +++ b/tools/lib/api/Makefile @@ -16,7 +16,7 @@ MAKEFLAGS += --no-print-directory LIBFILE = $(OUTPUT)libapi.a CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 -fPIC +CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 RM = rm -f diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile index 0c356fb..18ffccf 100644 --- a/tools/lib/lockdep/Makefile +++ b/tools/lib/lockdep/Makefile @@ -14,9 +14,10 @@ define allow-override $(eval $(1) = $(2))) endef -# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix. +# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix. 
$(call allow-override,CC,$(CROSS_COMPILE)gcc) $(call allow-override,AR,$(CROSS_COMPILE)ar) +$(call allow-override,LD,$(CROSS_COMPILE)ld) INSTALL = install diff --git a/tools/lib/lockdep/uinclude/linux/kernel.h b/tools/lib/lockdep/uinclude/linux/kernel.h index a11e3c3..cd2cc59 100644 --- a/tools/lib/lockdep/uinclude/linux/kernel.h +++ b/tools/lib/lockdep/uinclude/linux/kernel.h @@ -28,6 +28,9 @@ #define __init #define noinline #define list_add_tail_rcu list_add_tail +#define list_for_each_entry_rcu list_for_each_entry +#define barrier() +#define synchronize_sched() #ifndef CALLER_ADDR0 #define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index e0917c0..29f94f6 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -3865,7 +3865,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, } else if (el_size == 4) { trace_seq_printf(s, "%u", *(uint32_t *)num); } else if (el_size == 8) { - trace_seq_printf(s, "%lu", *(uint64_t *)num); + trace_seq_printf(s, "%"PRIu64, *(uint64_t *)num); } else { trace_seq_printf(s, "BAD SIZE:%d 0x%x", el_size, *(uint8_t *)num); diff --git a/tools/perf/Makefile b/tools/perf/Makefile index c699dc3..d31a7bb 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -24,7 +24,7 @@ unexport MAKEFLAGS # (To override it, run 'make JOBS=1' and similar.) # ifeq ($(JOBS),) - JOBS := $(shell egrep -c '^processor|^CPU' /proc/cpuinfo 2>/dev/null) + JOBS := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null) ifeq ($(JOBS),0) JOBS := 1 endif diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c index bedff6b..ad0d9b5 100644 --- a/tools/perf/bench/futex-requeue.c +++ b/tools/perf/bench/futex-requeue.c @@ -132,6 +132,9 @@ int bench_futex_requeue(int argc, const char **argv, if (!fshared) futex_flag = FUTEX_PRIVATE_FLAG; + if (nrequeue > nthreads) + nrequeue = nthreads; + printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), " "%d at a time.\n\n", getpid(), nthreads, fshared ? "shared":"private", &futex1, &futex2, nrequeue); @@ -161,20 +164,18 @@ int bench_futex_requeue(int argc, const char **argv, /* Ok, all threads are patiently blocked, start requeueing */ gettimeofday(&start, NULL); - for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue) { + while (nrequeued < nthreads) { /* * Do not wakeup any tasks blocked on futex1, allowing * us to really measure futex_wait functionality. 
*/ - futex_cmp_requeue(&futex1, 0, &futex2, 0, - nrequeue, futex_flag); + nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0, + nrequeue, futex_flag); } + gettimeofday(&end, NULL); timersub(&end, &start, &runtime); - if (nrequeued > nthreads) - nrequeued = nthreads; - update_stats(&requeued_stats, nrequeued); update_stats(&requeuetime_stats, runtime.tv_usec); @@ -184,7 +185,7 @@ int bench_futex_requeue(int argc, const char **argv, } /* everybody should be blocked on futex2, wake'em up */ - nrequeued = futex_wake(&futex2, nthreads, futex_flag); + nrequeued = futex_wake(&futex2, nrequeued, futex_flag); if (nthreads != nrequeued) warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads); diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index ebfa163..ba5efa4 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -180,7 +180,7 @@ static const struct option options[] = { OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"), OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"), OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"), - OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "bzero the initial allocations"), + OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "quiet mode"), OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"), /* Special option string parsing callbacks: */ @@ -828,6 +828,9 @@ static int count_process_nodes(int process_nr) td = g->threads + task_nr; node = numa_node_of_cpu(td->curr_cpu); + if (node < 0) /* curr_cpu was likely still -1 */ + return 0; + node_present[node] = 1; } @@ -882,6 +885,11 @@ static void calc_convergence_compression(int *strong) for (p = 0; p < g->p.nr_proc; p++) { unsigned int nodes = count_process_nodes(p); + if (!nodes) { + *strong = 0; + return; + } + nodes_min = min(nodes, nodes_min); nodes_max = max(nodes, nodes_max); } @@ -1395,7 +1403,7 @@ static void print_res(const char *name, double val, if (!name) name = "main,"; - if (g->p.show_quiet) + if (!g->p.show_quiet) printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short); else printf(" %14.3f %s\n", val, txt_long); diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 63ea013..1634186 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -319,7 +319,7 @@ static int page_stat_cmp(struct page_stat *a, struct page_stat *b) return 0; } -static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool create) +static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool create) { struct rb_node **node = &page_alloc_tree.rb_node; struct rb_node *parent = NULL; @@ -331,7 +331,7 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool cre parent = *node; data = rb_entry(*node, struct page_stat, node); - cmp = page_stat_cmp(data, stat); + cmp = page_stat_cmp(data, pstat); if (cmp < 0) node = &parent->rb_left; else if (cmp > 0) @@ -345,10 +345,10 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool cre data = zalloc(sizeof(*data)); if (data != NULL) { - data->page = stat->page; - data->order = stat->order; - data->gfp_flags = stat->gfp_flags; - data->migrate_type = stat->migrate_type; + data->page = pstat->page; + data->order = pstat->order; + data->gfp_flags = pstat->gfp_flags; + data->migrate_type = pstat->migrate_type; rb_link_node(&data->node, parent, node); rb_insert_color(&data->node, &page_alloc_tree); @@ -375,7 +375,7 
@@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel, unsigned int migrate_type = perf_evsel__intval(evsel, sample, "migratetype"); u64 bytes = kmem_page_size << order; - struct page_stat *stat; + struct page_stat *pstat; struct page_stat this = { .order = order, .gfp_flags = gfp_flags, @@ -401,21 +401,21 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel, * This is to find the current page (with correct gfp flags and * migrate type) at free event. */ - stat = search_page(page, true); - if (stat == NULL) + pstat = search_page(page, true); + if (pstat == NULL) return -ENOMEM; - stat->order = order; - stat->gfp_flags = gfp_flags; - stat->migrate_type = migrate_type; + pstat->order = order; + pstat->gfp_flags = gfp_flags; + pstat->migrate_type = migrate_type; this.page = page; - stat = search_page_alloc_stat(&this, true); - if (stat == NULL) + pstat = search_page_alloc_stat(&this, true); + if (pstat == NULL) return -ENOMEM; - stat->nr_alloc++; - stat->alloc_bytes += bytes; + pstat->nr_alloc++; + pstat->alloc_bytes += bytes; order_stats[order][migrate_type]++; @@ -428,7 +428,7 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel, u64 page; unsigned int order = perf_evsel__intval(evsel, sample, "order"); u64 bytes = kmem_page_size << order; - struct page_stat *stat; + struct page_stat *pstat; struct page_stat this = { .order = order, }; @@ -441,8 +441,8 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel, nr_page_frees++; total_page_free_bytes += bytes; - stat = search_page(page, false); - if (stat == NULL) { + pstat = search_page(page, false); + if (pstat == NULL) { pr_debug2("missing free at page %"PRIx64" (order: %d)\n", page, order); @@ -453,18 +453,18 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel, } this.page = page; - this.gfp_flags = stat->gfp_flags; - this.migrate_type = stat->migrate_type; + this.gfp_flags = pstat->gfp_flags; + this.migrate_type = pstat->migrate_type; - rb_erase(&stat->node, &page_tree); - free(stat); + rb_erase(&pstat->node, &page_tree); + free(pstat); - stat = search_page_alloc_stat(&this, false); - if (stat == NULL) + pstat = search_page_alloc_stat(&this, false); + if (pstat == NULL) return -ENOENT; - stat->nr_free++; - stat->free_bytes += bytes; + pstat->nr_free++; + pstat->free_bytes += bytes; return 0; } @@ -640,9 +640,9 @@ static void print_page_summary(void) nr_page_frees, total_page_free_bytes / 1024); printf("\n"); - printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests", + printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests", nr_alloc_freed, (total_alloc_freed_bytes) / 1024); - printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc-only requests", + printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests", nr_page_allocs - nr_alloc_freed, (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024); printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests", diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 476cdf7..b63aeda 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -329,7 +329,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, fprintf(stdout, "\n\n"); } - if (sort_order == default_sort_order && + if (sort_order == NULL && parent_pattern == default_parent_pattern) { fprintf(stdout, "#\n# (%s)\n#\n", help); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c 
index 1cb3436..6a4d5d4 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -733,7 +733,7 @@ static void perf_event__process_sample(struct perf_tool *tool, "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" "Check /proc/sys/kernel/kptr_restrict.\n\n" "Kernel%s samples will not be resolved.\n", - !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? + al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? " modules" : ""); if (use_browser <= 0) sleep(5); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e124741..e122970 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -2241,10 +2241,11 @@ static int trace__run(struct trace *trace, int argc, const char **argv) if (err < 0) goto out_error_mmap; + if (!target__none(&trace->opts.target)) + perf_evlist__enable(evlist); + if (forks) perf_evlist__start_workload(evlist); - else - perf_evlist__enable(evlist); trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1 || @@ -2272,6 +2273,11 @@ next_event: if (interrupted) goto out_disable; + + if (done && !draining) { + perf_evlist__disable(evlist); + draining = true; + } } } diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index d8bb616..d05b77c 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -1084,6 +1084,8 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) * * TODO:Group name support */ + if (!arg) + return -EINVAL; ptr = strpbrk(arg, ";=@+%"); if (ptr && *ptr == '=') { /* Event name */ diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index b5bf9d5..2a76e14 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -578,10 +578,12 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) /* Search child die for local variables and parameters. 
*/ if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { /* Search again in global variables */ - if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) + if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, + 0, &vr_die)) { pr_warning("Failed to find '%s' in this function.\n", pf->pvar->var); ret = -ENOENT; + } } if (ret >= 0) ret = convert_variable(&vr_die, pf); diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile index 5a16117..a9099d9 100644 --- a/tools/testing/selftests/powerpc/pmu/Makefile +++ b/tools/testing/selftests/powerpc/pmu/Makefile @@ -26,7 +26,7 @@ override define EMIT_TESTS $(MAKE) -s -C ebb emit_tests endef -DEFAULT_INSTALL := $(INSTALL_RULE) +DEFAULT_INSTALL_RULE := $(INSTALL_RULE) override define INSTALL_RULE $(DEFAULT_INSTALL_RULE) $(MAKE) -C ebb install diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index 1b616fa..6bff955 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile @@ -1,4 +1,4 @@ -TEST_PROGS := tm-resched-dscr tm-syscall +TEST_PROGS := tm-resched-dscr all: $(TEST_PROGS) diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index ddf6356..5bdb781 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -1,4 +1,8 @@ -.PHONY: all all_32 all_64 check_build32 clean run_tests +all: + +include ../lib.mk + +.PHONY: all all_32 all_64 warn_32bit_failure clean TARGETS_C_BOTHBITS := sigreturn single_step_syscall @@ -7,42 +11,47 @@ BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64) CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -UNAME_P := $(shell uname -p) +UNAME_M := $(shell uname -m) +CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) +CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) -# Always build 32-bit tests +ifeq ($(CAN_BUILD_I386),1) all: all_32 +TEST_PROGS += $(BINARIES_32) +endif -# If we're on a 64-bit host, build 64-bit tests as well -ifeq ($(shell uname -p),x86_64) +ifeq ($(CAN_BUILD_X86_64),1) all: all_64 +TEST_PROGS += $(BINARIES_64) endif -all_32: check_build32 $(BINARIES_32) +all_32: $(BINARIES_32) all_64: $(BINARIES_64) clean: $(RM) $(BINARIES_32) $(BINARIES_64) -run_tests: - ./run_x86_tests.sh - $(TARGETS_C_BOTHBITS:%=%_32): %_32: %.c $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl $(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -check_build32: - @if ! $(CC) -m32 -o /dev/null trivial_32bit_program.c; then \ - echo "Warning: you seem to have a broken 32-bit build" 2>&1; \ - echo "environment. If you are using a Debian-like"; \ - echo " distribution, try:"; \ - echo ""; \ - echo " apt-get install gcc-multilib libc6-i386 libc6-dev-i386"; \ - echo ""; \ - echo "If you are using a Fedora-like distribution, try:"; \ - echo ""; \ - echo " yum install glibc-devel.*i686"; \ - exit 1; \ - fi +# x86_64 users should be encouraged to install 32-bit libraries +ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),01) +all: warn_32bit_failure + +warn_32bit_failure: + @echo "Warning: you seem to have a broken 32-bit build" 2>&1; \ + echo "environment. This will reduce test coverage of 64-bit" 2>&1; \ + echo "kernels. 
If you are using a Debian-like distribution," 2>&1; \ + echo "try:"; 2>&1; \ + echo ""; \ + echo " apt-get install gcc-multilib libc6-i386 libc6-dev-i386"; \ + echo ""; \ + echo "If you are using a Fedora-like distribution, try:"; \ + echo ""; \ + echo " yum install glibc-devel.*i686"; \ + exit 0; +endif diff --git a/tools/testing/selftests/x86/check_cc.sh b/tools/testing/selftests/x86/check_cc.sh new file mode 100755 index 0000000..172d329 --- /dev/null +++ b/tools/testing/selftests/x86/check_cc.sh @@ -0,0 +1,16 @@ +#!/bin/sh +# check_cc.sh - Helper to test userspace compilation support +# Copyright (c) 2015 Andrew Lutomirski +# GPL v2 + +CC="$1" +TESTPROG="$2" +shift 2 + +if "$CC" -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then + echo 1 +else + echo 0 +fi + +exit 0 diff --git a/tools/testing/selftests/x86/run_x86_tests.sh b/tools/testing/selftests/x86/run_x86_tests.sh deleted file mode 100644 index 3fc19b3..0000000 --- a/tools/testing/selftests/x86/run_x86_tests.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -# This is deliberately minimal. IMO kselftests should provide a standard -# script here. -./sigreturn_32 || exit 1 -./single_step_syscall_32 || exit 1 - -if [[ "$uname -p" -eq "x86_64" ]]; then - ./sigreturn_64 || exit 1 - ./single_step_syscall_64 || exit 1 -fi - -exit 0 diff --git a/tools/testing/selftests/x86/trivial_32bit_program.c b/tools/testing/selftests/x86/trivial_32bit_program.c index 2e231be..fabdf0f 100644 --- a/tools/testing/selftests/x86/trivial_32bit_program.c +++ b/tools/testing/selftests/x86/trivial_32bit_program.c @@ -4,6 +4,10 @@ * GPL v2 */ +#ifndef __i386__ +# error wrong architecture +#endif + #include <stdio.h> int main() diff --git a/tools/testing/selftests/x86/trivial_64bit_program.c b/tools/testing/selftests/x86/trivial_64bit_program.c new file mode 100644 index 0000000..b994946 --- /dev/null +++ b/tools/testing/selftests/x86/trivial_64bit_program.c @@ -0,0 +1,18 @@ +/* + * Trivial program to check that we have a valid 32-bit build environment. + * Copyright (c) 2015 Andy Lutomirski + * GPL v2 + */ + +#ifndef __x86_64__ +# error wrong architecture +#endif + +#include <stdio.h> + +int main() +{ + printf("\n"); + + return 0; +} diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile index 0788621..2e83dd3 100644 --- a/tools/thermal/tmon/Makefile +++ b/tools/thermal/tmon/Makefile @@ -12,10 +12,6 @@ TARGET=tmon INSTALL_PROGRAM=install -m 755 -p DEL_FILE=rm -f -INSTALL_CONFIGFILE=install -m 644 -p -CONFIG_FILE= -CONFIG_PATH= - # Static builds might require -ltinfo, for instance ifneq ($(findstring -static, $(LDFLAGS)),) STATIC := --static @@ -38,13 +34,9 @@ valgrind: tmon install: - mkdir -p $(INSTALL_ROOT)/$(BINDIR) - $(INSTALL_PROGRAM) "$(TARGET)" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)" - - mkdir -p $(INSTALL_ROOT)/$(CONFIG_PATH) - - $(INSTALL_CONFIGFILE) "$(CONFIG_FILE)" "$(INSTALL_ROOT)/$(CONFIG_PATH)" uninstall: $(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)" - $(CONFIG_FILE) "$(CONFIG_PATH)" - clean: find . -name "*.o" | xargs $(DEL_FILE) diff --git a/tools/vm/Makefile b/tools/vm/Makefile index ac884b6..93aadaf 100644 --- a/tools/vm/Makefile +++ b/tools/vm/Makefile @@ -3,7 +3,7 @@ TARGETS=page-types slabinfo page_owner_sort LIB_DIR = ../lib/api -LIBS = $(LIB_DIR)/libapikfs.a +LIBS = $(LIB_DIR)/libapi.a CC = $(CROSS_COMPILE)gcc CFLAGS = -Wall -Wextra -I../lib/ |