From d0ebef8230e267ec47d4d4a65fe3262e2ebb8026 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Tue, 29 Oct 2013 14:50:27 -0500 Subject: Revert to v3.8 (no RT, no stable) diff --git a/Documentation/devicetree/bindings/tty/serial/of-serial.txt b/Documentation/devicetree/bindings/tty/serial/of-serial.txt index 8f01cb1..1e1145c 100644 --- a/Documentation/devicetree/bindings/tty/serial/of-serial.txt +++ b/Documentation/devicetree/bindings/tty/serial/of-serial.txt @@ -11,9 +11,6 @@ Required properties: - "nvidia,tegra20-uart" - "nxp,lpc3220-uart" - "ibm,qpace-nwp-serial" - - "altr,16550-FIFO32" - - "altr,16550-FIFO64" - - "altr,16550-FIFO128" - "serial" if the port type is unknown. - reg : offset and length of the register set for the device. - interrupts : should contain uart interrupt. diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt deleted file mode 100644 index cb61516..0000000 --- a/Documentation/hwlat_detector.txt +++ /dev/null @@ -1,64 +0,0 @@ -Introduction: ------------- - -The module hwlat_detector is a special purpose kernel module that is used to -detect large system latencies induced by the behavior of certain underlying -hardware or firmware, independent of Linux itself. The code was developed -originally to detect SMIs (System Management Interrupts) on x86 systems, -however there is nothing x86 specific about this patchset. It was -originally written for use by the "RT" patch since the Real Time -kernel is highly latency sensitive. - -SMIs are usually not serviced by the Linux kernel, which typically does not -even know that they are occurring. SMIs are instead set up by BIOS code -and are serviced by BIOS code, usually for "critical" events such as -management of thermal sensors and fans. Sometimes though, SMIs are used for -other tasks and those tasks can spend an inordinate amount of time in the -handler (sometimes measured in milliseconds). Obviously this is a problem if -you are trying to keep event service latencies down in the microsecond range. - -The hardware latency detector works by hogging all of the cpus for configurable -amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter -for some period, then looking for gaps in the TSC data. Any gap indicates a -time when the polling was interrupted and since the machine is stopped and -interrupts turned off the only thing that could do that would be an SMI. - -Note that the SMI detector should *NEVER* be used in a production environment. -It is intended to be run manually to determine if the hardware platform has a -problem with long system firmware service routines. - -Usage: ------ - -Loading the module hwlat_detector passing the parameter "enabled=1" (or by -setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only -step required to start the hwlat_detector. It is possible to redefine the -threshold in microseconds (us) above which latency spikes will be taken -into account (parameter "threshold="). - -Example: - - # modprobe hwlat_detector enabled=1 threshold=100 - -After the module is loaded, it creates a directory named "hwlat_detector" under -the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary -to have debugfs mounted, which might be on /sys/debug on your system.
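A minimal end-to-end session following the steps above might look like the sketch below; it assumes debugfs is mounted at /sys/kernel/debug (rather than the /debug path used in this text) and that the module was built for the running kernel:

  # mount -t debugfs none /sys/kernel/debug      (skip if already mounted)
  # modprobe hwlat_detector enabled=1 threshold=100
  # cat /sys/kernel/debug/hwlat_detector/max     (worst latency seen so far, usecs)
  # cat /sys/kernel/debug/hwlat_detector/sample  (blocks until a sample arrives)
  # rmmod hwlat_detector

The "max" and "sample" files read here are described next.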
- -The /debug/hwlat_detector interface contains the following files: - -count - number of latency spikes observed since last reset -enable - a global enable/disable toggle (0/1), resets count -max - maximum hardware latency actually observed (usecs) -sample - a pipe from which to read current raw sample data - in the format <timestamp> <latency observed usecs> - (can be opened O_NONBLOCK for a single sample) -threshold - minimum latency value to be considered (usecs) -width - time period to sample with CPUs held (usecs) - must be less than the total window size (enforced) -window - total period of sampling, width being inside (usecs) - -By default we will set width to 500,000 and window to 1,000,000, meaning that -we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we -observe any latencies that exceed the threshold (initially 100 usecs), -then we write to a global sample ring buffer of 8K samples, which is -consumed by reading from the "sample" (pipe) debugfs file interface. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 1ba0afe..6c72381 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -564,8 +564,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. UART at the specified I/O port or MMIO address, switching to the matching ttyS device later. The options are the same as for ttyS, above. - hvc<n> Use the hypervisor console device <n>. This is for - both Xen and PowerPC hypervisors. If the device connected to the port is not a TTY but a braille device, prepend "brl," before the device type, for instance @@ -756,7 +754,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. earlyprintk= [X86,SH,BLACKFIN] earlyprintk=vga - earlyprintk=xen earlyprintk=serial[,ttySn[,baudrate]] earlyprintk=ttySn[,baudrate] earlyprintk=dbgp[debugController#] @@ -774,8 +771,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. The VGA output is eventually overwritten by the real console. - The xen output can only be used by Xen PV guests. - ekgdboc= [X86,KGDB] Allow early kernel console debugging ekgdboc=kbd @@ -1182,15 +1177,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. See comment before ip2_setup() in drivers/char/ip2/ip2base.c. - irqaffinity= [SMP] Set the default irq affinity mask - Format: - <cpu number>,...,<cpu number> - or - <cpu number>-<cpu number> - (must be a positive range in ascending order) - or a mixture - <cpu number>,...,<cpu number>-<cpu number> - irqfixup [HW] When an interrupt is not handled search all handlers for it. Intended to get systems with badly broken diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt index 67ac78c..2a4cdda 100644 --- a/Documentation/sysrq.txt +++ b/Documentation/sysrq.txt @@ -57,16 +57,9 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>, On other - If you know of the key combos for other architectures, please let me know so I can add them to this section. -On all - write a character to /proc/sysrq-trigger, e.g.: - echo t > /proc/sysrq-trigger - -On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g. - echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq - Send an ICMP echo request with this pattern plus the particular - SysRq command key. Example: - # ping -c1 -s57 -p0102030468 - will trigger the SysRq-H (help) command. +On all - write a character to /proc/sysrq-trigger. e.g.: + echo t > /proc/sysrq-trigger * What are the 'command' keys?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt deleted file mode 100644 index 6f2aeab..0000000 --- a/Documentation/trace/histograms.txt +++ /dev/null @@ -1,186 +0,0 @@ - Using the Linux Kernel Latency Histograms - - -This document gives a short explanation of how to enable, configure and use -latency histograms. Latency histograms are primarily relevant in the -context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT) -and are used in the quality management of the Linux real-time -capabilities. - - -* Purpose of latency histograms - -A latency histogram continuously accumulates the frequencies of latency -data. There are two types of histograms -- potential sources of latencies -- effective latencies - - -* Potential sources of latencies - -Potential sources of latencies are code segments where interrupts, -preemption or both are disabled (aka critical sections). To create -histograms of potential sources of latency, the kernel stores the time -stamp at the start of a critical section, determines the time elapsed -when the end of the section is reached, and increments the frequency -counter of that latency value - irrespective of whether any concurrently -running process is affected by latency or not. -- Configuration items (in the Kernel hacking/Tracers submenu) - CONFIG_INTERRUPT_OFF_LATENCY - CONFIG_PREEMPT_OFF_LATENCY - - -* Effective latencies - -Effective latencies are actually occurring during wakeup of a process. To -determine effective latencies, the kernel stores the time stamp when a -process is scheduled to be woken up, and determines the duration of the -wakeup time shortly before control is passed over to this process. Note -that the apparent latency in user space may be somewhat longer, since the -process may be interrupted after control is passed over to it but before -the execution in user space takes place. Simply measuring the interval -between enqueuing and wakeup may also not be appropriate in cases when a -process is scheduled as a result of a timer expiration. The timer may have -missed its deadline, e.g. due to disabled interrupts, but this latency -would not be registered. Therefore, the offsets of missed timers are -recorded in a separate histogram. If both wakeup latency and missed timer -offsets are configured and enabled, a third histogram may be enabled that -records the overall latency as a sum of the timer latency, if any, and the -wakeup latency. This histogram is called "timerandwakeup". -- Configuration items (in the Kernel hacking/Tracers submenu) - CONFIG_WAKEUP_LATENCY - CONFIG_MISSED_TIMER_OFFSETS - - -* Usage - -The interface to the administration of the latency histograms is located -in the debugfs file system. To mount it, either enter - -mount -t sysfs nodev /sys -mount -t debugfs nodev /sys/kernel/debug - -from shell command line level, or add - -nodev /sys sysfs defaults 0 0 -nodev /sys/kernel/debug debugfs defaults 0 0 - -to the file /etc/fstab. All latency histogram related files are then -available in the directory /sys/kernel/debug/tracing/latency_hist. A -particular histogram type is enabled by writing non-zero to the related -variable in the /sys/kernel/debug/tracing/latency_hist/enable directory. -Select "preemptirqsoff" for the histograms of potential sources of -latencies and "wakeup" for histograms of effective latencies etc.
The -histogram data - one per CPU - are available in the files - -/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx -/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx -/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx -/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx -/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx -/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx -/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx - -The histograms are reset by writing non-zero to the file "reset" in a -particular latency directory. To reset all latency data, use - -#!/bin/sh - -TRACINGDIR=/sys/kernel/debug/tracing -HISTDIR=$TRACINGDIR/latency_hist - -if test -d $HISTDIR -then - cd $HISTDIR - for i in `find . | grep /reset$` - do - echo 1 >$i - done -fi - - -* Data format - -Latency data are stored with a resolution of one microsecond. The -maximum latency is 10,240 microseconds. The data are only valid if the -overflow register is empty. Every output line contains the latency in -microseconds in the first row and the number of samples in the second -row. To display only lines with a positive latency count, use, for -example, - -grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0 - -#Minimum latency: 0 microseconds. -#Average latency: 0 microseconds. -#Maximum latency: 25 microseconds. -#Total samples: 3104770694 -#There are 0 samples greater or equal than 10240 microseconds -#usecs samples - 0 2984486876 - 1 49843506 - 2 58219047 - 3 5348126 - 4 2187960 - 5 3388262 - 6 959289 - 7 208294 - 8 40420 - 9 4485 - 10 14918 - 11 18340 - 12 25052 - 13 19455 - 14 5602 - 15 969 - 16 47 - 17 18 - 18 14 - 19 1 - 20 3 - 21 2 - 22 5 - 23 2 - 25 1 - - -* Wakeup latency of a selected process - -To only collect wakeup latency data of a particular process, write the -PID of the requested process to - -/sys/kernel/debug/tracing/latency_hist/wakeup/pid - -PIDs are not considered if this variable is set to 0. - - -* Details of the process with the highest wakeup latency so far - -Selected data of the process that suffered from the highest wakeup -latency that occurred on a particular CPU are available in the file - -/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx. - -In addition, other relevant system data at the time when the -latency occurred are given. - -The format of the data is (all in one line): -<PID> <Priority> <Latency> (<Timeroffset>) <Command> \ -<- <PID> <Priority> <Command> <Timestamp> - -The value of <Timeroffset> is only relevant in the combined timer -and wakeup latency recording. In the wakeup recording, it is -always 0, in the missed_timer_offsets recording, it is the same -as <Latency>. - -When retrospectively searching for the origin of a latency and -tracing was not enabled, it may be helpful to know the name and -some basic data of the task that (finally) was switching to the -late real-time task. Therefore, in addition to the victim's data, -the data of the possible culprit are also displayed after the -"<-" symbol. - -Finally, the timestamp of the time when the latency occurred -in <seconds>.<microseconds> after the most recent system boot -is provided. - -These data are also reset when the wakeup histogram is reset.
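A minimal wakeup-histogram session using the interface described above might look like the following sketch (it assumes a kernel built with the wakeup latency histogram option, debugfs mounted as shown in the Usage section, and a placeholder PID of 1234):

#!/bin/sh

LH=/sys/kernel/debug/tracing/latency_hist

# enable wakeup histograms; optionally restrict recording to one PID (0 = all)
echo 1 >$LH/enable/wakeup
echo 1234 >$LH/wakeup/pid

# ... run the workload of interest, then show non-empty buckets on CPU 0 ...
grep -v " 0$" $LH/wakeup/CPU0

# reset this histogram's data and disable recording again
echo 1 >$LH/wakeup/reset
echo 0 >$LH/enable/wakeup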
diff --git a/Makefile b/Makefile index 183eff3..d69266c 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 8 -SUBLEVEL = 13 +SUBLEVEL = 0 EXTRAVERSION = -NAME = Displaced Humerus Anterior +NAME = Unicycling Gorilla # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/Kconfig b/arch/Kconfig index 4a93e44..7f8f281 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -6,7 +6,6 @@ config OPROFILE tristate "OProfile system profiling" depends on PROFILING depends on HAVE_OPROFILE - depends on !PREEMPT_RT_FULL select RING_BUFFER select RING_BUFFER_ALLOW_SWAP help diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 1383f86..4d4c046 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -188,10 +188,6 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); -static struct resource irongate_io = { - .name = "Irongate PCI IO", - .flags = IORESOURCE_IO, -}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -213,7 +209,6 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; - bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index fb9eaa4..0c4132d 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -108,7 +108,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* If we're in an interrupt context, or have no user context, we must not take the fault. */ - if (!mm || pagefault_disabled()) + if (!mm || in_atomic()) goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2753534..67874b8 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -17,7 +17,6 @@ config ARM select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select HARDIRQS_SW_RESEND - select IRQ_FORCED_THREADING select HAVE_AOUT select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_KGDB @@ -47,7 +46,6 @@ config ARM select HAVE_MEMBLOCK select HAVE_OPROFILE if (HAVE_PERF_EVENTS) select HAVE_PERF_EVENTS - select HAVE_PREEMPT_LAZY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 91fe4f1..c2f14e8 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -349,7 +349,7 @@ rx_descs = <64>; mac_control = <0x20>; slaves = <2>; - active_slave = <0>; + cpts_active_slave = <0>; cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; reg = <0x4a100000 0x800 @@ -385,19 +385,5 @@ mac-address = [ 00 00 00 00 00 00 ]; }; }; - - ocmcram: ocmcram@40300000 { - compatible = "ti,am3352-ocmcram"; - reg = <0x40300000 0x10000>; - ti,hwmods = "ocmcram"; - ti,no_idle_on_suspend; - }; - - wkup_m3: wkup_m3@44d00000 { - compatible = "ti,am3353-wkup-m3"; - reg = <0x44d00000 0x4000 /* M3 UMEM */ - 0x44d80000 0x2000>; /* M3 DMEM */ - ti,hwmods = "wkup_m3"; - }; }; }; diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index 02b70a4..cb7bcc5 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -158,8 +158,8 @@ usart1 { pinctrl_usart1: usart1-0 { atmel,pins = - <1 6 0x1 0x1 /* PB6 periph A with pullup */ - 1 7 0x1 0x0>; /* PB7 periph A */ + <2 6 0x1 0x1 /* PB6 
periph A with pullup */ + 2 7 0x1 0x0>; /* PB7 periph A */ }; pinctrl_usart1_rts: usart1_rts-0 { @@ -194,18 +194,18 @@ usart3 { pinctrl_usart3: usart3-0 { atmel,pins = - <1 10 0x1 0x1 /* PB10 periph A with pullup */ - 1 11 0x1 0x0>; /* PB11 periph A */ + <2 10 0x1 0x1 /* PB10 periph A with pullup */ + 2 11 0x1 0x0>; /* PB11 periph A */ }; pinctrl_usart3_rts: usart3_rts-0 { atmel,pins = - <2 8 0x2 0x0>; /* PC8 periph B */ + <3 8 0x2 0x0>; /* PB8 periph B */ }; pinctrl_usart3_cts: usart3_cts-0 { atmel,pins = - <2 10 0x2 0x0>; /* PC10 periph B */ + <3 10 0x2 0x0>; /* PB10 periph B */ }; }; @@ -220,8 +220,8 @@ uart1 { pinctrl_uart1: uart1-0 { atmel,pins = - <1 12 0x1 0x1 /* PB12 periph A with pullup */ - 1 13 0x1 0x0>; /* PB13 periph A */ + <2 12 0x1 0x1 /* PB12 periph A with pullup */ + 2 13 0x1 0x0>; /* PB13 periph A */ }; }; diff --git a/arch/arm/boot/dts/at91sam9g15.dtsi b/arch/arm/boot/dts/at91sam9g15.dtsi index 28467fd..fbe7a70 100644 --- a/arch/arm/boot/dts/at91sam9g15.dtsi +++ b/arch/arm/boot/dts/at91sam9g15.dtsi @@ -10,7 +10,7 @@ / { model = "Atmel AT91SAM9G15 SoC"; - compatible = "atmel,at91sam9g15", "atmel,at91sam9x5"; + compatible = "atmel, at91sam9g15, atmel,at91sam9x5"; ahb { apb { diff --git a/arch/arm/boot/dts/at91sam9g15ek.dts b/arch/arm/boot/dts/at91sam9g15ek.dts index 5427b2d..86dd3f6 100644 --- a/arch/arm/boot/dts/at91sam9g15ek.dts +++ b/arch/arm/boot/dts/at91sam9g15ek.dts @@ -11,6 +11,6 @@ /include/ "at91sam9x5ek.dtsi" / { - model = "Atmel AT91SAM9G15-EK"; + model = "Atmel AT91SAM9G25-EK"; compatible = "atmel,at91sam9g15ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; }; diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi index 5fd32df..05a718f 100644 --- a/arch/arm/boot/dts/at91sam9g25.dtsi +++ b/arch/arm/boot/dts/at91sam9g25.dtsi @@ -10,7 +10,7 @@ / { model = "Atmel AT91SAM9G25 SoC"; - compatible = "atmel,at91sam9g25", "atmel,at91sam9x5"; + compatible = "atmel, at91sam9g25, atmel,at91sam9x5"; ahb { apb { diff --git a/arch/arm/boot/dts/at91sam9g35.dtsi b/arch/arm/boot/dts/at91sam9g35.dtsi index d6fa8af..f9d14a7 100644 --- a/arch/arm/boot/dts/at91sam9g35.dtsi +++ b/arch/arm/boot/dts/at91sam9g35.dtsi @@ -10,7 +10,7 @@ / { model = "Atmel AT91SAM9G35 SoC"; - compatible = "atmel,at91sam9g35", "atmel,at91sam9x5"; + compatible = "atmel, at91sam9g35, atmel,at91sam9x5"; ahb { apb { diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi index 4801717..80e29c6 100644 --- a/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/at91sam9n12.dtsi @@ -324,6 +324,8 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xf801c000 0x4000>; interrupts = <5 4 5>; + atmel,use-dma-rx; + atmel,use-dma-tx; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart0>; status = "disabled"; @@ -333,6 +335,8 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xf8020000 0x4000>; interrupts = <6 4 5>; + atmel,use-dma-rx; + atmel,use-dma-tx; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart1>; status = "disabled"; @@ -342,6 +346,8 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xf8024000 0x4000>; interrupts = <7 4 5>; + atmel,use-dma-rx; + atmel,use-dma-tx; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart2>; status = "disabled"; @@ -351,6 +357,8 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xf8028000 0x4000>; interrupts = <8 4 5>; + atmel,use-dma-rx; + atmel,use-dma-tx; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart3>; status = "disabled"; diff --git a/arch/arm/boot/dts/at91sam9x25.dtsi 
b/arch/arm/boot/dts/at91sam9x25.dtsi index 9ac2bc2..54eb33b 100644 --- a/arch/arm/boot/dts/at91sam9x25.dtsi +++ b/arch/arm/boot/dts/at91sam9x25.dtsi @@ -10,7 +10,7 @@ / { model = "Atmel AT91SAM9X25 SoC"; - compatible = "atmel,at91sam9x25", "atmel,at91sam9x5"; + compatible = "atmel, at91sam9x25, atmel,at91sam9x5"; ahb { apb { diff --git a/arch/arm/boot/dts/at91sam9x35.dtsi b/arch/arm/boot/dts/at91sam9x35.dtsi index ba67d83..fb102d6 100644 --- a/arch/arm/boot/dts/at91sam9x35.dtsi +++ b/arch/arm/boot/dts/at91sam9x35.dtsi @@ -10,7 +10,7 @@ / { model = "Atmel AT91SAM9X35 SoC"; - compatible = "atmel,at91sam9x35", "atmel,at91sam9x5"; + compatible = "atmel, at91sam9x35, atmel,at91sam9x5"; ahb { apb { diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index c461e11..8ecca69 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -402,6 +402,8 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xf801c000 0x200>; interrupts = <5 4 5>; + atmel,use-dma-rx; + atmel,use-dma-tx; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart0>; status = "disabled"; @@ -411,6 +413,8 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xf8020000 0x200>; interrupts = <6 4 5>; + atmel,use-dma-rx; + atmel,use-dma-tx; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart1>; status = "disabled"; @@ -420,6 +424,8 @@ compatible = "atmel,at91sam9260-usart"; reg = <0xf8024000 0x200>; interrupts = <7 4 5>; + atmel,use-dma-rx; + atmel,use-dma-tx; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usart2>; status = "disabled"; diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi index ccab256..8a7cf1d 100644 --- a/arch/arm/boot/dts/at91sam9x5ek.dtsi +++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi @@ -13,7 +13,7 @@ compatible = "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; chosen { - bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; + bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; }; ahb { diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts index c9c44b2..5bb0bf3 100644 --- a/arch/arm/boot/dts/kirkwood-dns320.dts +++ b/arch/arm/boot/dts/kirkwood-dns320.dts @@ -42,10 +42,12 @@ ocp@f1000000 { serial@12000 { + clock-frequency = <166666667>; status = "okay"; }; serial@12100 { + clock-frequency = <166666667>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts index e4e4930..d430713 100644 --- a/arch/arm/boot/dts/kirkwood-dns325.dts +++ b/arch/arm/boot/dts/kirkwood-dns325.dts @@ -50,6 +50,7 @@ }; }; serial@12000 { + clock-frequency = <200000000>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts index 0196cf6..2e3dd34 100644 --- a/arch/arm/boot/dts/kirkwood-dockstar.dts +++ b/arch/arm/boot/dts/kirkwood-dockstar.dts @@ -37,6 +37,7 @@ }; }; serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts index e21ae48..f2d386c 100644 --- a/arch/arm/boot/dts/kirkwood-dreamplug.dts +++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts @@ -38,6 +38,7 @@ }; }; serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts index c3573be..1b133e0 100644 --- 
a/arch/arm/boot/dts/kirkwood-goflexnet.dts +++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts @@ -73,11 +73,11 @@ }; }; serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; nand@3000000 { - chip-delay = <40>; status = "okay"; partition@0 { diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts index 5335b1a..71902da 100644 --- a/arch/arm/boot/dts/kirkwood-ib62x0.dts +++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts @@ -51,6 +51,7 @@ }; }; serial@12000 { + clock-frequency = <200000000>; status = "okay"; }; diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts index 12ccf74..504f16b 100644 --- a/arch/arm/boot/dts/kirkwood-iconnect.dts +++ b/arch/arm/boot/dts/kirkwood-iconnect.dts @@ -78,6 +78,7 @@ }; }; serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts index 3694e94..6cae459 100644 --- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts +++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts @@ -96,11 +96,11 @@ marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { - marvell,pins = "mpp46"; + marvell,pins = "mpp44"; marvell,function = "gpio"; }; pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { - marvell,pins = "mpp47"; + marvell,pins = "mpp45"; marvell,function = "gpio"; }; @@ -115,6 +115,7 @@ }; serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; @@ -157,14 +158,14 @@ gpios = <&gpio0 16 0>; linux,default-trigger = "default-on"; }; - rebuild_led { - label = "status:white:rebuild_led"; - gpios = <&gpio1 4 0>; - }; - health_led { + health_led1 { label = "status:red:health_led"; gpios = <&gpio1 5 0>; }; + health_led2 { + label = "status:white:health_led"; + gpios = <&gpio1 4 0>; + }; backup_led { label = "status:blue:backup_led"; gpios = <&gpio0 15 0>; diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts index 5bbd054..8db3123 100644 --- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts +++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts @@ -34,6 +34,7 @@ }; serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts index 9f55d95..9510c9e 100644 --- a/arch/arm/boot/dts/kirkwood-lschlv2.dts +++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts @@ -13,6 +13,7 @@ ocp@f1000000 { serial@12000 { + clock-frequency = <166666667>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts index 5c84c11..739019c 100644 --- a/arch/arm/boot/dts/kirkwood-lsxhl.dts +++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts @@ -13,6 +13,7 @@ ocp@f1000000 { serial@12000 { + clock-frequency = <200000000>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts index 07be213..262c654 100644 --- a/arch/arm/boot/dts/kirkwood-mplcec4.dts +++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts @@ -91,6 +91,7 @@ }; serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi index f0245c1..77d21ab 100644 --- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi +++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi @@ -23,6 +23,7 @@ }; serial@12000 { + clock-frequency = <166666667>; status = "okay"; }; diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts 
b/arch/arm/boot/dts/kirkwood-nsa310.dts index 28d05e4..5509f96 100644 --- a/arch/arm/boot/dts/kirkwood-nsa310.dts +++ b/arch/arm/boot/dts/kirkwood-nsa310.dts @@ -18,6 +18,7 @@ ocp@f1000000 { serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts index f3cc7c4..49d3d74 100644 --- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts +++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts @@ -18,10 +18,12 @@ ocp@f1000000 { serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; serial@12100 { + clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts index 7dd19ff..cd15452 100644 --- a/arch/arm/boot/dts/kirkwood-topkick.dts +++ b/arch/arm/boot/dts/kirkwood-topkick.dts @@ -17,6 +17,7 @@ ocp@f1000000 { serial@12000 { + clock-frequency = <200000000>; status = "ok"; }; diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi index ad26d92..d6ab442 100644 --- a/arch/arm/boot/dts/kirkwood.dtsi +++ b/arch/arm/boot/dts/kirkwood.dtsi @@ -38,7 +38,6 @@ interrupt-controller; #interrupt-cells = <2>; interrupts = <35>, <36>, <37>, <38>; - clocks = <&gate_clk 7>; }; gpio1: gpio@10140 { @@ -50,7 +49,6 @@ interrupt-controller; #interrupt-cells = <2>; interrupts = <39>, <40>, <41>; - clocks = <&gate_clk 7>; }; serial@12000 { @@ -59,6 +57,7 @@ reg-shift = <2>; interrupts = <33>; clocks = <&gate_clk 7>; + /* set clock-frequency in board dts */ status = "disabled"; }; @@ -68,6 +67,7 @@ reg-shift = <2>; interrupts = <34>; clocks = <&gate_clk 7>; + /* set clock-frequency in board dts */ status = "disabled"; }; @@ -75,7 +75,6 @@ compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc"; reg = <0x10300 0x20>; interrupts = <53>; - clocks = <&gate_clk 7>; }; spi@10600 { diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 4ceb266..b8effa1 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -314,7 +314,7 @@ spi@7000d800 { compatible = "nvidia,tegra20-slink"; - reg = <0x7000d800 0x200>; + reg = <0x7000d480 0x200>; interrupts = <0 83 0x04>; nvidia,dma-request-selector = <&apbdma 17>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index aeea838..529fdb8 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -309,7 +309,7 @@ spi@7000d800 { compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink"; - reg = <0x7000d800 0x200>; + reg = <0x7000d480 0x200>; interrupts = <0 83 0x04>; nvidia,dma-request-selector = <&apbdma 17>; #address-cells = <1>; diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig index 8aab786..606d48f 100644 --- a/arch/arm/configs/at91sam9g45_defconfig +++ b/arch/arm/configs/at91sam9g45_defconfig @@ -173,6 +173,7 @@ CONFIG_MMC=y # CONFIG_MMC_BLOCK_BOUNCE is not set CONFIG_SDIO_UART=m CONFIG_MMC_ATMELMCI=y +CONFIG_MMC_ATMELMCI_DMA=y CONFIG_LEDS_ATMEL_PWM=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGER_TIMER=y diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index a55b206..7bf5351 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig @@ -118,7 +118,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FONTS=y CONFIG_LOGO=y CONFIG_USB=y -CONFIG_USB_EHCI_HCD=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_HOST=y CONFIG_USB_STORAGE=y diff --git a/arch/arm/include/asm/delay.h 
b/arch/arm/include/asm/delay.h index 720799f..ab98fdd 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,7 +24,6 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); - bool const_clock; } arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index ed94b1a..02fe2fb 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX processor registers */ #define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 #define IOP3XX_PERIPHERAL_SIZE 0x00002000 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ IOP3XX_PERIPHERAL_SIZE - 1) diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index e3d5554..9f77e78 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -5,15 +5,15 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID - atomic64_t id; + u64 id; #endif - unsigned int vmalloc_seq; + unsigned int vmalloc_seq; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID #define ASID_BITS 8 #define ASID_MASK ((~0ULL) << ASID_BITS) -#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK) +#define ASID(mm) ((mm)->context.id & ~ASID_MASK) #else #define ASID(mm) (0) #endif @@ -26,7 +26,7 @@ typedef struct { * modified for 2.6 by Hyok S. Choi */ typedef struct { - unsigned long end_brk; + unsigned long end_brk; } mm_context_t; #endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a661..e1f644b 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm); #ifdef CONFIG_CPU_HAS_ASID void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); -#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) +#define init_new_context(tsk,mm) ({ mm->context.id = 0; }) #else /* !CONFIG_CPU_HAS_ASID */ diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 26e9ce4..9c82f988 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,15 +61,6 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define FIRST_USER_ADDRESS PAGE_SIZE /* - * Use TASK_SIZE as the ceiling argument for free_pgtables() and - * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd - * page shared between user and kernel). 
- */ -#ifdef CONFIG_ARM_LPAE -#define USER_PGTABLES_CEILING TASK_SIZE -#endif - -/* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, * as well as any architecture dependent bits like global/ASID and SMP @@ -249,8 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | - L_PTE_NONE | L_PTE_VALID; + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h index 7f79136..9a0ea6a 100644 --- a/arch/arm/include/asm/signal.h +++ b/arch/arm/include/asm/signal.h @@ -29,7 +29,6 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index fbd0ba7..fa09e6b 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -3,14 +3,6 @@ #include <linux/thread_info.h> -#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM -void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); -#else -static inline void -switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -#endif - - /* * switch_to(prev, next) should switch from task `prev' to `next' * `prev' will never be the same as `next'. schedule() itself @@ -20,7 +12,6 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ - switch_kmaps(prev, next); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 4acacb2..cddda1f 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -50,7 +50,6 @@ struct cpu_context_save { struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => bug */ - int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ @@ -149,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 1 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ -#define TIF_NEED_RESCHED_LAZY 3 #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_TRACEPOINT 10 @@ -162,7 +160,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) -#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 8b1d153..c985b48 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -50,7 +50,6 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); - DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); @@ -108,7 +107,7 @@ int main(void) BLANK(); #endif #ifdef CONFIG_CPU_HAS_ASID - DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter)); + DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); BLANK(); #endif DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c index 4307653..85aa2b2 100644 --- a/arch/arm/kernel/early_printk.c +++ b/arch/arm/kernel/early_printk.c @@ -29,17 +29,28 @@ static void early_console_write(struct console *con, const char *s, unsigned n) early_write(s, n); } -static struct console early_console_dev = { +static struct console early_console = { .name = "earlycon", .write = early_console_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1, }; +asmlinkage void early_printk(const char *fmt, ...) +{ + char buf[512]; + int n; + va_list ap; + + va_start(ap, fmt); + n = vscnprintf(buf, sizeof(buf), fmt, ap); + early_write(buf, n); + va_end(ap); +} + static int __init setup_early_printk(char *buf) { - early_console = &early_console_dev; - register_console(&early_console_dev); + register_console(&early_console); return 0; } diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index afa746c..0f82098 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -216,18 +216,11 @@ __irq_svc: #ifdef CONFIG_PREEMPT get_thread_info tsk ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - teq r8, #0 @ if preempt count != 0 - bne 1f @ return from exeption ldr r0, [tsk, #TI_FLAGS] @ get flags - tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set - blne svc_preempt @ preempt! 
- - ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count - teq r8, #0 @ if preempt lazy count != 0 + teq r8, #0 @ if preempt count != 0 movne r0, #0 @ force flags to 0 - tst r0, #_TIF_NEED_RESCHED_LAZY + tst r0, #_TIF_NEED_RESCHED blne svc_preempt -1: #endif #ifdef CONFIG_TRACE_IRQFLAGS @@ -247,8 +240,6 @@ svc_preempt: 1: bl preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS tst r0, #_TIF_NEED_RESCHED - bne 1b - tst r0, #_TIF_NEED_RESCHED_LAZY moveq pc, r8 @ go again b 1b #endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index e0eb9a1..486a15a 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -184,22 +184,13 @@ __create_page_tables: orr r3, r3, #3 @ PGD block type mov r6, #4 @ PTRS_PER_PGD mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER -1: -#ifdef CONFIG_CPU_ENDIAN_BE8 +1: str r3, [r0], #4 @ set bottom PGD entry bits str r7, [r0], #4 @ set top PGD entry bits - str r3, [r0], #4 @ set bottom PGD entry bits -#else - str r3, [r0], #4 @ set bottom PGD entry bits - str r7, [r0], #4 @ set top PGD entry bits -#endif add r3, r3, #0x1000 @ next PMD table subs r6, r6, #1 bne 1b add r4, r4, #0x1000 @ point to the PMD tables -#ifdef CONFIG_CPU_ENDIAN_BE8 - add r4, r4, #4 @ we only write the bottom word -#endif #endif ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags @@ -267,11 +258,6 @@ __create_page_tables: addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] -#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) - sub r4, r4, #4 @ Fixup page table pointer - @ for 64-bit descriptors -#endif - #ifdef CONFIG_DEBUG_LL #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) /* @@ -290,16 +276,12 @@ __create_page_tables: orr r3, r7, r3, lsl #SECTION_SHIFT #ifdef CONFIG_ARM_LPAE mov r7, #1 << (54 - 32) @ XN -#ifdef CONFIG_CPU_ENDIAN_BE8 - str r7, [r0], #4 - str r3, [r0], #4 -#else - str r3, [r0], #4 - str r7, [r0], #4 -#endif #else orr r3, r3, #PMD_SECT_XN +#endif str r3, [r0], #4 +#ifdef CONFIG_ARM_LPAE + str r7, [r0], #4 #endif #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */ diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 23fa6a2..f9e8657 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -261,10 +261,7 @@ validate_event(struct pmu_hw_events *hw_events, struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu *leader_pmu = event->group_leader->pmu; - if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) - return 1; - - if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) + if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; return armpmu->get_event_idx(hw_events, event) >= 0; diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index aa1b171..5f66206 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -118,8 +118,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) continue; } - err = request_irq(irq, handler, - IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", + err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu", cpu_pmu); if (err) { pr_err("unable to request IRQ%d for ARM PMU counters\n", diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 89ede24..4fbc757 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] /* * 
PMXEVTYPER: Event selection reg */ -#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */ +#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ /* diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 43ac178..c6dec5f 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -459,31 +459,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) } #ifdef CONFIG_MMU - -/* - * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not - * initialized by pgtable_page_ctor() then a coredump of the vector page will - * fail. - */ -static int __init vectors_user_mapping_init_page(void) -{ - struct page *page; - unsigned long addr = 0xffff0000; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - - pgd = pgd_offset_k(addr); - pud = pud_offset(pgd, addr); - pmd = pmd_offset(pud, addr); - page = pmd_page(*(pmd)); - - pgtable_page_ctor(page); - - return 0; -} -late_initcall(vectors_user_mapping_init_page); - /* * The vectors page is always readable from user space for the * atomic helpers and the signal restart code. Insert it into the diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index 59d2adb..fc6692e 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void) static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; -static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) +static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) { return (cyc * mult) >> shift; } -static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) +static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) { u64 epoch_ns; u32 epoch_cyc; @@ -93,11 +93,11 @@ static void notrace update_sched_clock(void) * detectable in cyc_to_fixed_sched_clock(). 
*/ raw_local_irq_save(flags); - cd.epoch_cyc_copy = cyc; + cd.epoch_cyc = cyc; smp_wmb(); cd.epoch_ns = ns; smp_wmb(); - cd.epoch_cyc = cyc; + cd.epoch_cyc_copy = cyc; raw_local_irq_restore(flags); } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 806416a..56f72d2 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -638,8 +638,7 @@ asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { do { - if (likely(thread_flags & (_TIF_NEED_RESCHED | - _TIF_NEED_RESCHED_LAZY))) { + if (likely(thread_flags & _TIF_NEED_RESCHED)) { schedule(); } else { if (unlikely(!user_mode(regs))) diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 58af91c..84f4cbf 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -693,9 +693,6 @@ static int cpufreq_callback(struct notifier_block *nb, if (freq->flags & CPUFREQ_CONST_LOOPS) return NOTIFY_OK; - if (arm_delay_ops.const_clock) - return NOTIFY_OK; - if (!per_cpu(l_p_j_ref, cpu)) { per_cpu(l_p_j_ref, cpu) = per_cpu(cpu_data, cpu).loops_per_jiffy; diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a..0dc5385 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -77,7 +77,6 @@ void __init register_current_timer_delay(const struct delay_timer *timer) arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; - arm_delay_ops.const_clock = true; delay_calibrated = true; } else { pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 0c36dcd..cafe988 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -134,7 +134,6 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev) break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: - remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq); case CLOCK_EVT_MODE_RESUME: irqmask = 0; break; diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c index a6f9751..358412f 100644 --- a/arch/arm/mach-at91/at91sam926x_time.c +++ b/arch/arm/mach-at91/at91sam926x_time.c @@ -77,7 +77,7 @@ static struct clocksource pit_clk = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static struct irqaction at91sam926x_pit_irq; + /* * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) */ @@ -86,8 +86,6 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - /* Set up irq handler */ - setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq); /* update clocksource counter */ pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR)); pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN @@ -100,7 +98,6 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) case CLOCK_EVT_MODE_UNUSED: /* disable irq, leaving the clocksource active */ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); - remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq); break; case CLOCK_EVT_MODE_RESUME: break; diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c index 1478294..191d37c 100644 --- a/arch/arm/mach-at91/board-foxg20.c +++ b/arch/arm/mach-at91/board-foxg20.c @@ -176,7 +176,6 @@ static struct w1_gpio_platform_data w1_gpio_pdata = { /* If you choose to use a pin other than PB16 it needs to be 3.3V */ .pin = AT91_PIN_PB16, .is_open_drain = 1, - 
.ext_pullup_enable_pin = -EINVAL, }; static struct platform_device w1_device = { diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c index 58a6758..48a962b 100644 --- a/arch/arm/mach-at91/board-stamp9g20.c +++ b/arch/arm/mach-at91/board-stamp9g20.c @@ -188,7 +188,6 @@ static struct spi_board_info portuxg20_spi_devices[] = { static struct w1_gpio_platform_data w1_gpio_pdata = { .pin = AT91_PIN_PA29, .is_open_drain = 1, - .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device w1_device = { diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c index 6b4608d..4b67847 100644 --- a/arch/arm/mach-at91/setup.c +++ b/arch/arm/mach-at91/setup.c @@ -333,7 +333,7 @@ static void at91_dt_rstc(void) of_id = of_match_node(rstc_ids, np); if (!of_id) - panic("AT91: rtsc no restart function available\n"); + panic("AT91: rtsc no restart function availlable\n"); arm_pm_restart = of_id->data; diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index 7f26faf..031805b 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -22,9 +22,19 @@ static struct map_desc cns3xxx_io_desc[] __initdata = { { - .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT, - .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), - .length = SZ_8K, + .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, + .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), + .length = SZ_4K, + .type = MT_DEVICE, + }, { + .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, + .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), + .length = SZ_4K, + .type = MT_DEVICE, + }, { + .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, + .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), + .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h index b1021aa..191c8e5 100644 --- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h +++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h @@ -94,10 +94,10 @@ #define RTC_INTR_STS_OFFSET 0x34 #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ -#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */ +#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ -#define CNS3XXX_PM_BASE_VIRT 0xFB001000 +#define CNS3XXX_PM_BASE_VIRT 0xFFF08000 #define PM_CLK_GATE_OFFSET 0x00 #define PM_SOFT_RST_OFFSET 0x04 @@ -109,7 +109,7 @@ #define PM_PLL_HM_PD_OFFSET 0x1C #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ -#define CNS3XXX_UART0_BASE_VIRT 0xFB002000 +#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 @@ -130,7 +130,7 @@ #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ -#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000 +#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 #define TIMER1_COUNTER_OFFSET 0x00 #define TIMER1_AUTO_RELOAD_OFFSET 0x04 @@ -227,16 +227,16 @@ * Testchip peripheral and fpga gic regions */ #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ -#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000 +#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ -#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100) +#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 -#define 
CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600) +#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ -#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000) +#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c index 45b7c71..a685e97 100644 --- a/arch/arm/mach-davinci/dma.c +++ b/arch/arm/mach-davinci/dma.c @@ -743,9 +743,6 @@ EXPORT_SYMBOL(edma_free_channel); */ int edma_alloc_slot(unsigned ctlr, int slot) { - if (!edma_cc[ctlr]) - return -EINVAL; - if (slot >= 0) slot = EDMA_CHAN_SLOT(slot); diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index 3b6de7a..1a89824 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -299,7 +299,6 @@ void exynos4_restart(char mode, const char *cmd) void exynos5_restart(char mode, const char *cmd) { - struct device_node *np; u32 val; void __iomem *addr; @@ -307,9 +306,8 @@ void exynos5_restart(char mode, const char *cmd) val = 0x1; addr = EXYNOS_SWRESET; } else if (of_machine_is_compatible("samsung,exynos5440")) { - np = of_find_compatible_node(NULL, NULL, "samsung,exynos5440-clock"); - addr = of_iomap(np, 0) + 0xcc; - val = (0xfff << 20) | (0x1 << 16); + val = (0x10 << 20) | (0x1 << 16); + addr = EXYNOS5440_SWRESET; } else { pr_err("%s: cannot support non-DT\n", __func__); return; diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index d044812..c5c840e 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -71,7 +71,7 @@ static void __iomem *scu_base_addr(void) return (void __iomem *)(S5P_VA_SCU); } -static DEFINE_RAW_SPINLOCK(boot_lock); +static DEFINE_SPINLOCK(boot_lock); static void __cpuinit exynos_secondary_init(unsigned int cpu) { @@ -91,8 +91,8 @@ static void __cpuinit exynos_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - raw_spin_lock(&boot_lock); - raw_spin_unlock(&boot_lock); + spin_lock(&boot_lock); + spin_unlock(&boot_lock); } static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -104,7 +104,7 @@ static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct * Set synchronisation state between this boot processor * and the secondary one */ - raw_spin_lock(&boot_lock); + spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -133,7 +133,7 @@ static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct if (timeout == 0) { printk(KERN_ERR "cpu1 power enable failed"); - raw_spin_unlock(&boot_lock); + spin_unlock(&boot_lock); return -ETIMEDOUT; } } @@ -161,7 +161,7 @@ static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - raw_spin_unlock(&boot_lock); + spin_unlock(&boot_lock); return pen_release != -1 ? 
-ENOSYS : 0; } diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c index 69858c7..2c570cd 100644 --- a/arch/arm/mach-imx/clk-imx25.c +++ b/arch/arm/mach-imx/clk-imx25.c @@ -224,9 +224,6 @@ static int __init __mx25_clocks_init(unsigned long osc_rate) clk_prepare_enable(clk[emi_ahb]); - /* Clock source for gpt must be derived from AHB */ - clk_set_parent(clk[per5_sel], clk[ahb]); - clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index 5e3ca7a..f0727e8 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c @@ -257,7 +257,6 @@ int __init mx35_clocks_init() clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); - clk_register_clkdev(clk[admux_gate], "audmux", NULL); clk_prepare_enable(clk[spba_gate]); clk_prepare_enable(clk[gpio1_gate]); @@ -265,8 +264,6 @@ int __init mx35_clocks_init() clk_prepare_enable(clk[gpio3_gate]); clk_prepare_enable(clk[iim_gate]); clk_prepare_enable(clk[emi_gate]); - clk_prepare_enable(clk[max_gate]); - clk_prepare_enable(clk[iomuxc_gate]); /* * SCC is needed to boot via mmc after a watchdog reset. The clock code diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 122da24..fa36fb8 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -116,8 +116,6 @@ void tzic_handle_irq(struct pt_regs *); extern void imx_enable_cpu(int cpu, bool enable); extern void imx_set_cpu_jump(int cpu, void *jump_addr); -extern u32 imx_get_cpu_arg(int cpu); -extern void imx_set_cpu_arg(int cpu, u32 arg); #ifdef CONFIG_DEBUG_LL extern void imx_lluart_map_io(void); #else diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 361a253..7bc5fe1 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c @@ -46,23 +46,11 @@ static inline void cpu_enter_lowpower(void) void imx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); - /* - * We use the cpu jumping argument register to sync with - * imx_cpu_kill() which is running on cpu0 and waiting for - * the register being cleared to kill the cpu. 
- */ - imx_set_cpu_arg(cpu, ~0); cpu_do_idle(); } int imx_cpu_kill(unsigned int cpu) { - unsigned long timeout = jiffies + msecs_to_jiffies(50); - - while (imx_get_cpu_arg(cpu) == 0) - if (time_after(jiffies, timeout)) - return 0; imx_enable_cpu(cpu, false); - imx_set_cpu_arg(cpu, 0); return 1; } diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index 09a742f..e15f155 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -43,18 +43,6 @@ void imx_set_cpu_jump(int cpu, void *jump_addr) src_base + SRC_GPR1 + cpu * 8); } -u32 imx_get_cpu_arg(int cpu) -{ - cpu = cpu_logical_map(cpu); - return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); -} - -void imx_set_cpu_arg(int cpu, u32 arg) -{ - cpu = cpu_logical_map(cpu); - writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); -} - void imx_src_prepare_restart(void) { u32 val; diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 26762bf..11e2a41 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -613,6 +613,7 @@ static struct map_desc ap_io_desc_atag[] __initdata = { static void __init ap_map_io_atag(void) { iotable_init(ap_io_desc_atag, ARRAY_SIZE(ap_io_desc_atag)); + ap_syscon_base = __io_address(INTEGRATOR_SC_BASE); ap_map_io(); } @@ -684,7 +685,6 @@ static void __init ap_init(void) platform_device_register(&cfi_flash_device); - ap_syscon_base = __io_address(INTEGRATOR_SC_BASE); sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET); for (i = 0; i < 4; i++) { struct lm_device *lmdev; diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c index 1dddc1b..2798f43 100644 --- a/arch/arm/mach-ixp4xx/vulcan-setup.c +++ b/arch/arm/mach-ixp4xx/vulcan-setup.c @@ -163,7 +163,6 @@ static struct platform_device vulcan_max6369 = { static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = { .pin = 14, - .ext_pullup_enable_pin = -EINVAL, }; static struct platform_device vulcan_w1_gpio = { diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index e714ead..de4fd2b 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -41,12 +41,16 @@ static void __init kirkwood_legacy_clk_init(void) struct device_node *np = of_find_compatible_node( NULL, NULL, "marvell,kirkwood-gating-clock"); + struct of_phandle_args clkspec; - struct clk *clk; clkspec.np = np; clkspec.args_count = 1; + clkspec.args[0] = CGC_BIT_GE0; + orion_clkdev_add(NULL, "mv643xx_eth_port.0", + of_clk_get_from_provider(&clkspec)); + clkspec.args[0] = CGC_BIT_PEX0; orion_clkdev_add("0", "pcie", of_clk_get_from_provider(&clkspec)); @@ -59,24 +63,14 @@ static void __init kirkwood_legacy_clk_init(void) orion_clkdev_add("1", "pcie", of_clk_get_from_provider(&clkspec)); + clkspec.args[0] = CGC_BIT_GE1; + orion_clkdev_add(NULL, "mv643xx_eth_port.1", + of_clk_get_from_provider(&clkspec)); + clkspec.args[0] = CGC_BIT_SDIO; orion_clkdev_add(NULL, "mvsdio", of_clk_get_from_provider(&clkspec)); - /* - * The ethernet interfaces forget the MAC address assigned by - * u-boot if the clocks are turned off. Until proper DT support - * is available we always enable them for now. 
- */ - clkspec.args[0] = CGC_BIT_GE0; - clk = of_clk_get_from_provider(&clkspec); - orion_clkdev_add(NULL, "mv643xx_eth_port.0", clk); - clk_prepare_enable(clk); - - clkspec.args[0] = CGC_BIT_GE1; - clk = of_clk_get_from_provider(&clkspec); - orion_clkdev_add(NULL, "mv643xx_eth_port.1", clk); - clk_prepare_enable(clk); } static void __init kirkwood_of_clk_init(void) diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c index 7b8043a..7ed69b69 100644 --- a/arch/arm/mach-msm/platsmp.c +++ b/arch/arm/mach-msm/platsmp.c @@ -31,7 +31,7 @@ extern void msm_secondary_startup(void); -static DEFINE_RAW_SPINLOCK(boot_lock); +static DEFINE_SPINLOCK(boot_lock); static inline int get_core_count(void) { @@ -58,8 +58,8 @@ static void __cpuinit msm_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - raw_spin_lock(&boot_lock); - raw_spin_unlock(&boot_lock); + spin_lock(&boot_lock); + spin_unlock(&boot_lock); } static __cpuinit void prepare_cold_cpu(unsigned int cpu) @@ -96,7 +96,7 @@ static int __cpuinit msm_boot_secondary(unsigned int cpu, struct task_struct *id * set synchronisation state between this boot processor * and the secondary one */ - raw_spin_lock(&boot_lock); + spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -130,7 +130,7 @@ static int __cpuinit msm_boot_secondary(unsigned int cpu, struct task_struct *id * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - raw_spin_unlock(&boot_lock); + spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index b9adf69..948bcaa 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -286,8 +286,5 @@ extern void omap_reserve(void); struct omap_hwmod; extern int omap_dss_reset(struct omap_hwmod *); -/* SoC specific clock initializer */ -extern int (*omap_clk_init)(void); - #endif /* __ASSEMBLER__ */ #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index aa20002..22590db 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c @@ -265,9 +265,8 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); static struct cpuidle_driver omap3_idle_driver = { - .name = "omap3_idle", - .owner = THIS_MODULE, - .en_core_tk_irqen = 1, + .name = "omap3_idle", + .owner = THIS_MODULE, .states = { { .enter = omap3_enter_idle_bm, diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 5c445ca..2c3fdd6 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -55,12 +55,6 @@ #include "prm44xx.h" /* - * omap_clk_init: points to a function that does the SoC-specific - * clock initializations - */ -int (*omap_clk_init)(void); - -/* * The machine specific code may provide the extra mapping besides the * default mapping provided here. 
*/ @@ -403,7 +397,7 @@ void __init omap2420_init_early(void) omap242x_clockdomains_init(); omap2420_hwmod_init(); omap_hwmod_init_postsetup(); - omap_clk_init = omap2420_clk_init; + omap2420_clk_init(); } void __init omap2420_init_late(void) @@ -433,7 +427,7 @@ void __init omap2430_init_early(void) omap243x_clockdomains_init(); omap2430_hwmod_init(); omap_hwmod_init_postsetup(); - omap_clk_init = omap2430_clk_init; + omap2430_clk_init(); } void __init omap2430_init_late(void) @@ -468,7 +462,7 @@ void __init omap3_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap_clk_init = omap3xxx_clk_init; + omap3xxx_clk_init(); } void __init omap3430_init_early(void) @@ -506,7 +500,7 @@ void __init ti81xx_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap_clk_init = omap3xxx_clk_init; + omap3xxx_clk_init(); } void __init omap3_init_late(void) @@ -574,7 +568,7 @@ void __init am33xx_init_early(void) am33xx_clockdomains_init(); am33xx_hwmod_init(); omap_hwmod_init_postsetup(); - omap_clk_init = am33xx_clk_init; + am33xx_clk_init(); } #endif @@ -599,7 +593,7 @@ void __init omap4430_init_early(void) omap44xx_clockdomains_init(); omap44xx_hwmod_init(); omap_hwmod_init_postsetup(); - omap_clk_init = omap4xxx_clk_init; + omap4xxx_clk_init(); } void __init omap4430_init_late(void) diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index ef1fbcc..cd42d92 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -45,7 +45,7 @@ u16 pm44xx_errata; /* SCU base address */ static void __iomem *scu_base; -static DEFINE_RAW_SPINLOCK(boot_lock); +static DEFINE_SPINLOCK(boot_lock); void __iomem *omap4_get_scu_base(void) { @@ -76,8 +76,8 @@ static void __cpuinit omap4_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - raw_spin_lock(&boot_lock); - raw_spin_unlock(&boot_lock); + spin_lock(&boot_lock); + spin_unlock(&boot_lock); } static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -90,7 +90,7 @@ static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct * * Set synchronisation state between this boot processor * and the secondary one */ - raw_spin_lock(&boot_lock); + spin_lock(&boot_lock); /* * Update the AuxCoreBoot0 with boot state for secondary core. 
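The platsmp hunks above and below (exynos, msm, omap4, spear13xx, ux500, plat-versatile) all revert the same change: the RT patch makes boot_lock a raw_spinlock_t, since spinlock_t becomes a sleeping lock under PREEMPT_RT and the not-yet-online secondary CPU cannot take a sleeping lock in its init path. For orientation, the handshake these files share looks roughly like the sketch below; pen_release and boot_lock follow the kernel's names, but the timeout and cache-maintenance details are simplified rather than any single SoC's exact code.

	static DEFINE_SPINLOCK(boot_lock);	/* raw_spinlock_t on the RT side */

	static int boot_secondary_sketch(unsigned int cpu)
	{
		unsigned long timeout;

		/* Serialize against the secondary's *_secondary_init(). */
		spin_lock(&boot_lock);

		/* Release this CPU from the holding pen... */
		write_pen_release(cpu);

		/* ...then wait for it to set pen_release back to -1. */
		timeout = jiffies + (1 * HZ);
		while (time_before(jiffies, timeout)) {
			if (pen_release == -1)
				break;
		}

		/* Let the secondary run its calibrations, then finish. */
		spin_unlock(&boot_lock);

		return pen_release != -1 ? -ENOSYS : 0;
	}

The secondary side takes and immediately drops boot_lock in *_secondary_init(), which is what synchronises the two CPUs before the boot processor returns.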
@@ -163,7 +163,7 @@ static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct * * Now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - raw_spin_unlock(&boot_lock); + spin_unlock(&boot_lock); return 0; } diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 8633a43..5d3b4f4 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -46,7 +46,7 @@ static void __iomem *wakeupgen_base; static void __iomem *sar_base; -static DEFINE_RAW_SPINLOCK(wakeupgen_lock); +static DEFINE_SPINLOCK(wakeupgen_lock); static unsigned int irq_target_cpu[MAX_IRQS]; static unsigned int irq_banks = MAX_NR_REG_BANKS; static unsigned int max_irqs = MAX_IRQS; @@ -134,9 +134,9 @@ static void wakeupgen_mask(struct irq_data *d) { unsigned long flags; - raw_spin_lock_irqsave(&wakeupgen_lock, flags); + spin_lock_irqsave(&wakeupgen_lock, flags); _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]); - raw_spin_unlock_irqrestore(&wakeupgen_lock, flags); + spin_unlock_irqrestore(&wakeupgen_lock, flags); } /* @@ -146,9 +146,9 @@ static void wakeupgen_unmask(struct irq_data *d) { unsigned long flags; - raw_spin_lock_irqsave(&wakeupgen_lock, flags); + spin_lock_irqsave(&wakeupgen_lock, flags); _wakeupgen_set(d->irq, irq_target_cpu[d->irq]); - raw_spin_unlock_irqrestore(&wakeupgen_lock, flags); + spin_unlock_irqrestore(&wakeupgen_lock, flags); } #ifdef CONFIG_HOTPLUG_CPU @@ -189,7 +189,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set) { unsigned long flags; - raw_spin_lock_irqsave(&wakeupgen_lock, flags); + spin_lock_irqsave(&wakeupgen_lock, flags); if (set) { _wakeupgen_save_masks(cpu); _wakeupgen_set_all(cpu, WKG_MASK_ALL); @@ -197,7 +197,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set) _wakeupgen_set_all(cpu, WKG_UNMASK_ALL); _wakeupgen_restore_masks(cpu); } - raw_spin_unlock_irqrestore(&wakeupgen_lock, flags); + spin_unlock_irqrestore(&wakeupgen_lock, flags); } #endif diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 559c87b..b8ad6e6 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -554,8 +554,6 @@ static inline void __init realtime_counter_init(void) clksrc_nr, clksrc_src) \ static void __init omap##name##_gptimer_timer_init(void) \ { \ - if (omap_clk_init) \ - omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ @@ -565,8 +563,6 @@ static void __init omap##name##_gptimer_timer_init(void) \ clksrc_nr, clksrc_src) \ static void __init omap##name##_sync32k_timer_init(void) \ { \ - if (omap_clk_init) \ - omap_clk_init(); \ omap_dmtimer_init(); \ omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ /* Enable the use of clocksource="gp_timer" kernel parameter */ \ diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h index b802f28..b7de471 100644 --- a/arch/arm/mach-pxa/include/mach/smemc.h +++ b/arch/arm/mach-pxa/include/mach/smemc.h @@ -37,7 +37,6 @@ #define CSADRCFG1 (SMEMC_VIRT + 0x84) /* Address Configuration Register for CS1 */ #define CSADRCFG2 (SMEMC_VIRT + 0x88) /* Address Configuration Register for CS2 */ #define CSADRCFG3 (SMEMC_VIRT + 0x8C) /* Address Configuration Register for CS3 */ -#define CSMSADRCFG (SMEMC_VIRT + 0xA0) /* Chip Select Configuration Register */ /* * More handy macros for PCMCIA diff --git 
a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index 6283fcb..25b08bfa 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -505,7 +505,6 @@ static struct w1_gpio_platform_data w1_gpio_platform_data = { .pin = GPIO_ONE_WIRE, .is_open_drain = 0, .enable_external_pullup = w1_enable_external_pullup, - .ext_pullup_enable_pin = -EINVAL, }; struct platform_device raumfeld_w1_gpio_device = { diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c index f38aa89..7992305 100644 --- a/arch/arm/mach-pxa/smemc.c +++ b/arch/arm/mach-pxa/smemc.c @@ -40,8 +40,6 @@ static void pxa3xx_smemc_resume(void) __raw_writel(csadrcfg[1], CSADRCFG1); __raw_writel(csadrcfg[2], CSADRCFG2); __raw_writel(csadrcfg[3], CSADRCFG3); - /* CSMSADRCFG wakes up in its default state (0), so we need to set it */ - __raw_writel(0x2, CSMSADRCFG); } static struct syscore_ops smemc_syscore_ops = { @@ -51,19 +49,8 @@ static struct syscore_ops smemc_syscore_ops = { static int __init smemc_init(void) { - if (cpu_is_pxa3xx()) { - /* - * The only documentation we have on the - * Chip Select Configuration Register (CSMSADRCFG) is that - * it must be programmed to 0x2. - * Moreover, in the bit definitions, the second bit - * (CSMSADRCFG[1]) is called "SETALWAYS". - * Other bits are reserved in this register. - */ - __raw_writel(0x2, CSMSADRCFG); - + if (cpu_is_pxa3xx()) register_syscore_ops(&smemc_syscore_ops); - } return 0; } diff --git a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S index 13ed33c..4135de8 100644 --- a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S +++ b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S @@ -40,17 +40,17 @@ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART) addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART) bic \rd, \rd, #0xff000 - ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)] + ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ] and \rd, \rd, #0x00ff0000 teq \rd, #0x00440000 @ is it 2440? 1004: - ldr \rd, [\rx, # S3C2410_UFSTAT] + ldr \rd, [ \rx, # S3C2410_UFSTAT ] moveq \rd, \rd, lsr #SHIFT_2440TXF tst \rd, #S3C2410_UFSTAT_TXFULL .endm .macro fifo_full_s3c2410 rd, rx - ldr \rd, [\rx, # S3C2410_UFSTAT] + ldr \rd, [ \rx, # S3C2410_UFSTAT ] tst \rd, #S3C2410_UFSTAT_TXFULL .endm @@ -68,18 +68,18 @@ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART) addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART) bic \rd, \rd, #0xff000 - ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)] + ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ] and \rd, \rd, #0x00ff0000 teq \rd, #0x00440000 @ is it 2440? 
10000: - ldr \rd, [\rx, # S3C2410_UFSTAT] + ldr \rd, [ \rx, # S3C2410_UFSTAT ] andne \rd, \rd, #S3C2410_UFSTAT_TXMASK andeq \rd, \rd, #S3C2440_UFSTAT_TXMASK .endm .macro fifo_level_s3c2410 rd, rx - ldr \rd, [\rx, # S3C2410_UFSTAT] + ldr \rd, [ \rx, # S3C2410_UFSTAT ] and \rd, \rd, #S3C2410_UFSTAT_TXMASK .endm diff --git a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S index 6a21bee..7615a14 100644 --- a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S +++ b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S @@ -31,10 +31,10 @@ @@ try the interrupt offset register, since it is there - ldr \irqstat, [\base, #INTPND ] + ldr \irqstat, [ \base, #INTPND ] teq \irqstat, #0 beq 1002f - ldr \irqnr, [\base, #INTOFFSET ] + ldr \irqnr, [ \base, #INTOFFSET ] mov \tmp, #1 tst \irqstat, \tmp, lsl \irqnr bne 1001f diff --git a/arch/arm/mach-s3c24xx/pm-h1940.S b/arch/arm/mach-s3c24xx/pm-h1940.S index 6183a68..c93bf2d 100644 --- a/arch/arm/mach-s3c24xx/pm-h1940.S +++ b/arch/arm/mach-s3c24xx/pm-h1940.S @@ -30,4 +30,4 @@ h1940_pm_return: mov r0, #S3C2410_PA_GPIO - ldr pc, [r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO] + ldr pc, [ r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO ] diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2410.S b/arch/arm/mach-s3c24xx/sleep-s3c2410.S index 65200ae..dd5b638 100644 --- a/arch/arm/mach-s3c24xx/sleep-s3c2410.S +++ b/arch/arm/mach-s3c24xx/sleep-s3c2410.S @@ -45,9 +45,9 @@ ENTRY(s3c2410_cpu_suspend) ldr r4, =S3C2410_REFRESH ldr r5, =S3C24XX_MISCCR ldr r6, =S3C2410_CLKCON - ldr r7, [r4] @ get REFRESH (and ensure in TLB) - ldr r8, [r5] @ get MISCCR (and ensure in TLB) - ldr r9, [r6] @ get CLKCON (and ensure in TLB) + ldr r7, [ r4 ] @ get REFRESH (and ensure in TLB) + ldr r8, [ r5 ] @ get MISCCR (and ensure in TLB) + ldr r9, [ r6 ] @ get CLKCON (and ensure in TLB) orr r7, r7, #S3C2410_REFRESH_SELF @ SDRAM sleep command orr r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals @@ -61,8 +61,8 @@ ENTRY(s3c2410_cpu_suspend) @@ align next bit of code to cache line .align 5 s3c2410_do_sleep: - streq r7, [r4] @ SDRAM sleep command - streq r8, [r5] @ SDRAM power-down config - streq r9, [r6] @ CPU sleep + streq r7, [ r4 ] @ SDRAM sleep command + streq r8, [ r5 ] @ SDRAM power-down config + streq r9, [ r6 ] @ CPU sleep 1: beq 1b mov pc, r14 diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2412.S b/arch/arm/mach-s3c24xx/sleep-s3c2412.S index 5adaceb..c82418e 100644 --- a/arch/arm/mach-s3c24xx/sleep-s3c2412.S +++ b/arch/arm/mach-s3c24xx/sleep-s3c2412.S @@ -57,12 +57,12 @@ s3c2412_sleep_enter1: * retry, as simply returning causes the system to lock. */ - ldrne r9, [r1] - strne r9, [r1] - ldrne r9, [r2] - strne r9, [r2] - ldrne r9, [r3] - strne r9, [r3] + ldrne r9, [ r1 ] + strne r9, [ r1 ] + ldrne r9, [ r2 ] + strne r9, [ r2 ] + ldrne r9, [ r3 ] + strne r9, [ r3 ] bne s3c2412_sleep_enter1 mov pc, r14 diff --git a/arch/arm/mach-spear13xx/platsmp.c b/arch/arm/mach-spear13xx/platsmp.c index bb61b1f..2eaa3fa 100644 --- a/arch/arm/mach-spear13xx/platsmp.c +++ b/arch/arm/mach-spear13xx/platsmp.c @@ -21,7 +21,7 @@ #include #include -static DEFINE_RAW_SPINLOCK(boot_lock); +static DEFINE_SPINLOCK(boot_lock); static void __iomem *scu_base = IOMEM(VA_SCU_BASE); @@ -44,8 +44,8 @@ static void __cpuinit spear13xx_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. 
*/ - raw_spin_lock(&boot_lock); - raw_spin_unlock(&boot_lock); + spin_lock(&boot_lock); + spin_unlock(&boot_lock); } static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -56,7 +56,7 @@ static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_stru * set synchronisation state between this boot processor * and the secondary one */ - raw_spin_lock(&boot_lock); + spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -83,7 +83,7 @@ static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_stru * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - raw_spin_unlock(&boot_lock); + spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h index 0320495..1e49d90 100644 --- a/arch/arm/mach-u300/include/mach/u300-regs.h +++ b/arch/arm/mach-u300/include/mach/u300-regs.h @@ -95,7 +95,7 @@ #define U300_SPI_BASE (U300_FAST_PER_PHYS_BASE+0x6000) /* Fast UART1 on U335 only */ -#define U300_UART1_BASE (U300_FAST_PER_PHYS_BASE+0x7000) +#define U300_UART1_BASE (U300_SLOW_PER_PHYS_BASE+0x7000) /* * SLOW peripherals diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index dcecae1..3db7782 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -50,7 +50,7 @@ static void __iomem *scu_base_addr(void) return NULL; } -static DEFINE_RAW_SPINLOCK(boot_lock); +static DEFINE_SPINLOCK(boot_lock); static void __cpuinit ux500_secondary_init(unsigned int cpu) { @@ -70,8 +70,8 @@ static void __cpuinit ux500_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - raw_spin_lock(&boot_lock); - raw_spin_unlock(&boot_lock); + spin_lock(&boot_lock); + spin_unlock(&boot_lock); } static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -82,7 +82,7 @@ static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct * * set synchronisation state between this boot processor * and the secondary one */ - raw_spin_lock(&boot_lock); + spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -103,7 +103,7 @@ static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct * * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - raw_spin_unlock(&boot_lock); + spin_unlock(&boot_lock); return pen_release != -1 ? 
-ENOSYS : 0; } diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index a78827b..5d59294 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include @@ -66,28 +65,16 @@ #define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE) #define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE) -/* These PIC IRQs are valid in each configuration */ -#define PIC_VALID_ALL BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \ - BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \ - BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \ - BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \ - BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \ - BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \ - BIT(SIC_INT_PCI3) #if 1 #define IRQ_MMCI0A IRQ_VICSOURCE22 #define IRQ_AACI IRQ_VICSOURCE24 #define IRQ_ETH IRQ_VICSOURCE25 #define PIC_MASK 0xFFD00000 -#define PIC_VALID PIC_VALID_ALL #else #define IRQ_MMCI0A IRQ_SIC_MMCI0A #define IRQ_AACI IRQ_SIC_AACI #define IRQ_ETH IRQ_SIC_ETH #define PIC_MASK 0 -#define PIC_VALID PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \ - BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \ - BIT(SIC_INT_ETH) #endif /* Lookup table for finding a DT node that represents the vic instance */ @@ -115,7 +102,7 @@ void __init versatile_init_irq(void) VERSATILE_SIC_BASE); fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START, - IRQ_VICSOURCE31, PIC_VALID, np); + IRQ_VICSOURCE31, ~PIC_MASK, np); /* * Interrupts on secondary controller from 0 to 8 are routed to diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index e92e5e0..2f84f40 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -23,7 +23,6 @@ #include #include -#include #include #include @@ -328,12 +327,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) int irq; /* slot, pin, irq - * 24 1 IRQ_SIC_PCI0 - * 25 1 IRQ_SIC_PCI1 - * 26 1 IRQ_SIC_PCI2 - * 27 1 IRQ_SIC_PCI3 + * 24 1 27 + * 25 1 28 + * 26 1 29 + * 27 1 30 */ - irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3); + irq = 27 + ((slot - 24 + pin - 1) & 3); return irq; } diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index db26e2e..b820eda 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -749,6 +749,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) unsigned long instr = 0, instrptr; int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); unsigned int type; + mm_segment_t fs; unsigned int fault; u16 tinstr = 0; int isize = 4; @@ -759,15 +760,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) instrptr = instruction_pointer(regs); + fs = get_fs(); + set_fs(KERNEL_DS); if (thumb_mode(regs)) { - u16 *ptr = (u16 *)(instrptr & ~1); - fault = probe_kernel_address(ptr, tinstr); + fault = __get_user(tinstr, (u16 *)(instrptr & ~1)); if (!fault) { if (cpu_architecture() >= CPU_ARCH_ARMv7 && IS_T32(tinstr)) { /* Thumb-2 32-bit */ u16 tinst2 = 0; - fault = probe_kernel_address(ptr + 1, tinst2); + fault = __get_user(tinst2, (u16 *)(instrptr+2)); instr = (tinstr << 16) | tinst2; thumb2_32b = 1; } else { @@ -776,7 +778,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } } } else - fault = probe_kernel_address(instrptr, instr); + fault = __get_user(instr, (u32 *)instrptr); + set_fs(fs); if (fault) { type = TYPE_FAULT; diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index 48bc3c0..dd3d591 100644 --- 
a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -343,7 +343,6 @@ void __init feroceon_l2_init(int __l2_wt_override) outer_cache.inv_range = feroceon_l2_inv_range; outer_cache.clean_range = feroceon_l2_clean_range; outer_cache.flush_range = feroceon_l2_flush_range; - outer_cache.inv_all = l2_inv_all; enable_l2(); diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index d07df17..bc4a5e9 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -149,9 +149,9 @@ static int is_reserved_asid(u64 asid) return 0; } -static u64 new_context(struct mm_struct *mm, unsigned int cpu) +static void new_context(struct mm_struct *mm, unsigned int cpu) { - u64 asid = atomic64_read(&mm->context.id); + u64 asid = mm->context.id; u64 generation = atomic64_read(&asid_generation); if (asid != 0 && is_reserved_asid(asid)) { @@ -178,14 +178,13 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) cpumask_clear(mm_cpumask(mm)); } - return asid; + mm->context.id = asid; } void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) { unsigned long flags; unsigned int cpu = smp_processor_id(); - u64 asid; if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) __check_vmalloc_seq(mm); @@ -196,24 +195,20 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) */ cpu_set_reserved_ttbr0(); - asid = atomic64_read(&mm->context.id); - if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) - && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) + if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) + && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id)) goto switch_mm_fastpath; raw_spin_lock_irqsave(&cpu_asid_lock, flags); /* Check that our ASID belongs to the current generation. */ - asid = atomic64_read(&mm->context.id); - if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) { - asid = new_context(mm, cpu); - atomic64_set(&mm->context.id, asid); - } + if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) + new_context(mm, cpu); + + atomic64_set(&per_cpu(active_asids, cpu), mm->context.id); + cpumask_set_cpu(cpu, mm_cpumask(mm)); if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) local_flush_tlb_all(); - - atomic64_set(&per_cpu(active_asids, cpu), asid); - cpumask_set_cpu(cpu, mm_cpumask(mm)); raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); switch_mm_fastpath: diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 5397da0..dda3904 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -330,7 +330,6 @@ static int __init atomic_pool_init(void) { struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); - gfp_t gfp = GFP_KERNEL | GFP_DMA; unsigned long nr_pages = pool->size >> PAGE_SHIFT; unsigned long *bitmap; struct page *page; @@ -349,8 +348,8 @@ static int __init atomic_pool_init(void) if (IS_ENABLED(CONFIG_CMA)) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); else - ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, - NULL); + ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, + &page, NULL); if (ptr) { int i; diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 149bab5..5dbf13f 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto no_context; /* diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index bd41dd8..21b9e1b 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -38,7 +38,6 @@ EXPORT_SYMBOL(kunmap); void *kmap_atomic(struct page *page) { - pte_t pte = mk_pte(page, kmap_prot); unsigned int idx; unsigned long vaddr; void *kmap; @@ -77,10 +76,7 @@ void *kmap_atomic(struct page *page) * in place, so the contained TLB flush ensures the TLB is updated * with the new mapping. */ -#ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = pte; -#endif - set_top_pte(vaddr, pte); + set_top_pte(vaddr, mk_pte(page, kmap_prot)); return (void *)vaddr; } @@ -97,15 +93,12 @@ void __kunmap_atomic(void *kvaddr) if (cache_is_vivt()) __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); -#ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = __pte(0); -#endif #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + set_top_pte(vaddr, __pte(0)); #else (void) idx; /* to kill a warning */ #endif - set_top_pte(vaddr, __pte(0)); kmap_atomic_idx_pop(); } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { /* this address was obtained through kmap_high_get() */ @@ -117,7 +110,6 @@ EXPORT_SYMBOL(__kunmap_atomic); void *kmap_atomic_pfn(unsigned long pfn) { - pte_t pte = pfn_pte(pfn, kmap_prot); unsigned long vaddr; int idx, type; @@ -129,10 +121,7 @@ void *kmap_atomic_pfn(unsigned long pfn) #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(get_top_pte(vaddr))); #endif -#ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = pte; -#endif - set_top_pte(vaddr, pte); + set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); return (void *)vaddr; } @@ -146,29 +135,3 @@ struct page *kmap_atomic_to_page(const void *ptr) return pte_page(get_top_pte(vaddr)); } - -#if defined CONFIG_PREEMPT_RT_FULL -void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -{ - int i; - - /* - * Clear @prev's kmap_atomic mappings - */ - for (i = 0; i < prev_p->kmap_idx; i++) { - int idx = i + KM_TYPE_NR * smp_processor_id(); - - set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0)); - } - /* - * Restore @next_p's kmap_atomic mappings - */ - for (i = 0; i < next_p->kmap_idx; i++) { - int idx = i + KM_TYPE_NR * smp_processor_id(); - - if (!pte_none(next_p->kmap_pte[i])) - set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), - next_p->kmap_pte[i]); - } -} -#endif diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2556cf1..2c3b942 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_ARM_CPU_SUSPEND +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 344c8a5..f1803f7 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_ARM_CPU_SUSPEND +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 0b60dd3..82f9cdc 100644 --- 
a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext) .globl cpu_mohawk_suspend_size .equ cpu_mohawk_suspend_size, 4 * 6 -#ifdef CONFIG_ARM_CPU_SUSPEND +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_mohawk_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index d92dfd0..3aa0da1 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext) .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4 * 3 -#ifdef CONFIG_ARM_CPU_SUSPEND +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r6, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index d222215..09c5233 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext) /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 6 -#ifdef CONFIG_ARM_CPU_SUSPEND +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index e8efd83..eb93d64 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 6 -#ifdef CONFIG_ARM_CPU_SUSPEND +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index e766f88..2551036 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext) .globl cpu_xscale_suspend_size .equ cpu_xscale_suspend_size, 4 * 6 -#ifdef CONFIG_ARM_CPU_SUSPEND +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_xscale_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S index f3a9cff..207e275 100644 --- a/arch/arm/plat-samsung/include/plat/debug-macro.S +++ b/arch/arm/plat-samsung/include/plat/debug-macro.S @@ -14,12 +14,12 @@ /* The S5PV210/S5PC110 implementations are as belows. */ .macro fifo_level_s5pv210 rd, rx - ldr \rd, [\rx, # S3C2410_UFSTAT] + ldr \rd, [ \rx, # S3C2410_UFSTAT ] and \rd, \rd, #S5PV210_UFSTAT_TXMASK .endm .macro fifo_full_s5pv210 rd, rx - ldr \rd, [\rx, # S3C2410_UFSTAT] + ldr \rd, [ \rx, # S3C2410_UFSTAT ] tst \rd, #S5PV210_UFSTAT_TXFULL .endm @@ -27,7 +27,7 @@ * most widely re-used */ .macro fifo_level_s3c2440 rd, rx - ldr \rd, [\rx, # S3C2410_UFSTAT] + ldr \rd, [ \rx, # S3C2410_UFSTAT ] and \rd, \rd, #S3C2440_UFSTAT_TXMASK .endm @@ -36,7 +36,7 @@ #endif .macro fifo_full_s3c2440 rd, rx - ldr \rd, [\rx, # S3C2410_UFSTAT] + ldr \rd, [ \rx, # S3C2410_UFSTAT ] tst \rd, #S3C2440_UFSTAT_TXFULL .endm @@ -45,11 +45,11 @@ #endif .macro senduart,rd,rx - strb \rd, [\rx, # S3C2410_UTXH] + strb \rd, [\rx, # S3C2410_UTXH ] .endm .macro busyuart, rd, rx - ldr \rd, [\rx, # S3C2410_UFCON] + ldr \rd, [ \rx, # S3C2410_UFCON ] tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled? beq 1001f @ @ FIFO enabled... 
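The debug-macro.S and plat-samsung hunks in this area only move whitespace inside the ldr/strb operand brackets, but the macros themselves implement the polled transmit path of the low-level debug UART: check UFCON to see whether the FIFO is enabled, spin on UFSTAT (FIFO full) or UTRSTAT (transmitter empty) accordingly, then store the byte to UTXH. A C rendering of the same idea is sketched below; the register offsets and flag bits are written out as assumptions for self-containment rather than taken from the S3C headers.

	/* Offsets and bits assumed for illustration; the real values
	 * live in the S3C24xx regs-serial headers. */
	#define UFCON		0x08	/* FIFO control */
	#define UTRSTAT		0x10	/* TX/RX status */
	#define UFSTAT		0x18	/* FIFO status */
	#define UTXH		0x20	/* TX holding register */
	#define UFCON_FIFOMODE	(1u << 0)
	#define UTRSTAT_TXFE	(1u << 1)
	#define UFSTAT_TXFULL	(1u << 14)

	static void debug_putc(volatile unsigned int *uart, char c)
	{
		if (uart[UFCON / 4] & UFCON_FIFOMODE) {
			/* FIFO enabled: wait until the TX FIFO has room. */
			while (uart[UFSTAT / 4] & UFSTAT_TXFULL)
				;
		} else {
			/* No FIFO: wait for the transmit buffer to drain. */
			while (!(uart[UTRSTAT / 4] & UTRSTAT_TXFE))
				;
		}
		uart[UTXH / 4] = c;	/* senduart: store the byte */
	}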
@@ -60,7 +60,7 @@ 1001: @ busy waiting for non fifo - ldr \rd, [\rx, # S3C2410_UTRSTAT] + ldr \rd, [ \rx, # S3C2410_UTRSTAT ] tst \rd, #S3C2410_UTRSTAT_TXFE beq 1001b @@ -68,7 +68,7 @@ .endm .macro waituart,rd,rx - ldr \rd, [\rx, # S3C2410_UFCON] + ldr \rd, [ \rx, # S3C2410_UFCON ] tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled? beq 1001f @ @ FIFO enabled... @@ -79,7 +79,7 @@ b 1002f 1001: @ idle waiting for non fifo - ldr \rd, [\rx, # S3C2410_UTRSTAT] + ldr \rd, [ \rx, # S3C2410_UTRSTAT ] tst \rd, #S3C2410_UTRSTAT_TXFE beq 1001b diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c index 20f9044..04ca493 100644 --- a/arch/arm/plat-versatile/platsmp.c +++ b/arch/arm/plat-versatile/platsmp.c @@ -32,7 +32,7 @@ static void __cpuinit write_pen_release(int val) outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); } -static DEFINE_RAW_SPINLOCK(boot_lock); +static DEFINE_SPINLOCK(boot_lock); void __cpuinit versatile_secondary_init(unsigned int cpu) { @@ -52,8 +52,8 @@ void __cpuinit versatile_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - raw_spin_lock(&boot_lock); - raw_spin_unlock(&boot_lock); + spin_lock(&boot_lock); + spin_unlock(&boot_lock); } int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -64,7 +64,7 @@ int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idl * Set synchronisation state between this boot processor * and the secondary one */ - raw_spin_lock(&boot_lock); + spin_lock(&boot_lock); /* * This is really belt and braces; we hold unintended secondary @@ -94,7 +94,7 @@ int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idl * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - raw_spin_unlock(&boot_lock); + spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 5dfbb0b..3b44e0d 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -413,7 +413,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) * If there isn't a second FP instruction, exit now. Note that * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. 
*/ - if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V)) + if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) goto exit; /* diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 01b20a2..7a32976 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -237,7 +237,7 @@ static int __init xen_init_events(void) xen_init_IRQ(); if (request_percpu_irq(xen_events_irq, xen_arm_callback, - "events", &xen_vcpu)) { + "events", xen_vcpu)) { pr_err("Error requesting IRQ %d\n", xen_events_irq); return -EINVAL; } diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 41db148..a4db3d2 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -76,7 +76,7 @@ struct compat_sigcontext { struct compat_ucontext { compat_ulong_t uc_flags; - compat_uptr_t uc_link; + struct compat_ucontext *uc_link; compat_stack_t uc_stack; struct compat_sigcontext uc_mcontext; compat_sigset_t uc_sigmask; @@ -703,7 +703,7 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, err |= copy_siginfo_to_user32(&frame->info, info); __put_user_error(0, &frame->sig.uc.uc_flags, err); - __put_user_error(0, &frame->sig.uc.uc_link, err); + __put_user_error(NULL, &frame->sig.uc.uc_link, err); memset(&stack, 0, sizeof(stack)); stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 0782eaf..afadae6 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -148,7 +148,6 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) #define VM_FAULT_BADACCESS 0x020000 #define ESR_WRITE (1 << 6) -#define ESR_CM (1 << 8) #define ESR_LNX_EXEC (1 << 24) /* @@ -207,7 +206,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, struct task_struct *tsk; struct mm_struct *mm; int fault, sig, code; - bool write = (esr & ESR_WRITE) && !(esr & ESR_CM); + int write = esr & ESR_WRITE; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | (write ? FAULT_FLAG_WRITE : 0); diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig index 9791820..0421498 100644 --- a/arch/avr32/configs/favr-32_defconfig +++ b/arch/avr32/configs/favr-32_defconfig @@ -122,6 +122,7 @@ CONFIG_USB_G_SERIAL=m CONFIG_USB_CDC_COMPOSITE=m CONFIG_MMC=y CONFIG_MMC_ATMELMCI=y +CONFIG_MMC_ATMELMCI_DMA=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_ATMEL_PWM=m diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig index 65de443..3befab9 100644 --- a/arch/avr32/configs/merisc_defconfig +++ b/arch/avr32/configs/merisc_defconfig @@ -102,6 +102,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_MMC=y CONFIG_MMC_ATMELMCI=y +CONFIG_MMC_ATMELMCI_DMA=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_ATMEL_PWM=y diff --git a/arch/avr32/include/asm/signal.h b/arch/avr32/include/asm/signal.h index b65e61a..9326d18 100644 --- a/arch/avr32/include/asm/signal.h +++ b/arch/avr32/include/asm/signal.h @@ -29,7 +29,6 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 9577e69..b2f2d2d 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) * If we're in an interrupt or have no user context, we must * not take the fault... 
*/ - if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled()) + if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) goto no_context; local_irq_enable(); diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c index 61fbd2d..84ed837 100644 --- a/arch/blackfin/kernel/early_printk.c +++ b/arch/blackfin/kernel/early_printk.c @@ -25,6 +25,8 @@ extern struct console *bfin_earlyserial_init(unsigned int port, extern struct console *bfin_jc_early_init(void); #endif +static struct console *early_console; + /* Default console */ #define DEFAULT_PORT 0 #define DEFAULT_CFLAG CS8|B57600 diff --git a/arch/cris/include/asm/signal.h b/arch/cris/include/asm/signal.h index a7e267c..c0cb1fd 100644 --- a/arch/cris/include/asm/signal.h +++ b/arch/cris/include/asm/signal.h @@ -29,7 +29,6 @@ struct sigaction { void (*sa_restorer)(void); sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 1a403d9..73312ab 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -114,7 +114,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs, * user context, we must not take the fault. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto no_context; retry: diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index e87972c..331c1e2 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c @@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto no_context; down_read(&mm->mmap_sem); diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h index 4bf76ac..66c81c6 100644 --- a/arch/h8300/include/asm/signal.h +++ b/arch/h8300/include/asm/signal.h @@ -29,7 +29,6 @@ struct sigaction { void (*sa_restorer)(void); sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index 76acbcd..d2bf1fd 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -106,15 +106,16 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; { - register unsigned long r8 __asm ("r8") = 0; + register unsigned long r8 __asm ("r8"); unsigned long prev; __asm__ __volatile__( " mf;; \n" + " mov %0=r0 \n" " mov ar.ccv=%4;; \n" "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. 
\n" "[2:]" - : "+r" (r8), "=&r" (prev) + : "=r" (r8), "=r" (prev) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index 8c70961..43f96ab 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -143,7 +143,6 @@ extern unsigned long __per_cpu_mca[NR_CPUS]; extern int cpe_vector; extern int ia64_cpe_irq; extern void ia64_mca_init(void); -extern void ia64_mca_irq_init(void); extern void ia64_mca_cpu_init(void *); extern void ia64_os_mca_dispatch(void); extern void ia64_os_mca_dispatch_end(void); diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index f2c41828..ad69606 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -23,8 +23,6 @@ #include #include -#include - /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. @@ -85,12 +83,6 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask) #endif /* CONFIG_SMP */ -int __init arch_early_irq_init(void) -{ - ia64_mca_irq_init(); - return 0; -} - #ifdef CONFIG_HOTPLUG_CPU unsigned int vectors_in_migration[NR_IRQS]; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index d7396db..65bf9cd 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -2074,16 +2074,22 @@ ia64_mca_init(void) printk(KERN_INFO "MCA related initialization done\n"); } - /* - * These pieces cannot be done in ia64_mca_init() because it is called before - * early_irq_init() which would wipe out our percpu irq registrations. But we - * cannot leave them until ia64_mca_late_init() because by then all the other - * processors have been brought online and have set their own CMC vectors to - * point at a non-existant action. Called from arch_early_irq_init(). + * ia64_mca_late_init + * + * Opportunity to setup things that require initialization later + * than ia64_mca_init. Setup a timer to poll for CPEs if the + * platform doesn't support an interrupt driven mechanism. + * + * Inputs : None + * Outputs : Status */ -void __init ia64_mca_irq_init(void) +static int __init +ia64_mca_late_init(void) { + if (!mca_init) + return 0; + /* * Configure the CMCI/P vector and handler. Interrupts for CMC are * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). @@ -2102,23 +2108,6 @@ void __init ia64_mca_irq_init(void) /* Setup the CPEI/P handler */ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); #endif -} - -/* - * ia64_mca_late_init - * - * Opportunity to setup things that require initialization later - * than ia64_mca_init. Setup a timer to poll for CPEs if the - * platform doesn't support an interrupt driven mechanism. 
- * - * Inputs : None - * Outputs : Status - */ -static int __init -ia64_mca_late_init(void) -{ - if (!mca_init) - return 0; register_hotcpu_notifier(&mca_cpu_notifier); diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index a7869f8..4332f7e 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte) "srlz.d;;" "ssm psr.i;;" "srlz.d;;" - : "=&r"(ret) : "r"(iha), "r"(pte) : "memory"); + : "=r"(ret) : "r"(iha), "r"(pte):"memory"); return ret; } diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index dd88415..6cf0341 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -98,7 +98,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re /* * If we're in an interrupt or have no user context, we must not take the fault.. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto no_context; #ifdef CONFIG_VIRTUAL_MEM_MAP diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h index 04ccbcd..a5ba4a2 100644 --- a/arch/m32r/include/asm/signal.h +++ b/arch/m32r/include/asm/signal.h @@ -22,7 +22,6 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index 6945056..3cdfa9c 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, * If we're in an interrupt or have no user context or are running in an * atomic region then we must not take the fault.. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto bad_area_nosemaphore; /* When running in the kernel we expect faults to occur only to diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h index 60370da..9c8c46b 100644 --- a/arch/m68k/include/asm/signal.h +++ b/arch/m68k/include/asm/signal.h @@ -29,7 +29,6 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 9ea40db..a563727 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto no_context; retry: diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c index b099a86..aba1f9a 100644 --- a/arch/microblaze/kernel/early_printk.c +++ b/arch/microblaze/kernel/early_printk.c @@ -21,6 +21,7 @@ #include #include +static u32 early_console_initialized; static u32 base_addr; #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE @@ -108,11 +109,27 @@ static struct console early_serial_uart16550_console = { }; #endif /* CONFIG_SERIAL_8250_CONSOLE */ +static struct console *early_console; + +void early_printk(const char *fmt, ...) 
+{ + char buf[512]; + int n; + va_list ap; + + if (early_console_initialized) { + va_start(ap, fmt); + n = vscnprintf(buf, 512, fmt, ap); + early_console->write(early_console, buf, n); + va_end(ap); + } +} + int __init setup_early_printk(char *opt) { int version = 0; - if (early_console) + if (early_console_initialized) return 1; base_addr = of_early_console(&version); @@ -142,6 +159,7 @@ int __init setup_early_printk(char *opt) } register_console(early_console); + early_console_initialized = 1; return 0; } return 1; @@ -151,7 +169,7 @@ int __init setup_early_printk(char *opt) * only for early console because of performance degression */ void __init remap_early_printk(void) { - if (!early_console) + if (!early_console_initialized || !early_console) return; printk(KERN_INFO "early_printk_console remapping from 0x%x to ", base_addr); @@ -177,9 +195,9 @@ void __init remap_early_printk(void) void __init disable_early_printk(void) { - if (!early_console) + if (!early_console_initialized || !early_console) return; printk(KERN_WARNING "disabling early console\n"); unregister_console(early_console); - early_console = NULL; + early_console_initialized = 0; } diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index dc1d8c1..714b35a 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -108,7 +108,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) is_write = 0; - if (unlikely(!mm || pagefault_disabled())) { + if (unlikely(in_atomic() || !mm)) { if (kernel_mode(regs)) goto bad_area_nosemaphore; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 1c355a1..2ac626a 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2102,7 +2102,7 @@ config CPU_R4400_WORKAROUNDS # config HIGHMEM bool "High Memory Support" - depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL + depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM config CPU_SUPPORTS_HIGHMEM bool diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 21bff32..dbaec94 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -31,7 +31,7 @@ #define PAGE_SHIFT 16 #endif #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) +#define PAGE_MASK (~(PAGE_SIZE - 1)) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c index 86d325a..9ae813e 100644 --- a/arch/mips/kernel/early_printk.c +++ b/arch/mips/kernel/early_printk.c @@ -8,7 +8,6 @@ * written by Ralf Baechle (ralf@linux-mips.org) */ #include -#include #include #include @@ -26,18 +25,20 @@ early_console_write(struct console *con, const char *s, unsigned n) } } -static struct console early_console_prom = { +static struct console early_console __initdata = { .name = "early", .write = early_console_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1 }; +static int early_console_initialized __initdata; + void __init setup_early_printk(void) { - if (early_console) + if (early_console_initialized) return; - early_console = &early_console_prom; + early_console_initialized = 1; - register_console(&early_console_prom); + register_console(&early_console); } diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 33d0671..1658676 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,9 +46,10 @@ PTR_L a5, 
PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) -#endif +#else PTR_ADDIU sp, PT_SIZE - .endm +#endif +.endm .macro RETURN_BACK jr ra @@ -67,11 +68,7 @@ NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: b ftrace_stub -#ifdef CONFIG_32BIT - addiu sp,sp,8 -#else - nop -#endif + addiu sp,sp,8 /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ lw t1, function_trace_stop diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index bfcaea6..b6aa770 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -601,7 +601,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { local_irq_enable(); - preempt_check_resched(); /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index f3f7756..239a9c9 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c @@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - int res; + unsigned long res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a) != 0; + res = (mask & *a); *a |= mask; raw_local_irq_restore(flags); return res; @@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr, unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - int res; + unsigned long res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a) != 0; + res = (mask & *a); *a |= mask; raw_local_irq_restore(flags); return res; @@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - int res; + unsigned long res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a) != 0; + res = (mask & *a); *a &= ~mask; raw_local_irq_restore(flags); return res; @@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; - int res; + unsigned long res; a += nr >> SZLONG_LOG; mask = 1UL << bit; raw_local_irq_save(flags); - res = (mask & *a) != 0; + res = (mask & *a); *a ^= mask; raw_local_irq_restore(flags); return res; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 90d193a..ddcec1e 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto bad_area_nosemaphore; retry: diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h index d673860..d280e97 100644 --- a/arch/mn10300/include/asm/signal.h +++ b/arch/mn10300/include/asm/signal.h @@ -39,7 +39,6 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 34a83b9..d48a84f 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto no_context; retry: diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 7df49fa..ee99f23 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -12,10 +12,11 @@ #include #include -#include #include #include +struct vm_area_struct; + /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. For the return value to be meaningful, ADDR must be >= @@ -39,14 +40,7 @@ do{ \ *(pteptr) = (pteval); \ } while(0) - -extern void purge_tlb_entries(struct mm_struct *, unsigned long); - -#define set_pte_at(mm, addr, ptep, pteval) \ - do { \ - set_pte(ptep, pteval); \ - purge_tlb_entries(mm, addr); \ - } while (0) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #endif /* !__ASSEMBLY__ */ @@ -472,7 +466,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, old = pte_val(*ptep); new = pte_val(pte_wrprotect(__pte (old))); } while (cmpxchg((unsigned long *) ptep, old, new) != old); - purge_tlb_entries(mm, addr); #else pte_t old_pte = *ptep; set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index b89a85a..48e16dc 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -419,24 +419,6 @@ void kunmap_parisc(void *addr) EXPORT_SYMBOL(kunmap_parisc); #endif -void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) -{ - unsigned long flags; - - /* Note: purge_tlb_entries can be called at startup with - no context. */ - - /* Disable preemption while we play with %sr1. */ - preempt_disable(); - mtsp(mm->context, 1); - purge_tlb_start(flags); - pdtlb(addr); - pitlb(addr); - purge_tlb_end(flags); - preempt_enable(); -} -EXPORT_SYMBOL(purge_tlb_entries); - void __flush_tlb_range(unsigned long sid, unsigned long start, unsigned long end) { diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index df22f39..18162ce 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long acc_type; int fault; - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto no_context; down_read(&mm->mmap_sem); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 281256e..17903f1 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -60,11 +60,10 @@ config LOCKDEP_SUPPORT config RWSEM_GENERIC_SPINLOCK bool - default y if PREEMPT_RT_FULL config RWSEM_XCHGADD_ALGORITHM bool - default y if !PREEMPT_RT_FULL + default y config GENERIC_LOCKBREAK bool @@ -142,7 +141,6 @@ config PPC select GENERIC_CLOCKEVENTS select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER - select HAVE_PREEMPT_LAZY select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA select CLONE_BACKWARDS @@ -277,10 +275,6 @@ config PPC_ADV_DEBUG_DAC_RANGE depends on PPC_ADV_DEBUG_REGS && 44x default y -config PPC_EMULATE_SSTEP - bool - default y if KPROBES || UPROBES || XMON || HAVE_HW_BREAKPOINT - source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -292,7 +286,7 @@ menu "Kernel options" config HIGHMEM bool "High memory support" - depends on PPC32 && !PREEMPT_RT_FULL + depends on PPC32 source kernel/Kconfig.hz source kernel/Kconfig.preempt diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index a80e32b4..a8fb03e 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -201,7 +201,6 @@ int 
eeh_dev_check_failure(struct eeh_dev *edev); void __init eeh_addr_cache_build(void); void eeh_add_device_tree_early(struct device_node *); void eeh_add_device_tree_late(struct pci_bus *); -void eeh_add_sysfs_files(struct pci_bus *); void eeh_remove_bus_device(struct pci_dev *, int); /** @@ -241,8 +240,6 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { } static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } -static inline void eeh_add_sysfs_files(struct pci_bus *bus) { } - static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { } static inline void eeh_lock(void) { } diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index b59e06f..2fdb47a 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -343,16 +343,17 @@ extern void slb_set_size(u16 size); /* * VSID allocation (256MB segment) * - * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated - * from mmu context id and effective segment id of the address. + * We first generate a 38-bit "proto-VSID". For kernel addresses this + * is equal to the ESID | 1 << 37, for user addresses it is: + * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1) * - * For user processes max context id is limited to ((1ul << 19) - 5) - * for kernel space, we use the top 4 context ids to map address as below - * NOTE: each context only support 64TB now. - * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] - * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] - * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] - * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] + * This splits the proto-VSID into the below range + * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range + * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range + * + * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 + * That is, we assign half of the space to user processes and half + * to the kernel. * * The proto-VSIDs are then scrambled into real VSIDs with the * multiplicative hash: @@ -362,49 +363,41 @@ extern void slb_set_size(u16 size); * VSID_MULTIPLIER is prime, so in particular it is * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. * Because the modulus is 2^n-1 we can compute it efficiently without - * a divide or extra multiply (see below). The scramble function gives - * robust scattering in the hash table (at least based on some initial - * results). + * a divide or extra multiply (see below). * - * We also consider VSID 0 special. We use VSID 0 for slb entries mapping - * bad address. This enables us to consolidate bad address handling in - * hash_page. + * This scheme has several advantages over older methods: * - * We also need to avoid the last segment of the last context, because that - * would give a protovsid of 0x1fffffffff. That will result in a VSID 0 - * because of the modulo operation in vsid scramble. But the vmemmap - * (which is what uses region 0xf) will never be close to 64TB in size - * (it's 56 bytes per page of system memory). - */ - -#define CONTEXT_BITS 19 -#define ESID_BITS 18 -#define ESID_BITS_1T 6 - -/* - * 256MB segment - * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments - * available for user + kernel mapping. The top 4 contexts are used for - * kernel mapping. Each segment contains 2^28 bytes. 
Each - * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts - * (19 == 37 + 28 - 46). + * - We have VSIDs allocated for every kernel address + * (i.e. everything above 0xC000000000000000), except the very top + * segment, which simplifies several things. + * + * - We allow for USER_ESID_BITS significant bits of ESID and + * CONTEXT_BITS bits of context for user addresses. + * i.e. 64T (46 bits) of address space for up to half a million contexts. + * + * - The scramble function gives robust scattering in the hash + * table (at least based on some initial results). The previous + * method was more susceptible to pathological cases giving excessive + * hash collisions. */ -#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5) /* * This should be computed such that protovosid * vsid_mulitplier * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus */ #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ -#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS) +#define VSID_BITS_256M 38 #define VSID_MODULUS_256M ((1UL<= \ * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ * the bit clear, r3 already has the answer we want, if it \ @@ -521,6 +513,34 @@ typedef struct { }) #endif /* 1 */ +/* + * This is only valid for addresses >= PAGE_OFFSET + * The proto-VSID space is divided into two class + * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1 + * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1 + * + * With KERNEL_START at 0xc000000000000000, the proto vsid for + * the kernel ends up with 0xc00000000 (36 bits). With 64TB + * support we need to have kernel proto-VSID in the + * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS. + */ +static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) +{ + unsigned long proto_vsid; + /* + * We need to make sure proto_vsid for the kernel is + * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T]) + */ + if (ssize == MMU_SEGSIZE_256M) { + proto_vsid = ea >> SID_SHIFT; + proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS)); + return vsid_scramble(proto_vsid, 256M); + } + proto_vsid = ea >> SID_SHIFT_1T; + proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)); + return vsid_scramble(proto_vsid, 1T); +} + /* Returns the segment size indicator for a user address */ static inline int user_segment_size(unsigned long addr) { @@ -530,41 +550,17 @@ static inline int user_segment_size(unsigned long addr) return MMU_SEGSIZE_256M; } +/* This is only valid for user addresses (which are below 2^44) */ static inline unsigned long get_vsid(unsigned long context, unsigned long ea, int ssize) { - /* - * Bad address. 
We return VSID 0 for that - */ - if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) - return 0; - if (ssize == MMU_SEGSIZE_256M) - return vsid_scramble((context << ESID_BITS) + return vsid_scramble((context << USER_ESID_BITS) | (ea >> SID_SHIFT), 256M); - return vsid_scramble((context << ESID_BITS_1T) + return vsid_scramble((context << USER_ESID_BITS_1T) | (ea >> SID_SHIFT_1T), 1T); } -/* - * This is only valid for addresses >= PAGE_OFFSET - * - * For kernel space, we use the top 4 context ids to map address as below - * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] - * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] - * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] - * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] - */ -static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) -{ - unsigned long context; - - /* - * kernel take the top 4 context from the available range - */ - context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1; - return get_vsid(context, ea, ssize); -} #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_MMU_HASH64_H_ */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 4f440a6..51fb00a 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -112,10 +112,6 @@ #define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff #define PPC_INST_MTSPR_DSCR 0x7c1103a6 #define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff -#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6 -#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff -#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 -#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff #define PPC_INST_SLBFEE 0x7c0007a7 #define PPC_INST_STRING 0x7c00042a diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h index fbe66c4..a101637 100644 --- a/arch/powerpc/include/asm/signal.h +++ b/arch/powerpc/include/asm/signal.h @@ -1,7 +1,6 @@ #ifndef _ASM_POWERPC_SIGNAL_H #define _ASM_POWERPC_SIGNAL_H -#define __ARCH_HAS_SA_RESTORER #include #endif /* _ASM_POWERPC_SIGNAL_H */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 14c08f1..406b7b9 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -43,8 +43,6 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - int preempt_lazy_count; /* 0 => preemptable, - <0 => BUG */ struct restart_block restart_block; unsigned long local_flags; /* private flags for thread */ @@ -99,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ -#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ +#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ @@ -108,7 +106,6 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation for stack store? */ -#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<= PGTABLE_RANGE - */ - rldicr. 
r9,r11,4,(63 - 46 - 4) - li r9,0 /* VSID = 0 for bad address */ - bne- 0f - - /* - * Calculate VSID: - * This is the kernel vsid, we take the top for context from - * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 - * Here we know that (ea >> 60) == 0xc - */ - lis r9,(MAX_USER_CONTEXT + 1)@ha - addi r9,r9,(MAX_USER_CONTEXT + 1)@l - - srdi r10,r11,SID_SHIFT - rldimi r10,r9,ESID_BITS,0 /* proto vsid */ - ASM_VSID_SCRAMBLE(r10, r9, 256M) - rldic r9,r10,12,16 /* r9 = vsid << 12 */ - -0: /* Hash to the primary group */ ld r10,PACASTABVIRT(r13) - srdi r11,r11,SID_SHIFT + mfspr r11,SPRN_DAR + srdi r11,r11,28 rldimi r10,r11,7,52 /* r10 = first ste of the group */ + /* Calculate VSID */ + /* This is a kernel address, so protovsid = ESID | 1 << 37 */ + li r9,0x1 + rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0 + ASM_VSID_SCRAMBLE(r11, r9, 256M) + rldic r9,r11,12,16 /* r9 = vsid << 12 */ + /* Search the primary group for a free entry */ 1: ld r11,0(r10) /* Test valid bit of the current ste */ andi. r11,r11,0x80 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 1a63feb..116f086 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -490,7 +490,6 @@ _GLOBAL(copy_and_flush) sync addi r5,r5,8 addi r6,r6,8 - isync blr .align 8 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index bb73a2e..71413f4 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -584,7 +584,6 @@ void irq_ctx_init(void) } } -#ifndef CONFIG_PREEMPT_RT_FULL static inline void do_softirq_onstack(void) { struct thread_info *curtp, *irqtp; @@ -621,7 +620,6 @@ void do_softirq(void) local_irq_restore(flags); } -#endif irq_hw_number_t virq_to_hw(unsigned int virq) { diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 466a290..7206701 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -162,8 +162,6 @@ static int kexec_all_irq_disabled = 0; static void kexec_smp_down(void *arg) { local_irq_disable(); - hard_irq_disable(); - mb(); /* make sure our irqs are disabled before we say they are */ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; while(kexec_all_irq_disabled == 0) @@ -246,8 +244,6 @@ static void kexec_prepare_cpus(void) wake_offline_cpus(); smp_call_function(kexec_smp_down, NULL, /* wait */0); local_irq_disable(); - hard_irq_disable(); - mb(); /* make sure IRQs are disabled before we say they are */ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; @@ -285,7 +281,6 @@ static void kexec_prepare_cpus(void) if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(0, 0); local_irq_disable(); - hard_irq_disable(); } #endif /* SMP */ diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 313078b..19e096b 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -36,7 +36,6 @@ .text -#ifndef CONFIG_PREEMPT_RT_FULL _GLOBAL(call_do_softirq) mflr r0 stw r0,4(r1) @@ -47,7 +46,6 @@ _GLOBAL(call_do_softirq) lwz r0,4(r1) mtlr r0 blr -#endif _GLOBAL(call_handle_irq) mflr r0 diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 39b77e8..5cfa800 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -29,7 +29,6 @@ .text -#ifndef CONFIG_PREEMPT_RT_FULL _GLOBAL(call_do_softirq) mflr r0 std r0,16(r1) @@ -40,7 +39,6 @@ _GLOBAL(call_do_softirq) ld r0,16(r1) mtlr r0 blr -#endif _GLOBAL(call_handle_irq) ld r8,0(r6) diff --git a/arch/powerpc/kernel/of_platform.c 
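The irq.c and misc_32/64.S hunks above unconditionally restore call_do_softirq and do_softirq_onstack. The idea behind the pair, hopping onto a dedicated per-cpu IRQ stack before running softirqs, can be sketched as follows (softirq_ctx is the powerpc per-cpu stack array; the rest is schematic, not the exact kernel body):

/*
 * Schematic of do_softirq_onstack(): point the IRQ stack's thread_info
 * at the current task so current() keeps working there, switch stacks
 * via the asm thunk, then detach again.
 */
static inline void do_softirq_onstack_sketch(void)
{
	struct thread_info *curtp = current_thread_info();
	struct thread_info *irqtp = softirq_ctx[smp_processor_id()];

	irqtp->task = curtp->task;
	call_do_softirq(irqtp);		/* asm: swap the stack pointer, call __do_softirq */
	irqtp->task = NULL;
}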
b/arch/powerpc/kernel/of_platform.c index e51c89f..07c1269 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -95,9 +95,6 @@ static int of_pci_phb_probe(struct platform_device *dev) /* Add probed PCI devices to the device model */ pci_bus_add_devices(phb->bus); - /* sysfs files should only be added after devices are added */ - eeh_add_sysfs_files(phb->bus); - return 0; } diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index fa12ae4..7c37379 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1477,14 +1477,11 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus) pcibios_allocate_bus_resources(bus); pcibios_claim_one_bus(bus); - /* Fixup EEH */ - eeh_add_device_tree_late(bus); - /* Add new devices to global lists. Register in proc, sysfs. */ pci_bus_add_devices(bus); - /* sysfs files should only be added after devices are added */ - eeh_add_sysfs_files(bus); + /* Fixup EEH */ + eeh_add_device_tree_late(bus); } EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 8d97eb4..6da881b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -156,15 +156,6 @@ early_param("smt-enabled", early_smt_enabled); #define check_smt_enabled() #endif /* CONFIG_SMP */ -/** Fix up paca fields required for the boot cpu */ -static void fixup_boot_paca(void) -{ - /* The boot cpu is started */ - get_paca()->cpu_start = 1; - /* Allow percpu accesses to work until we setup percpu data */ - get_paca()->data_offset = 0; -} - /* * Early initialization entry point. This is called by head.S * with MMU translation disabled. We rely on the "feature" of @@ -194,7 +185,6 @@ void __init early_setup(unsigned long dt_ptr) /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ initialise_paca(&boot_paca, 0); setup_paca(&boot_paca); - fixup_boot_paca(); /* Initialize lockdep early or else spinlocks will blow */ lockdep_init(); @@ -215,7 +205,11 @@ void __init early_setup(unsigned long dt_ptr) /* Now we know the logical id of our boot cpu, setup the paca. */ setup_paca(&paca[boot_cpuid]); - fixup_boot_paca(); + + /* Fix up paca fields required for the boot cpu */ + get_paca()->cpu_start = 1; + /* Allow percpu accesses to "work" until we setup percpu data */ + get_paca()->data_offset = 0; /* Probe the machine type */ probe_machine(); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6686794..3251840 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -961,10 +961,7 @@ static int emulate_instruction(struct pt_regs *regs) #ifdef CONFIG_PPC64 /* Emulate the mfspr rD, DSCR. */ - if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == - PPC_INST_MFSPR_DSCR_USER) || - ((instword & PPC_INST_MFSPR_DSCR_MASK) == - PPC_INST_MFSPR_DSCR)) && + if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) && cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mfdscr, regs); rd = (instword >> 21) & 0x1f; @@ -972,10 +969,7 @@ static int emulate_instruction(struct pt_regs *regs) return 0; } /* Emulate the mtspr DSCR, rD. 
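The emulate_instruction() hunks above and below match DSCR accesses by masking out the rD field before comparing against an opcode template. Using the _USER constants whose values appear in the ppc-opcode.h hunk earlier, the idiom reduces to this self-contained sketch (helper names invented for illustration):

#include <stdint.h>
#include <stdbool.h>

#define PPC_INST_MFSPR_DSCR_USER	0x7c0302a6u
#define PPC_INST_MFSPR_DSCR_USER_MASK	0xfc1fffffu

/*
 * The mask clears bits 21-25 (the 5-bit rD field), so every
 * "mfspr rD,DSCR" encoding compares equal to the template.
 */
static bool is_mfspr_dscr_user(uint32_t instword)
{
	return (instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
	       PPC_INST_MFSPR_DSCR_USER;
}

static unsigned int inst_rd(uint32_t instword)
{
	return (instword >> 21) & 0x1f;	/* rD, as extracted in the hunks */
}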
*/ - if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == - PPC_INST_MTSPR_DSCR_USER) || - ((instword & PPC_INST_MTSPR_DSCR_MASK) == - PPC_INST_MTSPR_DSCR)) && + if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) && cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mtdscr, regs); rd = (instword >> 21) & 0x1f; diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 13b8670..f974849 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -156,13 +156,15 @@ static struct console udbg_console = { .index = 0, }; +static int early_console_initialized; + /* * Called by setup_system after ppc_md->probe and ppc_md->early_init. * Call it again after setting udbg_putc in ppc_md->setup_arch. */ void __init register_early_udbg_console(void) { - if (early_console) + if (early_console_initialized) return; if (!udbg_putc) @@ -172,7 +174,7 @@ void __init register_early_udbg_console(void) printk(KERN_INFO "early console immortal !\n"); udbg_console.flags &= ~CON_BOOT; } - early_console = &udbg_console; + early_console_initialized = 1; register_console(&udbg_console); } diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 5d7d29a..ead58e3 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) vcpu3s->context_id[0] = err; vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) - << ESID_BITS) - 1; - vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS; + << USER_ESID_BITS) - 1; + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; kvmppc_mmu_hpte_init(vcpu); diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 2f4baa0..1f89d26 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -108,8 +108,6 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) { } -static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); - void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -138,11 +136,8 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); mtspr(SPRN_GESR, vcpu->arch.shared->esr); - if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || - __get_cpu_var(last_vcpu_on_cpu) != vcpu) { + if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) kvmppc_e500_tlbil_all(vcpu_e500); - __get_cpu_var(last_vcpu_on_cpu) = vcpu; - } kvmppc_load_guest_fp(vcpu); } diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 35baad9..746e0c8 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -19,7 +19,9 @@ obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ checksum_wrappers_64.o hweight_64.o \ copyuser_power7.o string_64.o copypage_power7.o \ memcpy_power7.o -obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o +obj-$(CONFIG_XMON) += sstep.o ldstfp.o +obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o ifeq ($(CONFIG_PPC64),y) obj-$(CONFIG_SMP) += locks.o diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 66fdd82..3a8489a 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -259,7 +259,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, if (!arch_irq_disabled_regs(regs)) local_irq_enable(); - if (!mm || pagefault_disabled()) { + if (in_atomic() || mm == NULL) { if (!user_mode(regs)) 
return SIGSEGV; /* in_atomic() in user mode is really bad, diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 004630b..3a292be 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -194,11 +194,6 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); unsigned long tprot = prot; - /* - * If we hit a bad address return error. - */ - if (!vsid) - return -1; /* Make kernel text executable */ if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; @@ -763,8 +758,6 @@ void __init early_init_mmu(void) /* Initialize stab / SLB management */ if (mmu_has_feature(MMU_FTR_SLB)) slb_initialize(); - else - stab_initialize(get_paca()->stab_real); } #ifdef CONFIG_SMP @@ -928,6 +921,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", ea, access, trap); + if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { + DBG_LOW(" out of pgtable range !\n"); + return 1; + } + /* Get region & vsid */ switch (REGION_ID(ea)) { case USER_REGION_ID: @@ -958,11 +956,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) } DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); - /* Bad address. */ - if (!vsid) { - DBG_LOW("Bad address!\n"); - return 1; - } /* Get pgdir */ pgdir = mm->pgd; if (pgdir == NULL) @@ -1132,8 +1125,6 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, /* Get VSID */ ssize = user_segment_size(ea); vsid = get_vsid(mm->context.id, ea, ssize); - if (!vsid) - return; /* Hash doesn't like irqs */ local_irq_save(flags); @@ -1226,9 +1217,6 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); - /* Don't create HPTE entries for bad address */ - if (!vsid) - return; ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), mode, HPTE_V_BOLTED, mmu_linear_psize, mmu_kernel_ssize); diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index d1d1b92..40bc5b0 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c @@ -29,6 +29,15 @@ static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); +/* + * 256MB segment + * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments + * available for user mappings. Each segment contains 2^28 bytes. Each + * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts + * (19 == 37 + 28 - 46). + */ +#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1) + int __init_new_context(void) { int index; @@ -47,7 +56,7 @@ again: else if (err) return err; - if (index > MAX_USER_CONTEXT) { + if (index > MAX_CONTEXT) { spin_lock(&mmu_context_lock); ida_remove(&mmu_context_ida, index); spin_unlock(&mmu_context_lock); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 6a252c4..bba87ca 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -201,7 +201,7 @@ int __node_distance(int a, int b) int distance = LOCAL_DISTANCE; if (!form1_affinity) - return ((a == b) ? 
LOCAL_DISTANCE : REMOTE_DISTANCE); + return distance; for (i = 0; i < distance_ref_points_depth; i++) { if (distance_lookup_table[a][i] == distance_lookup_table[b][i]) diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 654258f..e212a27 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -61,7 +61,7 @@ #endif #ifdef CONFIG_PPC_STD_MMU_64 -#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) +#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) #error TASK_SIZE_USER64 exceeds user VSID range #endif #endif diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 17aa6df..1a16ca2 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -31,15 +31,10 @@ * No other registers are examined or changed. */ _GLOBAL(slb_allocate_realmode) - /* - * check for bad kernel/user address - * (ea & ~REGION_MASK) >= PGTABLE_RANGE - */ - rldicr. r9,r3,4,(63 - 46 - 4) - bne- 8f + /* r3 = faulting address */ srdi r9,r3,60 /* get region */ - srdi r10,r3,SID_SHIFT /* get esid */ + srdi r10,r3,28 /* get esid */ cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ @@ -61,14 +56,12 @@ _GLOBAL(slb_allocate_realmode) */ _GLOBAL(slb_miss_kernel_load_linear) li r11,0 + li r9,0x1 /* - * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 - * r9 = region id. + * for 1T we shift 12 bits more. slb_finish_load_1T will do + * the necessary adjustment */ - addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha - addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l - - + rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) @@ -98,19 +91,24 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) _GLOBAL(slb_miss_kernel_load_io) li r11,0 6: + li r9,0x1 /* - * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 - * r9 = region id. + * for 1T we shift 12 bits more. slb_finish_load_1T will do + * the necessary adjustment */ - addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha - addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l - + rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 BEGIN_FTR_SECTION b slb_finish_load END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) b slb_finish_load_1T -0: +0: /* user address: proto-VSID = context << 15 | ESID. First check + * if the address is within the boundaries of the user region + */ + srdi. r9,r10,USER_ESID_BITS + bne- 8f /* invalid ea bits set */ + + /* when using slices, we extract the psize off the slice bitmaps * and then we need to get the sllp encoding off the mmu_psize_defs * array. 
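The rldimi in the slb_low.S hunks installs the kernel's marker bit directly above the ESID, at bit CONTEXT_BITS + USER_ESID_BITS, which the mmu-hash64.h comment earlier pins at 37. In C, and assuming the v3.8 values CONTEXT_BITS = 19, USER_ESID_BITS = 18, SID_SHIFT = 28 (assumed here, not all shown in the hunks), the two proto-VSID forms are:

#define CONTEXT_BITS	19	/* assumed v3.8 value */
#define USER_ESID_BITS	18	/* assumed v3.8 value */
#define SID_SHIFT	28	/* 256MB segment shift */

/* Kernel proto-VSID: ESID | 1 << 37, as the comment above states. */
static unsigned long kernel_proto_vsid_256m(unsigned long ea)
{
	return (ea >> SID_SHIFT) |
	       (1UL << (CONTEXT_BITS + USER_ESID_BITS));
}

/* User proto-VSID: (context << USER_ESID_BITS) | low ESID bits. */
static unsigned long user_proto_vsid_256m(unsigned long context,
					  unsigned long ea)
{
	return (context << USER_ESID_BITS) |
	       ((ea >> SID_SHIFT) & ((1UL << USER_ESID_BITS) - 1));
}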
@@ -166,13 +164,15 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ld r9,PACACONTEXTID(r13) BEGIN_FTR_SECTION cmpldi r10,0x1000 +END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) + rldimi r10,r9,USER_ESID_BITS,0 +BEGIN_FTR_SECTION bge slb_finish_load_1T END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) b slb_finish_load 8: /* invalid EA */ li r10,0 /* BAD_VSID */ - li r9,0 /* BAD_VSID */ li r11,SLB_VSID_USER /* flags don't much matter */ b slb_finish_load @@ -221,6 +221,8 @@ _GLOBAL(slb_allocate_user) /* get context to calculate proto-VSID */ ld r9,PACACONTEXTID(r13) + rldimi r10,r9,USER_ESID_BITS,0 + /* fall through slb_finish_load */ #endif /* __DISABLED__ */ @@ -229,10 +231,9 @@ _GLOBAL(slb_allocate_user) /* * Finish loading of an SLB entry and return * - * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET + * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET */ slb_finish_load: - rldimi r10,r9,ESID_BITS,0 ASM_VSID_SCRAMBLE(r10,r9,256M) /* * bits above VSID_BITS_256M need to be ignored from r10 @@ -297,11 +298,10 @@ _GLOBAL(slb_compare_rr_to_size) /* * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. * - * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 + * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 */ slb_finish_load_1T: - srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ - rldimi r10,r9,ESID_BITS_1T,0 + srdi r10,r10,40-28 /* get 1T ESID */ ASM_VSID_SCRAMBLE(r10,r9,1T) /* * bits above VSID_BITS_1T need to be ignored from r10 diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 023ec8a..0d82ef5 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, if (!is_kernel_addr(addr)) { ssize = user_segment_size(addr); vsid = get_vsid(mm->context.id, addr, ssize); + WARN_ON(vsid == 0); } else { vsid = get_kernel_vsid(addr, mmu_kernel_ssize); ssize = mmu_kernel_ssize; } - WARN_ON(vsid == 0); vpn = hpt_vpn(addr, vsid, ssize); rpte = __real_pte(__pte(pte), ptep); diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 806cbbd..1e12108 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -43,7 +43,6 @@ static irqreturn_t timebase_interrupt(int irq, void *dev) static struct irqaction tbint_irqaction = { .handler = timebase_interrupt, - .flags = IRQF_NO_THREAD, .name = "tbint", }; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 506dc9f..dba1ce2 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -99,7 +99,6 @@ spufs_new_inode(struct super_block *sb, umode_t mode) if (!inode) goto out; - inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 6b73d6c..9a04322 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -788,6 +788,7 @@ static void eeh_add_device_late(struct pci_dev *dev) dev->dev.archdata.edev = edev; eeh_addr_cache_insert_dev(dev); + eeh_sysfs_add_device(dev); } /** @@ -814,29 +815,6 @@ void eeh_add_device_tree_late(struct pci_bus *bus) EXPORT_SYMBOL_GPL(eeh_add_device_tree_late); /** - * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus - 
* @bus: PCI bus - * - * This routine must be used to add EEH sysfs files for PCI - * devices which are attached to the indicated PCI bus. The PCI bus - * is added after system boot through hotplug or dlpar. - */ -void eeh_add_sysfs_files(struct pci_bus *bus) -{ - struct pci_dev *dev; - - list_for_each_entry(dev, &bus->devices, bus_list) { - eeh_sysfs_add_device(dev); - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { - struct pci_bus *subbus = dev->subordinate; - if (subbus) - eeh_add_sysfs_files(subbus); - } - } -} -EXPORT_SYMBOL_GPL(eeh_add_sysfs_files); - -/** * eeh_remove_device - Undo EEH setup for the indicated pci device * @dev: pci device to be removed * @purge_pe: remove the PE or not diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 299731e..0da39fe 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -186,13 +186,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; - - /* - * The test for adjunct partition is performed before the - * ANDCOND test. H_RESOURCE may be returned, so we need to - * check for that as well. - */ - BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); + BUG_ON(lpar_rc != H_NOT_FOUND); slot_offset++; slot_offset &= 0x7; diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 5e6ff38..d4fa03f 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -120,7 +120,6 @@ static irqreturn_t cpm_error_interrupt(int irq, void *dev) static struct irqaction cpm_error_irqaction = { .handler = cpm_error_interrupt, - .flags = IRQF_NO_THREAD, .name = "error", }; diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index b94b478..6e53d97 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -333,8 +333,6 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) return 0; } -static struct lock_class_key fsl_msi_irq_class; - static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, int offset, int irq_index) { @@ -353,7 +351,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, dev_err(&dev->dev, "No memory for MSI cascade data\n"); return -ENOMEM; } - irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class); + msi->msi_virqs[irq_index] = virt_msir; cascade_data->index = offset; cascade_data->msi_data = msi; diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 379d96e..27cb321 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -50,6 +50,10 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr); #define ioremap_nocache(addr, size) ioremap(addr, size) #define ioremap_wc ioremap_nocache +/* TODO: s390 cannot support io_remap_pfn_range... */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { return (void __iomem *) offset; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 1532d7f..098adbb 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -56,10 +56,6 @@ extern unsigned long zero_page_mask; (((unsigned long)(vaddr)) &zero_page_mask)))) #define __HAVE_COLOR_ZERO_PAGE -/* TODO: s390 cannot support io_remap_pfn_range... 
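The lpar.c hunk above tightens the BUG_ON back to H_NOT_FOUND alone. For context, the test sits in a scan over the eight slots of an HPTE group, using H_ANDCOND so the hypervisor only removes an entry matching the condition; in outline (a sketch built around the lines shown, with the surrounding declarations filled in on assumption):

/*
 * Outline of the hpte_remove slot scan: probe each of the 8 slots in
 * the group; H_SUCCESS means one was freed, H_NOT_FOUND means the slot
 * did not match the ANDCOND test and the scan moves on.
 */
static long hpte_remove_sketch(unsigned long hpte_group)
{
	unsigned long slot_offset, dummy1, dummy2;
	long lpar_rc;
	int i;

	slot_offset = mftb() & 0x7;	/* pseudo-random start slot */
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		lpar_rc = plpar_pte_remove(H_ANDCOND,
					   hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;
		BUG_ON(lpar_rc != H_NOT_FOUND);
		slot_offset++;
		slot_offset &= 0x7;
	}
	return -1;
}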
*/ -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - #endif /* !__ASSEMBLY__ */ /* diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h index 639f569..db7ddfa 100644 --- a/arch/s390/include/asm/signal.h +++ b/arch/s390/include/asm/signal.h @@ -34,7 +34,6 @@ struct sigaction { void (*sa_restorer)(void); sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 6b32af3..1d8fe2b 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -74,6 +74,8 @@ static inline void __tlb_flush_idte(unsigned long asce) static inline void __tlb_flush_mm(struct mm_struct * mm) { + if (unlikely(cpumask_empty(mm_cpumask(mm)))) + return; /* * If the machine has IDTE we prefer to do a per mm flush * on all cpus instead of doing a local flush if the mm diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 94feff7..5502285 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -636,8 +636,7 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER mcck_skip: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT - stm %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 + mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA stm %r8,%r9,__PT_PSW(%r11) xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) l %r1,BASED(.Ldo_machine_check) diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 082b845..6d34e0c 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -678,9 +678,8 @@ ENTRY(mcck_int_handler) UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER LAST_BREAK %r14 mcck_skip: - lghi %r14,__LC_GPREGS_SAVE_AREA+64 - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_R8(64,%r11),0(%r14) + lghi %r14,__LC_GPREGS_SAVE_AREA + mvc __PT_R0(128,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8bafa4f..f090e81 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -766,14 +766,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) } else prefix = 0; - /* - * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy - * copying in vcpu load/put. Lets update our copies before we save - * it into the save area - */ - save_fp_regs(&vcpu->arch.guest_fpregs); - save_access_regs(vcpu->run->s.regs.acrs); - if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 62d659d..2fb9e63 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -296,8 +296,7 @@ static inline int do_exception(struct pt_regs *regs, int access) * user context. 
*/ fault = VM_FAULT_BADCONTEXT; - if (unlikely(!user_space_fault(trans_exc_code) || - !mm || pagefault_disabled())) + if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto out; address = trans_exc_code & __FAIL_ADDR_MASK; @@ -436,8 +435,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs) clear_tsk_thread_flag(current, TIF_PER_TRAP); trans_exc_code = regs->int_parm_long; - if (unlikely(!user_space_fault(trans_exc_code) || !mm || - pagefault_disabled())) + if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; down_read(&mm->mmap_sem); diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 59fccbe..47b600e 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c @@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto bad_area_nosemaphore; down_read(&mm->mmap_sem); diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index ae4b141..063af10 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -149,7 +149,6 @@ void irq_ctx_exit(int cpu) hardirq_ctx[cpu] = NULL; } -#ifndef CONFIG_PREEMPT_RT_FULL asmlinkage void do_softirq(void) { unsigned long flags; @@ -192,7 +191,6 @@ asmlinkage void do_softirq(void) local_irq_restore(flags); } -#endif #else static inline void handle_one_irq(unsigned int irq) { diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c index a5b51b9..47475cc 100644 --- a/arch/sh/kernel/sh_bios.c +++ b/arch/sh/kernel/sh_bios.c @@ -144,6 +144,8 @@ static struct console bios_console = { .index = -1, }; +static struct console *early_console; + static int __init setup_early_printk(char *buf) { int keep_early = 0; diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 8ff1613..1f49c28 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -440,7 +440,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ - if (unlikely(!mm || pagefault_disabled())) { + if (unlikely(in_atomic() || !mm)) { bad_area_nosemaphore(regs, error_code, address); return; } diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 7eb57d2..9661e9b 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -12,6 +12,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { + hugetlb_setup(mm); } static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index e155388..4b39f74 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -27,8 +27,8 @@ #ifndef __ASSEMBLY__ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) -struct pt_regs; -extern void hugetlb_setup(struct pt_regs *regs); +struct mm_struct; +extern void hugetlb_setup(struct mm_struct *mm); #endif #define WANT_PAGE_VIRTUAL diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 7619f2f..08fcce9 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -915,7 +915,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> 
PAGE_SHIFT, size, prot); } -#include #include /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h index 2f0df05..77b8585 100644 --- a/arch/sparc/include/asm/signal.h +++ b/arch/sparc/include/asm/signal.h @@ -26,7 +26,5 @@ struct k_sigaction { void __user *ka_restorer; }; -#define __ARCH_HAS_SA_RESTORER - #endif /* !(__ASSEMBLY__) */ #endif /* !(__SPARC_SIGNAL_H) */ diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index c7de332..cad36f5 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -18,7 +18,8 @@ do { \ * and 2 stores in this critical code path. -DaveM */ #define switch_to(prev, next, last) \ -do { save_and_clear_fpu(); \ +do { flush_tlb_pending(); \ + save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. */ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index f0d6a97..2ef4634 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -11,40 +11,24 @@ struct tlb_batch { struct mm_struct *mm; unsigned long tlb_nr; - unsigned long active; unsigned long vaddrs[TLB_BATCH_NR]; }; extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); extern void flush_tsb_user(struct tlb_batch *tb); -extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); /* TLB flush operations. */ -static inline void flush_tlb_mm(struct mm_struct *mm) -{ -} - -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long vmaddr) -{ -} - -static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ -} - -#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE - extern void flush_tlb_pending(void); -extern void arch_enter_lazy_mmu_mode(void); -extern void arch_leave_lazy_mmu_mode(void); -#define arch_flush_lazy_mmu_mode() do {} while (0) + +#define flush_tlb_range(vma,start,end) \ + do { (void)(start); flush_tlb_pending(); } while (0) +#define flush_tlb_page(vma,addr) flush_tlb_pending() +#define flush_tlb_mm(mm) flush_tlb_pending() /* Local cpu only. */ extern void __flush_tlb_all(void); -extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); + extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifndef CONFIG_SMP @@ -54,24 +38,15 @@ do { flush_tsb_kernel_range(start,end); \ __flush_tlb_kernel_range(start,end); \ } while (0) -static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) -{ - __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); -} - #else /* CONFIG_SMP */ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); -extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); #define flush_tlb_kernel_range(start, end) \ do { flush_tsb_kernel_range(start,end); \ smp_flush_tlb_kernel_range(start, end); \ } while (0) -#define global_flush_tlb_page(mm, vaddr) \ - smp_flush_tlb_page(mm, vaddr) - #endif /* ! 
CONFIG_SMP */ #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index e696432..b4c258d 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -157,26 +157,17 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; andn REG2, 0x7, REG2; \ add REG1, REG2, REG1; - /* These macros exists only to make the PMD translator below - * easier to read. It hides the ELF section switch for the - * sun4v code patching. + /* This macro exists only to make the PMD translator below easier + * to read. It hides the ELF section switch for the sun4v code + * patching. */ -#define OR_PTE_BIT_1INSN(REG, NAME) \ +#define OR_PTE_BIT(REG, NAME) \ 661: or REG, _PAGE_##NAME##_4U, REG; \ .section .sun4v_1insn_patch, "ax"; \ .word 661b; \ or REG, _PAGE_##NAME##_4V, REG; \ .previous; -#define OR_PTE_BIT_2INSN(REG, TMP, NAME) \ -661: sethi %hi(_PAGE_##NAME##_4U), TMP; \ - or REG, TMP, REG; \ - .section .sun4v_2insn_patch, "ax"; \ - .word 661b; \ - mov -1, TMP; \ - or REG, _PAGE_##NAME##_4V, REG; \ - .previous; - /* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */ #define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \ 661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \ @@ -223,13 +214,12 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; andn REG1, PMD_HUGE_PROTBITS, REG2; \ sllx REG2, PMD_PADDR_SHIFT, REG2; \ /* REG2 now holds PFN << PAGE_SHIFT */ \ - andcc REG1, PMD_HUGE_WRITE, %g0; \ + andcc REG1, PMD_HUGE_EXEC, %g0; \ + bne,a,pt %xcc, 1f; \ + OR_PTE_BIT(REG2, EXEC); \ +1: andcc REG1, PMD_HUGE_WRITE, %g0; \ bne,a,pt %xcc, 1f; \ - OR_PTE_BIT_1INSN(REG2, W); \ -1: andcc REG1, PMD_HUGE_EXEC, %g0; \ - be,pt %xcc, 1f; \ - nop; \ - OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \ + OR_PTE_BIT(REG2, W); \ /* REG1 can now be clobbered, build final PTE */ \ 1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \ ba,pt %xcc, PTE_LABEL; \ diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 1bc5cd8..9bcbbe2 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -698,7 +698,6 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) set_irq_regs(old_regs); } -#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq(void) { unsigned long flags; @@ -724,7 +723,6 @@ void do_softirq(void) local_irq_restore(flags); } -#endif #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 9f20566..1303021 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c @@ -64,7 +64,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len err = -ENODEV; mutex_lock(&of_set_property_mutex); - raw_spin_lock(&devtree_lock); + write_lock(&devtree_lock); prevp = &dp->properties; while (*prevp) { struct property *prop = *prevp; @@ -91,7 +91,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len } prevp = &(*prevp)->next; } - raw_spin_unlock(&devtree_lock); + write_unlock(&devtree_lock); mutex_unlock(&of_set_property_mutex); /* XXX Upate procfs if necessary... 
*/ diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index f4fb00e..38bf80a 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -309,7 +309,6 @@ void __init setup_arch(char **cmdline_p) boot_flags_init(*cmdline_p); - early_console = &prom_early_console; register_console(&prom_early_console); printk("ARCH: "); diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index ede7dc3..0eaf005 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -551,12 +551,6 @@ static void __init init_sparc64_elf_hwcap(void) pause_patch(); } -static inline void register_prom_console(void) -{ - early_console = &prom_early_console; - register_console(&prom_early_console); -} - void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ @@ -568,7 +562,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_EARLYFB if (btext_find_display()) #endif - register_prom_console(); + register_console(&prom_early_console); if (tlb_type == hypervisor) printk("ARCH: SUN4V\n"); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index ca64d2a..537eb66 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm) } extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_page; +extern unsigned long xcall_flush_tlb_pending; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; extern unsigned long xcall_fetch_glob_pmu; @@ -1074,56 +1074,23 @@ local_flush_and_out: put_cpu(); } -struct tlb_pending_info { - unsigned long ctx; - unsigned long nr; - unsigned long *vaddrs; -}; - -static void tlb_pending_func(void *info) -{ - struct tlb_pending_info *t = info; - - __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); -} - void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) { u32 ctx = CTX_HWBITS(mm->context); - struct tlb_pending_info info; int cpu = get_cpu(); - info.ctx = ctx; - info.nr = nr; - info.vaddrs = vaddrs; - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else - smp_call_function_many(mm_cpumask(mm), tlb_pending_func, - &info, 1); + smp_cross_call_masked(&xcall_flush_tlb_pending, + ctx, nr, (unsigned long) vaddrs, + mm_cpumask(mm)); __flush_tlb_pending(ctx, nr, vaddrs); put_cpu(); } -void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) -{ - unsigned long context = CTX_HWBITS(mm->context); - int cpu = get_cpu(); - - if (mm == current->mm && atomic_read(&mm->mm_users) == 1) - cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); - else - smp_cross_call_masked(&xcall_flush_tlb_page, - context, vaddr, 0, - mm_cpumask(mm)); - __flush_tlb_page(context, vaddr); - - put_cpu(); -} - void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= PAGE_MASK; diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index a313e4a..d4bdc7a 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -136,43 +136,12 @@ tsb_miss_page_table_walk_sun4v_fastpath: nop /* It is a huge page, use huge page TSB entry address we - * calculated above. If the huge page TSB has not been - * allocated, setup a trap stack and call hugetlb_setup() - * to do so, then return from the trap to replay the TLB - * miss. 
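The smp_64.c hunk above replaces the generic smp_call_function_many() path with a direct sparc cross call. The removed pattern is worth spelling out: because smp_call_function_many() passes a single void *, the three arguments travel in an on-stack struct, and the call waits so the struct stays live on every target CPU (taken from the lines removed above):

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

/*
 * Caller side, as removed above: fill the struct on the stack, then
 *	smp_call_function_many(mm_cpumask(mm), tlb_pending_func, &info, 1);
 * The final 1 (wait) keeps &info valid until all CPUs have run.
 */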
- * - * This is necessary to handle the case of transparent huge - * pages where we don't really have a non-atomic context - * in which to allocate the hugepage TSB hash table. When - * the 'mm' faults in the hugepage for the first time, we - * thus handle it here. This also makes sure that we can - * allocate the TSB hash table on the correct NUMA node. + * calculated above. */ TRAP_LOAD_TRAP_BLOCK(%g7, %g2) - ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1 - cmp %g1, -1 - bne,pt %xcc, 60f - nop - -661: rdpr %pstate, %g5 - wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate - .section .sun4v_2insn_patch, "ax" - .word 661b - SET_GL(1) - nop - .previous - - rdpr %tl, %g3 - cmp %g3, 1 - bne,pn %xcc, winfix_trampoline - nop - ba,pt %xcc, etrap - rd %pc, %g7 - call hugetlb_setup - add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop - + ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2 + cmp %g2, -1 + movne %xcc, %g2, %g1 60: #endif diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 18cbe13..e98bfda 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -200,7 +200,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto no_context; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 2764ac6..097aee7 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -321,7 +321,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (!mm || pagefault_disabled()) + if (in_atomic() || !mm) goto intr_or_no_mm; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); @@ -472,13 +472,8 @@ good_area: #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) mm_rss = mm->context.huge_pte_count; if (unlikely(mm_rss > - mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) { - if (mm->context.tsb_block[MM_TSB_HUGE].tsb) - tsb_grow(mm, MM_TSB_HUGE, mm_rss); - else - hugetlb_setup(regs); - - } + mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) + tsb_grow(mm, MM_TSB_HUGE, mm_rss); #endif return; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 82bbf04..c3b7242 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -314,31 +314,16 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; unsigned long tag; - if (unlikely(!tsb)) - return; - tsb += ((address >> tsb_hash_shift) & (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); tag = (address >> 22UL); tsb_insert(tsb, tag, tte); } -#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) -static inline bool is_hugetlb_pte(pte_t pte) -{ - if ((tlb_type == hypervisor && - (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || - (tlb_type != hypervisor && - (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) - return true; - return false; -} -#endif - void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + unsigned long tsb_index, tsb_hash_shift, flags; struct mm_struct *mm; - unsigned long flags; pte_t pte = *ptep; if (tlb_type != hypervisor) { @@ -350,16 +335,25 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * mm = vma->vm_mm; + tsb_index = MM_TSB_BASE; + 
tsb_hash_shift = PAGE_SHIFT; + spin_lock_irqsave(&mm->context.lock, flags); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) - __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT, - address, pte_val(pte)); - else + if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { + if ((tlb_type == hypervisor && + (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || + (tlb_type != hypervisor && + (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) { + tsb_index = MM_TSB_HUGE; + tsb_hash_shift = HPAGE_SHIFT; + } + } #endif - __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, - address, pte_val(pte)); + + __update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift, + address, pte_val(pte)); spin_unlock_irqrestore(&mm->context.lock, flags); } @@ -2718,28 +2712,14 @@ static void context_reload(void *__data) load_secondary_context(mm); } -void hugetlb_setup(struct pt_regs *regs) +void hugetlb_setup(struct mm_struct *mm) { - struct mm_struct *mm = current->mm; - struct tsb_config *tp; - - if (in_atomic() || !mm) { - const struct exception_table_entry *entry; + struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE]; - entry = search_exception_tables(regs->tpc); - if (entry) { - regs->tpc = entry->fixup; - regs->tnpc = regs->tpc + 4; - return; - } - pr_alert("Unexpected HugeTLB setup in atomic context.\n"); - die_if_kernel("HugeTSB in atomic", regs); - } - - tp = &mm->context.tsb_block[MM_TSB_HUGE]; - if (likely(tp->tsb == NULL)) - tsb_grow(mm, MM_TSB_HUGE, 0); + if (likely(tp->tsb != NULL)) + return; + tsb_grow(mm, MM_TSB_HUGE, 0); tsb_context_switch(mm); smp_tsb_sync(mm); diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 83d89bc..3e8fec3 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -24,17 +24,11 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); - struct mm_struct *mm = tb->mm; - if (!tb->tlb_nr) - goto out; + if (tb->tlb_nr) { + flush_tsb_user(tb); - flush_tsb_user(tb); - - if (CTX_VALID(mm->context)) { - if (tb->tlb_nr == 1) { - global_flush_tlb_page(mm, tb->vaddrs[0]); - } else { + if (CTX_VALID(tb->mm->context)) { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); @@ -43,30 +37,12 @@ void flush_tlb_pending(void) tb->tlb_nr, &tb->vaddrs[0]); #endif } + tb->tlb_nr = 0; } - tb->tlb_nr = 0; - -out: put_cpu_var(tlb_batch); } -void arch_enter_lazy_mmu_mode(void) -{ - struct tlb_batch *tb = &__get_cpu_var(tlb_batch); - - tb->active = 1; -} - -void arch_leave_lazy_mmu_mode(void) -{ - struct tlb_batch *tb = &__get_cpu_var(tlb_batch); - - if (tb->tlb_nr) - flush_tlb_pending(); - tb->active = 0; -} - static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, bool exec) { @@ -84,12 +60,6 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, nr = 0; } - if (!tb->active) { - global_flush_tlb_page(mm, vaddr); - flush_tsb_user_page(mm, vaddr); - goto out; - } - if (nr == 0) tb->mm = mm; @@ -98,7 +68,6 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, if (nr >= TLB_BATCH_NR) flush_tlb_pending(); -out: put_cpu_var(tlb_batch); } @@ -166,15 +135,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, mm->context.huge_pte_count++; else mm->context.huge_pte_count--; - - /* Do not try to allocate the TSB hash table if we - * don't have one already. 
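After the tlb.c revert below, every address queues unconditionally into the per-cpu batch (the lazy-MMU 'active' gate is gone), draining whenever the mm changes or TLB_BATCH_NR entries pile up. A simplified walk-through of that flow, with the exec-bit tagging that the ultra.S demap code further down tests with andcc:

/*
 * Simplified batching flow (names as in tlb.c): the low bit of each
 * queued vaddr marks an executable mapping so the flusher also demaps
 * the ITLB entry for it.
 */
static void tlb_batch_add_sketch(struct tlb_batch *tb, struct mm_struct *mm,
				 unsigned long vaddr, bool exec)
{
	unsigned long nr = tb->tlb_nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	if (nr != 0 && tb->mm != mm) {
		flush_tlb_pending();	/* drain entries of the old mm */
		nr = 0;
	}
	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = nr + 1;
	if (tb->tlb_nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}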
We have various locks held - * and thus we'll end up doing a GFP_KERNEL allocation - * in an atomic context. - * - * Instead, we let the first TLB miss on a hugepage - * take care of this. - */ + if (mm->context.huge_pte_count == 1) + hugetlb_setup(mm); } if (!pmd_none(orig)) { diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 2cc3bce..7f64743 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -7,10 +7,11 @@ #include #include #include -#include +#include +#include #include +#include #include -#include #include extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -45,56 +46,28 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) } } -static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, - unsigned long hash_shift, - unsigned long nentries) -{ - unsigned long tag, ent, hash; - - v &= ~0x1UL; - hash = tsb_hash(v, hash_shift, nentries); - ent = tsb + (hash * sizeof(struct tsb)); - tag = (v >> 22UL); - - tsb_flush(ent, tag); -} - static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, unsigned long tsb, unsigned long nentries) { unsigned long i; - for (i = 0; i < tb->tlb_nr; i++) - __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); -} - -void flush_tsb_user(struct tlb_batch *tb) -{ - struct mm_struct *mm = tb->mm; - unsigned long nentries, base, flags; + for (i = 0; i < tb->tlb_nr; i++) { + unsigned long v = tb->vaddrs[i]; + unsigned long tag, ent, hash; - spin_lock_irqsave(&mm->context.lock, flags); + v &= ~0x1UL; - base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; - nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; - if (tlb_type == cheetah_plus || tlb_type == hypervisor) - base = __pa(base); - __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); + hash = tsb_hash(v, hash_shift, nentries); + ent = tsb + (hash * sizeof(struct tsb)); + tag = (v >> 22UL); -#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { - base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; - nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; - if (tlb_type == cheetah_plus || tlb_type == hypervisor) - base = __pa(base); - __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries); + tsb_flush(ent, tag); } -#endif - spin_unlock_irqrestore(&mm->context.lock, flags); } -void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +void flush_tsb_user(struct tlb_batch *tb) { + struct mm_struct *mm = tb->mm; unsigned long nentries, base, flags; spin_lock_irqsave(&mm->context.lock, flags); @@ -103,7 +76,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { @@ -111,7 +84,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); + __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries); } #endif spin_unlock_irqrestore(&mm->context.lock, flags); @@ -341,7 +314,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) retry_tsb_alloc: 
gfp_flags = GFP_KERNEL; if (new_size > (PAGE_SIZE * 2)) - gfp_flags |= __GFP_NOWARN | __GFP_NORETRY; + gfp_flags = __GFP_NOWARN | __GFP_NORETRY; new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index], gfp_flags, numa_node_id()); diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index 29b9608..f8e13d4 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -53,33 +53,6 @@ __flush_tlb_mm: /* 18 insns */ nop .align 32 - .globl __flush_tlb_page -__flush_tlb_page: /* 22 insns */ - /* %o0 = context, %o1 = vaddr */ - rdpr %pstate, %g7 - andn %g7, PSTATE_IE, %g2 - wrpr %g2, %pstate - mov SECONDARY_CONTEXT, %o4 - ldxa [%o4] ASI_DMMU, %g2 - stxa %o0, [%o4] ASI_DMMU - andcc %o1, 1, %g0 - andn %o1, 1, %o3 - be,pn %icc, 1f - or %o3, 0x10, %o3 - stxa %g0, [%o3] ASI_IMMU_DEMAP -1: stxa %g0, [%o3] ASI_DMMU_DEMAP - membar #Sync - stxa %g2, [%o4] ASI_DMMU - sethi %hi(KERNBASE), %o4 - flush %o4 - retl - wrpr %g7, 0x0, %pstate - nop - nop - nop - nop - - .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 26 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ @@ -230,31 +203,6 @@ __cheetah_flush_tlb_mm: /* 19 insns */ retl wrpr %g7, 0x0, %pstate -__cheetah_flush_tlb_page: /* 22 insns */ - /* %o0 = context, %o1 = vaddr */ - rdpr %pstate, %g7 - andn %g7, PSTATE_IE, %g2 - wrpr %g2, 0x0, %pstate - wrpr %g0, 1, %tl - mov PRIMARY_CONTEXT, %o4 - ldxa [%o4] ASI_DMMU, %g2 - srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 - sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 - or %o0, %o3, %o0 /* Preserve nucleus page size fields */ - stxa %o0, [%o4] ASI_DMMU - andcc %o1, 1, %g0 - be,pn %icc, 1f - andn %o1, 1, %o3 - stxa %g0, [%o3] ASI_IMMU_DEMAP -1: stxa %g0, [%o3] ASI_DMMU_DEMAP - membar #Sync - stxa %g2, [%o4] ASI_DMMU - sethi %hi(KERNBASE), %o4 - flush %o4 - wrpr %g0, 0, %tl - retl - wrpr %g7, 0x0, %pstate - __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 @@ -321,20 +269,6 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ retl nop -__hypervisor_flush_tlb_page: /* 11 insns */ - /* %o0 = context, %o1 = vaddr */ - mov %o0, %g2 - mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ - mov %g2, %o1 /* ARG1: mmu context */ - mov HV_MMU_ALL, %o2 /* ARG2: flags */ - srlx %o0, PAGE_SHIFT, %o0 - sllx %o0, PAGE_SHIFT, %o0 - ta HV_MMU_UNMAP_ADDR_TRAP - brnz,pn %o0, __hypervisor_tlb_tl0_error - mov HV_MMU_UNMAP_ADDR_TRAP, %o1 - retl - nop - __hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 @@ -405,13 +339,6 @@ cheetah_patch_cachetlbops: call tlb_patch_one mov 19, %o2 - sethi %hi(__flush_tlb_page), %o0 - or %o0, %lo(__flush_tlb_page), %o0 - sethi %hi(__cheetah_flush_tlb_page), %o1 - or %o1, %lo(__cheetah_flush_tlb_page), %o1 - call tlb_patch_one - mov 22, %o2 - sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 @@ -470,9 +397,10 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop - .globl xcall_flush_tlb_page -xcall_flush_tlb_page: /* 17 insns */ - /* %g5=context, %g1=vaddr */ + .globl xcall_flush_tlb_pending +xcall_flush_tlb_pending: /* 21 insns */ + /* %g5=context, %g1=nr, %g7=vaddrs[] */ + sllx %g1, 3, %g1 mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -480,16 +408,20 @@ xcall_flush_tlb_page: /* 17 insns */ or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU - andcc %g1, 0x1, %g0 +1: sub %g1, (1 << 3), %g1 + ldx [%g7 + %g1], %g5 + andcc %g5, 0x1, %g0 be,pn %icc, 2f - andn %g1, 0x1, %g5 + + andn %g5, 0x1, 
%g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync + brnz,pt %g1, 1b + nop stxa %g2, [%g4] ASI_DMMU retry nop - nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 25 insns */ @@ -724,13 +656,15 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ membar #Sync retry - .globl __hypervisor_xcall_flush_tlb_page -__hypervisor_xcall_flush_tlb_page: /* 17 insns */ - /* %g5=ctx, %g1=vaddr */ + .globl __hypervisor_xcall_flush_tlb_pending +__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ + /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ + sllx %g1, 3, %g1 mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 - mov %g1, %o0 /* ARG0: virtual address */ +1: sub %g1, (1 << 3), %g1 + ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 @@ -739,6 +673,8 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */ mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 + brnz,pt %g1, 1b + nop mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -821,13 +757,6 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 10, %o2 - sethi %hi(__flush_tlb_page), %o0 - or %o0, %lo(__flush_tlb_page), %o0 - sethi %hi(__hypervisor_flush_tlb_page), %o1 - or %o1, %lo(__hypervisor_flush_tlb_page), %o1 - call tlb_patch_one - mov 11, %o2 - sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 @@ -859,12 +788,12 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 21, %o2 - sethi %hi(xcall_flush_tlb_page), %o0 - or %o0, %lo(xcall_flush_tlb_page), %o0 - sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 - or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 + sethi %hi(xcall_flush_tlb_pending), %o0 + or %o0, %lo(xcall_flush_tlb_pending), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 call tlb_patch_one - mov 17, %o2 + mov 21, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index 59e3574..88f3c22 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h @@ -296,9 +296,6 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags, long compat_sys_fallocate(int fd, int mode, u32 offset_lo, u32 offset_hi, u32 len_lo, u32 len_hi); -long compat_sys_llseek(unsigned int fd, unsigned int offset_high, - unsigned int offset_low, loff_t __user * result, - unsigned int origin); /* Assembly trampoline to avoid clobbering r0. */ long _compat_sys_rt_sigreturn(void); diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c index d8e3b7e..7f72401 100644 --- a/arch/tile/kernel/compat.c +++ b/arch/tile/kernel/compat.c @@ -76,18 +76,6 @@ long compat_sys_fallocate(int fd, int mode, ((loff_t)len_hi << 32) | len_lo); } -/* - * Avoid bug in generic sys_llseek() that specifies offset_high and - * offset_low as "unsigned long", thus making it possible to pass - * a sign-extended high 32 bits in offset_low. - */ -long compat_sys_llseek(unsigned int fd, unsigned int offset_high, - unsigned int offset_low, loff_t __user * result, - unsigned int origin) -{ - return sys_llseek(fd, offset_high, offset_low, result, origin); -} - /* Provide the compat syscall number to call mapping. 
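For illustration, not part of the patch: the tile compat_sys_llseek() wrapper deleted above existed to guard against sign extension when the generic sys_llseek() declared its offset halves as "unsigned long" on a 64-bit kernel. A small runnable sketch of the hazard; loff_t_demo and combine() are invented names.

    #include <stdio.h>

    typedef long long loff_t_demo;      /* stand-in for the kernel's loff_t */

    static loff_t_demo combine(unsigned int hi, unsigned int lo)
    {
        /* both halves stay 32-bit and unsigned, so no sign bit can leak
         * into the upper word of the 64-bit offset */
        return ((loff_t_demo)hi << 32) | lo;
    }

    int main(void)
    {
        unsigned int lo = 0x80000000u;
        long long widened = (int)lo;    /* the hazardous signed widening */

        printf("safe:  %#llx\n", (unsigned long long)combine(0, lo));
        printf("buggy: %#llx\n", (unsigned long long)widened);
        return 0;
    }

The second line prints 0xffffffff80000000: exactly the corrupted offset the deleted comment warned about.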
*/ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), @@ -95,7 +83,6 @@ long compat_sys_llseek(unsigned int fd, unsigned int offset_high, /* See comments in sys.c */ #define compat_sys_fadvise64_64 sys32_fadvise64_64 #define compat_sys_readahead sys32_readahead -#define sys_llseek compat_sys_llseek /* Call the assembly trampolines where necessary. */ #define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c index 34d72a1..afb9c9a 100644 --- a/arch/tile/kernel/early_printk.c +++ b/arch/tile/kernel/early_printk.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -34,8 +33,25 @@ static struct console early_hv_console = { }; /* Direct interface for emergencies */ +static struct console *early_console = &early_hv_console; +static int early_console_initialized; static int early_console_complete; +static void early_vprintk(const char *fmt, va_list ap) +{ + char buf[512]; + int n = vscnprintf(buf, sizeof(buf), fmt, ap); + early_console->write(early_console, buf, n); +} + +void early_printk(const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + early_vprintk(fmt, ap); + va_end(ap); +} + void early_panic(const char *fmt, ...) { va_list ap; @@ -53,13 +69,14 @@ static int __initdata keep_early; static int __init setup_early_printk(char *str) { - if (early_console) + if (early_console_initialized) return 1; if (str != NULL && strncmp(str, "keep", 4) == 0) keep_early = 1; early_console = &early_hv_console; + early_console_initialized = 1; register_console(early_console); return 0; @@ -68,12 +85,12 @@ static int __init setup_early_printk(char *str) void __init disable_early_printk(void) { early_console_complete = 1; - if (!early_console) + if (!early_console_initialized || !early_console) return; if (!keep_early) { early_printk("disabling early console\n"); unregister_console(early_console); - early_console = NULL; + early_console_initialized = 0; } else { early_printk("keeping early console\n"); } @@ -81,7 +98,7 @@ void __init disable_early_printk(void) void warn_early_printk(void) { - if (early_console_complete || early_console) + if (early_console_complete || early_console_initialized) return; early_printk("\ Machine shutting down before console output is fully initialized.\n\ diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 7a5aa1a..d1e15f7 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1004,8 +1004,15 @@ void __cpuinit setup_cpu(int boot) #ifdef CONFIG_BLK_DEV_INITRD +/* + * Note that the kernel can potentially support other compression + * techniques than gz, though we don't do so by default. If we ever + * decide to do so we can either look for other filename extensions, + * or just allow a file with this name to be compressed with an + * arbitrary compressor (somewhat counterintuitively). + */ static int __initdata set_initramfs_file; -static char __initdata initramfs_file[128] = "initramfs"; +static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; static int __init setup_initramfs_file(char *str) { @@ -1019,9 +1026,9 @@ static int __init setup_initramfs_file(char *str) early_param("initramfs_file", setup_initramfs_file); /* - * We look for a file called "initramfs" in the hvfs. If there is one, we - * allocate some memory for it and it will be unpacked to the initramfs. - * If it's compressed, the initd code will uncompress it first. + * We look for an "initramfs.cpio.gz" file in the hvfs. 
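As an aside on the tile early_printk() re-added above (not part of the patch): it formats into a fixed 512-byte stack buffer and hands the result straight to the boot console's write hook. A hedged userspace model of the same shape; fake_con_write() stands in for con->write(), and vsnprintf() plus a clamp approximates vscnprintf(), which returns bytes actually written.

    #include <stdarg.h>
    #include <stdio.h>

    static void fake_con_write(const char *buf, int n)
    {
        fwrite(buf, 1, (size_t)n, stderr);   /* stands in for con->write() */
    }

    static void early_vprintk_demo(const char *fmt, va_list ap)
    {
        char buf[512];
        int n = vsnprintf(buf, sizeof(buf), fmt, ap);

        if (n > (int)sizeof(buf) - 1)
            n = (int)sizeof(buf) - 1;   /* clamp, as vscnprintf() would */
        fake_con_write(buf, n);
    }

    static void early_printk_demo(const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        early_vprintk_demo(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        early_printk_demo("early console up, cpu %d\n", 0);
        return 0;
    }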
+ * If there is one, we allocate some memory for it and it will be + * unpacked to the initramfs. */ static void __init load_hv_initrd(void) { @@ -1031,16 +1038,10 @@ static void __init load_hv_initrd(void) fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { - if (set_initramfs_file) { + if (set_initramfs_file) pr_warning("No such hvfs initramfs file '%s'\n", initramfs_file); - return; - } else { - /* Try old backwards-compatible name. */ - fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); - if (fd == HV_ENOENT) - return; - } + return; } BUG_ON(fd < 0); stat = hv_fs_fstat(fd); diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 1ba0ccc..3d2b81c 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -360,7 +360,7 @@ static int handle_page_fault(struct pt_regs *regs, * If we're in an interrupt, have no user context or are running in an * atomic region then we must not take the fault. */ - if (!mm || pagefault_disabled()) { + if (in_atomic() || !mm) { vma = NULL; /* happy compiler */ goto bad_area_nosemaphore; } diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c index 4a0800b..49480f0 100644 --- a/arch/um/kernel/early_printk.c +++ b/arch/um/kernel/early_printk.c @@ -16,7 +16,7 @@ static void early_console_write(struct console *con, const char *s, unsigned int um_early_printk(s, n); } -static struct console early_console_dev = { +static struct console early_console = { .name = "earlycon", .write = early_console_write, .flags = CON_BOOT, @@ -25,10 +25,8 @@ static struct console early_console_dev = { static int __init setup_early_printk(char *buf) { - if (!early_console) { - early_console = &early_console_dev; - register_console(&early_console_dev); - } + register_console(&early_console); + return 0; } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 991b33a..089f398 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -39,7 +39,7 @@ int handle_page_fault(unsigned long address, unsigned long ip, * If the fault was during atomic operation, don't take the fault, just * fail. 
*/ - if (pagefault_disabled()) + if (in_atomic()) goto out_nosemaphore; retry: diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c index 9be0d5d..3922255 100644 --- a/arch/unicore32/kernel/early_printk.c +++ b/arch/unicore32/kernel/early_printk.c @@ -33,17 +33,21 @@ static struct console early_ocd_console = { .index = -1, }; +/* Direct interface for emergencies */ +static struct console *early_console = &early_ocd_console; + +static int __initdata keep_early; + static int __init setup_early_printk(char *buf) { - int keep_early; - - if (!buf || early_console) + if (!buf) return 0; if (strstr(buf, "keep")) keep_early = 1; - early_console = &early_ocd_console; + if (!strncmp(buf, "ocd", 3)) + early_console = &early_ocd_console; if (keep_early) early_console->flags &= ~CON_BOOT; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 585e236..225543b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -108,7 +108,6 @@ config X86 select KTIME_SCALAR if X86_32 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER - select HAVE_PREEMPT_LAZY select HAVE_CONTEXT_TRACKING if X86_64 select HAVE_IRQ_TIME_ACCOUNTING select MODULES_USE_ELF_REL if X86_32 @@ -174,11 +173,8 @@ config ARCH_MAY_HAVE_PC_FDC def_bool y depends on ISA_DMA_API -config RWSEM_GENERIC_SPINLOCK - def_bool PREEMPT_RT_FULL - config RWSEM_XCHGADD_ALGORITHM - def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + def_bool y config GENERIC_CALIBRATE_DELAY def_bool y @@ -776,7 +772,7 @@ config IOMMU_HELPER config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL - select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL + select CPUMASK_OFFSTACK ---help--- Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. @@ -1257,6 +1253,10 @@ config NODES_SHIFT Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accommodate various tables. 
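Both fault-handler hunks above restore the v3.8 guard: if a page fault arrives in atomic context or with no mm, the handler must fail fast rather than risk sleeping on mmap_sem. A minimal model of that guard, for illustration only; in_atomic_demo() and struct mm_demo are stand-ins, not kernel APIs.

    #include <stdbool.h>
    #include <stdio.h>

    struct mm_demo { int users; };

    static bool in_atomic_demo(void) { return false; }   /* stand-in */

    static int handle_fault_demo(struct mm_demo *mm)
    {
        if (in_atomic_demo() || !mm)
            return -1;   /* "bad_area_nosemaphore" bail-out: no sleeping */
        /* ... down_read(&mm->mmap_sem), find_vma(), handle_mm_fault() ... */
        return 0;
    }

    int main(void)
    {
        struct mm_demo mm = { 1 };
        printf("with mm: %d, without: %d\n",
               handle_fault_demo(&mm), handle_fault_demo(NULL));
        return 0;
    }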
+config HAVE_ARCH_ALLOC_REMAP + def_bool y + depends on X86_32 && NUMA + config ARCH_HAVE_MEMORY_PRESENT def_bool y depends on X86_32 && DISCONTIGMEM diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 5ef205c..8a84501 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo +targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -29,6 +29,7 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/piggy.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone +$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o @@ -42,7 +43,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs +targets += vmlinux.bin.all vmlinux.relocs CMD_RELOCS = arch/x86/tools/relocs quiet_cmd_relocs = RELOCS $@ diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c205035..f8fa411 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -19,28 +19,23 @@ static efi_system_table_t *sys_table; -static void efi_char16_printk(efi_char16_t *str) -{ - struct efi_simple_text_output_protocol *out; - - out = (struct efi_simple_text_output_protocol *)sys_table->con_out; - efi_call_phys2(out->output_string, out, str); -} - static void efi_printk(char *str) { char *s8; for (s8 = str; *s8; s8++) { + struct efi_simple_text_output_protocol *out; efi_char16_t ch[2] = { 0 }; ch[0] = *s8; + out = (struct efi_simple_text_output_protocol *)sys_table->con_out; + if (*s8 == '\n') { efi_char16_t nl[2] = { '\r', 0 }; - efi_char16_printk(nl); + efi_call_phys2(out->output_string, out, nl); } - efi_char16_printk(ch); + efi_call_phys2(out->output_string, out, ch); } } @@ -714,12 +709,7 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image, if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16)) break; - if (*str == '/') { - *p++ = '\\'; - *str++; - } else { - *p++ = *str++; - } + *p++ = *str++; } *p = '\0'; @@ -747,9 +737,7 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image, status = efi_call_phys5(fh->open, fh, &h, filename_16, EFI_FILE_MODE_READ, (u64)0); if (status != EFI_SUCCESS) { - efi_printk("Failed to open initrd file: "); - efi_char16_printk(filename_16); - efi_printk("\n"); + efi_printk("Failed to open initrd file\n"); goto close_handles; } diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 653314e..1b9c22b 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -250,14 +250,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { - kernel_fpu_begin(); aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK); - kernel_fpu_end(); + nbytes & 
AES_BLOCK_MASK); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } + kernel_fpu_end(); return err; } @@ -274,14 +274,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { - kernel_fpu_begin(); aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); - kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } + kernel_fpu_end(); return err; } @@ -298,14 +298,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { - kernel_fpu_begin(); aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); - kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } + kernel_fpu_end(); return err; } @@ -322,14 +322,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { - kernel_fpu_begin(); aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); - kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } + kernel_fpu_end(); return err; } @@ -362,20 +362,18 @@ static int ctr_crypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + kernel_fpu_begin(); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { - kernel_fpu_begin(); aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); - kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { - kernel_fpu_begin(); ctr_crypt_final(ctx, &walk); - kernel_fpu_end(); err = blkcipher_walk_done(desc, &walk, 0); } + kernel_fpu_end(); return err; } diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index b0f7d39..93c6d39 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -42,8 +42,6 @@ * SOFTWARE. 
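The aesni hunks above hoist kernel_fpu_begin()/kernel_fpu_end() out of the per-block loop, so FPU state is saved and restored once per request instead of once per iteration, at the price of a longer non-preemptible section. A toy cost model of that trade-off, for illustration only; FPU_TOGGLE_COST and BLOCK_COST are invented numbers, and only the shape of the comparison is meaningful.

    #include <stdio.h>

    enum { FPU_TOGGLE_COST = 100, BLOCK_COST = 10 };  /* made-up units */

    static long cost_per_block(int blocks)
    {
        /* begin+end inside the loop: two toggles per block */
        return (long)blocks * (2 * FPU_TOGGLE_COST + BLOCK_COST);
    }

    static long cost_hoisted(int blocks)
    {
        /* begin+end around the loop: two toggles total */
        return 2 * FPU_TOGGLE_COST + (long)blocks * BLOCK_COST;
    }

    int main(void)
    {
        for (int blocks = 1; blocks <= 64; blocks *= 4)
            printf("%2d blocks: per-block=%5ld hoisted=%5ld\n",
                   blocks, cost_per_block(blocks), cost_hoisted(blocks));
        return 0;
    }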
*/ -#include - ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction .macro LABEL prefix n @@ -226,10 +224,10 @@ LABEL crc_ %i movdqa (bufp), %xmm0 # 2 consts: K1:K2 movq crc_init, %xmm1 # CRC for block 1 - PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2 + pclmulqdq $0x00,%xmm0,%xmm1 # Multiply by K2 movq crc1, %xmm2 # CRC for block 2 - PCLMULQDQ 0x10, %xmm0, %xmm2 # Multiply by K1 + pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1 pxor %xmm2,%xmm1 movq %xmm1, %rax diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 1785dd7..0c44630 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -51,8 +51,8 @@ #define ACPI_ASM_MACROS #define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable_nort() -#define ACPI_ENABLE_IRQS() local_irq_enable_nort() +#define ACPI_DISABLE_IRQS() local_irq_disable() +#define ACPI_ENABLE_IRQS() local_irq_enable() #define ACPI_FLUSH_CPU_CACHE() wbinvd() int __acpi_acquire_global_lock(unsigned int *lock); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 85039f9..dc87b65 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -419,8 +419,8 @@ struct kvm_vcpu_arch { gpa_t time; struct pvclock_vcpu_time_info hv_clock; unsigned int hw_tsc_khz; - struct gfn_to_hva_cache pv_time; - bool pv_time_enabled; + unsigned int time_offset; + struct page *time_page; /* set guest stopped flag in pvclock flags field */ bool pvclock_set_guest_stopped_request; diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index 8a9b3e2..eb05fb3 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h @@ -14,6 +14,12 @@ extern struct pglist_data *node_data[]; #include +extern void resume_map_numa_kva(pgd_t *pgd); + +#else /* !CONFIG_NUMA */ + +static inline void resume_map_numa_kva(pgd_t *pgd) {} + #endif /* CONFIG_NUMA */ #ifdef CONFIG_DISCONTIGMEM diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 65b85f4..320f7bb 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -14,21 +14,12 @@ #define IRQ_STACK_ORDER 2 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) -#ifdef CONFIG_PREEMPT_RT_FULL -# define STACKFAULT_STACK 0 -# define DOUBLEFAULT_STACK 1 -# define NMI_STACK 2 -# define DEBUG_STACK 0 -# define MCE_STACK 3 -# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */ -#else -# define STACKFAULT_STACK 1 -# define DOUBLEFAULT_STACK 2 -# define NMI_STACK 3 -# define DEBUG_STACK 4 -# define MCE_STACK 5 -# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ -#endif +#define STACKFAULT_STACK 1 +#define DOUBLEFAULT_STACK 2 +#define NMI_STACK 3 +#define DEBUG_STACK 4 +#define MCE_STACK 5 +#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 7361e47..5edd174 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -703,10 +703,7 @@ static inline void arch_leave_lazy_mmu_mode(void) PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); } -static inline void arch_flush_lazy_mmu_mode(void) -{ - PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); -} +void arch_flush_lazy_mmu_mode(void); static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h 
index b3b0ec1..142236e 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -91,7 +91,6 @@ struct pv_lazy_ops { /* Set deferred update mode, used for batching operations. */ void (*enter)(void); void (*leave)(void); - void (*flush)(void); }; struct pv_time_ops { @@ -680,7 +679,6 @@ void paravirt_end_context_switch(struct task_struct *next); void paravirt_enter_lazy_mmu(void); void paravirt_leave_lazy_mmu(void); -void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); u32 _paravirt_ident_32(u32); diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index aa4f8be..216bf36 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -23,19 +23,6 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; -/* - * Because some traps use the IST stack, we must keep preemption - * disabled while calling do_trap(), but do_trap() may call - * force_sig_info() which will grab the signal spin_locks for the - * task, which in PREEMPT_RT_FULL are mutexes. By defining - * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set - * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the - * trap. - */ -#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) -#define ARCH_RT_DELAYS_SIGNAL_SEND -#endif - #ifndef CONFIG_COMPAT typedef sigset_t compat_sigset_t; #endif @@ -44,9 +31,6 @@ typedef sigset_t compat_sigset_t; #include #ifndef __ASSEMBLY__ extern void do_notify_resume(struct pt_regs *, void *, __u32); - -#define __ARCH_HAS_SA_RESTORER - #ifdef __i386__ struct old_sigaction { __sighandler_t sa_handler; diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 64fb5cb..6a99859 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -57,7 +57,7 @@ */ static __always_inline void boot_init_stack_canary(void) { - u64 uninitialized_var(canary); + u64 canary; u64 tsc; #ifdef CONFIG_X86_64 @@ -68,16 +68,8 @@ static __always_inline void boot_init_stack_canary(void) * of randomness. The TSC only matters for very early init, * there it already has some randomness on most systems. Later * on during the bootup the random pool has true entropy too. - * - * For preempt-rt we need to weaken the randomness a bit, as - * we can't call into the random generator from atomic context - * due to locking constraints. We just leave canary - * uninitialized and use the TSC based randomness on top of - * it. 
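boot_init_stack_canary(), shown above, seeds the canary from the random pool and then folds the TSC in on top. A userspace sketch of that mixing step, for illustration only; rand() and clock_gettime() stand in for get_random_bytes() and __native_read_tsc(), which this code cannot call.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static uint64_t init_canary_demo(void)
    {
        uint64_t canary = ((uint64_t)rand() << 32) | (unsigned int)rand();
        struct timespec ts;
        uint64_t tsc;

        /* random base first, timestamp folded in on top, the same
         * mixing step as "canary += tsc + (tsc << 32UL)" above */
        clock_gettime(CLOCK_MONOTONIC, &ts);
        tsc = (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
        return canary + tsc + (tsc << 32);
    }

    int main(void)
    {
        srand((unsigned int)time(NULL));
        printf("canary: %#llx\n", (unsigned long long)init_canary_demo());
        return 0;
    }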
*/ -#ifndef CONFIG_PREEMPT_RT_FULL get_random_bytes(&canary, sizeof(canary)); -#endif tsc = __native_read_tsc(); canary += tsc + (tsc << 32UL); diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 2e188d6..1ace47b 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; */ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return regs->orig_ax; + return regs->orig_ax & __SYSCALL_MASK; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - regs->ax = regs->orig_ax; + regs->ax = regs->orig_ax & __SYSCALL_MASK; } static inline long syscall_get_error(struct task_struct *task, diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 6b0fc2e..2d946e6 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -31,8 +31,6 @@ struct thread_info { __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - int preempt_lazy_count; /* 0 => lazy preemptable, - <0 => BUG */ mm_segment_t addr_limit; struct restart_block restart_block; void __user *sysenter_return; @@ -84,7 +82,6 @@ struct thread_info { #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ -#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ #define TIF_UPROBE 12 /* breakpointed or singlestepping */ @@ -110,7 +107,6 @@ struct thread_info { #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) -#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) #define _TIF_UPROBE (1 << TIF_UPROBE) @@ -161,8 +157,6 @@ struct thread_info { #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) -#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) - #define PREEMPT_ACTIVE 0x10000000 #ifdef CONFIG_X86_32 diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index c779730..4fef207 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -7,7 +7,7 @@ #define tlb_flush(tlb) \ { \ - if (!tlb->fullmm && !tlb->need_flush_all) \ + if (tlb->fullmm == 0) \ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ else \ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index e709884..c20d1ce 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str) return _hypercall3(int, console_io, cmd, count, str); } -extern int __must_check xen_physdev_op_compat(int, void *); +extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); static inline int HYPERVISOR_physdev_op(int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); if (unlikely(rc == -ENOSYS)) - rc = xen_physdev_op_compat(cmd, arg); + rc = HYPERVISOR_physdev_op_compat(cmd, arg); return rc; } diff --git a/arch/x86/kernel/apic/apic.c 
b/arch/x86/kernel/apic/apic.c index cbf5121..b994cc8 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg) { if (config_enabled(CONFIG_X86_32) && !arg) force_enable_local_apic = 1; - else if (arg && !strncmp(arg, "notscdeadline", 13)) + else if (!strncmp(arg, "notscdeadline", 13)) setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return 0; } diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index aaa6399..b739d39 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2428,8 +2428,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) { /* If we are moving the irq we need to mask it */ - if (unlikely(irqd_is_setaffinity_pending(data) && - !irqd_irq_inprogress(data))) { + if (unlikely(irqd_is_setaffinity_pending(data))) { mask_ioapic(cfg); return true; } diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index a36d9cf..2861082 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -33,7 +33,6 @@ void common(void) { OFFSET(TI_status, thread_info, status); OFFSET(TI_addr_limit, thread_info, addr_limit); OFFSET(TI_preempt_count, thread_info, preempt_count); - OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); BLANK(); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 2636e0f..9c3ab43 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1103,9 +1103,7 @@ DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); */ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... 
N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, -#if DEBUG_STACK > 0 [DEBUG_STACK - 1] = DEBUG_STKSZ -#endif }; static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 332e133..80dbda8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -42,7 +41,6 @@ #include #include #include -#include #include #include @@ -1261,7 +1259,7 @@ void mce_log_therm_throt_event(__u64 status) static unsigned long check_interval = 5 * 60; /* 5 minutes */ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ -static DEFINE_PER_CPU(struct hrtimer, mce_timer); +static DEFINE_PER_CPU(struct timer_list, mce_timer); static unsigned long mce_adjust_timer_default(unsigned long interval) { @@ -1271,10 +1269,13 @@ static unsigned long mce_adjust_timer_default(unsigned long interval) static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; -static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) +static void mce_timer_fn(unsigned long data) { + struct timer_list *t = &__get_cpu_var(mce_timer); unsigned long iv; + WARN_ON(smp_processor_id() != data); + if (mce_available(__this_cpu_ptr(&cpu_info))) { machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks)); @@ -1295,10 +1296,9 @@ static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) __this_cpu_write(mce_next_interval, iv); /* Might have become 0 after CMCI storm subsided */ if (iv) { - hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_usecs(iv))); - return HRTIMER_RESTART; + t->expires = jiffies + iv; + add_timer_on(t, smp_processor_id()); } - return HRTIMER_NORESTART; } /* @@ -1306,37 +1306,28 @@ static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) */ void mce_timer_kick(unsigned long interval) { - struct hrtimer *t = &__get_cpu_var(mce_timer); + struct timer_list *t = &__get_cpu_var(mce_timer); + unsigned long when = jiffies + interval; unsigned long iv = __this_cpu_read(mce_next_interval); - if (hrtimer_active(t)) { - s64 exp; - s64 intv_us; - - intv_us = jiffies_to_usecs(interval); - exp = ktime_to_us(hrtimer_expires_remaining(t)); - if (intv_us < exp) { - hrtimer_cancel(t); - hrtimer_start_range_ns(t, - ns_to_ktime(intv_us * 1000), - 0, HRTIMER_MODE_REL_PINNED); - } + if (timer_pending(t)) { + if (time_before(when, t->expires)) + mod_timer_pinned(t, when); } else { - hrtimer_start_range_ns(t, - ns_to_ktime(jiffies_to_usecs(interval) * 1000), - 0, HRTIMER_MODE_REL_PINNED); + t->expires = round_jiffies(when); + add_timer_on(t, smp_processor_id()); } if (interval < iv) __this_cpu_write(mce_next_interval, interval); } -/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */ +/* Must not be called in IRQ context where del_timer_sync() can deadlock */ static void mce_timer_delete_all(void) { int cpu; for_each_online_cpu(cpu) - hrtimer_cancel(&per_cpu(mce_timer, cpu)); + del_timer_sync(&per_cpu(mce_timer, cpu)); } static void mce_do_trigger(struct work_struct *work) @@ -1346,63 +1337,6 @@ static void mce_do_trigger(struct work_struct *work) static DECLARE_WORK(mce_trigger_work, mce_do_trigger); -static void __mce_notify_work(void) -{ - /* Not more than two messages every minute */ - static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); - - /* wake processes polling /dev/mcelog */ - wake_up_interruptible(&mce_chrdev_wait); - - /* - * There is no risk of missing 
notifications because - * work_pending is always cleared before the function is - * executed. - */ - if (mce_helper[0] && !work_pending(&mce_trigger_work)) - schedule_work(&mce_trigger_work); - - if (__ratelimit(&ratelimit)) - pr_info(HW_ERR "Machine check events logged\n"); -} - -#ifdef CONFIG_PREEMPT_RT_FULL -struct task_struct *mce_notify_helper; - -static int mce_notify_helper_thread(void *unused) -{ - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - if (kthread_should_stop()) - break; - __mce_notify_work(); - } - return 0; -} - -static int mce_notify_work_init(void) -{ - mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL, - "mce-notify"); - if (!mce_notify_helper) - return -ENOMEM; - - return 0; -} - -static void mce_notify_work(void) -{ - wake_up_process(mce_notify_helper); -} -#else -static void mce_notify_work(void) -{ - __mce_notify_work(); -} -static inline int mce_notify_work_init(void) { return 0; } -#endif - /* * Notify the user(s) about new machine check events. * Can be called from interrupt context, but not from machine check/NMI @@ -1410,8 +1344,24 @@ static inline int mce_notify_work_init(void) { return 0; } */ int mce_notify_irq(void) { + /* Not more than two messages every minute */ + static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); + if (test_and_clear_bit(0, &mce_need_notify)) { - mce_notify_work(); + /* wake processes polling /dev/mcelog */ + wake_up_interruptible(&mce_chrdev_wait); + + /* + * There is no risk of missing notifications because + * work_pending is always cleared before the function is + * executed. + */ + if (mce_helper[0] && !work_pending(&mce_trigger_work)) + schedule_work(&mce_trigger_work); + + if (__ratelimit(&ratelimit)) + pr_info(HW_ERR "Machine check events logged\n"); + return 1; } return 0; @@ -1682,7 +1632,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) } } -static void mce_start_timer(unsigned int cpu, struct hrtimer *t) +static void mce_start_timer(unsigned int cpu, struct timer_list *t) { unsigned long iv = mce_adjust_timer(check_interval * HZ); @@ -1691,17 +1641,16 @@ static void mce_start_timer(unsigned int cpu, struct hrtimer *t) if (mca_cfg.ignore_ce || !iv) return; - hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000), - 0, HRTIMER_MODE_REL_PINNED); + t->expires = round_jiffies(jiffies + iv); + add_timer_on(t, smp_processor_id()); } static void __mcheck_cpu_init_timer(void) { - struct hrtimer *t = &__get_cpu_var(mce_timer); + struct timer_list *t = &__get_cpu_var(mce_timer); unsigned int cpu = smp_processor_id(); - hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - t->function = mce_timer_fn; + setup_timer(t, mce_timer_fn, cpu); mce_start_timer(cpu, t); } @@ -2358,8 +2307,6 @@ static void __cpuinit mce_disable_cpu(void *h) if (!mce_available(__this_cpu_ptr(&cpu_info))) return; - hrtimer_cancel(&__get_cpu_var(mce_timer)); - if (!(action & CPU_TASKS_FROZEN)) cmci_clear(); for (i = 0; i < mca_cfg.banks; i++) { @@ -2386,7 +2333,6 @@ static void __cpuinit mce_reenable_cpu(void *h) if (b->init) wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); } - __mcheck_cpu_init_timer(); } /* Get notified when a cpu comes on/off. Be hotplug friendly. 
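The mce_timer_fn()/mce_timer_kick() hunks above re-arm a per-CPU poll whose interval adapts: shorter when machine-check events are seen, relaxed back toward the 5-minute default when quiet. A sketch of that backoff policy, for illustration only; the halve/double rule and the event pattern here are illustrative, not a quote of the kernel's exact arithmetic.

    #include <stdio.h>

    enum { HZ_DEMO = 100, CHECK_INTERVAL = 5 * 60 * HZ_DEMO };  /* ~5 min */

    static unsigned long next_interval(unsigned long iv, int saw_event)
    {
        if (saw_event)
            return iv / 2 ? iv / 2 : 1;                 /* poll faster */
        return iv * 2 < CHECK_INTERVAL ? iv * 2 : CHECK_INTERVAL;
    }

    int main(void)
    {
        unsigned long iv = CHECK_INTERVAL;
        int events[] = { 1, 1, 1, 0, 0, 0, 0, 0 };

        for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
            iv = next_interval(iv, events[i]);
            printf("tick %u: event=%d, next poll in %lu jiffies\n",
                   i, events[i], iv);
        }
        return 0;
    }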
*/ @@ -2394,6 +2340,7 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; + struct timer_list *t = &per_cpu(mce_timer, cpu); switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: @@ -2409,9 +2356,11 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, mce_disable_cpu, &action, 1); + del_timer_sync(t); break; case CPU_DOWN_FAILED: smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); + mce_start_timer(cpu, t); break; } @@ -2473,8 +2422,6 @@ static __init int mcheck_init_device(void) /* register character device /dev/mcelog */ misc_register(&mce_chrdev_device); - err = mce_notify_work_init(); - return err; } device_initcall_sync(mcheck_init_device); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 646d192..0a630dd 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -68,8 +68,7 @@ static void __init ms_hyperv_init_platform(void) printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", ms_hyperv.features, ms_hyperv.hints); - if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) - clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); + clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index f50cca1..115c1ea 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -108,7 +108,6 @@ struct intel_shared_regs { struct er_account regs[EXTRA_REG_MAX]; int refcnt; /* per-core: #HT threads */ unsigned core_id; /* per-core: core id */ - struct rcu_head rcu; }; #define MAX_LBR_ENTRIES 16 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 0632237..4914e94 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -128,14 +128,8 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = }; static struct extra_reg intel_snb_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), - EVENT_EXTRA_END -}; - -static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), EVENT_EXTRA_END }; @@ -1721,7 +1715,7 @@ static void intel_pmu_cpu_dying(int cpu) pc = cpuc->shared_regs; if (pc) { if (pc->core_id == -1 || --pc->refcnt == 0) - kfree_rcu(pc, rcu); + kfree(pc); cpuc->shared_regs = NULL; } @@ -2078,10 +2072,7 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - if (boot_cpu_data.x86_model == 45) - x86_pmu.extra_regs = intel_snbep_extra_regs; - else - x86_pmu.extra_regs = intel_snb_extra_regs; + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; @@ -2107,10 +2098,7 @@ __init int 
intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_snb; - if (boot_cpu_data.x86_model == 62) - x86_pmu.extra_regs = intel_snbep_extra_regs; - else - x86_pmu.extra_regs = intel_snb_extra_regs; + x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; x86_pmu.er_flags |= ERF_NO_HT_SHARING; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b05a575..826054a 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -729,13 +729,3 @@ void intel_ds_init(void) } } } - -void perf_restore_debug_store(void) -{ - struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); - - if (!x86_pmu.bts && !x86_pmu.pebs) - return; - - wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds); -} diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index d978353..da02e9c 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -310,7 +310,7 @@ void intel_pmu_lbr_read(void) * - in case there is no HW filter * - in case the HW filter has errata or limitations */ -static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event) +static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) { u64 br_type = event->attr.branch_sample_type; int mask = 0; @@ -318,11 +318,8 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event) if (br_type & PERF_SAMPLE_BRANCH_USER) mask |= X86_BR_USER; - if (br_type & PERF_SAMPLE_BRANCH_KERNEL) { - if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) - return -EACCES; + if (br_type & PERF_SAMPLE_BRANCH_KERNEL) mask |= X86_BR_KERNEL; - } /* we ignore BRANCH_HV here */ @@ -342,8 +339,6 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event) * be used by fixup code for some CPU */ event->hw.branch_reg.reg = mask; - - return 0; } /* @@ -391,9 +386,7 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event) /* * setup SW LBR filter */ - ret = intel_pmu_setup_sw_lbr_filter(event); - if (ret) - return ret; + intel_pmu_setup_sw_lbr_filter(event); /* * setup HW LBR filter, if any @@ -449,18 +442,8 @@ static int branch_type(unsigned long from, unsigned long to) return X86_BR_NONE; addr = buf; - } else { - /* - * The LBR logs any address in the IP, even if the IP just - * faulted. This means userspace can control the from address. - * Ensure we don't blindy read any address by validating it is - * a known text address. 
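The branch_type() check deleted above refused to dereference a "from" address unless kernel_text_address() confirmed it was real code, because the LBR can log a faulting IP that userspace influenced. The same validate-before-read pattern in plain C, for illustration; fake_text and addr_in_text() are stand-ins for the kernel's text-range check.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const unsigned char fake_text[64] = { 0x90, 0x90, 0xc3 };

    static int addr_in_text(uintptr_t p)
    {
        uintptr_t base = (uintptr_t)fake_text;
        return p >= base && p < base + sizeof(fake_text);
    }

    static int read_opcode(uintptr_t from, unsigned char *op)
    {
        if (!addr_in_text(from))
            return -1;        /* refuse, as the deleted check refused */
        memcpy(op, (const void *)from, 1);
        return 0;
    }

    int main(void)
    {
        unsigned char op;
        printf("in range:  %d\n", read_opcode((uintptr_t)fake_text, &op));
        printf("arbitrary: %d\n", read_opcode(0x1000, &op));
        return 0;
    }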
- */ - if (kernel_text_address(from)) - addr = (void *)from; - else - return X86_BR_NONE; - } + } else + addr = (void *)from; /* * decoder needs to know the ABI especially diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 682355e..b43200d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types) static int __init uncore_type_init(struct intel_uncore_type *type) { struct intel_uncore_pmu *pmus; - struct attribute_group *attr_group; + struct attribute_group *events_group; struct attribute **attrs; int i, j; @@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type) while (type->event_descs[i].attr.attr.name) i++; - attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) + - sizeof(*attr_group), GFP_KERNEL); - if (!attr_group) + events_group = kzalloc(sizeof(struct attribute *) * (i + 1) + + sizeof(*events_group), GFP_KERNEL); + if (!events_group) goto fail; - attrs = (struct attribute **)(attr_group + 1); - attr_group->name = "events"; - attr_group->attrs = attrs; + attrs = (struct attribute **)(events_group + 1); + events_group->name = "events"; + events_group->attrs = attrs; for (j = 0; j < i; j++) attrs[j] = &type->event_descs[j].attr.attr; - type->events_group = attr_group; + type->events_group = events_group; } type->pmu_group = &uncore_pmu_attr_group; @@ -2636,7 +2636,7 @@ static void __cpuinit uncore_cpu_dying(int cpu) box = *per_cpu_ptr(pmu->box, cpu); *per_cpu_ptr(pmu->box, cpu) = NULL; if (box && atomic_dec_and_test(&box->refcnt)) - kfree_rcu(box, rcu); + kfree(box); } } } @@ -2666,8 +2666,7 @@ static int __cpuinit uncore_cpu_starting(int cpu) if (exist && exist->phys_id == phys_id) { atomic_inc(&exist->refcnt); *per_cpu_ptr(pmu->box, cpu) = exist; - if (box) - kfree_rcu(box, rcu); + kfree(box); box = NULL; break; } @@ -2854,7 +2853,6 @@ static int __init uncore_cpu_init(void) msr_uncores = nhm_msr_uncores; break; case 42: /* Sandy Bridge */ - case 58: /* Ivy Bridge */ if (snb_uncore_cbox.num_boxes > max_cores) snb_uncore_cbox.num_boxes = max_cores; msr_uncores = snb_msr_uncores; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index c4e1028..e68a455 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -421,7 +421,6 @@ struct intel_uncore_box { struct hrtimer hrtimer; struct list_head list; struct intel_uncore_extra_reg shared_regs[0]; - struct rcu_head rcu; }; #define UNCORE_BOX_FLAG_INITIATED 0 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index f16c07b..b653675 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -21,14 +21,10 @@ (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) static char x86_stack_ids[][8] = { -#if DEBUG_STACK > 0 [ DEBUG_STACK-1 ] = "#DB", -#endif [ NMI_STACK-1 ] = "NMI", [ DOUBLEFAULT_STACK-1 ] = "#DF", -#if STACKFAULT_STACK > 0 [ STACKFAULT_STACK-1 ] = "#SS", -#endif [ MCE_STACK-1 ] = "#MC", #if DEBUG_STKSZ > EXCEPTION_STKSZ [ N_EXCEPTION_STACKS ... 
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index d15f575..9b9f18b 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -169,9 +169,25 @@ static struct console early_serial_console = { .index = -1, }; +/* Direct interface for emergencies */ +static struct console *early_console = &early_vga_console; +static int __initdata early_console_initialized; + +asmlinkage void early_printk(const char *fmt, ...) +{ + char buf[512]; + int n; + va_list ap; + + va_start(ap, fmt); + n = vscnprintf(buf, sizeof(buf), fmt, ap); + early_console->write(early_console, buf, n); + va_end(ap); +} + static inline void early_console_register(struct console *con, int keep_early) { - if (con->index != -1) { + if (early_console->index != -1) { printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n", con->name); return; @@ -191,8 +207,9 @@ static int __init setup_early_printk(char *buf) if (!buf) return 0; - if (early_console) + if (early_console_initialized) return 0; + early_console_initialized = 1; keep = (strstr(buf, "keep") != NULL); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 218e79a..6ed91d9 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -364,22 +364,14 @@ ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? jnz restore_all +need_resched: movl TI_flags(%ebp), %ecx # need_resched set ? testb $_TIF_NEED_RESCHED, %cl - jnz 1f - - cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? - jnz restore_all - testl $_TIF_NEED_RESCHED_LAZY, %ecx jz restore_all - -1: testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? + testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all call preempt_schedule_irq - movl TI_flags(%ebp), %ecx # need_resched set ? - testl $_TIF_NEED_RESCHED_MASK, %ecx - jnz 1b - jmp restore_all + jmp need_resched END(resume_kernel) #endif CFI_ENDPROC @@ -615,7 +607,7 @@ ENDPROC(system_call) ALIGN RING0_PTREGS_FRAME # can't unwind into user space anyway work_pending: - testl $_TIF_NEED_RESCHED_MASK, %ecx + testb $_TIF_NEED_RESCHED, %cl jz work_notifysig work_resched: call schedule @@ -628,7 +620,7 @@ work_resched: andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? jz restore_all - testl $_TIF_NEED_RESCHED_MASK, %ecx + testb $_TIF_NEED_RESCHED, %cl jnz work_resched work_notifysig: # deal with pending signals and diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 0b01d8d..cb3c591 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -673,8 +673,8 @@ sysret_check: /* Handle reschedules */ /* edx: work, edi: workmask */ sysret_careful: - testl $_TIF_NEED_RESCHED_MASK,%edx - jz sysret_signal + bt $TIF_NEED_RESCHED,%edx + jnc sysret_signal TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi @@ -786,8 +786,8 @@ GLOBAL(int_with_check) /* First do a reschedule test. 
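The entry_32.S resume_kernel path restored above reduces, in C terms, to a loop: while TIF_NEED_RESCHED is set, preempt_count is zero, and the interrupted frame had interrupts enabled, call preempt_schedule_irq() and re-check. A sketch of that control flow, for illustration only; every helper below is a descriptive stand-in, the real code reads thread_info flags and the saved EFLAGS image.

    #include <stdbool.h>
    #include <stdio.h>

    static int preempt_count_demo;
    static bool tif_need_resched = true;
    static bool irqs_on_in_frame = true;

    static void preempt_schedule_irq_demo(void)
    {
        printf("preempting...\n");
        tif_need_resched = false;   /* the scheduler clears the flag */
    }

    static void resume_kernel_demo(void)
    {
        if (preempt_count_demo)     /* non-zero count: straight to restore_all */
            return;
        while (tif_need_resched) {
            if (!irqs_on_in_frame)  /* exception path with interrupts off */
                return;
            preempt_schedule_irq_demo();
        }
    }

    int main(void)
    {
        resume_kernel_demo();
        return 0;
    }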
*/ /* edx: work, edi: workmask */ int_careful: - testl $_TIF_NEED_RESCHED_MASK,%edx - jz int_very_careful + bt $TIF_NEED_RESCHED,%edx + jnc int_very_careful TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi @@ -1094,8 +1094,8 @@ bad_iret: /* edi: workmask, edx: work */ retint_careful: CFI_RESTORE_STATE - testl $_TIF_NEED_RESCHED_MASK,%edx - jz retint_signal + bt $TIF_NEED_RESCHED,%edx + jnc retint_signal TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi @@ -1128,15 +1128,9 @@ retint_signal: ENTRY(retint_kernel) cmpl $0,TI_preempt_count(%rcx) jnz retint_restore_args - bt $TIF_NEED_RESCHED,TI_flags(%rcx) - jc 1f - - cmpl $0,TI_preempt_lazy_count(%rcx) - jnz retint_restore_args - bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) + bt $TIF_NEED_RESCHED,TI_flags(%rcx) jnc retint_restore_args - -1: bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ + bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ jnc retint_restore_args call preempt_schedule_irq jmp exit_intr @@ -1343,7 +1337,6 @@ bad_gs: jmp 2b .previous -#ifndef CONFIG_PREEMPT_RT_FULL /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(call_softirq) CFI_STARTPROC @@ -1363,7 +1356,6 @@ ENTRY(call_softirq) ret CFI_ENDPROC END(call_softirq) -#endif #ifdef CONFIG_XEN zeroentry xen_hypervisor_callback xen_do_hypervisor_callback @@ -1528,7 +1520,7 @@ paranoid_userspace: movq %rsp,%rdi /* &pt_regs */ call sync_regs movq %rax,%rsp /* switch stack for scheduling */ - testl $_TIF_NEED_RESCHED_MASK,%ebx + testl $_TIF_NEED_RESCHED,%ebx jnz paranoid_schedule movl %ebx,%edx /* arg3: thread flags */ TRACE_IRQS_ON diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c index 992f442..48d9d4e 100644 --- a/arch/x86/kernel/head.c +++ b/arch/x86/kernel/head.c @@ -5,6 +5,8 @@ #include #include +#define BIOS_LOWMEM_KILOBYTES 0x413 + /* * The BIOS places the EBDA/XBDA at the top of conventional * memory, and usually decreases the reported amount of @@ -14,30 +16,17 @@ * chipset: reserve a page before VGA to prevent PCI prefetch * into it (errata #56). Usually the page is reserved anyways, * unless you have no PS/2 mouse plugged in. - * - * This functions is deliberately very conservative. Losing - * memory in the bottom megabyte is rarely a problem, as long - * as we have enough memory to install the trampoline. Using - * memory that is in use by the BIOS or by some DMA device - * the BIOS didn't shut down *is* a big problem. */ - -#define BIOS_LOWMEM_KILOBYTES 0x413 -#define LOWMEM_CAP 0x9f000U /* Absolute maximum */ -#define INSANE_CUTOFF 0x20000U /* Less than this = insane */ - void __init reserve_ebda_region(void) { unsigned int lowmem, ebda_addr; - /* - * To determine the position of the EBDA and the - * end of conventional memory, we need to look at - * the BIOS data area. In a paravirtual environment - * that area is absent. We'll just have to assume - * that the paravirt case can handle memory setup - * correctly, without our help. - */ + /* To determine the position of the EBDA and the */ + /* end of conventional memory, we need to look at */ + /* the BIOS data area. In a paravirtual environment */ + /* that area is absent. We'll just have to assume */ + /* that the paravirt case can handle memory setup */ + /* correctly, without our help. 
*/ if (paravirt_enabled()) return; @@ -48,23 +37,19 @@ void __init reserve_ebda_region(void) /* start of EBDA area */ ebda_addr = get_bios_ebda(); - /* - * Note: some old Dells seem to need 4k EBDA without - * reporting so, so just consider the memory above 0x9f000 - * to be off limits (bugzilla 2990). - */ - - /* If the EBDA address is below 128K, assume it is bogus */ - if (ebda_addr < INSANE_CUTOFF) - ebda_addr = LOWMEM_CAP; + /* Fixup: bios puts an EBDA in the top 64K segment */ + /* of conventional memory, but does not adjust lowmem. */ + if ((lowmem - ebda_addr) <= 0x10000) + lowmem = ebda_addr; - /* If lowmem is less than 128K, assume it is bogus */ - if (lowmem < INSANE_CUTOFF) - lowmem = LOWMEM_CAP; + /* Fixup: bios does not report an EBDA at all. */ + /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */ + if ((ebda_addr == 0) && (lowmem >= 0x9f000)) + lowmem = 0x9f000; - /* Use the lower of the lowmem and EBDA markers as the cutoff */ - lowmem = min(lowmem, ebda_addr); - lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */ + /* Paranoia: should never happen, but... */ + if ((lowmem == 0) || (lowmem >= 0x100000)) + lowmem = 0x9f000; /* reserve all memory between lowmem and the 1MB mark */ memblock_reserve(lowmem, 0x100000 - lowmem); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 5ce3c25..e28670f 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include @@ -574,30 +573,6 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) #define RESERVE_TIMERS 0 #endif -static int __init dmi_disable_hpet_msi(const struct dmi_system_id *d) -{ - hpet_msi_disable = 1; - return 0; -} - -static struct dmi_system_id __initdata dmi_hpet_table[] = { - /* - * MSI based per cpu timers lose interrupts when intel_idle() - * is enabled - independent of the c-state. With idle=poll the - * problem cannot be observed. We have no idea yet, whether - * this is a W510 specific issue or a general chipset oddity. 
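The reverted reserve_ebda_region() fixups above can be restated as a pure function of the BIOS lowmem figure and the EBDA pointer. This sketch mirrors the constants in the hunk (0x10000, 0x9f000, 0x100000); ebda_cutoff() is an invented name and the example inputs are made up.

    #include <stdio.h>

    static unsigned int ebda_cutoff(unsigned int lowmem, unsigned int ebda_addr)
    {
        /* BIOS put the EBDA in the top 64K but did not adjust lowmem */
        if (lowmem - ebda_addr <= 0x10000)
            lowmem = ebda_addr;
        /* no EBDA reported; old Dells still need 4k (bugzilla 2990) */
        if (ebda_addr == 0 && lowmem >= 0x9f000)
            lowmem = 0x9f000;
        /* paranoia: should never happen, but... */
        if (lowmem == 0 || lowmem >= 0x100000)
            lowmem = 0x9f000;
        return lowmem;
    }

    int main(void)
    {
        printf("%#x\n", ebda_cutoff(0xa0000, 0x9fc00)); /* typical machine */
        printf("%#x\n", ebda_cutoff(0xa0000, 0));       /* no EBDA reported */
        return 0;
    }

Everything from the returned cutoff up to the 1MB mark is then handed to memblock_reserve(), as the hunk's final line shows.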
- */ - { - .callback = dmi_disable_hpet_msi, - .ident = "Lenovo W510", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), - }, - }, - {} -}; - static void hpet_msi_capability_lookup(unsigned int start_timer) { unsigned int id; @@ -605,8 +580,6 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) unsigned int num_timers_used = 0; int i; - dmi_check_system(dmi_hpet_table); - if (hpet_msi_disable) return; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 84b7789..e4595f1 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -165,6 +165,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu) u64 arch_irq_stat(void) { u64 sum = atomic_read(&irq_err_count); + +#ifdef CONFIG_X86_IO_APIC + sum += atomic_read(&irq_mis_count); +#endif return sum; } diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index f60ecc0..344faf8 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -149,7 +149,6 @@ void __cpuinit irq_ctx_init(int cpu) cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); } -#ifndef CONFIG_PREEMPT_RT_FULL asmlinkage void do_softirq(void) { unsigned long flags; @@ -180,7 +179,6 @@ asmlinkage void do_softirq(void) local_irq_restore(flags); } -#endif bool handle_irq(unsigned irq, struct pt_regs *regs) { diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 831f247..d04d3ec 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -88,7 +88,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) return true; } -#ifndef CONFIG_PREEMPT_RT_FULL + extern void call_softirq(void); asmlinkage void do_softirq(void) @@ -108,4 +108,3 @@ asmlinkage void do_softirq(void) } local_irq_restore(flags); } -#endif diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c index 129b8bb..ca8f703 100644 --- a/arch/x86/kernel/irq_work.c +++ b/arch/x86/kernel/irq_work.c @@ -18,7 +18,6 @@ void smp_irq_work_interrupt(struct pt_regs *regs) irq_exit(); } -#ifndef CONFIG_PREEMPT_RT_FULL void arch_irq_work_raise(void) { #ifdef CONFIG_X86_LOCAL_APIC @@ -29,4 +28,3 @@ void arch_irq_work_raise(void) apic_wait_icr_idle(); #endif } -#endif diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 5bedbdd..220a360 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -218,9 +218,6 @@ static void kvm_shutdown(void) void __init kvmclock_init(void) { unsigned long mem; - int size; - - size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS); if (!kvm_para_available()) return; @@ -234,14 +231,16 @@ void __init kvmclock_init(void) printk(KERN_INFO "kvm-clock: Using msrs %x and %x", msr_kvm_system_time, msr_kvm_wall_clock); - mem = memblock_alloc(size, PAGE_SIZE); + mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS, + PAGE_SIZE); if (!mem) return; hv_clock = __va(mem); if (kvm_register_clock("boot clock")) { hv_clock = NULL; - memblock_free(mem, size); + memblock_free(mem, + sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS); return; } pv_time_ops.sched_clock = kvm_clock_read; @@ -276,7 +275,7 @@ int __init kvm_setup_vsyscall_timeinfo(void) struct pvclock_vcpu_time_info *vcpu_time; unsigned int size; - size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS); + size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS; preempt_disable(); cpu = smp_processor_id(); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 8bfb335..17fff18 100644 --- 
a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -263,18 +263,6 @@ void paravirt_leave_lazy_mmu(void) leave_lazy(PARAVIRT_LAZY_MMU); } -void paravirt_flush_lazy_mmu(void) -{ - preempt_disable(); - - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { - arch_leave_lazy_mmu_mode(); - arch_enter_lazy_mmu_mode(); - } - - preempt_enable(); -} - void paravirt_start_context_switch(struct task_struct *prev) { BUG_ON(preemptible()); @@ -304,6 +292,18 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return this_cpu_read(paravirt_lazy_mode); } +void arch_flush_lazy_mmu_mode(void) +{ + preempt_disable(); + + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + arch_enter_lazy_mmu_mode(); + } + + preempt_enable(); +} + struct pv_info pv_info = { .name = "bare hardware", .paravirt_enabled = 0, @@ -475,7 +475,6 @@ struct pv_mmu_ops pv_mmu_ops = { .lazy_mode = { .enter = paravirt_nop, .leave = paravirt_nop, - .flush = paravirt_nop, }, .set_fixmap = native_set_fixmap, diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 139ad27..b5a8905 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include @@ -217,35 +216,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) } EXPORT_SYMBOL_GPL(start_thread); -#ifdef CONFIG_PREEMPT_RT_FULL -static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -{ - int i; - - /* - * Clear @prev's kmap_atomic mappings - */ - for (i = 0; i < prev_p->kmap_idx; i++) { - int idx = i + KM_TYPE_NR * smp_processor_id(); - pte_t *ptep = kmap_pte - idx; - - kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); - } - /* - * Restore @next_p's kmap_atomic mappings - */ - for (i = 0; i < next_p->kmap_idx; i++) { - int idx = i + KM_TYPE_NR * smp_processor_id(); - - if (!pte_none(next_p->kmap_pte[i])) - set_pte(kmap_pte - idx, next_p->kmap_pte[i]); - } -} -#else -static inline void -switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -#endif - /* * switch_to(x,y) should switch tasks from x to y. @@ -325,8 +295,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); - switch_kmaps(prev_p, next_p); - /* * Leave lazy mode, flushing any hypercalls made here. 
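The paravirt.c hunks above fold the lazy-MMU flush back into arch_flush_lazy_mmu_mode(): leaving and re-entering PARAVIRT_LAZY_MMU forces any batched operations out. A toy model of that batching, with illustrative names rather than the real pv_mmu_ops plumbing:

    #include <stdio.h>

    enum lazy_mode { LAZY_NONE, LAZY_MMU };

    static enum lazy_mode mode = LAZY_NONE;
    static int pending;                    /* queued PTE updates */

    static void issue_hypercalls(void)
    {
        if (pending)
            printf("flushing %d batched update(s)\n", pending);
        pending = 0;
    }

    static void enter_lazy_mmu(void) { mode = LAZY_MMU; }
    static void leave_lazy_mmu(void) { issue_hypercalls(); mode = LAZY_NONE; }

    static void set_pte(int n)             /* batch while lazy, else flush now */
    {
        pending += n;
        if (mode != LAZY_MMU)
            issue_hypercalls();
    }

    /* The pattern restored above: leave and re-enter to flush the queue. */
    static void flush_lazy_mmu(void)
    {
        if (mode == LAZY_MMU) {
            leave_lazy_mmu();
            enter_lazy_mmu();
        }
    }

    int main(void)
    {
        enter_lazy_mmu();
        set_pte(3);            /* batched, nothing issued yet */
        flush_lazy_mmu();      /* forces the batch out, stays lazy */
        set_pte(1);
        leave_lazy_mmu();      /* final flush */
        return 0;
    }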
* This must be done before restoring TLS segments so diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 2cb9470..85c3959 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -185,7 +185,7 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i, for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, - __pa(i) + (idx*PAGE_SIZE), + __pa_symbol(i) + (idx*PAGE_SIZE), PAGE_KERNEL_VVAR); } diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 4a3a5dd..d6bf1f3 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -808,14 +808,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) mce_notify_process(); #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ -#ifdef ARCH_RT_DELAYS_SIGNAL_SEND - if (unlikely(current->forced_info.si_signo)) { - struct task_struct *t = current; - force_sig_info(t->forced_info.si_signo, &t->forced_info, t); - t->forced_info.si_signo = 0; - } -#endif - if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index be18ff6..ecffca1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -85,21 +85,9 @@ static inline void conditional_sti(struct pt_regs *regs) local_irq_enable(); } -static inline void conditional_sti_ist(struct pt_regs *regs) +static inline void preempt_conditional_sti(struct pt_regs *regs) { -#ifdef CONFIG_X86_64 - /* - * X86_64 uses a per CPU stack on the IST for certain traps - * like int3. The task can not be preempted when using one - * of these stacks, thus preemption must be disabled, otherwise - * the stack can be corrupted if the task is scheduled out, - * and another task comes in and uses this stack. - * - * On x86_32 the task keeps its own stack and it is OK if the - * task schedules out. - */ inc_preempt_count(); -#endif if (regs->flags & X86_EFLAGS_IF) local_irq_enable(); } @@ -110,13 +98,11 @@ static inline void conditional_cli(struct pt_regs *regs) local_irq_disable(); } -static inline void conditional_cli_ist(struct pt_regs *regs) +static inline void preempt_conditional_cli(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_disable(); -#ifdef CONFIG_X86_64 dec_preempt_count(); -#endif } static int __kprobes @@ -243,9 +229,9 @@ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) exception_enter(regs); if (notify_die(DIE_TRAP, "stack segment", regs, error_code, X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { - conditional_sti_ist(regs); + preempt_conditional_sti(regs); do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); - conditional_cli_ist(regs); + preempt_conditional_cli(regs); } exception_exit(regs); } @@ -345,9 +331,9 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co * as we may switch to the interrupt stack. 
*/ debug_stack_usage_inc(); - conditional_sti_ist(regs); + preempt_conditional_sti(regs); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); - conditional_cli_ist(regs); + preempt_conditional_cli(regs); debug_stack_usage_dec(); exit: exception_exit(regs); @@ -452,12 +438,12 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_inc(); /* It's safe to allow irq's after DR6 has been saved */ - conditional_sti_ist(regs); + preempt_conditional_sti(regs); if (regs->flags & X86_VM_MASK) { handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, X86_TRAP_DB); - conditional_cli_ist(regs); + preempt_conditional_cli(regs); debug_stack_usage_dec(); goto exit; } @@ -477,7 +463,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) si_code = get_si_code(tsk->thread.debugreg6); if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) send_sigtrap(tsk, regs, error_code, si_code); - conditional_cli_ist(regs); + preempt_conditional_cli(regs); debug_stack_usage_dec(); exit: diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d330b3c..a27e763 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -4030,10 +4030,6 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, break; case OpMem8: ctxt->memop.bytes = 1; - if (ctxt->memop.type == OP_REG) { - ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1); - fetch_register_operand(&ctxt->memop); - } goto mem_common; case OpMem16: ctxt->memop.bytes = 2; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index a2f492c..9392f52 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1781,7 +1781,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr, sizeof(u8)); + addr); } void kvm_lapic_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fb485ba..c243b81 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1408,9 +1408,10 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; + void *shared_kaddr; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp, host_tsc; - struct pvclock_vcpu_time_info guest_hv_clock; + struct pvclock_vcpu_time_info *guest_hv_clock; u8 pvclock_flags; bool use_master_clock; @@ -1464,7 +1465,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) local_irq_restore(flags); - if (!vcpu->pv_time_enabled) + if (!vcpu->time_page) return 0; /* @@ -1526,12 +1527,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) */ vcpu->hv_clock.version += 2; - if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, - &guest_hv_clock, sizeof(guest_hv_clock)))) - return 0; + shared_kaddr = kmap_atomic(vcpu->time_page); + + guest_hv_clock = shared_kaddr + vcpu->time_offset; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ - pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); + pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; @@ -1544,9 +1545,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->hv_clock.flags = pvclock_flags; - kvm_write_guest_cached(v->kvm, &vcpu->pv_time, - &vcpu->hv_clock, - sizeof(vcpu->hv_clock)); + memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, + 
sizeof(vcpu->hv_clock)); + + kunmap_atomic(shared_kaddr); + + mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); return 0; } @@ -1825,8 +1829,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, - sizeof(u32))) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1836,7 +1839,10 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) static void kvmclock_reset(struct kvm_vcpu *vcpu) { - vcpu->arch.pv_time_enabled = false; + if (vcpu->arch.time_page) { + kvm_release_page_dirty(vcpu->arch.time_page); + vcpu->arch.time_page = NULL; + } } static void accumulate_steal_time(struct kvm_vcpu *vcpu) @@ -1942,7 +1948,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { - u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; @@ -1952,14 +1957,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!(data & 1)) break; - gpa_offset = data & ~(PAGE_MASK | 1); + /* ...but clean it before doing the actual write */ + vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, - &vcpu->arch.pv_time, data & ~1ULL, - sizeof(struct pvclock_vcpu_time_info))) - vcpu->arch.pv_time_enabled = false; - else - vcpu->arch.pv_time_enabled = true; + vcpu->arch.time_page = + gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); + + if (is_error_page(vcpu->arch.time_page)) + vcpu->arch.time_page = NULL; break; } @@ -1976,8 +1981,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS, - sizeof(struct kvm_steal_time))) + data & KVM_STEAL_VALID_BITS)) return 1; vcpu->arch.st.msr_val = data; @@ -2963,7 +2967,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, */ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { - if (!vcpu->arch.pv_time_enabled) + if (!vcpu->arch.time_page) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); @@ -5238,13 +5242,6 @@ int kvm_arch_init(void *opaque) goto out; } -#ifdef CONFIG_PREEMPT_RT_FULL - if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { - printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); - return -EOPNOTSUPP; - } -#endif - r = kvm_mmu_module_init(); if (r) goto out_free_percpu; @@ -6664,7 +6661,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) goto fail_free_wbinvd_dirty_mask; vcpu->arch.ia32_tsc_adjust_msr = 0x0; - vcpu->arch.pv_time_enabled = false; kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 20a4fd4..df4176c 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1333,7 +1333,6 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; - pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; pv_mmu_ops.pte_update = lguest_pte_update; pv_mmu_ops.pte_update_defer = lguest_pte_update; diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 906fea3..05928aa 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, 
unsigned len, unsigned zerorest) char c; unsigned zero_len; - for (; len; --len, to++) { + for (; len; --len) { if (__get_user_nocheck(c, from++, sizeof(char))) break; - if (__put_user_nocheck(c, to, sizeof(char))) + if (__put_user_nocheck(c, to++, sizeof(char))) break; } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9cc2653..fb674fd 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -378,12 +378,10 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_ref)) return -1; - if (pgd_none(*pgd)) { + if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); - arch_flush_lazy_mmu_mode(); - } else { + else BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); - } /* * Below here mismatches are bugs because these lower tables @@ -1110,7 +1108,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ - if (unlikely(!mm || pagefault_disabled())) { + if (unlikely(in_atomic() || !mm)) { bad_area_nosemaphore(regs, error_code, address); return; } diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 01f7c99..6f31ee5 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -31,7 +31,6 @@ EXPORT_SYMBOL(kunmap); */ void *kmap_atomic_prot(struct page *page, pgprot_t prot) { - pte_t pte = mk_pte(page, prot); unsigned long vaddr; int idx, type; @@ -45,10 +44,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); -#ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = pte; -#endif - set_pte(kmap_pte-idx, pte); + set_pte(kmap_pte-idx, mk_pte(page, prot)); arch_flush_lazy_mmu_mode(); return (void *)vaddr; @@ -91,9 +87,6 @@ void __kunmap_atomic(void *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. 
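The highmem_32.c hunk above rebuilds the mapping address from a per-CPU slot. A standalone sketch of the fixmap arithmetic behind kmap_atomic_prot() (KM_TYPE_NR, FIX_KMAP_BEGIN and FIXADDR_TOP are sample values here, not a real configuration):

    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define KM_TYPE_NR     20            /* kmap slots per CPU (sample value) */
    #define FIX_KMAP_BEGIN 128           /* first kmap fixmap index (sample) */
    #define FIXADDR_TOP    0xfffff000UL  /* sample fixmap ceiling */

    /* Fixmap indices grow downward from FIXADDR_TOP, one page per index. */
    static unsigned long fix_to_virt(unsigned int idx)
    {
        return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
    }

    int main(void)
    {
        int cpu = 1, type = 3;   /* as handed out by kmap_atomic_idx_push() */
        int idx = type + KM_TYPE_NR * cpu;

        printf("cpu %d slot %d -> fixmap idx %d -> vaddr %#lx\n",
               cpu, type, FIX_KMAP_BEGIN + idx,
               fix_to_virt(FIX_KMAP_BEGIN + idx));
        return 0;
    }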
*/ -#ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = __pte(0); -#endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); arch_flush_lazy_mmu_mode(); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 7d7a36d..d7aea41 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -45,15 +45,11 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range) int i; unsigned long puds = 0, pmds = 0, ptes = 0, tables; unsigned long start = 0, good_end; - unsigned long pgd_extra = 0; phys_addr_t base; for (i = 0; i < nr_range; i++) { unsigned long range, extra; - if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT)) - pgd_extra++; - range = mr[i].end - mr[i].start; puds += (range + PUD_SIZE - 1) >> PUD_SHIFT; @@ -78,7 +74,6 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range) tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); - tables += (pgd_extra * PAGE_SIZE); #ifdef CONFIG_X86_32 /* for fixmap */ diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 62377d6..7b179b4 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -56,7 +56,6 @@ EXPORT_SYMBOL_GPL(iomap_free); void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) { - pte_t pte = pfn_pte(pfn, prot); unsigned long vaddr; int idx, type; @@ -65,12 +64,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - WARN_ON(!pte_none(*(kmap_pte - idx))); - -#ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = pte; -#endif - set_pte(kmap_pte - idx, pte); + set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); arch_flush_lazy_mmu_mode(); return (void *)vaddr; @@ -116,9 +110,6 @@ iounmap_atomic(void __iomem *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ -#ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = __pte(0); -#endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); } diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 8504f36..2d125be 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -193,6 +193,7 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) static void __init setup_node_data(int nid, u64 start, u64 end) { const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); + bool remapped = false; u64 nd_pa; void *nd; int tnid; @@ -204,28 +205,37 @@ static void __init setup_node_data(int nid, u64 start, u64 end) if (end && (end - start) < NODE_MIN_SIZE) return; + /* initialize remap allocator before aligning to ZONE_ALIGN */ + init_alloc_remap(nid, start, end); + start = roundup(start, ZONE_ALIGN); printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid, start, end - 1); /* - * Allocate node data. Try node-local memory and then any node. - * Never allocate in DMA zone. + * Allocate node data. Try remap allocator first, node-local + * memory and then any node. Never allocate in DMA zone. 
*/ - nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) { - pr_err("Cannot find %zu bytes in node %d\n", - nd_size, nid); - return; + nd = alloc_remap(nid, nd_size); + if (nd) { + nd_pa = __pa(nd); + remapped = true; + } else { + nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); + if (!nd_pa) { + pr_err("Cannot find %zu bytes in node %d\n", + nd_size, nid); + return; + } + nd = __va(nd_pa); } - nd = __va(nd_pa); /* report and initialize */ - printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]\n", - nd_pa, nd_pa + nd_size - 1); + printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]%s\n", + nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : ""); tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (tnid != nid) + if (!remapped && tnid != nid) printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); node_data[nid] = nd; diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 73a6d73..534255a 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -73,6 +73,167 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, extern unsigned long highend_pfn, highstart_pfn; +#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) + +static void *node_remap_start_vaddr[MAX_NUMNODES]; +void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); + +/* + * Remap memory allocator + */ +static unsigned long node_remap_start_pfn[MAX_NUMNODES]; +static void *node_remap_end_vaddr[MAX_NUMNODES]; +static void *node_remap_alloc_vaddr[MAX_NUMNODES]; + +/** + * alloc_remap - Allocate remapped memory + * @nid: NUMA node to allocate memory from + * @size: The size of allocation + * + * Allocate @size bytes from the remap area of NUMA node @nid. The + * size of the remap area is predetermined by init_alloc_remap() and + * only the callers considered there should call this function. For + * more info, please read the comment on top of init_alloc_remap(). + * + * The caller must be ready to handle allocation failure from this + * function and fall back to regular memory allocator in such cases. + * + * CONTEXT: + * Single CPU early boot context. + * + * RETURNS: + * Pointer to the allocated memory on success, %NULL on failure. 
+ */
+void *alloc_remap(int nid, unsigned long size)
+{
+	void *allocation = node_remap_alloc_vaddr[nid];
+
+	size = ALIGN(size, L1_CACHE_BYTES);
+
+	if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
+		return NULL;
+
+	node_remap_alloc_vaddr[nid] += size;
+	memset(allocation, 0, size);
+
+	return allocation;
+}
+
+#ifdef CONFIG_HIBERNATION
+/**
+ * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+ *                       during resume from hibernation
+ * @pgd_base - temporary resume page directory
+ */
+void resume_map_numa_kva(pgd_t *pgd_base)
+{
+	int node;
+
+	for_each_online_node(node) {
+		unsigned long start_va, start_pfn, nr_pages, pfn;
+
+		start_va = (unsigned long)node_remap_start_vaddr[node];
+		start_pfn = node_remap_start_pfn[node];
+		nr_pages = (node_remap_end_vaddr[node] -
+			    node_remap_start_vaddr[node]) >> PAGE_SHIFT;
+
+		printk(KERN_DEBUG "%s: node %d\n", __func__, node);
+
+		for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
+			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+			pgd_t *pgd = pgd_base + pgd_index(vaddr);
+			pud_t *pud = pud_offset(pgd, vaddr);
+			pmd_t *pmd = pmd_offset(pud, vaddr);
+
+			set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+					     PAGE_KERNEL_LARGE_EXEC));
+
+			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+			       __func__, vaddr, start_pfn + pfn);
+		}
+	}
+}
+#endif
+
+/**
+ * init_alloc_remap - Initialize remap allocator for a NUMA node
+ * @nid: NUMA node to initialize remap allocator for
+ *
+ * NUMA nodes may end up without any lowmem. As allocating pgdat and
+ * memmap on a different node with lowmem is inefficient, a special
+ * remap allocator is implemented which can be used by alloc_remap().
+ *
+ * For each node, the amount of memory which will be necessary for
+ * pgdat and memmap is calculated and two memory areas of the size are
+ * allocated - one in the node and the other in lowmem; then, the area
+ * in the node is remapped to the lowmem area.
+ *
+ * As pgdat and memmap must be allocated in lowmem anyway, this
+ * doesn't waste lowmem address space; however, the actual lowmem
+ * which gets remapped over is wasted. The amount shouldn't be
+ * problematic on machines where this feature will be used.
+ *
+ * Initialization failure isn't fatal. alloc_remap() is used
+ * opportunistically and the callers will fall back to other memory
+ * allocation mechanisms on failure.
+ */
+void __init init_alloc_remap(int nid, u64 start, u64 end)
+{
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = end >> PAGE_SHIFT;
+	unsigned long size, pfn;
+	u64 node_pa, remap_pa;
+	void *remap_va;
+
+	/*
+	 * The acpi/srat node info can show hot-add memory zones where
+	 * memory could be added but not currently present.
+ */ + printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", + nid, start_pfn, end_pfn); + + /* calculate the necessary space aligned to large page size */ + size = node_memmap_size_bytes(nid, start_pfn, end_pfn); + size += ALIGN(sizeof(pg_data_t), PAGE_SIZE); + size = ALIGN(size, LARGE_PAGE_BYTES); + + /* allocate node memory and the lowmem remap area */ + node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); + if (!node_pa) { + pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", + size, nid); + return; + } + memblock_reserve(node_pa, size); + + remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, + max_low_pfn << PAGE_SHIFT, + size, LARGE_PAGE_BYTES); + if (!remap_pa) { + pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", + size, nid); + memblock_free(node_pa, size); + return; + } + memblock_reserve(remap_pa, size); + remap_va = phys_to_virt(remap_pa); + + /* perform actual remap */ + for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE) + set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT), + (node_pa >> PAGE_SHIFT) + pfn, + PAGE_KERNEL_LARGE); + + /* initialize remap allocator parameters */ + node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT; + node_remap_start_vaddr[nid] = remap_va; + node_remap_end_vaddr[nid] = remap_va + size; + node_remap_alloc_vaddr[nid] = remap_va; + + printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n", + nid, node_pa, node_pa + size, remap_va, remap_va + size); +} + void __init initmem_init(void) { x86_numa_init(); diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h index ad86ec9..7178c3a 100644 --- a/arch/x86/mm/numa_internal.h +++ b/arch/x86/mm/numa_internal.h @@ -21,6 +21,12 @@ void __init numa_reset_distance(void); void __init x86_numa_init(void); +#ifdef CONFIG_X86_64 +static inline void init_alloc_remap(int nid, u64 start, u64 end) { } +#else +void __init init_alloc_remap(int nid, u64 start, u64 end); +#endif + #ifdef CONFIG_NUMA_EMU void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt); diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 395b3b4a..e27fbf8 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -58,13 +58,6 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); - /* - * NOTE! For PAE, any changes to the top page-directory-pointer-table - * entries need a full cr3 reload to flush. 
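Taken together, init_alloc_remap() and alloc_remap() above form a one-way bump allocator over the remapped window. A compact userspace model of that behaviour (single node, array-backed, illustrative sizes):

    #include <stdio.h>
    #include <string.h>

    #define L1_CACHE_BYTES 64
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~(unsigned long)((a) - 1))

    static char window[4096];              /* stands in for the remapped area */
    static char *alloc_next = window;
    static char *alloc_end  = window + sizeof(window);

    /* Mirrors alloc_remap(): align, bump, zero; NULL once the window is spent. */
    static void *remap_alloc(unsigned long size)
    {
        void *p = alloc_next;

        size = ALIGN_UP(size, L1_CACHE_BYTES);
        if (alloc_next + size > alloc_end)
            return NULL;

        alloc_next += size;
        memset(p, 0, size);
        return p;
    }

    int main(void)
    {
        printf("pgdat  at offset %ld\n",
               (long)((char *)remap_alloc(1000) - window));
        printf("memmap at offset %ld\n",
               (long)((char *)remap_alloc(3000) - window));
        printf("third allocation %s\n",
               remap_alloc(500) ? "fits" : "falls back to memblock");
        return 0;
    }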
- */ -#ifdef CONFIG_X86_PAE - tlb->need_flush_all = 1; -#endif tlb_remove_page(tlb, virt_to_page(pmd)); } diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 94e7662..56ab749 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -162,9 +162,6 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) struct msi_desc *msidesc; int *v; - if (type == PCI_CAP_ID_MSI && nvec > 1) - return 1; - v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); if (!v) return -ENOMEM; @@ -223,9 +220,6 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) struct msi_desc *msidesc; struct msi_msg msg; - if (type == PCI_CAP_ID_MSI && nvec > 1) - return 1; - list_for_each_entry(msidesc, &dev->msi_list, list) { __read_msi_msg(msidesc, &msg); pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | @@ -269,9 +263,6 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) int ret = 0; struct msi_desc *msidesc; - if (type == PCI_CAP_ID_MSI && nvec > 1) - return 1; - list_for_each_entry(msidesc, &dev->msi_list, list) { struct physdev_map_pirq map_irq; domid_t domid; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index e2cd38f..928bf83 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -85,10 +85,9 @@ int efi_enabled(int facility) } EXPORT_SYMBOL(efi_enabled); -static bool disable_runtime = false; static int __init setup_noefi(char *arg) { - disable_runtime = true; + clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); return 0; } early_param("noefi", setup_noefi); @@ -735,7 +734,7 @@ void __init efi_init(void) if (!efi_is_native()) pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); else { - if (disable_runtime || efi_runtime_init()) + if (efi_runtime_init()) return; set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); } diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 3c68768..120cee1 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include @@ -229,7 +228,6 @@ static void __restore_processor_state(struct saved_context *ctxt) do_fpu_end(); x86_platform.restore_sched_clock_state(); mtrr_bp_restore(); - perf_restore_debug_store(); } /* Needed by apm.c */ diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 7d28c88..74202c1 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -129,6 +129,8 @@ static int resume_physical_mapping_init(pgd_t *pgd_base) } } + resume_map_numa_kva(pgd_base); + return 0; } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 08c6511..e014092 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -67,7 +67,6 @@ #include #include #include -#include #ifdef CONFIG_ACPI #include @@ -1418,14 +1417,7 @@ asmlinkage void __init xen_start_kernel(void) */ acpi_numa = -1; #endif -#ifdef CONFIG_X86_PAT - /* - * For right now disable the PAT. We should remove this once - * git commit 8eaffa67b43e99ae581622c5133e20b0f48bcef1 - * (xen/pat: Disable PAT support for now) is reverted. - */ - pat_enabled = 0; -#endif + /* Don't do the full vcpu_info placement stuff until we have a possible map and a non-dummy shared_info. 
*/ per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; @@ -1589,11 +1581,8 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: xen_vcpu_setup(cpu); - if (xen_have_vector_callback) { + if (xen_have_vector_callback) xen_init_lock_cpu(cpu); - if (xen_feature(XENFEAT_hvm_safe_pvclock)) - xen_setup_timer(cpu); - } break; default: break; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index cab96b6..01de35c 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -2190,7 +2190,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .lazy_mode = { .enter = paravirt_enter_lazy_mmu, .leave = xen_leave_lazy_mmu, - .flush = paravirt_flush_lazy_mmu, }, .set_fixmap = xen_set_fixmap, diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 48d7b2c..34bc4ce 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -658,8 +658,6 @@ static void xen_hvm_cpu_die(unsigned int cpu) unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); - xen_uninit_lock_cpu(cpu); - xen_teardown_timer(cpu); native_cpu_die(cpu); } diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index f7a080e..83e866d 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -328,6 +328,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) if (per_cpu(lock_spinners, cpu) == xl) { ADD_STATS(released_slow_kicked, 1); xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); + break; } } } diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 054cc01..0296a95 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -497,11 +497,7 @@ static void xen_hvm_setup_cpu_clockevents(void) { int cpu = smp_processor_id(); xen_setup_runstate_info(cpu); - /* - * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence - * doing it xen_hvm_cpu_notify (which gets called by smp_init during - * early bootup and also during CPU hotplug events). - */ + xen_setup_timer(cpu); xen_setup_cpu_clockevents(); } diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h index 83e23f4..6f586bd 100644 --- a/arch/xtensa/include/asm/signal.h +++ b/arch/xtensa/include/asm/signal.h @@ -21,7 +21,6 @@ struct sigaction { void (*sa_restorer)(void); sigset_t sa_mask; /* mask last for extensibility */ }; -#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index d57c257..4b7bc8d 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) /* If we're in an interrupt or have no user * context, we must not take the fault.. 
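The fault-path hunks here (x86 and xtensa alike) restore the same early guard: refuse the fault before touching mm when the context cannot sleep. Schematically, with in_atomic() and the mm pointer mocked:

    #include <stdio.h>
    #include <stdbool.h>

    struct mm_struct { int dummy; };

    static bool in_atomic;                 /* mocked preempt-count check */

    static void handle_fault(struct mm_struct *mm)
    {
        /*
         * No user context, or we are in an interrupt/atomic region:
         * taking mmap_sem could sleep, so treat this as a bad area.
         */
        if (in_atomic || !mm) {
            printf("bad_area_nosemaphore()\n");
            return;
        }
        printf("down_read(mmap_sem); handle_mm_fault()\n");
    }

    int main(void)
    {
        struct mm_struct mm;

        handle_fault(&mm);                 /* normal process context */
        in_atomic = true;
        handle_fault(&mm);                 /* atomic context: refuse */
        handle_fault(NULL);                /* kernel thread: no mm */
        return 0;
    }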
*/ - if (!mm || pagefault_disabled()) { + if (in_atomic() || !mm) { bad_page_fault(regs, address, SIGSEGV); return; } diff --git a/block/blk-core.c b/block/blk-core.c index 372ddb3..c973249 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -241,7 +241,7 @@ EXPORT_SYMBOL(blk_delay_queue); **/ void blk_start_queue(struct request_queue *q) { - WARN_ON_NONRT(!irqs_disabled()); + WARN_ON(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); __blk_run_queue(q); @@ -2929,7 +2929,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth, blk_run_queue_async(q); else __blk_run_queue(q); - spin_unlock_irq(q->queue_lock); + spin_unlock(q->queue_lock); } static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) @@ -2977,6 +2977,7 @@ EXPORT_SYMBOL(blk_check_plugged); void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; + unsigned long flags; struct request *rq; LIST_HEAD(list); unsigned int depth; @@ -2997,6 +2998,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) q = NULL; depth = 0; + /* + * Save and disable interrupts here, to avoid doing it for every + * queue lock we have to take. + */ + local_irq_save(flags); while (!list_empty(&list)) { rq = list_entry_rq(list.next); list_del_init(&rq->queuelist); @@ -3009,7 +3015,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) queue_unplugged(q, depth, from_schedule); q = rq->q; depth = 0; - spin_lock_irq(q->queue_lock); + spin_lock(q->queue_lock); } /* @@ -3036,6 +3042,8 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) */ if (q) queue_unplugged(q, depth, from_schedule); + + local_irq_restore(flags); } void blk_finish_plug(struct blk_plug *plug) diff --git a/block/blk-ioc.c b/block/blk-ioc.c index fb21ad5..fab4cdd 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -8,7 +8,6 @@ #include #include /* for max_pfn/max_low_pfn */ #include -#include #include "blk.h" @@ -111,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work) spin_unlock(q->queue_lock); } else { spin_unlock_irqrestore(&ioc->lock, flags); - cpu_chill(); + cpu_relax(); spin_lock_irqsave_nested(&ioc->lock, flags, 1); } } @@ -189,7 +188,7 @@ retry: spin_unlock(icq->q->queue_lock); } else { spin_unlock_irqrestore(&ioc->lock, flags); - cpu_chill(); + cpu_relax(); goto retry; } } diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c index f7ca9b4..58916af 100644 --- a/block/blk-iopoll.c +++ b/block/blk-iopoll.c @@ -38,7 +38,6 @@ void blk_iopoll_sched(struct blk_iopoll *iop) list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll)); __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_restore(flags); - preempt_check_resched_rt(); } EXPORT_SYMBOL(blk_iopoll_sched); @@ -136,7 +135,6 @@ static void blk_iopoll_softirq(struct softirq_action *h) __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_enable(); - preempt_check_resched_rt(); } /** @@ -206,7 +204,6 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self, &__get_cpu_var(blk_cpu_iopoll)); __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_enable(); - preempt_check_resched_rt(); } return NOTIFY_OK; diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 3fe2368..467c8de 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -51,7 +51,6 @@ static void trigger_softirq(void *data) raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_restore(flags); - preempt_check_resched_rt(); } /* @@ -94,7 +93,6 @@ static int __cpuinit blk_cpu_notify(struct 
notifier_block *self, &__get_cpu_var(blk_cpu_done)); raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_enable(); - preempt_check_resched_rt(); } return NOTIFY_OK; @@ -152,7 +150,6 @@ do_local: goto do_local; local_irq_restore(flags); - preempt_check_resched_rt(); } /** diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 6d2f7c0..7881477 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -229,8 +229,6 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ unsigned long val; \ ssize_t ret; \ ret = queue_var_store(&val, page, count); \ - if (ret < 0) \ - return ret; \ if (neg) \ val = !val; \ \ diff --git a/block/genhd.c b/block/genhd.c index 7dcfdd8..3993ebf 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -25,7 +25,7 @@ static DEFINE_MUTEX(block_class_lock); struct kobject *block_depr; /* for extended dynamic devt allocation, currently only one major is used */ -#define NR_EXT_DEVT (1 << MINORBITS) +#define MAX_EXT_DEVT (1 << MINORBITS) /* For extended devt allocation. ext_devt_mutex prevents look up * results from going away underneath its user. @@ -422,18 +422,17 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) do { if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL)) return -ENOMEM; - mutex_lock(&ext_devt_mutex); rc = idr_get_new(&ext_devt_idr, part, &idx); - if (!rc && idx >= NR_EXT_DEVT) { - idr_remove(&ext_devt_idr, idx); - rc = -EBUSY; - } - mutex_unlock(&ext_devt_mutex); } while (rc == -EAGAIN); if (rc) return rc; + if (idx > MAX_EXT_DEVT) { + idr_remove(&ext_devt_idr, idx); + return -EBUSY; + } + *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx)); return 0; } @@ -647,6 +646,7 @@ void del_gendisk(struct gendisk *disk) disk_part_iter_exit(&piter); invalidate_partition(disk, 0); + blk_free_devt(disk_to_dev(disk)->devt); set_capacity(disk, 0); disk->flags &= ~GENHD_FL_UP; @@ -664,7 +664,6 @@ void del_gendisk(struct gendisk *disk) if (!sysfs_deprecated) sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); device_del(disk_to_dev(disk)); - blk_free_devt(disk_to_dev(disk)->devt); } EXPORT_SYMBOL(del_gendisk); diff --git a/block/partition-generic.c b/block/partition-generic.c index 1cb4dec..f1d1451 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno) if (!part) return; + blk_free_devt(part_devt(part)); rcu_assign_pointer(ptbl->part[partno], NULL); rcu_assign_pointer(ptbl->last_lookup, NULL); kobject_put(part->holder_dir); device_del(part_to_dev(part)); - blk_free_devt(part_devt(part)); hd_struct_put(part); } diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 7d4a8d2..533de95 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_blkcipher rblkcipher; - strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type)); - strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "", - sizeof(rblkcipher.geniv)); + snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher"); + snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", + alg->cra_ablkcipher.geniv ?: ""); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; @@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_blkcipher rblkcipher; - strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type)); - strncpy(rblkcipher.geniv, 
alg->cra_ablkcipher.geniv ?: "", - sizeof(rblkcipher.geniv)); + snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher"); + snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", + alg->cra_ablkcipher.geniv ?: ""); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; diff --git a/crypto/aead.c b/crypto/aead.c index 27bc487..0b8121e 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -117,8 +117,9 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) struct crypto_report_aead raead; struct aead_alg *aead = &alg->cra_aead; - strncpy(raead.type, "aead", sizeof(raead.type)); - strncpy(raead.geniv, aead->geniv ?: "", sizeof(raead.geniv)); + snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead"); + snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", + aead->geniv ?: ""); raead.blocksize = alg->cra_blocksize; raead.maxauthsize = aead->maxauthsize; @@ -202,8 +203,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) struct crypto_report_aead raead; struct aead_alg *aead = &alg->cra_aead; - strncpy(raead.type, "nivaead", sizeof(raead.type)); - strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv)); + snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead"); + snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv); raead.blocksize = alg->cra_blocksize; raead.maxauthsize = aead->maxauthsize; diff --git a/crypto/ahash.c b/crypto/ahash.c index 793a27f..3887856 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_hash rhash; - strncpy(rhash.type, "ahash", sizeof(rhash.type)); + snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash"); rhash.blocksize = alg->cra_blocksize; rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize; diff --git a/crypto/algapi.c b/crypto/algapi.c index 3574066..c3b9bfe 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -683,13 +683,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); int crypto_register_notifier(struct notifier_block *nb) { - return srcu_notifier_chain_register(&crypto_chain, nb); + return blocking_notifier_chain_register(&crypto_chain, nb); } EXPORT_SYMBOL_GPL(crypto_register_notifier); int crypto_unregister_notifier(struct notifier_block *nb) { - return srcu_notifier_chain_unregister(&crypto_chain, nb); + return blocking_notifier_chain_unregister(&crypto_chain, nb); } EXPORT_SYMBOL_GPL(crypto_unregister_notifier); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 0262210..ef5356c 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -161,8 +161,6 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock, else if (len < ds) msg->msg_flags |= MSG_TRUNC; - msg->msg_namelen = 0; - lock_sock(sk); if (ctx->more) { ctx->more = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index a1c4f0a..6a6dfc0 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -432,7 +432,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, long copied = 0; lock_sock(sk); - msg->msg_namelen = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; diff --git a/crypto/api.c b/crypto/api.c index 8ff072c..033a714 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list); DECLARE_RWSEM(crypto_alg_sem); EXPORT_SYMBOL_GPL(crypto_alg_sem); -SRCU_NOTIFIER_HEAD(crypto_chain); 
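The crypto hunks in this region swap the SRCU notifier chain back to a plain blocking chain. A self-contained model of the call-chain pattern that crypto_probing_notify() relies on (no locking, simplified NOTIFY_* semantics):

    #include <stdio.h>

    #define NOTIFY_DONE 0x0000
    #define NOTIFY_OK   0x0001

    struct notifier_block {
        int (*call)(unsigned long event, void *data);
        struct notifier_block *next;
    };

    static struct notifier_block *chain;

    static void chain_register(struct notifier_block *nb)
    {
        nb->next = chain;
        chain = nb;
    }

    /* Walk until someone handles the event, as notifier_call_chain() does. */
    static int chain_call(unsigned long event, void *data)
    {
        struct notifier_block *nb;
        int ret = NOTIFY_DONE;

        for (nb = chain; nb; nb = nb->next) {
            ret = nb->call(event, data);
            if (ret != NOTIFY_DONE)
                break;
        }
        return ret;
    }

    static int cryptomgr_cb(unsigned long event, void *data)
    {
        printf("cryptomgr: event %lu for \"%s\"\n", event, (char *)data);
        return NOTIFY_OK;
    }

    int main(void)
    {
        struct notifier_block nb = { cryptomgr_cb, NULL };

        if (chain_call(1, "sha256") == NOTIFY_DONE)
            printf("no listener: would request_module(\"cryptomgr\") and retry\n");
        chain_register(&nb);
        chain_call(1, "sha256");    /* handled this time */
        return 0;
    }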
+BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) @@ -237,10 +237,10 @@ int crypto_probing_notify(unsigned long val, void *v) { int ok; - ok = srcu_notifier_call_chain(&crypto_chain, val, v); + ok = blocking_notifier_call_chain(&crypto_chain, val, v); if (ok == NOTIFY_DONE) { request_module("cryptomgr"); - ok = srcu_notifier_call_chain(&crypto_chain, val, v); + ok = blocking_notifier_call_chain(&crypto_chain, val, v); } return ok; diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index c44e014..a8d85a1 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_blkcipher rblkcipher; - strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type)); - strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "", - sizeof(rblkcipher.geniv)); + snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher"); + snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", + alg->cra_blkcipher.geniv ?: ""); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize; diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index f6d9baf..35d700a 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -75,7 +75,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_cipher rcipher; - strncpy(rcipher.type, "cipher", sizeof(rcipher.type)); + snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher"); rcipher.blocksize = alg->cra_blocksize; rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; @@ -94,7 +94,8 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rcomp; - strncpy(rcomp.type, "compression", sizeof(rcomp.type)); + snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression"); + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(struct crypto_report_comp), &rcomp)) goto nla_put_failure; @@ -107,14 +108,12 @@ nla_put_failure: static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { - strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); - strncpy(ualg->cru_driver_name, alg->cra_driver_name, - sizeof(ualg->cru_driver_name)); - strncpy(ualg->cru_module_name, module_name(alg->cra_module), - sizeof(ualg->cru_module_name)); - - ualg->cru_type = 0; - ualg->cru_mask = 0; + memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name)); + memcpy(&ualg->cru_driver_name, &alg->cra_driver_name, + sizeof(ualg->cru_driver_name)); + memcpy(&ualg->cru_module_name, module_name(alg->cra_module), + CRYPTO_MAX_ALG_NAME); + ualg->cru_flags = alg->cra_flags; ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); @@ -123,7 +122,8 @@ static int crypto_report_one(struct crypto_alg *alg, if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; - strncpy(rl.type, "larval", sizeof(rl.type)); + snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval"); + if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(struct crypto_report_larval), &rl)) goto nla_put_failure; diff --git a/crypto/gcm.c b/crypto/gcm.c index b97b186..1a25263 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -44,7 +44,6 @@ struct crypto_rfc4543_ctx { struct crypto_rfc4543_req_ctx { u8 auth_tag[16]; - u8 assocbuf[32]; struct scatterlist cipher[1]; struct scatterlist payload[2]; struct scatterlist assoc[2]; @@ -1143,19 +1142,9 @@ static 
struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); - if (req->assoc->length == req->assoclen) { - sg_init_table(assoc, 2); - sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, - req->assoc->offset); - } else { - BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); - - scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, - req->assoclen, 0); - - sg_init_table(assoc, 2); - sg_set_buf(assoc, rctx->assocbuf, req->assoclen); - } + sg_init_table(assoc, 2); + sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, + req->assoc->offset); scatterwalk_crypto_chain(assoc, payload, 0, 2); aead_request_set_tfm(subreq, ctx->child); diff --git a/crypto/internal.h b/crypto/internal.h index 8cbe3dc..9ebedae 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -48,7 +48,7 @@ struct crypto_larval { extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; -extern struct srcu_notifier_head crypto_chain; +extern struct blocking_notifier_head crypto_chain; #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); @@ -136,7 +136,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg) static inline void crypto_notify(unsigned long val, void *v) { - srcu_notifier_call_chain(&crypto_chain, val, v); + blocking_notifier_call_chain(&crypto_chain, val, v); } #endif /* _CRYPTO_INTERNAL_H */ diff --git a/crypto/pcompress.c b/crypto/pcompress.c index 7140fe7..04e083f 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c @@ -53,7 +53,8 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rpcomp; - strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type)); + snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp"); + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(struct crypto_report_comp), &rpcomp)) goto nla_put_failure; diff --git a/crypto/rng.c b/crypto/rng.c index e0a25c2..f3b7894 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_rng rrng; - strncpy(rrng.type, "rng", sizeof(rrng.type)); + snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng"); rrng.seedsize = alg->cra_rng.seedsize; diff --git a/crypto/shash.c b/crypto/shash.c index 929058a..f426330f 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -530,8 +530,7 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) struct crypto_report_hash rhash; struct shash_alg *salg = __crypto_shash_alg(alg); - strncpy(rhash.type, "shash", sizeof(rhash.type)); - + snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash"); rhash.blocksize = alg->cra_blocksize; rhash.digestsize = salg->digestsize; diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index f5ae996..38c5078 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -268,8 +268,7 @@ config ACPI_CUSTOM_DSDT default ACPI_CUSTOM_DSDT_FILE != "" config ACPI_INITRD_TABLE_OVERRIDE - bool "ACPI tables override via initrd" - depends on BLK_DEV_INITRD && X86 + bool "ACPI tables can be passed via uncompressed cpio in initrd" default n help This option provides functionality to override arbitrary ACPI tables diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index e9e486f..35da181 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -95,31 +95,40 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle) return ret; } 
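The crypto *_report hunks above trade strncpy() for snprintf() on fixed-size report fields. The practical difference, in a quick standalone comparison:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char a[8], b[8];

        /* strncpy: zero-pads, but does NOT terminate when src >= size. */
        strncpy(a, "blkcipher", sizeof(a));      /* a has no trailing '\0' */

        /* snprintf: always terminates, truncating if needed. */
        snprintf(b, sizeof(b), "%s", "blkcipher");

        printf("snprintf -> \"%s\" (terminated)\n", b);
        printf("strncpy  -> first %zu bytes: \"%.*s\" (unterminated)\n",
               sizeof(a), (int)sizeof(a), a);
        return 0;
    }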
-static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
-				       void *addr_p, void **ret_p)
+/* Get device's handle per its address under its parent */
+struct acpi_find_child {
+	acpi_handle handle;
+	u64 address;
+};
+
+static acpi_status
+do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
-	unsigned long long addr;
 	acpi_status status;
-
-	status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
-	if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
-		*ret_p = handle;
-		return AE_CTRL_TERMINATE;
+	struct acpi_device_info *info;
+	struct acpi_find_child *find = context;
+
+	status = acpi_get_object_info(handle, &info);
+	if (ACPI_SUCCESS(status)) {
+		if ((info->address == find->address)
+		    && (info->valid & ACPI_VALID_ADR))
+			find->handle = handle;
+		kfree(info);
 	}
 	return AE_OK;
 }

 acpi_handle acpi_get_child(acpi_handle parent, u64 address)
 {
-	void *ret = NULL;
+	struct acpi_find_child find = { NULL, address };

 	if (!parent)
 		return NULL;
-
-	acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
-			    do_acpi_find_child, &address, &ret);
-	return (acpi_handle)ret;
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, parent,
+			    1, do_acpi_find_child, NULL, &find, NULL);
+	return find.handle;
 }
+
 EXPORT_SYMBOL(acpi_get_child);

 static int acpi_bind_one(struct device *dev, acpi_handle handle)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 2999966..bd22f86 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -642,7 +642,7 @@ void __init acpi_initrd_override(void *data, size_t size)
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * work fine.
	 */
-	memblock_reserve(acpi_tables_addr, all_tables_size);
+	memblock_reserve(acpi_tables_addr, acpi_tables_addr + all_tables_size);
 	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

 	p = early_ioremap(acpi_tables_addr, all_tables_size);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 77c9a92..7928d4d 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -240,8 +240,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
 		*control &= OSC_PCI_CONTROL_MASKS;
 		capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
 	} else {
-		/* Run _OSC query only with existing controls. */
-		capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+		/* Run _OSC query for all possible controls. */
+		capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
 	}

 	status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
@@ -454,6 +454,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	acpi_handle handle;
 	struct acpi_device *child;
 	u32 flags, base_flags;
+	bool is_osc_granted = false;

 	root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
 	if (!root)
@@ -524,6 +525,60 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
 	acpi_pci_osc_support(root, flags);

+	/* Indicate support for various _OSC capabilities.
*/ + if (pci_ext_cfg_avail()) + flags |= OSC_EXT_PCI_CONFIG_SUPPORT; + if (pcie_aspm_support_enabled()) { + flags |= OSC_ACTIVE_STATE_PWR_SUPPORT | + OSC_CLOCK_PWR_CAPABILITY_SUPPORT; + } + if (pci_msi_enabled()) + flags |= OSC_MSI_SUPPORT; + if (flags != base_flags) { + status = acpi_pci_osc_support(root, flags); + if (ACPI_FAILURE(status)) { + dev_info(&device->dev, "ACPI _OSC support " + "notification failed, disabling PCIe ASPM\n"); + pcie_no_aspm(); + flags = base_flags; + } + } + if (!pcie_ports_disabled + && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { + flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL + | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL + | OSC_PCI_EXPRESS_PME_CONTROL; + + if (pci_aer_available()) { + if (aer_acpi_firmware_first()) + dev_dbg(&device->dev, + "PCIe errors handled by BIOS.\n"); + else + flags |= OSC_PCI_EXPRESS_AER_CONTROL; + } + + dev_info(&device->dev, + "Requesting ACPI _OSC control (0x%02x)\n", flags); + + status = acpi_pci_osc_control_set(device->handle, &flags, + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); + if (ACPI_SUCCESS(status)) { + is_osc_granted = true; + dev_info(&device->dev, + "ACPI _OSC control (0x%02x) granted\n", flags); + } else { + is_osc_granted = false; + dev_info(&device->dev, + "ACPI _OSC request failed (%s), " + "returned control mask: 0x%02x\n", + acpi_format_exception(status), flags); + } + } else { + dev_info(&device->dev, + "Unable to request _OSC control " + "(_OSC support mask: 0x%02x)\n", flags); + } + /* * TBD: Need PCI interface for enumeration/configuration of roots. */ @@ -563,66 +618,14 @@ static int acpi_pci_root_add(struct acpi_device *device) list_for_each_entry(child, &device->children, node) acpi_pci_bridge_scan(child); - /* Indicate support for various _OSC capabilities. */ - if (pci_ext_cfg_avail()) - flags |= OSC_EXT_PCI_CONFIG_SUPPORT; - if (pcie_aspm_support_enabled()) - flags |= OSC_ACTIVE_STATE_PWR_SUPPORT | - OSC_CLOCK_PWR_CAPABILITY_SUPPORT; - if (pci_msi_enabled()) - flags |= OSC_MSI_SUPPORT; - if (flags != base_flags) { - status = acpi_pci_osc_support(root, flags); - if (ACPI_FAILURE(status)) { - dev_info(root->bus->bridge, "ACPI _OSC support " - "notification failed, disabling PCIe ASPM\n"); - pcie_no_aspm(); - flags = base_flags; - } - } - - if (!pcie_ports_disabled - && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { - flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL - | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL - | OSC_PCI_EXPRESS_PME_CONTROL; - - if (pci_aer_available()) { - if (aer_acpi_firmware_first()) - dev_dbg(root->bus->bridge, - "PCIe errors handled by BIOS.\n"); - else - flags |= OSC_PCI_EXPRESS_AER_CONTROL; - } - - dev_info(root->bus->bridge, - "Requesting ACPI _OSC control (0x%02x)\n", flags); - - status = acpi_pci_osc_control_set(device->handle, &flags, - OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); - if (ACPI_SUCCESS(status)) { - dev_info(root->bus->bridge, - "ACPI _OSC control (0x%02x) granted\n", flags); - if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { - /* - * We have ASPM control, but the FADT indicates - * that it's unsupported. Clear it. 
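The pci_root.c churn above relocates the two-step _OSC handshake: advertise a support mask first, then request control bits and fall back (e.g. disabling ASPM) if the request is refused. Reduced to a sketch, with illustrative bit values rather than the ACPI-defined masks:

    #include <stdio.h>
    #include <stdbool.h>

    #define OSC_ASPM_SUPPORT (1 << 0)   /* illustrative bit layout */
    #define OSC_MSI_SUPPORT  (1 << 1)
    #define OSC_HP_CONTROL   (1 << 2)
    #define OSC_PME_CONTROL  (1 << 3)
    #define OSC_AER_CONTROL  (1 << 4)

    /* Stand-in for firmware: here it grants everything except AER. */
    static bool osc_request(unsigned int *control)
    {
        *control &= ~OSC_AER_CONTROL;
        return true;
    }

    int main(void)
    {
        unsigned int support = OSC_ASPM_SUPPORT | OSC_MSI_SUPPORT;
        unsigned int control = OSC_HP_CONTROL | OSC_PME_CONTROL |
                               OSC_AER_CONTROL;

        printf("advertising support mask 0x%02x\n", support);
        if (osc_request(&control))
            printf("granted control mask 0x%02x\n", control);
        else
            printf("_OSC refused: disabling ASPM\n");
        return 0;
    }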
- */ - pcie_clear_aspm(root->bus); - } - } else { - dev_info(root->bus->bridge, - "ACPI _OSC request failed (%s), " - "returned control mask: 0x%02x\n", - acpi_format_exception(status), flags); - pr_info("ACPI _OSC control for PCIe not granted, " - "disabling ASPM\n"); - pcie_no_aspm(); - } + /* ASPM setting */ + if (is_osc_granted) { + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) + pcie_clear_aspm(root->bus); } else { - dev_info(root->bus->bridge, - "Unable to request _OSC control " - "(_OSC support mask: 0x%02x)\n", flags); + pr_info("ACPI _OSC control for PCIe not granted, " + "disabling ASPM\n"); + pcie_no_aspm(); } pci_acpi_add_bus_pm_notifier(device, root->bus); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index df85051..2fcc67d 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -177,14 +177,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, { .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-FW41E_H", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"), - }, - }, - { - .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW21E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 25246e8..506fbd4 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -719,19 +719,9 @@ static int thermal_get_trend(struct thermal_zone_device *thermal, return -EINVAL; if (type == THERMAL_TRIP_ACTIVE) { - unsigned long trip_temp; - unsigned long temp = KELVIN_TO_MILLICELSIUS(tz->temperature, - tz->kelvin_offset); - if (thermal_get_trip_temp(thermal, trip, &trip_temp)) - return -EINVAL; - - if (temp > trip_temp) { - *trend = THERMAL_TREND_RAISING; - return 0; - } else { - /* Fall back on default trend */ - return -EINVAL; - } + /* aggressive active cooling */ + *trend = THERMAL_TREND_RAISING; + return 0; } /* diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 72e3e12..4979127 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -265,30 +265,6 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */ - { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */ - { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */ - { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */ - { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */ - { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ - { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ - { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ - { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* 
Wellsburg RAID */ - { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */ - { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */ - { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */ - { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */ - { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 78283bb..174eca6 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -150,7 +150,6 @@ enum piix_controller_ids { tolapai_sata, piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ ich8_sata_snb, - ich8_2port_sata_snb, }; struct piix_map_db { @@ -305,7 +304,7 @@ static const struct pci_device_id piix_pci_tbl[] = { /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Lynx Point) */ - { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, + { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point-LP) */ @@ -318,23 +317,6 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (DH89xxCC) */ { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, - /* SATA Controller IDE (Avoton) */ - { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, - /* SATA Controller IDE (Avoton) */ - { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, - /* SATA Controller IDE (Avoton) */ - { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, - /* SATA Controller IDE (Avoton) */ - { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, - /* SATA Controller IDE (Wellsburg) */ - { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, - /* SATA Controller IDE (Wellsburg) */ - { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, - /* SATA Controller IDE (Wellsburg) */ - { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, - /* SATA Controller IDE (Wellsburg) */ - { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, - { } /* terminate list */ }; @@ -440,7 +422,6 @@ static const struct piix_map_db *piix_map_db_table[] = { [ich8m_apple_sata] = &ich8m_apple_map_db, [tolapai_sata] = &tolapai_map_db, [ich8_sata_snb] = &ich8_map_db, - [ich8_2port_sata_snb] = &ich8_2port_map_db, }; static struct pci_bits piix_enable_bits[] = { @@ -1244,16 +1225,6 @@ static struct ata_port_info piix_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &piix_sata_ops, }, - - [ich8_2port_sata_snb] = - { - .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR - | PIIX_FLAG_PIO16, - .pio_mask = ATA_PIO4, - .mwdma_mask = ATA_MWDMA2, - .udma_mask = ATA_UDMA6, - .port_ops = &piix_sata_ops, - }, }; #define AHCI_PCI_BAR 5 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index cc8aa9e..ef01ac0 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -60,8 +60,7 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap) if (ap->flags & ATA_FLAG_ACPI_SATA) return NULL; - return ap->scsi_host ? 
- DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev) : NULL; + return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no); } EXPORT_SYMBOL(ata_ap_acpi_handle); @@ -240,15 +239,28 @@ void ata_acpi_dissociate(struct ata_host *host) } } -static int __ata_acpi_gtm(struct ata_port *ap, acpi_handle handle, - struct ata_acpi_gtm *gtm) +/** + * ata_acpi_gtm - execute _GTM + * @ap: target ATA port + * @gtm: out parameter for _GTM result + * + * Evaluate _GTM and store the result in @gtm. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure. + */ +int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm) { struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER }; union acpi_object *out_obj; acpi_status status; int rc = 0; - status = acpi_evaluate_object(handle, "_GTM", NULL, &output); + status = acpi_evaluate_object(ata_ap_acpi_handle(ap), "_GTM", NULL, + &output); rc = -ENOENT; if (status == AE_NOT_FOUND) @@ -282,27 +294,6 @@ static int __ata_acpi_gtm(struct ata_port *ap, acpi_handle handle, return rc; } -/** - * ata_acpi_gtm - execute _GTM - * @ap: target ATA port - * @gtm: out parameter for _GTM result - * - * Evaluate _GTM and store the result in @gtm. - * - * LOCKING: - * EH context. - * - * RETURNS: - * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure. - */ -int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm) -{ - if (ata_ap_acpi_handle(ap)) - return __ata_acpi_gtm(ap, ata_ap_acpi_handle(ap), gtm); - else - return -EINVAL; -} - EXPORT_SYMBOL_GPL(ata_acpi_gtm); /** @@ -1104,7 +1095,7 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle) if (!*handle) return -ENODEV; - if (__ata_acpi_gtm(ap, *handle, &ap->__acpi_init_gtm) == 0) + if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; return 0; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 501c209..46cd3f4 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev) * from SATA Settings page of Identify Device Data Log. 
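For context on the sata_setting change above: the reverted line reads the SATA Settings log page into a 512-byte array on the kernel stack, while the newer code borrowed the port-owned ap->sector_buf. Log-page reads end up as DMA into that buffer, and DMA into on-stack memory is fragile (alignment and cache-line sharing), which is presumably why the per-port buffer was preferred. A hedged sketch of the same idea with a heap buffer; read_sata_settings_page() is a stand-in, not a real libata call.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ata.h>

int read_sata_settings_page(void *buf);	/* hypothetical helper */

static int read_devslp_timing(u8 *out, size_t len)
{
	u8 *buf;
	int rc;

	/* kmalloc'ed memory is safe to hand to a DMA engine; a stack
	 * array, as in the reverted hunk, may not be. */
	buf = kmalloc(ATA_SECT_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = read_sata_settings_page(buf);
	if (!rc)
		memcpy(out, buf, min_t(size_t, len, ATA_SECT_SIZE));

	kfree(buf);
	return rc;
}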
*/ if (ata_id_has_devslp(dev->id)) { - u8 *sata_setting = ap->sector_buf; + u8 sata_setting[ATA_SECT_SIZE]; int i, j; dev->flags |= ATA_DFLAG_DEVSLP; @@ -2437,9 +2437,6 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); - if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) - dev->max_sectors = ATA_MAX_SECTORS_LBA48; - if (ap->ops->dev_config) ap->ops->dev_config(dev); @@ -4101,7 +4098,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, - { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, /* Devices we expect to fail diagnostics */ diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index ad3130d..d8af325 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned long flags; unsigned int consumed; - local_irq_save_nort(flags); + local_irq_save(flags); consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); - local_irq_restore_nort(flags); + local_irq_restore(flags); return consumed; } @@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) unsigned long flags; /* FIXME: use a bounce buffer */ - local_irq_save_nort(flags); + local_irq_save(flags); buf = kmap_atomic(page); /* do the actual data transfer */ @@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); kunmap_atomic(buf); - local_irq_restore_nort(flags); + local_irq_restore(flags); } else { buf = page_address(page); ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, @@ -864,7 +864,7 @@ next_sg: unsigned long flags; /* FIXME: use bounce buffer */ - local_irq_save_nort(flags); + local_irq_save(flags); buf = kmap_atomic(page); /* do the actual data transfer */ @@ -872,7 +872,7 @@ next_sg: count, rw); kunmap_atomic(buf); - local_irq_restore_nort(flags); + local_irq_restore(flags); } else { buf = page_address(page); consumed = ap->ops->sff_data_xfer(dev, buf + offset, diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index b1a664a..5dba77c 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -251,7 +251,7 @@ static const struct ata_port_info ahci_highbank_port_info = { }; static struct scsi_host_template ahci_highbank_platform_sht = { - AHCI_SHT("sata_highbank"), + AHCI_SHT("highbank-ahci"), }; static const struct of_device_id ahci_of_match[] = { diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 6856303..24eb078 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -290,7 +290,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start, struct device *dev; int error = 0; - if (!bus || !bus->p) + if (!bus) return -EINVAL; klist_iter_init_node(&bus->p->klist_devices, &i, @@ -324,7 +324,7 @@ struct device *bus_find_device(struct bus_type *bus, struct klist_iter i; struct device *dev; - if (!bus || !bus->p) + if (!bus) return NULL; klist_iter_init_node(&bus->p->klist_devices, &i, diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 61d3e1b..e3bbed8 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -172,8 +172,6 @@ static int deferred_probe_initcall(void) driver_deferred_probe_enable = true; driver_deferred_probe_trigger(); - /* Sort as many dependencies as possible before exiting initcalls */ - flush_workqueue(deferred_wq); 
return 0; } late_initcall(deferred_probe_initcall); diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 79f4fca..e6732cf 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, base = 0; if (max < rbnode->base_reg + rbnode->blklen) - end = max - rbnode->base_reg + 1; + end = rbnode->base_reg + rbnode->blklen - max; else end = rbnode->blklen; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index ab3a020..f00b059 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -662,12 +662,12 @@ struct regmap *regmap_init(struct device *dev, } } - regmap_debugfs_init(map, config->name); - ret = regcache_init(map, config); if (ret != 0) goto err_range; + regmap_debugfs_init(map, config->name); + /* Add a devres resource for dev_get_regmap() */ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); if (!m) { diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 92b6d7c..25ef5c0 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -51,9 +51,8 @@ new_skb(ulong len) { struct sk_buff *skb; - skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC); + skb = alloc_skb(len, GFP_ATOMIC); if (skb) { - skb_reserve(skb, MAX_HEADER); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f74f2c0..ae12512 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -917,11 +917,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, lo->lo_flags |= LO_FLAGS_PARTSCAN; if (lo->lo_flags & LO_FLAGS_PARTSCAN) ioctl_by_bdev(bdev, BLKRRPART, 0); - - /* Grab the block_device to prevent its destruction after we - * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). - */ - bdgrab(bdev); return 0; out_clr: @@ -1031,10 +1026,8 @@ static int loop_clr_fd(struct loop_device *lo) memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); - if (bdev) { - bdput(bdev); + if (bdev) invalidate_bdev(bdev); - } set_capacity(lo->lo_disk, 0); loop_sysfs_exit(lo); if (bdev) { @@ -1292,9 +1285,11 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev) /* the width of sector_t may be narrow for bit-shift */ sz = sec; sz <<= 9; + mutex_lock(&bdev->bd_mutex); bd_set_size(bdev, sz); /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + mutex_unlock(&bdev->bd_mutex); out: return err; @@ -1863,15 +1858,11 @@ static int __init loop_init(void) max_part = (1UL << part_shift) - 1; } - if ((1UL << part_shift) > DISK_MAX_PARTS) { - err = -EINVAL; - goto misc_out; - } + if ((1UL << part_shift) > DISK_MAX_PARTS) + return -EINVAL; - if (max_loop > 1UL << (MINORBITS - part_shift)) { - err = -EINVAL; - goto misc_out; - } + if (max_loop > 1UL << (MINORBITS - part_shift)) + return -EINVAL; /* * If max_loop is specified, create that many devices upfront. 
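The loop_init() hunks above and below swap centralized error handling (goto misc_out, which undoes misc_register()) for bare early returns. The unwind idiom the pre-revert code used looks like this in miniature; setup_a(), setup_b() and teardown_a() are placeholders.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

int setup_a(void);		/* placeholder */
int setup_b(void);		/* placeholder */
void teardown_a(void);		/* undoes setup_a() */

static int __init demo_init(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;	/* nothing to unwind yet */

	err = setup_b();
	if (err)
		goto undo_a;	/* unwind in reverse order */

	return 0;

undo_a:
	teardown_a();
	return err;
}
module_init(demo_init);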
@@ -1889,10 +1880,8 @@ static int __init loop_init(void) range = 1UL << MINORBITS; } - if (register_blkdev(LOOP_MAJOR, "loop")) { - err = -EIO; - goto misc_out; - } + if (register_blkdev(LOOP_MAJOR, "loop")) + return -EIO; blk_register_region(MKDEV(LOOP_MAJOR, 0), range, THIS_MODULE, loop_probe, NULL, NULL); @@ -1905,10 +1894,6 @@ static int __init loop_init(void) printk(KERN_INFO "loop: module loaded\n"); return 0; - -misc_out: - misc_deregister(&loop_misc); - return err; } static int loop_exit_cb(int id, void *ptr, void *data) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index eb591fb..043ddcc 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -595,20 +595,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, struct request sreq; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); - if (!nbd->sock) - return -EINVAL; - mutex_unlock(&nbd->tx_lock); - fsync_bdev(bdev); - mutex_lock(&nbd->tx_lock); blk_rq_init(NULL, &sreq); sreq.cmd_type = REQ_TYPE_SPECIAL; nbd_cmd(&sreq) = NBD_CMD_DISC; - - /* Check again after getting mutex back. */ if (!nbd->sock) return -EINVAL; - nbd_send_req(nbd, &sreq); return 0; } @@ -622,7 +614,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd_clear_que(nbd); BUG_ON(!list_empty(&nbd->queue_head)); BUG_ON(!list_empty(&nbd->waiting_queue)); - kill_bdev(bdev); if (file) fput(file); return 0; @@ -711,7 +702,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd->file = NULL; nbd_clear_que(nbd); dev_warn(disk_to_dev(nbd->disk), "queue cleared\n"); - kill_bdev(bdev); queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); if (file) fput(file); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index ef6d9be..5ac841f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -46,7 +46,6 @@ #include #include #include -#include #include "common.h" /* @@ -240,7 +239,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); BUG_ON(ret); - free_xenballooned_pages(segs_to_unmap, pages); segs_to_unmap = 0; } @@ -529,8 +527,8 @@ static int xen_blkbk_map(struct blkif_request *req, GFP_KERNEL); if (!persistent_gnt) return -ENOMEM; - if (alloc_xenballooned_pages(1, &persistent_gnt->page, - false)) { + persistent_gnt->page = alloc_page(GFP_KERNEL); + if (!persistent_gnt->page) { kfree(persistent_gnt); return -ENOMEM; } @@ -679,16 +677,6 @@ static int dispatch_discard_io(struct xen_blkif *blkif, return err; } -static int dispatch_other_io(struct xen_blkif *blkif, - struct blkif_request *req, - struct pending_req *pending_req) -{ - free_req(pending_req); - make_response(blkif, req->u.other.id, req->operation, - BLKIF_RSP_EOPNOTSUPP); - return -EIO; -} - static void xen_blk_drain_io(struct xen_blkif *blkif) { atomic_set(&blkif->drain, 1); @@ -810,30 +798,17 @@ __do_block_io_op(struct xen_blkif *blkif) /* Apply all sanity checks to /private copy/ of request. 
*/ barrier(); - - switch (req.operation) { - case BLKIF_OP_READ: - case BLKIF_OP_WRITE: - case BLKIF_OP_WRITE_BARRIER: - case BLKIF_OP_FLUSH_DISKCACHE: - if (dispatch_rw_block_io(blkif, &req, pending_req)) - goto done; - break; - case BLKIF_OP_DISCARD: + if (unlikely(req.operation == BLKIF_OP_DISCARD)) { free_req(pending_req); if (dispatch_discard_io(blkif, &req)) - goto done; + break; + } else if (dispatch_rw_block_io(blkif, &req, pending_req)) break; - default: - if (dispatch_other_io(blkif, &req, pending_req)) - goto done; - break; - } /* Yield point for this unbounded loop. */ cond_resched(); } -done: + return more_to_do; } @@ -1001,7 +976,13 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, bio->bi_end_io = end_block_io_op; } + /* + * We set it one so that the last submit_bio does not have to call + * atomic_inc. + */ atomic_set(&pending_req->pendcnt, nbio); + + /* Get a reference count for the disk queue and start sending I/O */ blk_start_plug(&plug); for (i = 0; i < nbio; i++) @@ -1029,7 +1010,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, fail_put_bio: for (i = 0; i < nbio; i++) bio_put(biolist[i]); - atomic_set(&pending_req->pendcnt, 1); __end_block_io_op(pending_req, -EINVAL); msleep(1); /* back off a bit */ return -EIO; diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 195278a..6072390 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -77,18 +77,11 @@ struct blkif_x86_32_request_discard { uint64_t nr_sectors; } __attribute__((__packed__)); -struct blkif_x86_32_request_other { - uint8_t _pad1; - blkif_vdev_t _pad2; - uint64_t id; /* private guest value, echoed in resp */ -} __attribute__((__packed__)); - struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_x86_32_request_rw rw; struct blkif_x86_32_request_discard discard; - struct blkif_x86_32_request_other other; } u; } __attribute__((__packed__)); @@ -120,19 +113,11 @@ struct blkif_x86_64_request_discard { uint64_t nr_sectors; } __attribute__((__packed__)); -struct blkif_x86_64_request_other { - uint8_t _pad1; - blkif_vdev_t _pad2; - uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */ - uint64_t id; /* private guest value, echoed in resp */ -} __attribute__((__packed__)); - struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_x86_64_request_rw rw; struct blkif_x86_64_request_discard discard; - struct blkif_x86_64_request_other other; } u; } __attribute__((__packed__)); @@ -293,11 +278,6 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, dst->u.discard.nr_sectors = src->u.discard.nr_sectors; break; default: - /* - * Don't know how to translate this op. Only get the - * ID so failure can be reported to the frontend. - */ - dst->u.other.id = src->u.other.id; break; } } @@ -329,11 +309,6 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, dst->u.discard.nr_sectors = src->u.discard.nr_sectors; break; default: - /* - * Don't know how to translate this op. Only get the - * ID so failure can be reported to the frontend. 
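The default-branch comment being deleted above states the contract: when the backend cannot translate an unknown operation from the guest's 32/64-bit ABI structs, it still copies the request id so a BLKIF_RSP_EOPNOTSUPP response can be matched by the frontend. A toy version of that translate-or-fail-gracefully shape, with shortened stand-in structs rather than the real ABI layouts.

#include <linux/types.h>

/* Toy stand-ins for the guest and native request layouts. */
struct guest_req { u8 op; u64 id; };
struct native_req { u8 op; u64 id; };

static void get_guest_req(struct native_req *dst, const struct guest_req *src)
{
	dst->op = src->op;
	switch (src->op) {
	case 0: /* read */
	case 1: /* write */
		dst->id = src->id;
		/* ... plus the rest of the rw layout ... */
		break;
	default:
		/* Unknown op: copy only the id so the failure can be
		 * reported back to the frontend. */
		dst->id = src->id;
		break;
	}
}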
- */ - dst->u.other.id = src->u.other.id; break; } } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 5e237f6..6398072 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -367,7 +367,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev) be->blkif = NULL; } - kfree(be->mode); kfree(be); dev_set_drvdata(&dev->dev, NULL); return 0; @@ -503,7 +502,6 @@ static void backend_changed(struct xenbus_watch *watch, = container_of(watch, struct backend_info, backend_watch); struct xenbus_device *dev = be->dev; int cdrom = 0; - unsigned long handle; char *device_type; DPRINTK(""); @@ -523,10 +521,10 @@ static void backend_changed(struct xenbus_watch *watch, return; } - if (be->major | be->minor) { - if (be->major != major || be->minor != minor) - pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", - be->major, be->minor, major, minor); + if ((be->major || be->minor) && + ((be->major != major) || (be->minor != minor))) { + pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", + be->major, be->minor, major, minor); return; } @@ -544,33 +542,36 @@ static void backend_changed(struct xenbus_watch *watch, kfree(device_type); } - /* Front end dir is a number, which is used as the handle. */ - err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); - if (err) - return; + if (be->major == 0 && be->minor == 0) { + /* Front end dir is a number, which is used as the handle. */ - be->major = major; - be->minor = minor; + char *p = strrchr(dev->otherend, '/') + 1; + long handle; + err = strict_strtoul(p, 0, &handle); + if (err) + return; - err = xen_vbd_create(be->blkif, handle, major, minor, - !strchr(be->mode, 'w'), cdrom); + be->major = major; + be->minor = minor; + + err = xen_vbd_create(be->blkif, handle, major, minor, + (NULL == strchr(be->mode, 'w')), cdrom); + if (err) { + be->major = 0; + be->minor = 0; + xenbus_dev_fatal(dev, err, "creating vbd structure"); + return; + } - if (err) - xenbus_dev_fatal(dev, err, "creating vbd structure"); - else { err = xenvbd_sysfs_addif(dev); if (err) { xen_vbd_free(&be->blkif->vbd); + be->major = 0; + be->minor = 0; xenbus_dev_fatal(dev, err, "creating sysfs entries"); + return; } - } - if (err) { - kfree(be->mode); - be->mode = NULL; - be->major = 0; - be->minor = 0; - } else { /* We're potentially connected now */ xen_update_blkif_status(be->blkif); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 2e39eaf..11043c1 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -44,7 +44,7 @@ #include #include #include -#include +#include #include #include @@ -68,7 +68,7 @@ enum blkif_state { struct grant { grant_ref_t gref; unsigned long pfn; - struct list_head node; + struct llist_node node; }; struct blk_shadow { @@ -105,7 +105,7 @@ struct blkfront_info struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; - struct list_head persistent_gnts; + struct llist_head persistent_gnts; unsigned int persistent_gnts_c; unsigned long shadow_free; unsigned int feature_flush; @@ -371,11 +371,10 @@ static int blkif_queue_request(struct request *req) lsect = fsect + (sg->length >> 9) - 1; if (info->persistent_gnts_c) { - BUG_ON(list_empty(&info->persistent_gnts)); - gnt_list_entry = list_first_entry( - &info->persistent_gnts, - struct grant, node); - list_del(&gnt_list_entry->node); + BUG_ON(llist_empty(&info->persistent_gnts)); + 
gnt_list_entry = llist_entry( + llist_del_first(&info->persistent_gnts), + struct grant, node); ref = gnt_list_entry->gref; buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); @@ -791,8 +790,9 @@ static void blkif_restart_queue(struct work_struct *work) static void blkif_free(struct blkfront_info *info, int suspend) { + struct llist_node *all_gnts; struct grant *persistent_gnt; - struct grant *n; + struct llist_node *n; /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&info->io_lock); @@ -804,15 +804,13 @@ static void blkif_free(struct blkfront_info *info, int suspend) /* Remove all persistent grants */ if (info->persistent_gnts_c) { - list_for_each_entry_safe(persistent_gnt, n, - &info->persistent_gnts, node) { - list_del(&persistent_gnt->node); + all_gnts = llist_del_all(&info->persistent_gnts); + llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) { gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); - info->persistent_gnts_c--; } - BUG_ON(info->persistent_gnts_c != 0); + info->persistent_gnts_c = 0; } /* No more gnttab callback work. */ @@ -870,7 +868,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, } /* Add the persistent grant into the list of free grants */ for (i = 0; i < s->req.u.rw.nr_segments; i++) { - list_add(&s->grants_used[i]->node, &info->persistent_gnts); + llist_add(&s->grants_used[i]->node, &info->persistent_gnts); info->persistent_gnts_c++; } } @@ -1166,7 +1164,7 @@ static int blkfront_probe(struct xenbus_device *dev, spin_lock_init(&info->io_lock); info->xbdev = dev; info->vdevice = vdevice; - INIT_LIST_HEAD(&info->persistent_gnts); + init_llist_head(&info->persistent_gnts); info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 1c0929b..33c9a44 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -73,10 +73,8 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x03F0, 0x311D) }, /* Atheros AR3012 with sflash firmware*/ - { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, { USB_DEVICE(0x0CF3, 0x311D) }, - { USB_DEVICE(0x0CF3, 0x817a) }, { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x04CA, 0x3006) }, @@ -107,10 +105,8 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); static struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ - { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 568e703..7e351e3 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -131,10 +131,8 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, /* Atheros 3012 with sflash firmware */ - { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x817a), 
.driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 615d262..fe6d4be 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -373,14 +373,26 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) struct hpet_dev *devp; unsigned long addr; + if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) + return -EINVAL; + devp = file->private_data; addr = devp->hd_hpets->hp_hpet_phys; if (addr & (PAGE_SIZE - 1)) return -ENOSYS; + vma->vm_flags |= VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - return vm_iomap_memory(vma, addr, PAGE_SIZE); + + if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, + PAGE_SIZE, vma->vm_page_prot)) { + printk(KERN_ERR "%s: io_remap_pfn_range failed\n", + __func__); + return -EAGAIN; + } + + return 0; #else return -ENOSYS; #endif diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 69ae597..1bafb40 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -40,7 +40,6 @@ #include #include #include -#include #include @@ -53,12 +52,8 @@ static struct hwrng *current_rng; static LIST_HEAD(rng_list); static DEFINE_MUTEX(rng_mutex); static int data_avail; -static u8 *rng_buffer; - -static size_t rng_buffer_size(void) -{ - return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; -} +static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES] + __cacheline_aligned; static inline int hwrng_init(struct hwrng *rng) { @@ -121,7 +116,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, if (!data_avail) { bytes_read = rng_get_data(current_rng, rng_buffer, - rng_buffer_size(), + sizeof(rng_buffer), !(filp->f_flags & O_NONBLOCK)); if (bytes_read < 0) { err = bytes_read; @@ -312,14 +307,6 @@ int hwrng_register(struct hwrng *rng) mutex_lock(&rng_mutex); - /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ - err = -ENOMEM; - if (!rng_buffer) { - rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); - if (!rng_buffer) - goto out_unlock; - } - /* Must not register two RNGs with the same name. */ err = -EEXIST; list_for_each_entry(tmp, &rng_list, list) { diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 1acc4e0..b65c103 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -92,22 +92,14 @@ static int probe_common(struct virtio_device *vdev) { int err; - if (vq) { - /* We only support one device for now */ - return -EBUSY; - } /* We expect a single virtqueue. 
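The probe_common() hunk above removes two guards around the driver-global vq: the pre-revert code rejected a second device with -EBUSY and reset vq to NULL on every error path, so later calls could never see a stale ERR_PTR. A sketch of that defensive single-instance pattern, using the virtio calls visible in the hunk.

#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static struct virtqueue *vq;	/* single-instance state, as above */

static void random_recv_done(struct virtqueue *q) { }

static int probe_common(struct virtio_device *vdev)
{
	if (vq)
		return -EBUSY;	/* only one device supported */

	vq = virtio_find_single_vq(vdev, random_recv_done, "input");
	if (IS_ERR(vq)) {
		int err = PTR_ERR(vq);

		vq = NULL;	/* never leave an ERR_PTR in the global */
		return err;
	}
	return 0;
}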
*/ vq = virtio_find_single_vq(vdev, random_recv_done, "input"); - if (IS_ERR(vq)) { - err = PTR_ERR(vq); - vq = NULL; - return err; - } + if (IS_ERR(vq)) + return PTR_ERR(vq); err = hwrng_register(&virtio_hwrng); if (err) { vdev->config->del_vqs(vdev); - vq = NULL; return err; } @@ -120,7 +112,6 @@ static void remove_common(struct virtio_device *vdev) busy = false; hwrng_unregister(&virtio_hwrng); vdev->config->del_vqs(vdev); - vq = NULL; } static int virtrng_probe(struct virtio_device *vdev) diff --git a/drivers/char/random.c b/drivers/char/random.c index 9d6c416..85e81ec 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -445,7 +445,7 @@ static struct entropy_store input_pool = { .poolinfo = &poolinfo_table[0], .name = "input", .limit = 1, - .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), + .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock), .pool = input_pool_data }; @@ -454,7 +454,7 @@ static struct entropy_store blocking_pool = { .name = "blocking", .limit = 1, .pull = &input_pool, - .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), + .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock), .pool = blocking_pool_data }; @@ -462,7 +462,7 @@ static struct entropy_store nonblocking_pool = { .poolinfo = &poolinfo_table[1], .name = "nonblocking", .pull = &input_pool, - .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock), + .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock), .pool = nonblocking_pool_data }; @@ -676,12 +676,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) preempt_disable(); /* if over the trickle threshold, use only 1 in 4096 samples */ if (input_pool.entropy_count > trickle_thresh && - ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) { - preempt_enable(); - return; - } + ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) + goto out; - preempt_enable(); sample.jiffies = jiffies; sample.cycles = get_cycles(); sample.num = num; @@ -722,6 +719,8 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) credit_entropy_bits(&input_pool, min_t(int, fls(delta>>1), 11)); } +out: + preempt_enable(); } void add_input_randomness(unsigned int type, unsigned int code, @@ -742,16 +741,18 @@ EXPORT_SYMBOL_GPL(add_input_randomness); static DEFINE_PER_CPU(struct fast_pool, irq_randomness); -void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) +void add_interrupt_randomness(int irq, int irq_flags) { struct entropy_store *r; struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); + struct pt_regs *regs = get_irq_regs(); unsigned long now = jiffies; __u32 input[4], cycles = get_cycles(); input[0] = cycles ^ jiffies; input[1] = irq; - if (ip) { + if (regs) { + __u64 ip = instruction_pointer(regs); input[2] = ip; input[3] = ip >> 32; } @@ -765,11 +766,7 @@ void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) fast_pool->last = now; r = nonblocking_pool.initialized ? 
&input_pool : &nonblocking_pool; -#ifndef CONFIG_PREEMPT_RT_FULL __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); -#else - mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); -#endif /* * If we don't have a valid cycle counter, and we see * back-to-back timer interrupts, then skip giving credit for @@ -855,7 +852,6 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, int reserved) { unsigned long flags; - int wakeup_write = 0; /* Hold lock while accounting */ spin_lock_irqsave(&r->lock, flags); @@ -877,8 +873,10 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, else r->entropy_count = reserved; - if (r->entropy_count < random_write_wakeup_thresh) - wakeup_write = 1; + if (r->entropy_count < random_write_wakeup_thresh) { + wake_up_interruptible(&random_write_wait); + kill_fasync(&fasync, SIGIO, POLL_OUT); + } } DEBUG_ENT("debiting %zu entropy credits from %s%s\n", @@ -886,11 +884,6 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, spin_unlock_irqrestore(&r->lock, flags); - if (wakeup_write) { - wake_up_interruptible(&random_write_wait); - kill_fasync(&fasync, SIGIO, POLL_OUT); - } - return nbytes; } diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index ba780b7..93211df 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -1291,7 +1291,7 @@ int tpm_pm_suspend(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); struct tpm_cmd_t cmd; - int rc, try; + int rc; u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 }; @@ -1309,32 +1309,9 @@ int tpm_pm_suspend(struct device *dev) } /* now do the actual savestate */ - for (try = 0; try < TPM_RETRY; try++) { - cmd.header.in = savestate_header; - rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL); - - /* - * If the TPM indicates that it is too busy to respond to - * this command then retry before giving up. It can take - * several seconds for this TPM to be ready. - * - * This can happen if the TPM has already been sent the - * SaveState command before the driver has loaded. TCG 1.2 - * specification states that any communication after SaveState - * may cause the TPM to invalidate previously saved state. 
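The comment block above documents the retry loop being removed: a TPM_WARN_RETRY return means the chip is momentarily busy, so the driver repeats the SaveState command up to TPM_RETRY times, sleeping TPM_TIMEOUT_RETRY milliseconds between attempts. The general retry-on-busy shape, with a hypothetical send_cmd() standing in for transmit_cmd().

#include <linux/delay.h>

#define BUSY_RC		0x800	/* TPM_WARN_RETRY in the hunk */
#define MAX_TRIES	50	/* TPM_RETRY: 50 * 100ms = 5s */
#define RETRY_MS	100	/* TPM_TIMEOUT_RETRY */

int send_cmd(void);		/* hypothetical: returns 0 or a chip rc */

static int send_with_retry(void)
{
	int rc, try;

	for (try = 0; try < MAX_TRIES; try++) {
		rc = send_cmd();
		if (rc != BUSY_RC)
			break;	/* success or a hard error: stop */
		msleep(RETRY_MS);
	}
	return rc;
}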
- */ - if (rc != TPM_WARN_RETRY) - break; - msleep(TPM_TIMEOUT_RETRY); - } - - if (rc) - dev_err(chip->dev, - "Error (%d) sending savestate before suspend\n", rc); - else if (try > 0) - dev_warn(chip->dev, "TPM savestate took %dms\n", - try * TPM_TIMEOUT_RETRY); - + cmd.header.in = savestate_header; + rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, + "sending savestate before suspend"); return rc; } EXPORT_SYMBOL_GPL(tpm_pm_suspend); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 9c12a52..8ef7649 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -32,12 +32,10 @@ enum tpm_const { TPM_MINOR = 224, /* officially assigned */ TPM_BUFSIZE = 4096, TPM_NUM_DEVICES = 256, - TPM_RETRY = 50, /* 5 seconds */ }; enum tpm_timeout { TPM_TIMEOUT = 5, /* msecs */ - TPM_TIMEOUT_RETRY = 100 /* msecs */ }; /* TPM addresses */ @@ -46,7 +44,6 @@ enum tpm_addr { TPM_ADDR = 0x4E, }; -#define TPM_WARN_RETRY 0x800 #define TPM_WARN_DOING_SELFTEST 0x802 #define TPM_ERR_DEACTIVATED 0x6 #define TPM_ERR_DISABLED 0x7 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index a4b7aa0..ee4dbea 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -152,8 +152,7 @@ struct ports_device { spinlock_t ports_lock; /* To protect the vq operations for the control channel */ - spinlock_t c_ivq_lock; - spinlock_t c_ovq_lock; + spinlock_t cvq_lock; /* The current config space is stored here */ struct virtio_console_config config; @@ -576,14 +575,11 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, vq = portdev->c_ovq; sg_init_one(sg, &cpkt, sizeof(cpkt)); - - spin_lock(&portdev->c_ovq_lock); if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { virtqueue_kick(vq); while (!virtqueue_get_buf(vq, &len)) cpu_relax(); } - spin_unlock(&portdev->c_ovq_lock); return 0; } @@ -1719,23 +1715,23 @@ static void control_work_handler(struct work_struct *work) portdev = container_of(work, struct ports_device, control_work); vq = portdev->c_ivq; - spin_lock(&portdev->c_ivq_lock); + spin_lock(&portdev->cvq_lock); while ((buf = virtqueue_get_buf(vq, &len))) { - spin_unlock(&portdev->c_ivq_lock); + spin_unlock(&portdev->cvq_lock); buf->len = len; buf->offset = 0; handle_control_message(portdev, buf); - spin_lock(&portdev->c_ivq_lock); + spin_lock(&portdev->cvq_lock); if (add_inbuf(portdev->c_ivq, buf) < 0) { dev_warn(&portdev->vdev->dev, "Error adding buffer to queue\n"); free_buf(buf, false); } } - spin_unlock(&portdev->c_ivq_lock); + spin_unlock(&portdev->cvq_lock); } static void out_intr(struct virtqueue *vq) @@ -2000,12 +1996,10 @@ static int virtcons_probe(struct virtio_device *vdev) if (multiport) { unsigned int nr_added_bufs; - spin_lock_init(&portdev->c_ivq_lock); - spin_lock_init(&portdev->c_ovq_lock); + spin_lock_init(&portdev->cvq_lock); INIT_WORK(&portdev->control_work, &control_work_handler); - nr_added_bufs = fill_queue(portdev->c_ivq, - &portdev->c_ivq_lock); + nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); if (!nr_added_bufs) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); @@ -2156,7 +2150,7 @@ static int virtcons_restore(struct virtio_device *vdev) return ret; if (use_multiport(portdev)) - fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); + fill_queue(portdev->c_ivq, &portdev->cvq_lock); list_for_each_entry(port, &portdev->ports, list) { port->in_vq = portdev->in_vqs[port->id]; diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 
ac0bb2e..32cb929 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -23,7 +23,8 @@ * this 32 bit free-running counter. the second channel is not used. * * - The third channel may be used to provide a 16-bit clockevent - * source, used in either periodic or oneshot mode. + * source, used in either periodic or oneshot mode. This runs + * at 32 KiHZ, and can handle delays of up to two seconds. * * A boot clocksource and clockevent source are also currently needed, * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so @@ -73,7 +74,6 @@ static struct clocksource clksrc = { struct tc_clkevt_device { struct clock_event_device clkevt; struct clk *clk; - u32 freq; void __iomem *regs; }; @@ -82,6 +82,13 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt) return container_of(clkevt, struct tc_clkevt_device, clkevt); } +/* For now, we always use the 32K clock ... this optimizes for NO_HZ, + * because using one of the divided clocks would usually mean the + * tick rate can never be less than several dozen Hz (vs 0.5 Hz). + * + * A divided clock could be good for high resolution timers, since + * 30.5 usec resolution can seem "low". + */ static u32 timer_clock; static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) @@ -104,12 +111,11 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) case CLOCK_EVT_MODE_PERIODIC: clk_enable(tcd->clk); - /* count up to RC, then irq and restart */ + /* slow clock, count up to RC, then irq and restart */ __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); - __raw_writel((tcd->freq + HZ/2)/HZ, - tcaddr + ATMEL_TC_REG(2, RC)); + __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); /* Enable clock and interrupts on RC compare */ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); @@ -122,7 +128,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) case CLOCK_EVT_MODE_ONESHOT: clk_enable(tcd->clk); - /* count up to RC, then irq and stop */ + /* slow clock, count up to RC, then irq and stop */ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); @@ -152,12 +158,8 @@ static struct tc_clkevt_device clkevt = { .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, -#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK /* Should be lower than at91rm9200's system timer */ .rating = 125, -#else - .rating = 200, -#endif .set_next_event = tc_next_event, .set_mode = tc_mode, }, @@ -183,9 +185,8 @@ static struct irqaction tc_irqaction = { .handler = ch2_irq, }; -static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) +static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) { - unsigned divisor = atmel_tc_divisors[divisor_idx]; struct clk *t2_clk = tc->clk[2]; int irq = tc->irq[2]; @@ -193,17 +194,11 @@ static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) clkevt.clk = t2_clk; tc_irqaction.dev_id = &clkevt; - timer_clock = divisor_idx; + timer_clock = clk32k_divisor_idx; - if (!divisor) - clkevt.freq = 32768; - else - clkevt.freq = clk_get_rate(t2_clk)/divisor; - - clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC, - clkevt.clkevt.shift); - clkevt.clkevt.max_delta_ns = - clockevent_delta2ns(0xffff, &clkevt.clkevt); + clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift); + clkevt.clkevt.max_delta_ns + = 
clockevent_delta2ns(0xffff, &clkevt.clkevt); clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1; clkevt.clkevt.cpumask = cpumask_of(0); @@ -332,11 +327,8 @@ static int __init tcb_clksrc_init(void) clocksource_register_hz(&clksrc, divided_rate); /* channel 2: periodic and oneshot timer support */ -#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK setup_clkevents(tc, clk32k_divisor_idx); -#else - setup_clkevents(tc, best_divisor_idx); -#endif + return 0; } arch_initcall(tcb_clksrc_init); diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 1110478..fce2000 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -313,12 +313,6 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg, (task_active_pid_ns(current) != &init_pid_ns)) return; - /* Can only change if privileged. */ - if (!capable(CAP_NET_ADMIN)) { - err = EPERM; - goto out; - } - mc_op = (enum proc_cn_mcast_op *)msg->data; switch (*mc_op) { case PROC_CN_MCAST_LISTEN: @@ -331,8 +325,6 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg, err = EINVAL; break; } - -out: cn_proc_ack(err, msg->seq, msg->ack); } diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 41fc550..7012ea8 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -222,6 +222,8 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); + locking_frequency = exynos_getspeed(0); + /* set the transition latency value */ policy->cpuinfo.transition_latency = 100000; @@ -286,8 +288,6 @@ static int __init exynos_cpufreq_init(void) goto err_vdd_arm; } - locking_frequency = exynos_getspeed(0); - register_pm_notifier(&exynos_cpufreq_nb); if (cpufreq_register_driver(&exynos_driver)) { diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index cf268b1..b2a0a07 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -1650,7 +1650,11 @@ struct caam_alg_template { }; static struct caam_alg_template driver_algs[] = { - /* single-pass ipsec_esp descriptor */ + /* + * single-pass ipsec_esp descriptor + * authencesn(*,*) is also registered, although not present + * explicitly here. 
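This comment (and the matching one in talitos.c below) refers to a registration trick visible in the loops that follow: each "authenc(...)" template is registered a second time as "authencesn(...)" by splicing "esn" into the algorithm name in place. "authenc" is 7 bytes and "authencesn" is 10, so a memmove() opens a 3-byte gap that memcpy() then fills. A standalone demonstration in userspace C; the kernel version skips moving the terminator because cra_name is a zero-filled fixed-size array.

#include <stdio.h>
#include <string.h>

/* Turn "authenc(x,y)" into "authencesn(x,y)" in place.
 * 'buf' must have room for the three extra bytes. */
static void make_esn_name(char *buf)
{
	/* Shift everything after "authenc" (7 chars) out by 3. */
	memmove(buf + 10, buf + 7, strlen(buf) - 7 + 1); /* +1 keeps '\0' */
	memcpy(buf + 7, "esn", 3);
}

int main(void)
{
	char name[64] = "authenc(hmac(sha1),cbc(aes))";

	make_esn_name(name);
	printf("%s\n", name);	/* authencesn(hmac(sha1),cbc(aes)) */
	return 0;
}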
+ */ { .name = "authenc(hmac(md5),cbc(aes))", .driver_name = "authenc-hmac-md5-cbc-aes-caam", @@ -2213,7 +2217,9 @@ static int __init caam_algapi_init(void) for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { /* TODO: check if h/w supports alg */ struct caam_crypto_alg *t_alg; + bool done = false; +authencesn: t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); @@ -2227,8 +2233,25 @@ static int __init caam_algapi_init(void) dev_warn(ctrldev, "%s alg registration failed\n", t_alg->crypto_alg.cra_driver_name); kfree(t_alg); - } else + } else { list_add_tail(&t_alg->entry, &priv->alg_list); + if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD && + !memcmp(driver_algs[i].name, "authenc", 7) && + !done) { + char *name; + + name = driver_algs[i].name; + memmove(name + 10, name + 7, strlen(name) - 7); + memcpy(name + 7, "esn", 3); + + name = driver_algs[i].driver_name; + memmove(name + 10, name + 7, strlen(name) - 7); + memcpy(name + 7, "esn", 3); + + done = true; + goto authencesn; + } + } } if (!list_empty(&priv->alg_list)) dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 762aeff..cf15e78 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 5b2b5e6..09b184a 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -1973,7 +1974,11 @@ struct talitos_alg_template { }; static struct talitos_alg_template driver_algs[] = { - /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ + /* + * AEAD algorithms. These use a single-pass ipsec_esp descriptor. + * authencesn(*,*) is also registered, although not present + * explicitly here. + */ { .type = CRYPTO_ALG_TYPE_AEAD, .alg.crypto = { .cra_name = "authenc(hmac(sha1),cbc(aes))", @@ -2815,7 +2820,9 @@ static int talitos_probe(struct platform_device *ofdev) if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { struct talitos_crypto_alg *t_alg; char *name = NULL; + bool authenc = false; +authencesn: t_alg = talitos_alg_alloc(dev, &driver_algs[i]); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); @@ -2830,6 +2837,8 @@ static int talitos_probe(struct platform_device *ofdev) err = crypto_register_alg( &t_alg->algt.alg.crypto); name = t_alg->algt.alg.crypto.cra_driver_name; + authenc = authenc ? 
!authenc :
+					!(bool)memcmp(name, "authenc", 7);
 			break;
 		case CRYPTO_ALG_TYPE_AHASH:
 			err = crypto_register_ahash(
@@ -2842,8 +2851,25 @@ static int talitos_probe(struct platform_device *ofdev)
 				dev_err(dev, "%s alg registration failed\n",
 					name);
 				kfree(t_alg);
-			} else
+			} else {
 				list_add_tail(&t_alg->entry, &priv->alg_list);
+				if (authenc) {
+					struct crypto_alg *alg =
+						&driver_algs[i].alg.crypto;
+
+					name = alg->cra_name;
+					memmove(name + 10, name + 7,
+						strlen(name) - 7);
+					memcpy(name + 7, "esn", 3);
+
+					name = alg->cra_driver_name;
+					memmove(name + 10, name + 7,
+						strlen(name) - 7);
+					memcpy(name + 7, "esn", 3);
+
+					goto authencesn;
+				}
+			}
 		}
 	}
 	if (!list_empty(&priv->alg_list))
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 22c9063..8bc5fef 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {
 	.shutdown = ux500_cryp_shutdown,
 	.driver = {
 		.owner = THIS_MODULE,
-		.name  = "cryp1",
+		.name  = "cryp1",
 		.pm = &ux500_cryp_pm,
 	}
 };
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 819dfda..bc6f5fa 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -420,11 +420,6 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	raw_spin_lock_irqsave(&dca_lock, flags);
-	if (list_empty(&dca_domains)) {
-		raw_spin_unlock_irqrestore(&dca_lock, flags);
-		return;
-	}
-
 	list_del(&dca->node);
 	pci_rc = dca_pci_rc_from_dev(dev);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 8607724..5a31264 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -276,20 +276,12 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
 	spin_lock_irqsave(&c->vc.lock, flags);
 	if (vchan_issue_pending(&c->vc) && !c->desc) {
-		/*
-		 * c->cyclic is used only by audio and in this case the DMA need
-		 * to be started without delay.
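The rationale in the comment above drives the structure being removed: cyclic transfers feed audio and must start the moment they are issued, while ordinary transfers can be parked on a pending list and started later from a tasklet. A sketch of that split; demo_chan, demo_dev and start_now() stand in for the driver's omap_chan, omap_dmadev and omap_dma_start_desc().

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* demo_chan/demo_dev mirror the driver's omap_chan/omap_dmadev. */
struct demo_chan {
	bool cyclic;
	struct list_head node;
};

struct demo_dev {
	spinlock_t lock;
	struct list_head pending;
	struct tasklet_struct task;
};

void start_now(struct demo_chan *c);	/* stand-in for omap_dma_start_desc() */

static void issue_pending_sketch(struct demo_chan *c, struct demo_dev *d)
{
	if (c->cyclic) {
		/* Audio: start immediately, no tasklet latency. */
		start_now(c);
	} else {
		spin_lock(&d->lock);
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);	/* batched start later */
	}
}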
- */ - if (!c->cyclic) { - struct omap_dmadev *d = to_omap_dma_dev(chan->device); - spin_lock(&d->lock); - if (list_empty(&c->node)) - list_add_tail(&c->node, &d->pending); - spin_unlock(&d->lock); - tasklet_schedule(&d->task); - } else { - omap_dma_start_desc(c); - } + struct omap_dmadev *d = to_omap_dma_dev(chan->device); + spin_lock(&d->lock); + if (list_empty(&c->node)) + list_add_tail(&c->node, &d->pending); + spin_unlock(&d->lock); + tasklet_schedule(&d->task); } spin_unlock_irqrestore(&c->vc.lock, flags); } diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index b70709b..3315e4b 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c @@ -326,7 +326,7 @@ static int sh_dmae_set_slave(struct shdma_chan *schan, shdma_chan); const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); if (!cfg) - return -ENXIO; + return -ENODEV; if (!try) sh_chan->config = cfg; diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index c9303ed..0ca1ca7 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -330,17 +330,17 @@ static struct device_attribute *dynamic_csrow_dimm_attr[] = { }; /* possible dynamic channel ce_count attribute files */ -DEVICE_CHANNEL(ch0_ce_count, S_IRUGO, +DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0); -DEVICE_CHANNEL(ch1_ce_count, S_IRUGO, +DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1); -DEVICE_CHANNEL(ch2_ce_count, S_IRUGO, +DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2); -DEVICE_CHANNEL(ch3_ce_count, S_IRUGO, +DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3); -DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, +DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4); -DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, +DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5); /* Total possible dynamic ce_count attribute file table */ diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index 6c3fca9..cdae207 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c @@ -19,10 +19,10 @@ /* There is only *one* pci_eisa device per machine, right ? */ static struct eisa_root_device pci_eisa_root; -static int __init pci_eisa_init(struct pci_dev *pdev) +static int __init pci_eisa_init(struct pci_dev *pdev, + const struct pci_device_id *ent) { - int rc, i; - struct resource *res, *bus_res = NULL; + int rc; if ((rc = pci_enable_device (pdev))) { printk (KERN_ERR "pci_eisa : Could not enable device %s\n", @@ -30,30 +30,9 @@ static int __init pci_eisa_init(struct pci_dev *pdev) return rc; } - /* - * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI - * device, so the resources available on EISA are the same as those - * available on the 82375 bus. This works the same as a PCI-PCI - * bridge in subtractive-decode mode (see pci_read_bridge_bases()). - * We assume other PCI-EISA bridges are similar. - * - * eisa_root_register() can only deal with a single io port resource, - * so we use the first valid io port resource. 
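The comment above explains the scan that follows: a subtractive-decode bridge forwards whatever its PCI bus already decodes, and eisa_root_register() can only take a single I/O port window, so the code picks the first I/O resource on the bus. The same scan in isolation, using pci_bus_for_each_resource() as in the hunk.

#include <linux/ioport.h>
#include <linux/pci.h>

/* Return the first I/O port window of 'bus', or NULL if none exists. */
static struct resource *first_io_window(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i)
		if (res && (res->flags & IORESOURCE_IO))
			return res;

	return NULL;
}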
- */ - pci_bus_for_each_resource(pdev->bus, res, i) - if (res && (res->flags & IORESOURCE_IO)) { - bus_res = res; - break; - } - - if (!bus_res) { - dev_err(&pdev->dev, "No resources available\n"); - return -1; - } - pci_eisa_root.dev = &pdev->dev; - pci_eisa_root.res = bus_res; - pci_eisa_root.bus_base_addr = bus_res->start; + pci_eisa_root.res = pdev->bus->resource[0]; + pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; pci_eisa_root.slots = EISA_MAX_SLOTS; pci_eisa_root.dma_mask = pdev->dma_mask; dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); @@ -66,26 +45,22 @@ static int __init pci_eisa_init(struct pci_dev *pdev) return 0; } -/* - * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). - * Otherwise pnp resource will get enabled early and could prevent eisa - * to be initialized. - * Also need to make sure pci_eisa_init_early() is called after - * x86/pci_subsys_init(). - * So need to use subsys_initcall_sync with it. - */ -static int __init pci_eisa_init_early(void) -{ - struct pci_dev *dev = NULL; - int ret; +static struct pci_device_id pci_eisa_pci_tbl[] = { + { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, + { 0, } +}; - for_each_pci_dev(dev) - if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { - ret = pci_eisa_init(dev); - if (ret) - return ret; - } +static struct pci_driver __refdata pci_eisa_driver = { + .name = "pci_eisa", + .id_table = pci_eisa_pci_tbl, + .probe = pci_eisa_init, +}; - return 0; +static int __init pci_eisa_init_module (void) +{ + return pci_register_driver (&pci_eisa_driver); } -subsys_initcall_sync(pci_eisa_init_early); + +device_initcall(pci_eisa_init_module); +MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index af3e8aa..3873d53 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -1020,10 +1020,6 @@ static void fw_device_init(struct work_struct *work) ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? idr_get_new(&fw_device_idr, device, &minor) : -ENOMEM; - if (minor >= 1 << MINORBITS) { - idr_remove(&fw_device_idr, minor); - minor = -ENOSPC; - } up_write(&fw_device_rwsem); if (ret < 0) diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 42c759a..9b00072 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -53,24 +53,6 @@ config EFI_VARS Subsequent efibootmgr releases may be found at: -config EFI_VARS_PSTORE - bool "Register efivars backend for pstore" - depends on EFI_VARS && PSTORE - default y - help - Say Y here to enable use efivars as a backend to pstore. This - will allow writing console messages, crash dumps, or anything - else supported by pstore to EFI variables. - -config EFI_VARS_PSTORE_DEFAULT_DISABLE - bool "Disable using efivars as a pstore backend by default" - depends on EFI_VARS_PSTORE - default n - help - Saying Y here will disable the use of efivars as a storage - backend for pstore by default. This setting can be overridden - using the efivars module's pstore_disable parameter. 
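The two Kconfig entries above pair with a module parameter in efivars.c (its removal appears further down): the compile-time default comes from IS_ENABLED() on the Kconfig symbol and can still be overridden at module load or runtime. The general pattern, shown here with a hypothetical option name.

#include <linux/kernel.h>
#include <linux/module.h>

/* Default follows the (hypothetical) Kconfig switch; IS_ENABLED()
 * evaluates to 1 for =y and 0 otherwise, so this compiles even when
 * the option does not exist in the current configuration. */
static bool feature_disable = IS_ENABLED(CONFIG_MYDRV_FEATURE_DEFAULT_DISABLE);
module_param_named(feature_disable, feature_disable, bool, 0644);
MODULE_PARM_DESC(feature_disable, "Disable the optional backend by default");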
- config EFI_PCDP bool "Console device selection via EFI PCDP or HCDP table" depends on ACPI && EFI && IA64 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 4cd392d..982f1f5 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -442,6 +442,7 @@ static int __init dmi_present(const char __iomem *p) static int __init smbios_present(const char __iomem *p) { u8 buf[32]; + int offset = 0; memcpy_fromio(buf, p, 32); if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) { @@ -460,9 +461,9 @@ static int __init smbios_present(const char __iomem *p) dmi_ver = 0x0206; break; } - return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16); + offset = 16; } - return 1; + return dmi_present(buf + offset); } void __init dmi_scan_machine(void) diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index b07cb37..f5596db 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -79,7 +79,6 @@ #include #include #include -#include #include #include @@ -103,11 +102,6 @@ MODULE_VERSION(EFIVARS_VERSION); */ #define GUID_LEN 36 -static bool efivars_pstore_disable = - IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE); - -module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644); - /* * The maximum size of VariableName + Data = 1024 * Therefore, it's reasonable to save that much @@ -411,11 +405,10 @@ static efi_status_t get_var_data(struct efivars *efivars, struct efi_variable *var) { efi_status_t status; - unsigned long flags; - spin_lock_irqsave(&efivars->lock, flags); + spin_lock(&efivars->lock); status = get_var_data_locked(efivars, var); - spin_unlock_irqrestore(&efivars->lock, flags); + spin_unlock(&efivars->lock); if (status != EFI_SUCCESS) { printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n", @@ -424,44 +417,6 @@ get_var_data(struct efivars *efivars, struct efi_variable *var) return status; } -static efi_status_t -check_var_size_locked(struct efivars *efivars, u32 attributes, - unsigned long size) -{ - u64 storage_size, remaining_size, max_size; - efi_status_t status; - const struct efivar_operations *fops = efivars->ops; - - if (!efivars->ops->query_variable_info) - return EFI_UNSUPPORTED; - - status = fops->query_variable_info(attributes, &storage_size, - &remaining_size, &max_size); - - if (status != EFI_SUCCESS) - return status; - - if (!storage_size || size > remaining_size || size > max_size || - (remaining_size - size) < (storage_size / 2)) - return EFI_OUT_OF_RESOURCES; - - return status; -} - - -static efi_status_t -check_var_size(struct efivars *efivars, u32 attributes, unsigned long size) -{ - efi_status_t status; - unsigned long flags; - - spin_lock_irqsave(&efivars->lock, flags); - status = check_var_size_locked(efivars, attributes, size); - spin_unlock_irqrestore(&efivars->lock, flags); - - return status; -} - static ssize_t efivar_guid_read(struct efivar_entry *entry, char *buf) { @@ -582,19 +537,14 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) return -EINVAL; } - spin_lock_irq(&efivars->lock); - - status = check_var_size_locked(efivars, new_var->Attributes, - new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); - - if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) - status = efivars->ops->set_variable(new_var->VariableName, - &new_var->VendorGuid, - new_var->Attributes, - new_var->DataSize, - new_var->Data); + spin_lock(&efivars->lock); + status = efivars->ops->set_variable(new_var->VariableName, + &new_var->VendorGuid, + new_var->Attributes, 
+ new_var->DataSize, + new_var->Data); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); if (status != EFI_SUCCESS) { printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n", @@ -743,7 +693,8 @@ static ssize_t efivarfs_file_write(struct file *file, u32 attributes; struct inode *inode = file->f_mapping->host; unsigned long datasize = count - sizeof(attributes); - unsigned long newdatasize, varsize; + unsigned long newdatasize; + u64 storage_size, remaining_size, max_size; ssize_t bytes = 0; if (count < sizeof(attributes)) @@ -762,18 +713,28 @@ static ssize_t efivarfs_file_write(struct file *file, * amounts of memory. Pick a default size of 64K if * QueryVariableInfo() isn't supported by the firmware. */ + spin_lock(&efivars->lock); + + if (!efivars->ops->query_variable_info) + status = EFI_UNSUPPORTED; + else { + const struct efivar_operations *fops = efivars->ops; + status = fops->query_variable_info(attributes, &storage_size, + &remaining_size, &max_size); + } - varsize = datasize + utf16_strsize(var->var.VariableName, 1024); - status = check_var_size(efivars, attributes, varsize); + spin_unlock(&efivars->lock); if (status != EFI_SUCCESS) { if (status != EFI_UNSUPPORTED) return efi_status_to_err(status); - if (datasize > 65536) - return -ENOSPC; + remaining_size = 65536; } + if (datasize > remaining_size) + return -ENOSPC; + data = kmalloc(datasize, GFP_KERNEL); if (!data) return -ENOMEM; @@ -793,20 +754,7 @@ static ssize_t efivarfs_file_write(struct file *file, * set_variable call, and removal of the variable from the efivars * list (in the case of an authenticated delete). */ - spin_lock_irq(&efivars->lock); - - /* - * Ensure that the available space hasn't shrunk below the safe level - */ - - status = check_var_size_locked(efivars, attributes, varsize); - - if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) { - spin_unlock_irq(&efivars->lock); - kfree(data); - - return efi_status_to_err(status); - } + spin_lock(&efivars->lock); status = efivars->ops->set_variable(var->var.VariableName, &var->var.VendorGuid, @@ -814,7 +762,7 @@ static ssize_t efivarfs_file_write(struct file *file, data); if (status != EFI_SUCCESS) { - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); kfree(data); return efi_status_to_err(status); @@ -835,21 +783,21 @@ static ssize_t efivarfs_file_write(struct file *file, NULL); if (status == EFI_BUFFER_TOO_SMALL) { - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); mutex_lock(&inode->i_mutex); i_size_write(inode, newdatasize + sizeof(attributes)); mutex_unlock(&inode->i_mutex); } else if (status == EFI_NOT_FOUND) { list_del(&var->list); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); efivar_unregister(var); drop_nlink(inode); d_delete(file->f_dentry); dput(file->f_dentry); } else { - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); pr_warn("efivarfs: inconsistent EFI variable implementation? 
" "status = %lx\n", status); } @@ -871,11 +819,11 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf, void *data; ssize_t size = 0; - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); status = efivars->ops->get_variable(var->var.VariableName, &var->var.VendorGuid, &attributes, &datasize, NULL); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); if (status != EFI_BUFFER_TOO_SMALL) return efi_status_to_err(status); @@ -885,12 +833,12 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf, if (!data) return -ENOMEM; - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); status = efivars->ops->get_variable(var->var.VariableName, &var->var.VendorGuid, &attributes, &datasize, (data + sizeof(attributes))); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); if (status != EFI_SUCCESS) { size = efi_status_to_err(status); @@ -952,48 +900,6 @@ static struct inode *efivarfs_get_inode(struct super_block *sb, return inode; } -/* - * Return true if 'str' is a valid efivarfs filename of the form, - * - * VariableName-12345678-1234-1234-1234-1234567891bc - */ -static bool efivarfs_valid_name(const char *str, int len) -{ - static const char dashes[GUID_LEN] = { - [8] = 1, [13] = 1, [18] = 1, [23] = 1 - }; - const char *s = str + len - GUID_LEN; - int i; - - /* - * We need a GUID, plus at least one letter for the variable name, - * plus the '-' separator - */ - if (len < GUID_LEN + 2) - return false; - - /* GUID must be preceded by a '-' */ - if (*(s - 1) != '-') - return false; - - /* - * Validate that 's' is of the correct format, e.g. - * - * 12345678-1234-1234-1234-123456789abc - */ - for (i = 0; i < GUID_LEN; i++) { - if (dashes[i]) { - if (*s++ != '-') - return false; - } else { - if (!isxdigit(*s++)) - return false; - } - } - - return true; -} - static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid) { guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]); @@ -1022,7 +928,11 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, struct efivar_entry *var; int namelen, i = 0, err = 0; - if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) + /* + * We need a GUID, plus at least one letter for the variable name, + * plus the '-' separator + */ + if (dentry->d_name.len < GUID_LEN + 2) return -EINVAL; inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0); @@ -1056,9 +966,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, goto out; kobject_uevent(&var->kobj, KOBJ_ADD); - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); list_add(&var->list, &efivars->list); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); d_instantiate(dentry, inode); dget(dentry); out: @@ -1075,7 +985,7 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry) struct efivars *efivars = var->efivars; efi_status_t status; - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); status = efivars->ops->set_variable(var->var.VariableName, &var->var.VendorGuid, @@ -1083,102 +993,17 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry) if (status == EFI_SUCCESS || status == EFI_NOT_FOUND) { list_del(&var->list); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); efivar_unregister(var); drop_nlink(dentry->d_inode); dput(dentry); return 0; } - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); return -EINVAL; }; -/* - * Compare two efivarfs file names. 
- * - * An efivarfs filename is composed of two parts, - * - * 1. A case-sensitive variable name - * 2. A case-insensitive GUID - * - * So we need to perform a case-sensitive match on part 1 and a - * case-insensitive match on part 2. - */ -static int efivarfs_d_compare(const struct dentry *parent, const struct inode *pinode, - const struct dentry *dentry, const struct inode *inode, - unsigned int len, const char *str, - const struct qstr *name) -{ - int guid = len - GUID_LEN; - - if (name->len != len) - return 1; - - /* Case-sensitive compare for the variable name */ - if (memcmp(str, name->name, guid)) - return 1; - - /* Case-insensitive compare for the GUID */ - return strncasecmp(name->name + guid, str + guid, GUID_LEN); -} - -static int efivarfs_d_hash(const struct dentry *dentry, - const struct inode *inode, struct qstr *qstr) -{ - unsigned long hash = init_name_hash(); - const unsigned char *s = qstr->name; - unsigned int len = qstr->len; - - if (!efivarfs_valid_name(s, len)) - return -EINVAL; - - while (len-- > GUID_LEN) - hash = partial_name_hash(*s++, hash); - - /* GUID is case-insensitive. */ - while (len--) - hash = partial_name_hash(tolower(*s++), hash); - - qstr->hash = end_name_hash(hash); - return 0; -} - -/* - * Retaining negative dentries for an in-memory filesystem just wastes - * memory and lookup time: arrange for them to be deleted immediately. - */ -static int efivarfs_delete_dentry(const struct dentry *dentry) -{ - return 1; -} - -static struct dentry_operations efivarfs_d_ops = { - .d_compare = efivarfs_d_compare, - .d_hash = efivarfs_d_hash, - .d_delete = efivarfs_delete_dentry, -}; - -static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) -{ - struct dentry *d; - struct qstr q; - int err; - - q.name = name; - q.len = strlen(name); - - err = efivarfs_d_hash(NULL, NULL, &q); - if (err) - return ERR_PTR(err); - - d = d_alloc(parent, &q); - if (d) - return d; - - return ERR_PTR(-ENOMEM); -} - static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode = NULL; @@ -1186,7 +1011,6 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) struct efivar_entry *entry, *n; struct efivars *efivars = &__efivars; char *name; - int err = -ENOMEM; efivarfs_sb = sb; @@ -1195,7 +1019,6 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = EFIVARFS_MAGIC; sb->s_op = &efivarfs_ops; - sb->s_d_op = &efivarfs_d_ops; sb->s_time_gran = 1; inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); @@ -1236,22 +1059,20 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) if (!inode) goto fail_name; - dentry = efivarfs_alloc_dentry(root, name); - if (IS_ERR(dentry)) { - err = PTR_ERR(dentry); + dentry = d_alloc_name(root, name); + if (!dentry) goto fail_inode; - } /* copied by the above to local storage in the dentry. 
*/ kfree(name); - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); efivars->ops->get_variable(entry->var.VariableName, &entry->var.VendorGuid, &entry->var.Attributes, &size, NULL); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); mutex_lock(&inode->i_mutex); inode->i_private = entry; @@ -1267,7 +1088,7 @@ fail_inode: fail_name: kfree(name); fail: - return err; + return -ENOMEM; } static struct dentry *efivarfs_mount(struct file_system_type *fs_type, @@ -1288,31 +1109,21 @@ static struct file_system_type efivarfs_type = { .kill_sb = efivarfs_kill_sb, }; -/* - * Handle negative dentry. - */ -static struct dentry *efivarfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) -{ - if (dentry->d_name.len > NAME_MAX) - return ERR_PTR(-ENAMETOOLONG); - d_add(dentry, NULL); - return NULL; -} - static const struct inode_operations efivarfs_dir_inode_operations = { - .lookup = efivarfs_lookup, + .lookup = simple_lookup, .unlink = efivarfs_unlink, .create = efivarfs_create, }; -#ifdef CONFIG_EFI_VARS_PSTORE +static struct pstore_info efi_pstore_info; + +#ifdef CONFIG_PSTORE static int efi_pstore_open(struct pstore_info *psi) { struct efivars *efivars = psi->data; - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); efivars->walk_entry = list_first_entry(&efivars->list, struct efivar_entry, list); return 0; @@ -1322,7 +1133,7 @@ static int efi_pstore_close(struct pstore_info *psi) { struct efivars *efivars = psi->data; - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); return 0; } @@ -1396,22 +1207,22 @@ static int efi_pstore_write(enum pstore_type_id type, efi_guid_t vendor = LINUX_EFI_CRASH_GUID; struct efivars *efivars = psi->data; int i, ret = 0; + u64 storage_space, remaining_space, max_variable_size; efi_status_t status = EFI_NOT_FOUND; - unsigned long flags; - spin_lock_irqsave(&efivars->lock, flags); + spin_lock(&efivars->lock); /* * Check if there is a space enough to log. 
* size: a size of logging data * DUMP_NAME_LEN * 2: a maximum size of variable name */ - - status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES, - size + DUMP_NAME_LEN * 2); - - if (status) { - spin_unlock_irqrestore(&efivars->lock, flags); + status = efivars->ops->query_variable_info(PSTORE_EFI_ATTRIBUTES, + &storage_space, + &remaining_space, + &max_variable_size); + if (status || remaining_space < size + DUMP_NAME_LEN * 2) { + spin_unlock(&efivars->lock); *id = part; return -ENOSPC; } @@ -1425,7 +1236,7 @@ static int efi_pstore_write(enum pstore_type_id type, efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES, size, psi->buf); - spin_unlock_irqrestore(&efivars->lock, flags); + spin_unlock(&efivars->lock); if (size) ret = efivar_create_sysfs_entry(efivars, @@ -1452,7 +1263,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count, time.tv_sec); - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); for (i = 0; i < DUMP_NAME_LEN; i++) efi_name[i] = name[i]; @@ -1496,13 +1307,45 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, if (found) list_del(&found->list); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); if (found) efivar_unregister(found); return 0; } +#else +static int efi_pstore_open(struct pstore_info *psi) +{ + return 0; +} + +static int efi_pstore_close(struct pstore_info *psi) +{ + return 0; +} + +static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count, + struct timespec *timespec, + char **buf, struct pstore_info *psi) +{ + return -1; +} + +static int efi_pstore_write(enum pstore_type_id type, + enum kmsg_dump_reason reason, u64 *id, + unsigned int part, int count, size_t size, + struct pstore_info *psi) +{ + return 0; +} + +static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, + struct timespec time, struct pstore_info *psi) +{ + return 0; +} +#endif static struct pstore_info efi_pstore_info = { .owner = THIS_MODULE, @@ -1514,24 +1357,6 @@ static struct pstore_info efi_pstore_info = { .erase = efi_pstore_erase, }; -static void efivar_pstore_register(struct efivars *efivars) -{ - efivars->efi_pstore_info = efi_pstore_info; - efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); - if (efivars->efi_pstore_info.buf) { - efivars->efi_pstore_info.bufsize = 1024; - efivars->efi_pstore_info.data = efivars; - spin_lock_init(&efivars->efi_pstore_info.buf_lock); - pstore_register(&efivars->efi_pstore_info); - } -} -#else -static void efivar_pstore_register(struct efivars *efivars) -{ - return; -} -#endif - static ssize_t efivar_create(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) @@ -1552,7 +1377,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, return -EINVAL; } - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); /* * Does this variable already exist? 
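/*
 * The existence test asked by the comment above boils down to: two EFI
 * variables match when their UTF-16 names have equal size and equal
 * bytes and their vendor GUIDs compare equal. A minimal sketch,
 * assuming the utf16_strsize() and efi_guidcmp() helpers used
 * elsewhere in this file:
 */
static bool var_matches(struct efi_variable *a, struct efi_variable *b)
{
        unsigned long la = utf16_strsize(a->VariableName, 1024);
        unsigned long lb = utf16_strsize(b->VariableName, 1024);

        return la == lb &&
               !memcmp(a->VariableName, b->VariableName, la) &&
               !efi_guidcmp(a->VendorGuid, b->VendorGuid);
}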
@@ -1570,18 +1395,10 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, } } if (found) { - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); return -EINVAL; } - status = check_var_size_locked(efivars, new_var->Attributes, - new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); - - if (status && status != EFI_UNSUPPORTED) { - spin_unlock_irq(&efivars->lock); - return efi_status_to_err(status); - } - /* now *really* create the variable via EFI */ status = efivars->ops->set_variable(new_var->VariableName, &new_var->VendorGuid, @@ -1592,10 +1409,10 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, if (status != EFI_SUCCESS) { printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n", status); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); return -EIO; } - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); /* Create the entry in sysfs. Locking is not required here */ status = efivar_create_sysfs_entry(efivars, @@ -1623,7 +1440,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); /* * Does this variable already exist? @@ -1641,7 +1458,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, } } if (!found) { - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); return -EINVAL; } /* force the Attributes/DataSize to 0 to ensure deletion */ @@ -1657,65 +1474,18 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, if (status != EFI_SUCCESS) { printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n", status); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); return -EIO; } list_del(&search_efivar->list); /* We need to release this lock before unregistering. */ - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); efivar_unregister(search_efivar); /* It's dead Jim.... */ return count; } -static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor) -{ - struct efivar_entry *entry, *n; - struct efivars *efivars = &__efivars; - unsigned long strsize1, strsize2; - bool found = false; - - strsize1 = utf16_strsize(variable_name, 1024); - list_for_each_entry_safe(entry, n, &efivars->list, list) { - strsize2 = utf16_strsize(entry->var.VariableName, 1024); - if (strsize1 == strsize2 && - !memcmp(variable_name, &(entry->var.VariableName), - strsize2) && - !efi_guidcmp(entry->var.VendorGuid, - *vendor)) { - found = true; - break; - } - } - return found; -} - -/* - * Returns the size of variable_name, in bytes, including the - * terminating NULL character, or variable_name_size if no NULL - * character is found among the first variable_name_size bytes. - */ -static unsigned long var_name_strnsize(efi_char16_t *variable_name, - unsigned long variable_name_size) -{ - unsigned long len; - efi_char16_t c; - - /* - * The variable name is, by definition, a NULL-terminated - * string, so make absolutely sure that variable_name_size is - * the value we expect it to be. If not, return the real size. 
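/*
 * A worked note on the efivar_delete() path above: per the UEFI spec,
 * calling SetVariable() with Attributes == 0 and DataSize == 0 deletes
 * the variable, which is why the handler zeroes both fields before the
 * final set_variable() call. As a sketch, assuming the same ops table:
 */
static efi_status_t efi_delete_variable(struct efivars *efivars,
                                        efi_char16_t *name, efi_guid_t *guid)
{
        return efivars->ops->set_variable(name, guid,
                                          0,    /* Attributes == 0 ... */
                                          0,    /* ... DataSize == 0 => delete */
                                          NULL);
}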
- */ - for (len = 2; len <= variable_name_size; len += sizeof(c)) { - c = variable_name[(len / sizeof(c)) - 1]; - if (!c) - break; - } - - return min(len, variable_name_size); -} - /* * Let's not leave out systab information that snuck into * the efivars driver @@ -1824,9 +1594,9 @@ efivar_create_sysfs_entry(struct efivars *efivars, kfree(short_name); short_name = NULL; - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); list_add(&new_efivar->list, &efivars->list); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); return 0; } @@ -1895,9 +1665,9 @@ void unregister_efivars(struct efivars *efivars) struct efivar_entry *entry, *n; list_for_each_entry_safe(entry, n, &efivars->list, list) { - spin_lock_irq(&efivars->lock); + spin_lock(&efivars->lock); list_del(&entry->list); - spin_unlock_irq(&efivars->lock); + spin_unlock(&efivars->lock); efivar_unregister(entry); } if (efivars->new_var) @@ -1911,28 +1681,6 @@ void unregister_efivars(struct efivars *efivars) } EXPORT_SYMBOL_GPL(unregister_efivars); -/* - * Print a warning when duplicate EFI variables are encountered and - * disable the sysfs workqueue since the firmware is buggy. - */ -static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid, - unsigned long len16) -{ - size_t i, len8 = len16 / sizeof(efi_char16_t); - char *s8; - - s8 = kzalloc(len8, GFP_KERNEL); - if (!s8) - return; - - for (i = 0; i < len8; i++) - s8[i] = s16[i]; - - printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", - s8, vendor_guid); - kfree(s8); -} - int register_efivars(struct efivars *efivars, const struct efivar_operations *ops, struct kobject *parent_kobj) @@ -1981,24 +1729,6 @@ int register_efivars(struct efivars *efivars, &vendor_guid); switch (status) { case EFI_SUCCESS: - variable_name_size = var_name_strnsize(variable_name, - variable_name_size); - - /* - * Some firmware implementations return the - * same variable name on multiple calls to - * get_next_variable(). Terminate the loop - * immediately as there is no guarantee that - * we'll ever see a different variable name, - * and may end up looping here forever. 
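/*
 * Sketch of the enumeration guard described in the comment above,
 * assuming the same get_next_variable() op and a caller-supplied
 * seen() predicate over names already collected: stop as soon as
 * buggy firmware repeats a name, instead of looping forever.
 */
static efi_status_t enumerate_vars(struct efivars *efivars,
                                   bool (*seen)(efi_char16_t *, unsigned long,
                                                efi_guid_t *))
{
        efi_char16_t name[1024 / sizeof(efi_char16_t)] = { 0 };
        efi_guid_t guid;
        unsigned long size;
        efi_status_t status;

        do {
                size = sizeof(name);    /* firmware shrinks this in place */
                status = efivars->ops->get_next_variable(&size, name, &guid);
                if (status == EFI_SUCCESS && seen(name, size, &guid))
                        return EFI_NOT_FOUND;   /* duplicate: bail out */
        } while (status == EFI_SUCCESS);

        return status;
}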
- */ - if (variable_is_present(variable_name, &vendor_guid)) { - dup_variable_bug(variable_name, &vendor_guid, - variable_name_size); - status = EFI_NOT_FOUND; - break; - } - efivar_create_sysfs_entry(efivars, variable_name_size, variable_name, @@ -2018,8 +1748,15 @@ int register_efivars(struct efivars *efivars, if (error) unregister_efivars(efivars); - if (!efivars_pstore_disable) - efivar_pstore_register(efivars); + efivars->efi_pstore_info = efi_pstore_info; + + efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); + if (efivars->efi_pstore_info.buf) { + efivars->efi_pstore_info.bufsize = 1024; + efivars->efi_pstore_info.data = efivars; + spin_lock_init(&efivars->efi_pstore_info.buf_lock); + pstore_register(&efivars->efi_pstore_info); + } register_filesystem(&efivarfs_type); diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index deca78f..bdc8302 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -299,9 +299,8 @@ static int em_gio_probe(struct platform_device *pdev) irq_chip->irq_set_type = em_gio_irq_set_type; irq_chip->flags = IRQCHIP_SKIP_SET_WAKE; - p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, + p->irq_domain = irq_domain_add_linear(pdev->dev.of_node, pdata->number_of_pins, - pdata->irq_base, &em_gio_irq_domain_ops, p); if (!p->irq_domain) { ret = -ENXIO; diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 456663c..6819d63 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -41,7 +41,6 @@ #include #include #include -#include #include /* @@ -496,7 +495,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) struct resource *res; struct irq_chip_generic *gc; struct irq_chip_type *ct; - struct clk *clk; unsigned int ngpios; int soc_variant; int i, cpu, id; @@ -530,11 +528,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) return id; } - clk = devm_clk_get(&pdev->dev, NULL); - /* Not all SoCs require a clock.*/ - if (!IS_ERR(clk)) - clk_prepare_enable(clk); - mvchip->soc_variant = soc_variant; mvchip->chip.label = dev_name(&pdev->dev); mvchip->chip.dev = &pdev->dev; diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 3ce5bc3..770476a 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -307,15 +307,11 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = { .xlate = irq_domain_xlate_twocell, }; -static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, - struct device_node *np) +static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) { - int base = 0; + int base = stmpe_gpio->irq_base; - if (!np) - base = stmpe_gpio->irq_base; - - stmpe_gpio->domain = irq_domain_add_simple(np, + stmpe_gpio->domain = irq_domain_add_simple(NULL, stmpe_gpio->chip.ngpio, base, &stmpe_gpio_irq_simple_ops, stmpe_gpio); if (!stmpe_gpio->domain) { @@ -350,9 +346,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev) stmpe_gpio->chip = template_chip; stmpe_gpio->chip.ngpio = stmpe->num_gpios; stmpe_gpio->chip.dev = &pdev->dev; -#ifdef CONFIG_OF - stmpe_gpio->chip.of_node = np; -#endif stmpe_gpio->chip.base = pdata ? 
pdata->gpio_base : -1; if (pdata) @@ -373,7 +366,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) goto out_free; if (irq >= 0) { - ret = stmpe_gpio_irq_init(stmpe_gpio, np); + ret = stmpe_gpio_irq_init(stmpe_gpio); if (ret) goto out_disable; diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index ea537fa..d542a14 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -228,7 +228,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) if (!np) return; - for (;; index++) { + do { ret = of_parse_phandle_with_args(np, "gpio-ranges", "#gpio-range-cells", index, &pinspec); if (ret) @@ -257,7 +257,8 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) if (ret) break; - } + + } while (index++); } #else diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index cac9c9a..5ccf984 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -239,8 +239,6 @@ struct ast_fbdev { void *sysram; int size; struct ttm_bo_kmap_obj mapping; - int x1, y1, x2, y2; /* dirty rect */ - spinlock_t dirty_lock; }; #define to_ast_crtc(x) container_of(x, struct ast_crtc, base) diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 9138678..d9ec779 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -52,52 +52,16 @@ static void ast_dirty_update(struct ast_fbdev *afbdev, int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8; int ret; bool unmap = false; - bool store_for_later = false; - int x2, y2; - unsigned long flags; obj = afbdev->afb.obj; bo = gem_to_ast_bo(obj); - /* - * try and reserve the BO, if we fail with busy - * then the BO is being moved and we should - * store up the damage until later. - */ ret = ast_bo_reserve(bo, true); if (ret) { - if (ret != -EBUSY) - return; - - store_for_later = true; - } - - x2 = x + width - 1; - y2 = y + height - 1; - spin_lock_irqsave(&afbdev->dirty_lock, flags); - - if (afbdev->y1 < y) - y = afbdev->y1; - if (afbdev->y2 > y2) - y2 = afbdev->y2; - if (afbdev->x1 < x) - x = afbdev->x1; - if (afbdev->x2 > x2) - x2 = afbdev->x2; - - if (store_for_later) { - afbdev->x1 = x; - afbdev->x2 = x2; - afbdev->y1 = y; - afbdev->y2 = y2; - spin_unlock_irqrestore(&afbdev->dirty_lock, flags); + DRM_ERROR("failed to reserve fb bo\n"); return; } - afbdev->x1 = afbdev->y1 = INT_MAX; - afbdev->x2 = afbdev->y2 = 0; - spin_unlock_irqrestore(&afbdev->dirty_lock, flags); - if (!bo->kmap.virtual) { ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); if (ret) { @@ -107,10 +71,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev, } unmap = true; } - for (i = y; i <= y2; i++) { + for (i = y; i < y + height; i++) { /* assume equal stride for now */ src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp); - memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp); + memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp); } if (unmap) @@ -341,7 +305,6 @@ int ast_fbdev_init(struct drm_device *dev) ast->fbdev = afbdev; afbdev->helper.funcs = &ast_fb_helper_funcs; - spin_lock_init(&afbdev->dirty_lock); ret = drm_fb_helper_init(dev, &afbdev->helper, 1, 1); if (ret) { diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 09da339..3602731 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait) ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 
0); if (ret) { - if (ret != -ERESTARTSYS && ret != -EBUSY) + if (ret != -ERESTARTSYS) DRM_ERROR("reserve failed %p\n", bo); return ret; } diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 7ca0595..6e0cc72 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -154,8 +154,6 @@ struct cirrus_fbdev { struct list_head fbdev_list; void *sysram; int size; - int x1, y1, x2, y2; /* dirty rect */ - spinlock_t dirty_lock; }; struct cirrus_bo { diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 1e64d6f..6c6b4c8 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -26,51 +26,16 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8; int ret; bool unmap = false; - bool store_for_later = false; - int x2, y2; - unsigned long flags; obj = afbdev->gfb.obj; bo = gem_to_cirrus_bo(obj); - /* - * try and reserve the BO, if we fail with busy - * then the BO is being moved and we should - * store up the damage until later. - */ ret = cirrus_bo_reserve(bo, true); if (ret) { - if (ret != -EBUSY) - return; - store_for_later = true; - } - - x2 = x + width - 1; - y2 = y + height - 1; - spin_lock_irqsave(&afbdev->dirty_lock, flags); - - if (afbdev->y1 < y) - y = afbdev->y1; - if (afbdev->y2 > y2) - y2 = afbdev->y2; - if (afbdev->x1 < x) - x = afbdev->x1; - if (afbdev->x2 > x2) - x2 = afbdev->x2; - - if (store_for_later) { - afbdev->x1 = x; - afbdev->x2 = x2; - afbdev->y1 = y; - afbdev->y2 = y2; - spin_unlock_irqrestore(&afbdev->dirty_lock, flags); + DRM_ERROR("failed to reserve fb bo\n"); return; } - afbdev->x1 = afbdev->y1 = INT_MAX; - afbdev->x2 = afbdev->y2 = 0; - spin_unlock_irqrestore(&afbdev->dirty_lock, flags); - if (!bo->kmap.virtual) { ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); if (ret) { @@ -317,7 +282,6 @@ int cirrus_fbdev_init(struct cirrus_device *cdev) cdev->mode_info.gfbdev = gfbdev; gfbdev->helper.funcs = &cirrus_fb_helper_funcs; - spin_lock_init(&gfbdev->dirty_lock); ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper, cdev->num_crtc, CIRRUSFB_CONN_LIMIT); diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 2ed8cfc..1413a26 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait) ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); if (ret) { - if (ret != -ERESTARTSYS && ret != -EBUSY) + if (ret != -ERESTARTSYS) DRM_ERROR("reserve failed %p\n", bo); return ret; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index bcb2c0a..f2d667b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2089,7 +2089,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) switch (bpp) { case 8: - fmt = DRM_FORMAT_C8; + fmt = DRM_FORMAT_RGB332; break; case 16: if (depth == 15) @@ -3702,7 +3702,6 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp) { switch (format) { - case DRM_FORMAT_C8: case DRM_FORMAT_RGB332: case DRM_FORMAT_BGR233: *depth = 8; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index dfd9ed3..5a3770f 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -87,6 +87,9 @@ static struct edid_quirk { int product_id; u32 quirks; } edid_quirk_list[] = { + /* ASUS VW222S */ + { "ACI", 0x22a2, 
EDID_QUIRK_FORCE_REDUCED_BLANKING }, + /* Acer AL1706 */ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, /* Acer F51 */ @@ -354,14 +357,10 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) break; } } - - if (i == 4 && print_bad_edid) { + if (i == 4) dev_warn(connector->dev->dev, "%s: Ignoring invalid EDID block %d.\n", drm_get_connector_name(connector), j); - - connector->bad_edid_counter++; - } } if (valid_extensions != block[0x7e]) { @@ -894,7 +893,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; - unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); /* ignore tiny modes */ @@ -975,7 +974,6 @@ set_size: } mode->type = DRM_MODE_TYPE_DRIVER; - mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_set_name(mode); return mode; @@ -2022,8 +2020,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) num_modes += add_cvt_modes(connector, edid); num_modes += add_standard_modes(connector, edid); num_modes += add_established_modes(connector, edid); - if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) - num_modes += add_inferred_modes(connector, edid); + num_modes += add_inferred_modes(connector, edid); num_modes += add_cea_modes(connector, edid); if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 32d7775..133b413 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -123,7 +123,6 @@ int drm_open(struct inode *inode, struct file *filp) int retcode = 0; int need_setup = 0; struct address_space *old_mapping; - struct address_space *old_imapping; minor = idr_find(&drm_minors_idr, minor_id); if (!minor) @@ -138,7 +137,6 @@ int drm_open(struct inode *inode, struct file *filp) if (!dev->open_count++) need_setup = 1; mutex_lock(&dev->struct_mutex); - old_imapping = inode->i_mapping; old_mapping = dev->dev_mapping; if (old_mapping == NULL) dev->dev_mapping = &inode->i_data; @@ -161,8 +159,8 @@ int drm_open(struct inode *inode, struct file *filp) err_undo: mutex_lock(&dev->struct_mutex); - filp->f_mapping = old_imapping; - inode->i_mapping = old_imapping; + filp->f_mapping = old_mapping; + inode->i_mapping = old_mapping; iput(container_of(dev->dev_mapping, struct inode, i_data)); dev->dev_mapping = old_mapping; mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 539bae9..24efae4 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -205,11 +205,11 @@ static void drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) { if (obj->import_attach) { - drm_prime_remove_buf_handle(&filp->prime, + drm_prime_remove_imported_buf_handle(&filp->prime, obj->import_attach->dmabuf); } if (obj->export_dma_buf) { - drm_prime_remove_buf_handle(&filp->prime, + drm_prime_remove_imported_buf_handle(&filp->prime, obj->export_dma_buf); } } diff --git 
a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 4f6439d..7f12573 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -61,7 +61,6 @@ struct drm_prime_member { struct dma_buf *dma_buf; uint32_t handle; }; -static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle); int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, @@ -69,8 +68,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, { struct drm_gem_object *obj; void *buf; - int ret = 0; - struct dma_buf *dmabuf; + int ret; obj = drm_gem_object_lookup(dev, file_priv, handle); if (!obj) @@ -79,44 +77,43 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev, mutex_lock(&file_priv->prime.lock); /* re-export the original imported object */ if (obj->import_attach) { - dmabuf = obj->import_attach->dmabuf; - goto out_have_obj; + get_dma_buf(obj->import_attach->dmabuf); + *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags); + drm_gem_object_unreference_unlocked(obj); + mutex_unlock(&file_priv->prime.lock); + return 0; } if (obj->export_dma_buf) { - dmabuf = obj->export_dma_buf; - goto out_have_obj; - } - - buf = dev->driver->gem_prime_export(dev, obj, flags); - if (IS_ERR(buf)) { - /* normally the created dma-buf takes ownership of the ref, - * but if that fails then drop the ref - */ - ret = PTR_ERR(buf); - goto out; + get_dma_buf(obj->export_dma_buf); + *prime_fd = dma_buf_fd(obj->export_dma_buf, flags); + drm_gem_object_unreference_unlocked(obj); + } else { + buf = dev->driver->gem_prime_export(dev, obj, flags); + if (IS_ERR(buf)) { + /* normally the created dma-buf takes ownership of the ref, + * but if that fails then drop the ref + */ + drm_gem_object_unreference_unlocked(obj); + mutex_unlock(&file_priv->prime.lock); + return PTR_ERR(buf); + } + obj->export_dma_buf = buf; + *prime_fd = dma_buf_fd(buf, flags); } - obj->export_dma_buf = buf; - /* if we've exported this buffer the cheat and add it to the import list * so we get the correct handle back */ - ret = drm_prime_add_buf_handle(&file_priv->prime, - obj->export_dma_buf, handle); - if (ret) - goto out; + ret = drm_prime_add_imported_buf_handle(&file_priv->prime, + obj->export_dma_buf, handle); + if (ret) { + drm_gem_object_unreference_unlocked(obj); + mutex_unlock(&file_priv->prime.lock); + return ret; + } - *prime_fd = dma_buf_fd(buf, flags); mutex_unlock(&file_priv->prime.lock); return 0; - -out_have_obj: - get_dma_buf(dmabuf); - *prime_fd = dma_buf_fd(dmabuf, flags); -out: - drm_gem_object_unreference_unlocked(obj); - mutex_unlock(&file_priv->prime.lock); - return ret; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); @@ -133,7 +130,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, mutex_lock(&file_priv->prime.lock); - ret = drm_prime_lookup_buf_handle(&file_priv->prime, + ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime, dma_buf, handle); if (!ret) { ret = 0; @@ -152,7 +149,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, if (ret) goto out_put; - ret = drm_prime_add_buf_handle(&file_priv->prime, + ret = drm_prime_add_imported_buf_handle(&file_priv->prime, dma_buf, *handle); if (ret) goto fail; @@ -310,7 +307,7 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) } EXPORT_SYMBOL(drm_prime_destroy_file_private); -static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) +int 
drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) { struct drm_prime_member *member; @@ -318,14 +315,14 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, if (!member) return -ENOMEM; - get_dma_buf(dma_buf); member->dma_buf = dma_buf; member->handle = handle; list_add(&member->entry, &prime_fpriv->head); return 0; } +EXPORT_SYMBOL(drm_prime_add_imported_buf_handle); -int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) +int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) { struct drm_prime_member *member; @@ -337,20 +334,19 @@ int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, stru } return -ENOENT; } -EXPORT_SYMBOL(drm_prime_lookup_buf_handle); +EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle); -void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) +void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) { struct drm_prime_member *member, *safe; mutex_lock(&prime_fpriv->lock); list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { if (member->dma_buf == dma_buf) { - dma_buf_put(dma_buf); list_del(&member->entry); kfree(member); } } mutex_unlock(&prime_fpriv->lock); } -EXPORT_SYMBOL(drm_prime_remove_buf_handle); +EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle); diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index 34a156f..3cec306 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c @@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface, usbdev = interface_to_usbdev(interface); dev->usbdev = usbdev; - dev->dev = &interface->dev; + dev->dev = &usbdev->dev; mutex_lock(&drm_global_mutex); diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 029eccf..8652cdf 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS) vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R); - if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE)) + if (vdc_stat & _PSB_PIPE_EVENT_FLAG) dsp_int = 1; /* FIXME: Handle Medfield diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 261efc8e..9d4a2c2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -103,7 +103,7 @@ static const char *cache_level_str(int type) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%pK: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", + seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), @@ -691,7 +691,7 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); - seq_printf(m, "Kernel: " UTS_RELEASE "\n"); + seq_printf(m, "Kernel: " UTS_RELEASE); seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); seq_printf(m, "EIR: 0x%08x\n", error->eir); seq_printf(m, "IER: 0x%08x\n", error->ier); @@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - u32 rpstat, cagf; 
+ u32 rpstat; u32 rpupei, rpcurup, rpprevup; u32 rpdownei, rpcurdown, rpprevdown; int max_freq; @@ -907,11 +907,6 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); - if (IS_HASWELL(dev)) - cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; - else - cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; - cagf *= GT_FREQUENCY_MULTIPLIER; gen6_gt_force_wake_put(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -924,7 +919,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) gt_perf_status & 0xff); seq_printf(m, "Render p-state limit: %d\n", rp_state_limits & 0xff); - seq_printf(m, "CAGF: %dMHz\n", cagf); + seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> + GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER); seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & GEN6_CURICONT_MASK); seq_printf(m, "RP CUR UP: %dus\n", rpcurup & diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 79f5fc5..1172658 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -377,15 +377,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ - INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ - INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ - INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ - INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ - INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ - INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ + INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ + INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e78419f..12ab3bd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -711,7 +711,6 @@ typedef struct drm_i915_private { unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; unsigned int display_clock_mode:1; - unsigned int fdi_rx_polarity_inverted:1; int lvds_ssc_freq; unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ unsigned int lvds_val; /* used for checking LVDS channel mode */ @@ -775,7 +774,6 @@ typedef struct drm_i915_private { unsigned long gtt_start; unsigned long gtt_mappable_end; unsigned long gtt_end; - unsigned long stolen_base; /* limited to low memory (32-bit) */ struct io_mapping *gtt_mapping; phys_addr_t gtt_base_addr; @@ -921,7 +919,7 @@ typedef struct drm_i915_private { bool 
hw_contexts_disabled; uint32_t hw_context_size; - u32 fdi_rx_config; + bool fdi_rx_polarity_reversed; struct i915_suspend_saved_registers regfile; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 339540d..8febea6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -91,6 +91,7 @@ i915_gem_wait_for_error(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct completion *x = &dev_priv->error_completion; + unsigned long flags; int ret; if (!atomic_read(&dev_priv->mm.wedged)) @@ -115,7 +116,9 @@ i915_gem_wait_for_error(struct drm_device *dev) * end up waiting upon a subsequent completion event that * will never happen. */ - complete(x); + spin_lock_irqsave(&x->wait.lock, flags); + x->done++; + spin_unlock_irqrestore(&x->wait.lock, flags); } return 0; } @@ -943,9 +946,12 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv, if (atomic_read(&dev_priv->mm.wedged)) { struct completion *x = &dev_priv->error_completion; bool recovery_complete; + unsigned long flags; /* Give the error handler a chance to run. */ - recovery_complete = completion_done(x); + spin_lock_irqsave(&x->wait.lock, flags); + recovery_complete = x->done > 0; + spin_unlock_irqrestore(&x->wait.lock, flags); /* Non-interruptible callers can't handle -EAGAIN, hence return * -EIO unconditionally for these. */ @@ -1912,6 +1918,9 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); BUG_ON(!obj->active); + if (obj->pin_count) /* are we a framebuffer? */ + intel_mark_fb_idle(obj); + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); list_del_init(&obj->ring_list); @@ -2656,35 +2665,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv, return fence - dev_priv->fence_regs; } -static void i915_gem_write_fence__ipi(void *data) -{ - wbinvd(); -} - static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *fence, bool enable) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int fence_reg = fence_number(dev_priv, fence); - - /* In order to fully serialize access to the fenced region and - * the update to the fence register we need to take extreme - * measures on SNB+. In theory, the write to the fence register - * flushes all memory transactions before, and coupled with the - * mb() placed around the register write we serialise all memory - * operations with respect to the changes in the tiler. Yet, on - * SNB+ we need to take a step further and emit an explicit wbinvd() - * on each processor in order to manually flush all memory - * transactions before updating the fence register. - */ - if (HAS_LLC(obj->base.dev)) - on_each_cpu(i915_gem_write_fence__ipi, NULL, 1); - i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL); + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + int reg = fence_number(dev_priv, fence); + + i915_gem_write_fence(obj->base.dev, reg, enable ? 
obj : NULL); if (enable) { - obj->fence_reg = fence_reg; + obj->fence_reg = reg; fence->obj = obj; list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); } else { @@ -3857,7 +3848,7 @@ void i915_gem_l3_remap(struct drm_device *dev) u32 misccpctl; int i; - if (!HAS_L3_GPU_CACHE(dev)) + if (!IS_IVYBRIDGE(dev)) return; if (!dev_priv->l3_parity.remap_info) @@ -4378,7 +4369,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index d8ac0a3..a3f06bc 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -157,13 +157,6 @@ create_hw_context(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - if (INTEL_INFO(dev)->gen >= 7) { - ret = i915_gem_object_set_cache_level(ctx->obj, - I915_CACHE_LLC_MLC); - if (ret) - goto err_out; - } - /* The ring associated with the context object is handled by the normal * object tracking code. We give an initial ring value simple to pass an * assertion in the context switch code. diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index eabd3dd..26d08bb 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -43,7 +43,7 @@ eb_create(int size) { struct eb_objects *eb; int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; - BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); + BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); while (count > size) count >>= 1; eb = kzalloc(count*sizeof(struct hlist_head) + @@ -706,20 +706,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count) { int i; - int relocs_total = 0; - int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); for (i = 0; i < count; i++) { char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; int length; /* limited by fault_in_pages_readable() */ - /* First check for malicious input causing overflow in - * the worst case where we need to allocate the entire - * relocation tree as a single array. 
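/*
 * The running-total check removed in this hunk guards a later single
 * allocation of the whole relocation tree: with
 * relocs_max = INT_MAX / sizeof(entry), testing
 * "count > relocs_max - total" before "total += count" keeps the sum
 * from ever exceeding relocs_max, so total * sizeof(entry) cannot
 * overflow. A self-contained sketch, with a stand-in entry size:
 */
#include <limits.h>
#include <stdbool.h>

struct reloc_entry { unsigned long long q[4]; }; /* stand-in sizing only */

static bool reloc_counts_fit(const unsigned int *counts, int n)
{
        const unsigned int relocs_max = INT_MAX / sizeof(struct reloc_entry);
        unsigned int total = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (counts[i] > relocs_max - total)
                        return false;   /* the grand total would overflow */
                total += counts[i];
        }
        return true;
}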
- */ - if (exec[i].relocation_count > relocs_max - relocs_total) + /* First check for malicious input causing overflow */ + if (exec[i].relocation_count > + INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) return -EINVAL; - relocs_total += exec[i].relocation_count; length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); @@ -814,7 +809,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct intel_ring_buffer *ring; u32 ctx_id = i915_execbuffer2_get_context_id(*args); u32 exec_start, exec_len; - u32 seqno; u32 mask; u32 flags; int ret, mode, i; @@ -1069,9 +1063,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - seqno = intel_ring_get_seqno(ring); - trace_i915_gem_ring_dispatch(ring, seqno, flags); - i915_trace_irq_get(ring, seqno); + trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); i915_gem_execbuffer_move_to_active(&objects, ring); i915_gem_execbuffer_retire_commands(dev, file, ring); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index be24312..8e91083 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -42,50 +42,56 @@ * for is a boon. */ -static unsigned long i915_stolen_to_physical(struct drm_device *dev) +#define PTE_ADDRESS_MASK 0xfffff000 +#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ +#define PTE_MAPPING_TYPE_UNCACHED (0 << 1) +#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ +#define PTE_MAPPING_TYPE_CACHED (3 << 1) +#define PTE_MAPPING_TYPE_MASK (3 << 1) +#define PTE_VALID (1 << 0) + +/** + * i915_stolen_to_phys - take an offset into stolen memory and turn it into + * a physical one + * @dev: drm device + * @offset: address to translate + * + * Some chip functions require allocations from stolen space and need the + * physical address of the memory in question. + */ +static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev_priv->bridge_dev; u32 base; +#if 0 /* On the machines I have tested the Graphics Base of Stolen Memory - * is unreliable, so on those compute the base by subtracting the - * stolen memory from the Top of Low Usable DRAM which is where the - * BIOS places the graphics stolen memory. - * - * On gen2, the layout is slightly different with the Graphics Segment - * immediately following Top of Memory (or Top of Usable DRAM). Note - * it appears that TOUD is only reported by 865g, so we just use the - * top of memory as determined by the e820 probe. - * - * XXX gen2 requires an unavailable symbol and 945gm fails with - * its value of TOLUD. + * is unreliable, so compute the base by subtracting the stolen memory + * from the Top of Low Usable DRAM which is where the BIOS places + * the graphics stolen memory. */ - base = 0; - if (INTEL_INFO(dev)->gen >= 6) { - /* Read Base Data of Stolen Memory Register (BDSM) directly. - * Note that there is also a MCHBAR miror at 0x1080c0 or - * we could use device 2:0x5c instead. 
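/*
 * Worked note on the config-space decodes in this function: the word
 * at 0xb0 keeps a 1MiB-granular base in bits 15:4, so "val >> 4 << 20"
 * rebuilds the byte address; the legacy byte at 0x9c keeps a
 * 128MiB-granular Top of Low Usable DRAM in bits 7:3, so
 * "val >> 3 << 27" does likewise (1 << 27 == 128MiB), with stolen
 * memory sitting immediately below it. Sketched as plain helpers:
 */
static unsigned long decode_gbsm_word(unsigned short v)
{
        return (unsigned long)(v >> 4) << 20;   /* 1MiB units */
}

static unsigned long decode_tolud_byte(unsigned char v)
{
        return (unsigned long)(v >> 3) << 27;   /* 128MiB units */
}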
- */ - pci_read_config_dword(pdev, 0xB0, &base); - base &= ~4095; /* lower bits used for locking register */ - } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { - /* Read Graphics Base of Stolen Memory directly */ + if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { + /* top 32bits are reserved = 0 */ pci_read_config_dword(pdev, 0xA4, &base); -#if 0 - } else if (IS_GEN3(dev)) { + } else { + /* XXX presume 8xx is the same as i915 */ + pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); + } +#else + if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { + u16 val; + pci_read_config_word(pdev, 0xb0, &val); + base = val >> 4 << 20; + } else { u8 val; - /* Stolen is immediately below Top of Low Usable DRAM */ pci_read_config_byte(pdev, 0x9c, &val); base = val >> 3 << 27; - base -= dev_priv->mm.gtt->stolen_size; - } else { - /* Stolen is immediately above Top of Memory */ - base = max_low_pfn_mapped << PAGE_SHIFT; -#endif } + base -= dev_priv->mm.gtt->stolen_size; +#endif - return base; + return base + offset; } static void i915_warn_stolen(struct drm_device *dev) @@ -110,7 +116,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) if (!compressed_fb) goto err; - cfb_base = dev_priv->mm.stolen_base + compressed_fb->start; + cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); if (!cfb_base) goto err_fb; @@ -123,7 +129,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) if (!compressed_llb) goto err_fb; - ll_base = dev_priv->mm.stolen_base + compressed_llb->start; + ll_base = i915_stolen_to_phys(dev, compressed_llb->start); if (!ll_base) goto err_llb; } @@ -142,7 +148,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) } DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", - (long)cfb_base, (long)ll_base, size >> 20); + cfb_base, ll_base, size >> 20); return; err_llb: @@ -174,13 +180,6 @@ int i915_gem_init_stolen(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size; - dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); - if (dev_priv->mm.stolen_base == 0) - return 0; - - DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n", - dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base); - /* Basic memrange allocator for stolen space */ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ce70f0a..59afb7e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3839,7 +3839,7 @@ #define _TRANSB_CHICKEN2 0xf1064 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) -#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29) + #define SOUTH_CHICKEN1 0xc2000 #define FDIA_PHASE_SYNC_SHIFT_OVR 19 @@ -3927,7 +3927,7 @@ #define FDI_10BPC (1<<16) #define FDI_6BPC (2<<16) #define FDI_12BPC (3<<16) -#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15) +#define FDI_LINK_REVERSE_OVERWRITE (1<<15) #define FDI_DMI_LINK_REVERSE_MASK (1<<14) #define FDI_RX_PLL_ENABLE (1<<13) #define FDI_FS_ERR_CORRECT_ENABLE (1<<11) @@ -4211,9 +4211,7 @@ #define GEN6_RP_INTERRUPT_LIMITS 0xA014 #define GEN6_RPSTAT1 0xA01C #define GEN6_CAGF_SHIFT 8 -#define HSW_CAGF_SHIFT 7 #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) -#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) #define GEN6_RP_CONTROL 0xA024 #define GEN6_RP_MEDIA_TURBO (1<<11) #define GEN6_RP_MEDIA_MODE_MASK (3<<9) @@ -4282,8 +4280,8 @@ 
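/*
 * Worked example for the RC6 VID scale in the hunk below, where
 * voltage_mV = 245 + 5 * vid: the "-" macros encode and decode it
 * exactly ((450 - 245) / 5 = 41, and 41 * 5 + 245 = 450, so the two
 * are inverses), which the restored v3.8 "+" forms only approximate.
 */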
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 #define GEN6_PCODE_WRITE_RC6VIDS 0x4 #define GEN6_PCODE_READ_RC6VIDS 0x5 -#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) -#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) +#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0 +#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0) #define GEN6_PCODE_DATA 0x138128 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 @@ -4526,7 +4524,6 @@ #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ #define DDI_BUF_EMP_MASK (0xf<<24) -#define DDI_BUF_PORT_REVERSAL (1<<16) #define DDI_BUF_IS_IDLE (1<<7) #define DDI_A_4_LANES (1<<4) #define DDI_PORT_WIDTH_X1 (0<<1) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 29217db..3db4a68 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -244,6 +244,7 @@ TRACE_EVENT(i915_gem_ring_dispatch, __entry->ring = ring->id; __entry->seqno = seqno; __entry->flags = flags; + i915_trace_irq_get(ring, seqno); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index bd83391..55ffba1 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -351,14 +351,12 @@ parse_general_features(struct drm_i915_private *dev_priv, dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, general->ssc_freq); dev_priv->display_clock_mode = general->display_clock_mode; - dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; - DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", + DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n", dev_priv->int_tv_support, dev_priv->int_crt_support, dev_priv->lvds_use_ssc, dev_priv->lvds_ssc_freq, - dev_priv->display_clock_mode, - dev_priv->fdi_rx_polarity_inverted); + dev_priv->display_clock_mode); } } diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index e088d6f..36e57f9 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -127,9 +127,7 @@ struct bdb_general_features { /* bits 3 */ u8 disable_smooth_vision:1; u8 single_dvi:1; - u8 rsvd9:1; - u8 fdi_rx_polarity_inverted:1; - u8 rsvd10:4; /* finish byte */ + u8 rsvd9:6; /* finish byte */ /* bits 4 */ u8 legacy_monitor_detect; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 625b091..9293878 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -45,9 +45,6 @@ struct intel_crt { struct intel_encoder base; - /* DPMS state is stored in the connector, which we need in the - * encoder's enable/disable callbacks */ - struct intel_connector *connector; bool force_hotplug_required; u32 adpa_reg; }; @@ -84,6 +81,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, return true; } +static void intel_disable_crt(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_crt *crt = intel_encoder_to_crt(encoder); + u32 temp; + + temp = I915_READ(crt->adpa_reg); + temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); + temp &= ~ADPA_DAC_ENABLE; + I915_WRITE(crt->adpa_reg, temp); +} + +static void intel_enable_crt(struct intel_encoder 
*encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_crt *crt = intel_encoder_to_crt(encoder); + u32 temp; + + temp = I915_READ(crt->adpa_reg); + temp |= ADPA_DAC_ENABLE; + I915_WRITE(crt->adpa_reg, temp); +} + /* Note: The caller is required to filter out dpms modes not supported by the * platform. */ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) @@ -115,19 +135,6 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) I915_WRITE(crt->adpa_reg, temp); } -static void intel_disable_crt(struct intel_encoder *encoder) -{ - intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); -} - -static void intel_enable_crt(struct intel_encoder *encoder) -{ - struct intel_crt *crt = intel_encoder_to_crt(encoder); - - intel_crt_set_dpms(encoder, crt->connector->base.dpms); -} - - static void intel_crt_dpms(struct drm_connector *connector, int mode) { struct drm_device *dev = connector->dev; @@ -739,7 +746,6 @@ void intel_crt_init(struct drm_device *dev) } connector = &intel_connector->base; - crt->connector = intel_connector; drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); @@ -794,14 +800,10 @@ void intel_crt_init(struct drm_device *dev) dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; /* - * TODO: find a proper way to discover whether we need to set the the - * polarity and link reversal bits or not, instead of relying on the - * BIOS. + * TODO: find a proper way to discover whether we need to set the + * polarity reversal bit or not, instead of relying on the BIOS. */ - if (HAS_PCH_LPT(dev)) { - u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | - FDI_RX_LINK_REVERSAL_OVERRIDE; - - dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; - } + if (HAS_PCH_LPT(dev)) + dev_priv->fdi_rx_polarity_reversed = + !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT); } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 59b778d..4bad0f7 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -178,8 +178,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); /* Enable the PCH Receiver FDI PLL */ - rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | - FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19); + rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE | + ((intel_crtc->fdi_lanes - 1) << 19); + if (dev_priv->fdi_rx_polarity_reversed) + rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT; I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); POSTING_READ(_FDI_RXA_CTL); udelay(220); @@ -201,10 +203,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE); - /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. 
- * DDI E does not support port reversal, the functionality is - * achieved on the PCH side in FDI_RX_CTL, so no need to set the - * port reversal bit */ + /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ I915_WRITE(DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 1) | @@ -678,11 +677,8 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *intel_dig_port = - enc_to_dig_port(encoder); - intel_dp->DP = intel_dig_port->port_reversal | - DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; + intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; switch (intel_dp->lane_count) { case 1: intel_dp->DP |= DDI_PORT_WIDTH_X1; @@ -1295,15 +1291,11 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) int type = intel_encoder->type; if (type == INTEL_OUTPUT_HDMI) { - struct intel_digital_port *intel_dig_port = - enc_to_dig_port(encoder); - /* In HDMI/DVI mode, the port width, and swing/emphasis values * are ignored so nothing special needs to be done besides * enabling the port. */ - I915_WRITE(DDI_BUF_CTL(port), - intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE); + I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE); } else if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1465,7 +1457,6 @@ static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { void intel_ddi_init(struct drm_device *dev, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -1506,8 +1497,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->get_hw_state = intel_ddi_get_hw_state; intel_dig_port->port = port; - intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & - DDI_BUF_PORT_REVERSAL; if (hdmi_connector) intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); else diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index faeaebc..da1ad9c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -154,8 +154,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = { .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, .m = { .min = 70, .max = 120 }, - .m1 = { .min = 8, .max = 18 }, - .m2 = { .min = 3, .max = 7 }, + .m1 = { .min = 10, .max = 22 }, + .m2 = { .min = 5, .max = 9 }, .p = { .min = 5, .max = 80 }, .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, @@ -2017,29 +2017,18 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel * is assumed to be a power-of-two. 
*/ -unsigned long intel_gen4_compute_page_offset(int *x, int *y, - unsigned int tiling_mode, - unsigned int cpp, - unsigned int pitch) +unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, + unsigned int bpp, + unsigned int pitch) { - if (tiling_mode != I915_TILING_NONE) { - unsigned int tile_rows, tiles; - - tile_rows = *y / 8; - *y %= 8; + int tile_rows, tiles; - tiles = *x / (512/cpp); - *x %= 512/cpp; - - return tile_rows * pitch * 8 + tiles * 4096; - } else { - unsigned int offset; + tile_rows = *y / 8; + *y %= 8; + tiles = *x / (512/bpp); + *x %= 512/bpp; - offset = *y * pitch + *x * cpp; - *y = 0; - *x = (offset & 4095) / cpp; - return offset & -4096; - } + return tile_rows * pitch * 8 + tiles * 4096; } static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -2116,9 +2105,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, if (INTEL_INFO(dev)->gen >= 4) { intel_crtc->dspaddr_offset = - intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, - fb->bits_per_pixel / 8, - fb->pitches[0]); + intel_gen4_compute_offset_xtiled(&x, &y, + fb->bits_per_pixel / 8, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; } else { intel_crtc->dspaddr_offset = linear_offset; @@ -2209,9 +2198,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc, linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); intel_crtc->dspaddr_offset = - intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, - fb->bits_per_pixel / 8, - fb->pitches[0]); + intel_gen4_compute_offset_xtiled(&x, &y, + fb->bits_per_pixel / 8, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", @@ -3697,7 +3686,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - u32 pctl; if (!intel_crtc->active) @@ -3717,13 +3705,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_disable_plane(dev_priv, plane, pipe); intel_disable_pipe(dev_priv, pipe); - - /* Disable pannel fitter if it is on this pipe. 
*/ - pctl = I915_READ(PFIT_CONTROL); - if ((pctl & PFIT_ENABLE) && - ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe) - I915_WRITE(PFIT_CONTROL, 0); - intel_disable_pll(dev_priv, pipe); intel_crtc->active = false; @@ -7012,6 +6993,11 @@ void intel_mark_busy(struct drm_device *dev) void intel_mark_idle(struct drm_device *dev) { +} + +void intel_mark_fb_busy(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; struct drm_crtc *crtc; if (!i915_powersave) @@ -7021,11 +7007,12 @@ void intel_mark_idle(struct drm_device *dev) if (!crtc->fb) continue; - intel_decrease_pllclock(crtc); + if (to_intel_framebuffer(crtc->fb)->obj == obj) + intel_increase_pllclock(crtc); } } -void intel_mark_fb_busy(struct drm_i915_gem_object *obj) +void intel_mark_fb_idle(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_crtc *crtc; @@ -7038,7 +7025,7 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj) continue; if (to_intel_framebuffer(crtc->fb)->obj == obj) - intel_increase_pllclock(crtc); + intel_decrease_pllclock(crtc); } } @@ -7420,8 +7407,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *old_fb = crtc->fb; - struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags; @@ -7446,7 +7433,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; - work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; + intel_fb = to_intel_framebuffer(crtc->fb); + work->old_fb_obj = intel_fb->obj; INIT_WORK(&work->work, intel_unpin_work_fn); ret = drm_vblank_get(dev, intel_crtc->pipe); @@ -7466,6 +7454,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_crtc->unpin_work = work; spin_unlock_irqrestore(&dev->event_lock, flags); + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + if (atomic_read(&intel_crtc->unpin_work_count) >= 2) flush_workqueue(dev_priv->wq); @@ -7503,7 +7494,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); - crtc->fb = old_fb; atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); @@ -7732,25 +7722,22 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, if (crtc->enabled) *prepare_pipes |= 1 << intel_crtc->pipe; - /* - * For simplicity do a full modeset on any pipe where the output routing - * changed. We could be more clever, but that would require us to be - * more careful with calling the relevant encoder->mode_set functions. - */ + /* We only support modeset on one single crtc, hence we need to do that + * only for the passed in crtc iff we change anything else than just + * disable crtcs. + * + * This is actually not true, to be fully compatible with the old crtc + * helper we automatically disable _any_ output (i.e. doesn't need to be + * connected to the crtc we're modesetting on) if it's disconnected. + * Which is a rather nutty api (since changed the output configuration + * without userspace's explicit request can lead to confusion), but + * alas. Hence we currently need to modeset on all pipes we prepare. 
*/ if (*prepare_pipes) *modeset_pipes = *prepare_pipes; /* ... and mask these out. */ *modeset_pipes &= ~(*disable_pipes); *prepare_pipes &= ~(*disable_pipes); - - /* - * HACK: We don't (yet) fully support global modesets. intel_set_config - * obies this rule, but the modeset restore mode of - * intel_modeset_setup_hw_state does not. - */ - *modeset_pipes &= 1 << intel_crtc->pipe; - *prepare_pipes &= 1 << intel_crtc->pipe; } static bool intel_crtc_in_use(struct drm_crtc *crtc) @@ -8901,18 +8888,6 @@ static struct intel_quirk intel_quirks[] = { /* Acer Aspire 5734Z must invert backlight brightness */ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, - - /* Acer Aspire 4736Z */ - { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, - - /* Acer/eMachines G725 */ - { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, - - /* Acer/eMachines e725 */ - { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, - - /* Acer/Packard Bell NCL20 */ - { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, }; static void intel_init_quirks(struct drm_device *dev) @@ -9391,9 +9366,6 @@ void intel_modeset_cleanup(struct drm_device *dev) /* flush any delayed tasks or pending work */ flush_scheduled_work(); - /* destroy backlight, if any, before the connectors */ - intel_panel_destroy_backlight(dev); - drm_mode_config_cleanup(dev); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cbe1ec3..fb3715b 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -788,7 +788,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct intel_dp_m_n m_n; int pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - int target_clock; /* * Find the lane count in the intel_encoder private @@ -804,22 +803,13 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, } } - target_clock = mode->clock; - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - if (intel_encoder->type == INTEL_OUTPUT_EDP) { - target_clock = intel_edp_target_clock(intel_encoder, - mode); - break; - } - } - /* * Compute the GMCH and Link ratios. The '3' here is * the number of bytes_per_pixel post-LUT, which we always * set up for 8-bits of R/G/B, or 3 bytes total. 
*/ intel_dp_compute_m_n(intel_crtc->bpp, lane_count, - target_clock, adjusted_mode->clock, &m_n); + mode->clock, adjusted_mode->clock, &m_n); if (IS_HASWELL(dev)) { I915_WRITE(PIPE_DATA_M1(cpu_transcoder), @@ -1860,7 +1850,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; - if (i == intel_dp->lane_count) { + if (i == intel_dp->lane_count && voltage_tries == 5) { ++loop_tries; if (loop_tries == 5) { DRM_DEBUG_KMS("too many full retries, give up\n"); @@ -2467,14 +2457,17 @@ done: static void intel_dp_destroy(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_connector *intel_connector = to_intel_connector(connector); if (!IS_ERR_OR_NULL(intel_connector->edid)) kfree(intel_connector->edid); - if (is_edp(intel_dp)) + if (is_edp(intel_dp)) { + intel_panel_destroy_backlight(dev); intel_panel_fini(&intel_connector->panel); + } drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1c1840f..8a1bd4a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -377,7 +377,6 @@ struct intel_dp { struct intel_digital_port { struct intel_encoder base; enum port port; - u32 port_reversal; struct intel_dp dp; struct intel_hdmi hdmi; }; @@ -440,8 +439,9 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); extern void intel_mark_busy(struct drm_device *dev); -extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); extern void intel_mark_idle(struct drm_device *dev); +extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); +extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); extern bool intel_lvds_init(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); @@ -627,10 +627,9 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, struct drm_display_mode *mode); -extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, - unsigned int tiling_mode, - unsigned int bpp, - unsigned int pitch); +extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, + unsigned int bpp, + unsigned int pitch); extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index ba96e04..15da995 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -449,7 +449,6 @@ void intel_dvo_init(struct drm_device *dev) const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; struct i2c_adapter *i2c; int gpio; - bool dvoinit; /* Allow the I2C driver info to specify the GPIO to be used in * special cases, but otherwise default to what's defined @@ -469,17 +468,7 @@ void intel_dvo_init(struct drm_device *dev) i2c = intel_gmbus_get_adapter(dev_priv, gpio); intel_dvo->dev = *dvo; - - /* GMBUS NAK handling seems to be unstable, hence let the - * transmitter detection run in bit banging mode for now. 
- */ - intel_gmbus_force_bit(i2c, true); - - dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c); - - intel_gmbus_force_bit(i2c, false); - - if (!dvoinit) + if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) continue; intel_encoder->type = INTEL_OUTPUT_DVO; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8b383a6..17aee74 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -556,6 +556,7 @@ static void intel_lvds_destroy(struct drm_connector *connector) if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) kfree(lvds_connector->base.edid); + intel_panel_destroy_backlight(connector->dev); intel_panel_fini(&lvds_connector->base.panel); drm_sysfs_connector_remove(connector); @@ -789,14 +790,6 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), }, }, - { - .callback = intel_no_lvds_dmi_callback, - .ident = "Fujitsu Esprimo Q900", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"), - }, - }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 94d895b..bee8cb6 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -422,9 +422,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector) intel_panel_init_backlight(dev); - if (WARN_ON(dev_priv->backlight)) - return -ENODEV; - memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; props.max_brightness = _intel_panel_get_max_backlight(dev); @@ -450,10 +447,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector) void intel_panel_destroy_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->backlight) { + if (dev_priv->backlight) backlight_device_unregister(dev_priv->backlight); - dev_priv->backlight = NULL; - } } #else int intel_panel_setup_backlight(struct drm_connector *connector) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 253bcf3..3280cff 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2572,7 +2572,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); I915_WRITE(GEN6_RC6_THRESHOLD, 50000); - I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); + I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ /* Check if we are enabling RC6 */ @@ -3560,7 +3560,6 @@ static void cpt_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - uint32_t val; /* * On Ibex Peak and Cougar Point, we need to disable clock @@ -3573,12 +3572,8 @@ static void cpt_init_clock_gating(struct drm_device *dev) /* The below fixes the weird display corruption, a few pixels shifted * downward, on (only) LVDS of some HP laptops with IVY. 
*/ - for_each_pipe(pipe) { - val = TRANS_CHICKEN2_TIMING_OVERRIDE; - if (dev_priv->fdi_rx_polarity_inverted) - val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; - I915_WRITE(TRANS_CHICKEN2(pipe), val); - } + for_each_pipe(pipe) + I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE); /* WADP0ClockGatingDisable */ for_each_pipe(pipe) { I915_WRITE(TRANS_CHICKEN1(pipe), diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8b5e4ae..6af87cd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -232,10 +232,8 @@ static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) { -#ifdef CONFIG_TRACEPOINTS if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) ring->trace_irq_seqno = seqno; -#endif } /* DRI warts */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 506c331..c275bf0 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1213,13 +1213,11 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - u16 active_outputs; u32 tmp; tmp = I915_READ(intel_sdvo->sdvo_reg); - intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); - if (!(tmp & SDVO_ENABLE) && (active_outputs == 0)) + if (!(tmp & SDVO_ENABLE)) return false; if (HAS_PCH_CPT(dev)) @@ -2706,6 +2704,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) struct intel_sdvo *intel_sdvo; u32 hotplug_mask; int i; + intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); if (!intel_sdvo) return false; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 9a8d667..d7b060e 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -122,8 +122,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, linear_offset = y * fb->pitches[0] + x * pixel_size; sprsurf_offset = - intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, - pixel_size, fb->pitches[0]); + intel_gen4_compute_offset_xtiled(&x, &y, + pixel_size, fb->pitches[0]); linear_offset -= sprsurf_offset; /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET @@ -287,8 +287,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, linear_offset = y * fb->pitches[0] + x * pixel_size; dvssurf_offset = - intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, - pixel_size, fb->pitches[0]); + intel_gen4_compute_offset_xtiled(&x, &y, + pixel_size, fb->pitches[0]); linear_offset -= dvssurf_offset; if (obj->tiling_mode != I915_TILING_NONE) diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index a657709..5ea5033 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -116,8 +116,6 @@ struct mga_fbdev { void *sysram; int size; struct ttm_bo_kmap_obj mapping; - int x1, y1, x2, y2; /* dirty rect */ - spinlock_t dirty_lock; }; struct mga_crtc { diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 41eefc4..2f48648 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -28,52 +28,16 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, int bpp 
= (mfbdev->mfb.base.bits_per_pixel + 7)/8; int ret; bool unmap = false; - bool store_for_later = false; - int x2, y2; - unsigned long flags; obj = mfbdev->mfb.obj; bo = gem_to_mga_bo(obj); - /* - * try and reserve the BO, if we fail with busy - * then the BO is being moved and we should - * store up the damage until later. - */ ret = mgag200_bo_reserve(bo, true); if (ret) { - if (ret != -EBUSY) - return; - - store_for_later = true; - } - - x2 = x + width - 1; - y2 = y + height - 1; - spin_lock_irqsave(&mfbdev->dirty_lock, flags); - - if (mfbdev->y1 < y) - y = mfbdev->y1; - if (mfbdev->y2 > y2) - y2 = mfbdev->y2; - if (mfbdev->x1 < x) - x = mfbdev->x1; - if (mfbdev->x2 > x2) - x2 = mfbdev->x2; - - if (store_for_later) { - mfbdev->x1 = x; - mfbdev->x2 = x2; - mfbdev->y1 = y; - mfbdev->y2 = y2; - spin_unlock_irqrestore(&mfbdev->dirty_lock, flags); + DRM_ERROR("failed to reserve fb bo\n"); return; } - mfbdev->x1 = mfbdev->y1 = INT_MAX; - mfbdev->x2 = mfbdev->y2 = 0; - spin_unlock_irqrestore(&mfbdev->dirty_lock, flags); - if (!bo->kmap.virtual) { ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); if (ret) { @@ -83,10 +47,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, } unmap = true; } - for (i = y; i <= y2; i++) { + for (i = y; i < y + height; i++) { /* assume equal stride for now */ src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp); - memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp); + memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp); } if (unmap) @@ -305,7 +269,6 @@ int mgag200_fbdev_init(struct mga_device *mdev) mdev->mfbdev = mfbdev; mfbdev->helper.funcs = &mga_fb_helper_funcs; - spin_lock_init(&mfbdev->dirty_lock); ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, mdev->num_crtc, MGAG200FB_CONN_LIMIT); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 2e7c949..d3d99a2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) m = n = p = 0; vcomax = 800000; vcomin = 400000; - pllreffreq = 33333; + pllreffreq = 3333; delta = 0xffffffff; permitteddelta = clock * 5 / 1000; - for (testp = 16; testp > 0; testp >>= 1) { + for (testp = 16; testp > 0; testp--) { if (clock * testp > vcomax) continue; if (clock * testp < vcomin) continue; for (testm = 1; testm < 33; testm++) { - for (testn = 17; testn < 257; testn++) { + for (testn = 1; testn < 257; testn++) { computed = (pllreffreq * testn) / (testm * testp); if (computed > clock) @@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) if (tmpdelta < delta) { delta = tmpdelta; n = testn - 1; - m = (testm - 1); + m = (testm - 1) | ((n >> 1) & 0x80); p = testp - 1; } if ((clock * testp) >= 600000) - p |= 0x80; + p |= 80; } } } @@ -751,6 +751,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, int i; unsigned char misc = 0; unsigned char ext_vga[6]; + unsigned char ext_vga_index24; + unsigned char dac_index90 = 0; u8 bppshift; static unsigned char dacvalue[] = { @@ -801,6 +803,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, option2 = 0x0000b000; break; case G200_ER: + dac_index90 = 0; break; } @@ -849,8 +852,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, WREG_DAC(i, dacvalue[i]); } - if (mdev->type == G200_ER) - WREG_DAC(0x90, 0); + if (mdev->type == G200_ER) { + WREG_DAC(0x90, dac_index90); + } + 
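The mga_g200eh_set_plls() hunk above restores an exhaustive divider search: for a target clock it tries every post divider p, feedback divider n and input divider m, keeps the combination whose ref * n / (m * p) lands closest, and the caller rejects the result if the error exceeds 0.5% (clock * 5 / 1000). A minimal standalone sketch of that search follows; the reference frequency is passed in as a parameter because the restored hunk sets pllreffreq to 3333 where the line it replaces had 33333, and the register-packing details (the high bit of n folded into m, the 0x80 flag on p above 600 MHz) are deliberately left out:

    struct g200eh_pll { unsigned int m, n, p; };

    static long g200eh_pick_pll(long clock, long ref, struct g200eh_pll *out)
    {
        const long vcomax = 800000, vcomin = 400000;
        long best = 0x7fffffff;
        unsigned int testp, testm, testn;

        for (testp = 16; testp > 0; testp--) {
            if (clock * testp > vcomax || clock * testp < vcomin)
                continue;                          /* keep the VCO in range */
            for (testm = 1; testm < 33; testm++) {
                for (testn = 1; testn < 257; testn++) {
                    long computed = (ref * testn) / (testm * testp);
                    long delta = computed > clock ? computed - clock
                                                  : clock - computed;
                    if (delta < best) {
                        best = delta;              /* registers hold value-1 */
                        out->m = testm - 1;
                        out->n = testn - 1;
                        out->p = testp - 1;
                    }
                }
            }
        }
        return best;    /* caller accepts only if <= clock * 5 / 1000 */
    }
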
if (option) pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); @@ -947,6 +952,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if (mdev->type == G200_WB) ext_vga[1] |= 0x88; + ext_vga_index24 = 0x05; + /* Set pixel clocks */ misc = 0x2d; WREG8(MGA_MISC_OUT, misc); @@ -958,7 +965,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, } if (mdev->type == G200_ER) - WREG_ECRT(0x24, 0x5); + WREG_ECRT(24, ext_vga_index24); if (mdev->type == G200_EV) { WREG_ECRT(6, 0); diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 401c989..8fc9d92 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait) ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); if (ret) { - if (ret != -ERESTARTSYS && ret != -EBUSY) - DRM_ERROR("reserve failed %p %d\n", bo, ret); + if (ret != -ERESTARTSYS) + DRM_ERROR("reserve failed %p\n", bo); return ret; } return 0; diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h index 123270e9..b79025d 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h @@ -16,8 +16,6 @@ enum dcb_output_type { struct dcb_output { int index; /* may not be raw dcb index if merging has happened */ - u16 hasht; - u16 hashm; enum dcb_output_type type; uint8_t i2c_index; uint8_t heads; @@ -27,7 +25,6 @@ struct dcb_output { uint8_t or; uint8_t link; bool duallink_possible; - uint8_t extdev; union { struct sor_conf { int link; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c index 2d9b9d7..0fd87df 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c @@ -107,18 +107,6 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) return 0x0000; } -static inline u16 -dcb_outp_hasht(struct dcb_output *outp) -{ - return (outp->extdev << 8) | (outp->location << 4) | outp->type; -} - -static inline u16 -dcb_outp_hashm(struct dcb_output *outp) -{ - return (outp->heads << 8) | (outp->link << 6) | outp->or; -} - u16 dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len, struct dcb_output *outp) @@ -147,28 +135,34 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len, case DCB_OUTPUT_DP: outp->link = (conf & 0x00000030) >> 4; outp->sorconf.link = outp->link; /*XXX*/ - outp->extdev = 0x00; - if (outp->location != 0) - outp->extdev = (conf & 0x0000ff00) >> 8; break; default: break; } } - - outp->hasht = dcb_outp_hasht(outp); - outp->hashm = dcb_outp_hashm(outp); } return dcb; } +static inline u16 +dcb_outp_hasht(struct dcb_output *outp) +{ + return outp->type; +} + +static inline u16 +dcb_outp_hashm(struct dcb_output *outp) +{ + return (outp->heads << 8) | (outp->link << 6) | outp->or; +} + u16 dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask, u8 *ver, u8 *len, struct dcb_output *outp) { u16 dcb, idx = 0; while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) { - if ((dcb_outp_hasht(outp) & 0x00ff) == (type & 0x00ff)) { + if (dcb_outp_hasht(outp) == type) { if ((dcb_outp_hashm(outp) & mask) == mask) break; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c index 4a85778..ae7249b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c +++ 
b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c @@ -78,13 +78,12 @@ nv50_devinit_init(struct nouveau_object *object) if (ret) return ret; - /* if we ran the init tables, we have to execute the first script - * pointer of each dcb entry's display encoder table in order - * to properly initialise each encoder. + /* if we ran the init tables, execute first script pointer for each + * display table output entry that has a matching dcb entry. */ - while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) { - if (nvbios_outp_match(bios, outp.hasht, outp.hashm, - &ver, &hdr, &cnt, &len, &info)) { + while (priv->base.post && ver) { + u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info); + if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) { struct nvbios_init init = { .subdev = nv_subdev(priv), .bios = bios, @@ -96,8 +95,7 @@ nv50_devinit_init(struct nouveau_object *object) nvbios_exec(&init); } - i++; - } + }; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index b569fe8..4124192 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -386,7 +386,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nv_device(drm->device); struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); - struct nouveau_abi16_chan *chan = NULL, *temp; + struct nouveau_abi16_chan *chan, *temp; struct nouveau_abi16_ntfy *ntfy; struct nouveau_object *object; struct nv_dma_class args = {}; @@ -399,11 +399,10 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) return nouveau_abi16_put(abi16, -EINVAL); - list_for_each_entry(temp, &abi16->channels, head) { - if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { - chan = temp; + list_for_each_entry_safe(chan, temp, &abi16->channels, head) { + if (chan->chan->handle == (NVDRM_CHAN | info->channel)) break; - } + chan = NULL; } if (!chan) @@ -455,18 +454,17 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) { struct drm_nouveau_gpuobj_free *fini = data; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); - struct nouveau_abi16_chan *chan = NULL, *temp; + struct nouveau_abi16_chan *chan, *temp; struct nouveau_abi16_ntfy *ntfy; int ret; if (unlikely(!abi16)) return -ENOMEM; - list_for_each_entry(temp, &abi16->channels, head) { - if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { - chan = temp; + list_for_each_entry_safe(chan, temp, &abi16->channels, head) { + if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) break; - } + chan = NULL; } if (!chan) diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 43672b6..5ce9bf5 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1389,10 +1389,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx) firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); DRM_DEBUG("atom firmware requested %08x %dkb\n", - le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), - le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); + firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, + firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); - usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024; + 
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; } ctx->scratch_size_bytes = 0; if (usage_bytes == 0) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 6d6fdb3..9175615 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -252,6 +252,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) radeon_crtc->enabled = true; /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) + atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); @@ -269,6 +271,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) + atombios_powergate_crtc(crtc, ATOM_ENABLE); /* adjust pm to dpms changes AFTER disabling crtcs */ radeon_pm_compute_clocks(rdev); break; @@ -557,9 +561,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, /* use frac fb div on APUs */ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; - /* use frac fb div on RS780/RS880 */ - if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) - radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; } else { @@ -1843,8 +1844,6 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) int i; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - if (ASIC_IS_DCE6(rdev)) - atombios_powergate_crtc(crtc, ATOM_ENABLE); for (i = 0; i < rdev->num_crtc; i++) { if (rdev->mode_info.crtcs[i] && diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 90dc470..a2d478e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -105,27 +105,6 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) } } -static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc) -{ - if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) - return true; - else - return false; -} - -static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc) -{ - u32 pos1, pos2; - - pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]); - pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]); - - if (pos1 != pos2) - return true; - else - return false; -} - /** * dce4_wait_for_vblank - vblank wait asic callback. * @@ -136,28 +115,21 @@ static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc) */ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) { - unsigned i = 0; + int i; if (crtc >= rdev->num_crtc) return; - if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN)) - return; - - /* depending on when we hit vblank, we may be close to active; if so, - * wait for another frame. 
- */ - while (dce4_is_in_vblank(rdev, crtc)) { - if (i++ % 100 == 0) { - if (!dce4_is_counter_moving(rdev, crtc)) + if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) { + for (i = 0; i < rdev->usec_timeout; i++) { + if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)) break; + udelay(1); } - } - - while (!dce4_is_in_vblank(rdev, crtc)) { - if (i++ % 100 == 0) { - if (!dce4_is_counter_moving(rdev, crtc)) + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) break; + udelay(1); } } } @@ -431,19 +403,6 @@ void evergreen_pm_misc(struct radeon_device *rdev) rdev->pm.current_vddc = voltage->voltage; DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); } - - /* starting with BTC, there is one state that is used for both - * MH and SH. Difference is that we always use the high clock index for - * mclk and vddci. - */ - if ((rdev->pm.pm_method == PM_METHOD_PROFILE) && - (rdev->family >= CHIP_BARTS) && - rdev->pm.active_crtc_count && - ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) || - (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX))) - voltage = &rdev->pm.power_state[req_ps_idx]. - clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage; - /* 0xff01 is a flag rather then an actual voltage */ if (voltage->vddci == 0xff01) return; @@ -636,16 +595,6 @@ void evergreen_hpd_init(struct radeon_device *rdev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); - - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. 
- */ - continue; - } switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HPD1_CONTROL, tmp); @@ -1363,16 +1312,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { radeon_wait_for_vblank(rdev, i); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } } else { tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { radeon_wait_for_vblank(rdev, i); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } @@ -1384,15 +1334,6 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav break; udelay(1); } - - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); - tmp &= ~EVERGREEN_CRTC_MASTER_EN; - WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ } else { save->crtc_enabled[i] = false; } @@ -1410,22 +1351,6 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav } /* wait for the MC to settle */ udelay(100); - - /* lock double buffered regs */ - for (i = 0; i < rdev->num_crtc; i++) { - if (save->crtc_enabled[i]) { - tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); - if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) { - tmp |= EVERGREEN_GRPH_UPDATE_LOCK; - WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]); - if (!(tmp & 1)) { - tmp |= 1; - WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - } - } } void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) @@ -1447,33 +1372,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); - /* unlock regs and wait for update */ - for (i = 0; i < rdev->num_crtc; i++) { - if (save->crtc_enabled[i]) { - tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; - WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); - if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) { - tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; - WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]); - if (tmp & 1) { - tmp &= ~1; - WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < rdev->usec_timeout; j++) { - tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); - if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } - } - } - /* unblackout the MC */ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL); tmp &= ~BLACKOUT_MODE_MASK; diff --git 
a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 3e9773a..034f4c2 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -225,8 +225,6 @@ #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 -#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 -#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 #define EVERGREEN_DC_GPIO_HPD_A 0x64b4 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 10e1bd1..835992d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -466,32 +466,21 @@ static void cayman_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x9907) || (rdev->pdev->device == 0x9908) || (rdev->pdev->device == 0x9909) || - (rdev->pdev->device == 0x990B) || - (rdev->pdev->device == 0x990C) || - (rdev->pdev->device == 0x990F) || (rdev->pdev->device == 0x9910) || - (rdev->pdev->device == 0x9917) || - (rdev->pdev->device == 0x9999) || - (rdev->pdev->device == 0x999C)) { + (rdev->pdev->device == 0x9917)) { rdev->config.cayman.max_simds_per_se = 6; rdev->config.cayman.max_backends_per_se = 2; } else if ((rdev->pdev->device == 0x9903) || (rdev->pdev->device == 0x9904) || (rdev->pdev->device == 0x990A) || - (rdev->pdev->device == 0x990D) || - (rdev->pdev->device == 0x990E) || (rdev->pdev->device == 0x9913) || - (rdev->pdev->device == 0x9918) || - (rdev->pdev->device == 0x999D)) { + (rdev->pdev->device == 0x9918)) { rdev->config.cayman.max_simds_per_se = 4; rdev->config.cayman.max_backends_per_se = 2; } else if ((rdev->pdev->device == 0x9919) || (rdev->pdev->device == 0x9990) || (rdev->pdev->device == 0x9991) || (rdev->pdev->device == 0x9994) || - (rdev->pdev->device == 0x9995) || - (rdev->pdev->device == 0x9996) || - (rdev->pdev->device == 0x999A) || (rdev->pdev->device == 0x99A0)) { rdev->config.cayman.max_simds_per_se = 3; rdev->config.cayman.max_backends_per_se = 1; @@ -621,28 +610,15 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(GB_ADDR_CONFIG, gb_addr_config); WREG32(DMIF_ADDR_CONFIG, gb_addr_config); - if (ASIC_IS_DCE6(rdev)) - WREG32(DMIF_ADDR_CALC, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); - if ((rdev->config.cayman.max_backends_per_se == 1) && - (rdev->flags & RADEON_IS_IGP)) { - if ((disabled_rb_mask & 3) == 1) { - /* RB0 disabled, RB1 enabled */ - tmp = 0x11111111; - } else { - /* RB1 disabled, RB0 enabled */ - tmp = 0x00000000; - } - } else { - tmp = gb_addr_config & NUM_PIPES_MASK; - tmp = r6xx_remap_render_backend(rdev, tmp, - rdev->config.cayman.max_backends_per_se * - rdev->config.cayman.max_shader_engines, - CAYMAN_MAX_BACKENDS, disabled_rb_mask); - } + tmp = gb_addr_config & NUM_PIPES_MASK; + tmp = r6xx_remap_render_backend(rdev, tmp, + rdev->config.cayman.max_backends_per_se * + rdev->config.cayman.max_shader_engines, + CAYMAN_MAX_BACKENDS, disabled_rb_mask); WREG32(GB_BACKEND_MAP, tmp); cgts_tcc_disable = 0xffff0000; @@ -1686,7 +1662,6 @@ int cayman_resume(struct radeon_device *rdev) int cayman_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); evergreen_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index e045f8c..48e5022 100644 --- 
a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -45,10 +45,6 @@ #define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001 #define DMIF_ADDR_CONFIG 0xBD4 - -/* DCE6 only */ -#define DMIF_ADDR_CALC 0xC00 - #define SRBM_GFX_CNTL 0x0E44 #define RINGID(x) (((x) & 0x3) << 0) #define VMID(x) (((x) & 0x7) << 0) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 62719ec..8ff7cac 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -69,38 +69,6 @@ MODULE_FIRMWARE(FIRMWARE_R520); * and others in some cases. */ -static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc) -{ - if (crtc == 0) { - if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR) - return true; - else - return false; - } else { - if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) - return true; - else - return false; - } -} - -static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc) -{ - u32 vline1, vline2; - - if (crtc == 0) { - vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; - vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; - } else { - vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; - vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; - } - if (vline1 != vline2) - return true; - else - return false; -} - /** * r100_wait_for_vblank - vblank wait asic callback. * @@ -111,33 +79,36 @@ static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc) */ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) { - unsigned i = 0; + int i; if (crtc >= rdev->num_crtc) return; if (crtc == 0) { - if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN)) - return; - } else { - if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN)) - return; - } - - /* depending on when we hit vblank, we may be close to active; if so, - * wait for another frame. 
- */ - while (r100_is_in_vblank(rdev, crtc)) { - if (i++ % 100 == 0) { - if (!r100_is_counter_moving(rdev, crtc)) - break; + if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) { + for (i = 0; i < rdev->usec_timeout; i++) { + if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)) + break; + udelay(1); + } + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR) + break; + udelay(1); + } } - } - - while (!r100_is_in_vblank(rdev, crtc)) { - if (i++ % 100 == 0) { - if (!r100_is_counter_moving(rdev, crtc)) - break; + } else { + if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) { + for (i = 0; i < rdev->usec_timeout; i++) { + if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)) + break; + udelay(1); + } + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) + break; + udelay(1); + } } } } diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 8ec2376..ec576aa 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -357,9 +357,7 @@ #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 -#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0 #define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 -#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8 /* master controls */ #define AVIVO_DC_CRTC_MASTER_EN 0x60f8 diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index d89a1f8..ff80efe 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -489,7 +489,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder) offset = dig->afmt->offset; /* Older chipsets require setting HDMI and routing manually */ - if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) { + if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: @@ -544,6 +544,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder) /* Called for ATOM_ENCODER_MODE_HDMI only */ if (!dig || !dig->afmt) { + WARN_ON(1); return; } if (!dig->afmt->enabled) @@ -557,7 +558,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder) radeon_irq_kms_disable_afmt(rdev, dig->afmt->id); /* Older chipsets not handled by AtomBIOS */ - if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) { + if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: WREG32_P(AVIVO_TMDSA_CNTL, 0, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 96168ef..f22eb57 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2028,8 +2028,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) num_modes = power_info->info.ucNumOfPowerModeEntries; if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; - if (num_modes == 0) - return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; @@ -2434,8 +2432,6 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); - if (power_info->pplib.ucNumStates == 0) - return state_index; 
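The power-table-6 hunk just below drops a walk that steps through the ATOM pplib state array by hand: each entry is two header bytes (a non-clock-info index and a DPM level count) followed by one clock-info index byte per level, so entries have variable size and plain array indexing via &state_array->states[i] — which the revert restores, along with the "inagua" workarounds — is only exact for the first entry. A sketch of that variable-stride walk, with a simplified entry layout assumed here (the driver's real union pplib_power_state differs):

    #include <stdint.h>

    /* simplified stand-in for a pplib v2 state entry */
    struct pplib_state_v2 {
        uint8_t nonClockInfoIndex;
        uint8_t ucNumDPMLevels;
        uint8_t clockInfoIndex[];   /* ucNumDPMLevels bytes follow */
    };

    static void walk_states(const uint8_t *states, unsigned int num_entries)
    {
        const uint8_t *pos = states;
        unsigned int i, j;

        for (i = 0; i < num_entries; i++) {
            const struct pplib_state_v2 *st = (const void *)pos;

            for (j = 0; j < st->ucNumDPMLevels; j++) {
                /* each byte indexes into the shared clock-info array */
                (void)st->clockInfoIndex[j];
            }
            pos += 2 + st->ucNumDPMLevels;   /* entries have variable size */
        }
    }
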
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.power_state) @@ -2518,7 +2514,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); u16 data_offset; u8 frev, crev; - u8 *power_state_offset; if (!atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) @@ -2535,17 +2530,15 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) non_clock_info_array = (struct _NonClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - if (state_array->ucNumEntries == 0) - return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * state_array->ucNumEntries, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; - power_state_offset = (u8 *)state_array->states; for (i = 0; i < state_array->ucNumEntries; i++) { mode_index = 0; - power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; + power_state = (union pplib_power_state *)&state_array->states[i]; + /* XXX this might be an inagua bug... */ + non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * @@ -2557,6 +2550,9 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) if (power_state->v2.ucNumDPMLevels) { for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { clock_array_index = power_state->v2.clockInfoIndex[j]; + /* XXX this might be an inagua bug... */ + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; clock_info = (union pplib_clock_info *) &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; valid = radeon_atombios_parse_pplib_clock_info(rdev, @@ -2578,7 +2574,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) non_clock_info); state_index++; } - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; } /* if multiple clock modes, mark the lowest as no display */ for (i = 0; i < state_index; i++) { @@ -2625,9 +2620,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) default: break; } - } - - if (state_index == 0) { + } else { rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); if (rdev->pm.power_state) { rdev->pm.power_state[0].clock_info = diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index d96070b..15f5ded 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -43,12 +43,6 @@ struct atpx_verify_interface { u32 function_bits; /* supported functions bit vector */ } __packed; -struct atpx_px_params { - u16 size; /* structure size in bytes (includes size field) */ - u32 valid_flags; /* which flags are valid */ - u32 flags; /* flags */ -} __packed; - struct atpx_power_control { u16 size; u8 dgpu_state; @@ -129,61 +123,9 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas } /** - * radeon_atpx_validate_functions - validate ATPX functions - * - * @atpx: radeon atpx struct - * - * Validate that required functions are enabled (all asics). - * returns 0 on success, error on failure. 
- */ -static int radeon_atpx_validate(struct radeon_atpx *atpx) -{ - /* make sure required functions are enabled */ - /* dGPU power control is required */ - atpx->functions.power_cntl = true; - - if (atpx->functions.px_params) { - union acpi_object *info; - struct atpx_px_params output; - size_t size; - u32 valid_bits; - - info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); - if (!info) - return -EIO; - - memset(&output, 0, sizeof(output)); - - size = *(u16 *) info->buffer.pointer; - if (size < 10) { - printk("ATPX buffer is too small: %zu\n", size); - kfree(info); - return -EINVAL; - } - size = min(sizeof(output), size); - - memcpy(&output, info->buffer.pointer, size); - - valid_bits = output.flags & output.valid_flags; - /* if separate mux flag is set, mux controls are required */ - if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { - atpx->functions.i2c_mux_cntl = true; - atpx->functions.disp_mux_cntl = true; - } - /* if any outputs are muxed, mux controls are required */ - if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | - ATPX_TV_SIGNAL_MUXED | - ATPX_DFP_SIGNAL_MUXED)) - atpx->functions.disp_mux_cntl = true; - - kfree(info); - } - return 0; -} - -/** * radeon_atpx_verify_interface - verify ATPX * + * @handle: acpi handle * @atpx: radeon atpx struct * * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function @@ -464,19 +406,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) */ static int radeon_atpx_init(void) { - int r; - /* set up the ATPX handle */ - r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx); - if (r) - return r; - - /* validate the atpx setup */ - r = radeon_atpx_validate(&radeon_atpx_priv.atpx); - if (r) - return r; - - return 0; + return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx); } /** diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index a2f0c24..bedda9c 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -135,15 +135,13 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, sdomain, ddomain, "dma"); } - if (rdev->asic->copy.blit) { - time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_BLIT, n); - if (time < 0) - goto out_cleanup; - if (time > 0) - radeon_benchmark_log_results(n, size, time, - sdomain, ddomain, "blit"); - } + time = radeon_benchmark_do_move(rdev, size, saddr, daddr, + RADEON_BENCHMARK_COPY_BLIT, n); + if (time < 0) + goto out_cleanup; + if (time > 0) + radeon_benchmark_log_results(n, size, time, + sdomain, ddomain, "blit"); out_cleanup: if (sobj) { diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 78edadc..3e403bd 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -970,15 +970,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct found = 1; } - /* quirks */ - /* Radeon 9100 (R200) */ - if ((dev->pdev->device == 0x514D) && - (dev->pdev->subsystem_vendor == 0x174B) && - (dev->pdev->subsystem_device == 0x7149)) { - /* vbios value is bad, use the default */ - found = 0; - } - if (!found) /* fallback to defaults */ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 48f80cd..90374dd 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -400,9 +400,6 @@ void 
radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block) { unsigned long irqflags; - if (!rdev->ddev->irq_enabled) - return; - spin_lock_irqsave(&rdev->irq.lock, irqflags); rdev->irq.afmt[block] = true; radeon_irq_set(rdev); @@ -422,9 +419,6 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block) { unsigned long irqflags; - if (!rdev->ddev->irq_enabled) - return; - spin_lock_irqsave(&rdev->irq.lock, irqflags); rdev->irq.afmt[block] = false; radeon_irq_set(rdev); @@ -444,9 +438,6 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask) unsigned long irqflags; int i; - if (!rdev->ddev->irq_enabled) - return; - spin_lock_irqsave(&rdev->irq.lock, irqflags); for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i)); @@ -467,9 +458,6 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask) unsigned long irqflags; int i; - if (!rdev->ddev->irq_enabled) - return; - spin_lock_irqsave(&rdev->irq.lock, irqflags); for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) rdev->irq.hpd[i] &= !(hpd_mask & (1 << i)); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index bc36922..9c312f9 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -50,13 +50,9 @@ int radeon_driver_unload_kms(struct drm_device *dev) if (rdev == NULL) return 0; - if (rdev->rmmio == NULL) - goto done_free; radeon_acpi_fini(rdev); radeon_modeset_fini(rdev); radeon_device_fini(rdev); - -done_free: kfree(rdev); dev->dev_private = NULL; return 0; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 788c64c..0bfa656 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -169,7 +169,7 @@ static void radeon_set_power_state(struct radeon_device *rdev) /* starting with BTC, there is one state that is used for both * MH and SH. Difference is that we always use the high clock index for - * mclk and vddci. + * mclk. */ if ((rdev->pm.pm_method == PM_METHOD_PROFILE) && (rdev->family >= CHIP_BARTS) && @@ -843,11 +843,7 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct radeon_device *rdev = dev->dev_private; seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); - /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ - if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) - seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); - else - seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); + seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); if (rdev->asic->pm.get_memory_clock) seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 8adc5b5..cd72062 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -161,8 +161,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, radeon_semaphore_free(rdev, &ib->semaphore, NULL); } /* if we can't remember our last VM flush then flush now! 
*/ - /* XXX figure out why we have to flush for every IB */ - if (ib->vm /*&& !ib->vm->last_flush*/) { + if (ib->vm && !ib->vm->last_flush) { radeon_ring_vm_flush(rdev, ib->ring, ib->vm); } if (const_ib) { diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 46fa1b0..5a0fc74 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -52,59 +52,23 @@ static const u32 crtc_offsets[2] = AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL }; -static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc) -{ - if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK) - return true; - else - return false; -} - -static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc) -{ - u32 pos1, pos2; - - pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]); - pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]); - - if (pos1 != pos2) - return true; - else - return false; -} - -/** - * avivo_wait_for_vblank - vblank wait asic callback. - * - * @rdev: radeon_device pointer - * @crtc: crtc to wait for vblank on - * - * Wait for vblank on the requested crtc (r5xx-r7xx). - */ void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc) { - unsigned i = 0; + int i; if (crtc >= rdev->num_crtc) return; - if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN)) - return; - - /* depending on when we hit vblank, we may be close to active; if so, - * wait for another frame. - */ - while (avivo_is_in_vblank(rdev, crtc)) { - if (i++ % 100 == 0) { - if (!avivo_is_counter_moving(rdev, crtc)) + if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) { + for (i = 0; i < rdev->usec_timeout; i++) { + if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)) break; + udelay(1); } - } - - while (!avivo_is_in_vblank(rdev, crtc)) { - if (i++ % 100 == 0) { - if (!avivo_is_counter_moving(rdev, crtc)) + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK) break; + udelay(1); } } } diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ffcba73..435ed35 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -303,10 +303,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]); if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) { radeon_wait_for_vblank(rdev, i); - WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1); tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } /* wait for the next frame */ frame_count = radeon_get_vblank_counter(rdev, i); @@ -315,15 +313,6 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) break; udelay(1); } - - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]); - tmp &= ~AVIVO_CRTC_EN; - WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ } else { save->crtc_enabled[i] = false; } @@ -349,22 +338,6 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) } /* wait for the MC to settle */ udelay(100); - - /* lock double buffered regs */ - for (i = 0; i < 
rdev->num_crtc; i++) { - if (save->crtc_enabled[i]) { - tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); - if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) { - tmp |= AVIVO_D1GRPH_UPDATE_LOCK; - WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]); - if (!(tmp & 1)) { - tmp |= 1; - WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - } - } } void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) @@ -375,7 +348,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) /* update crtc base addresses */ for (i = 0; i < rdev->num_crtc; i++) { if (rdev->family >= CHIP_RV770) { - if (i == 0) { + if (i == 1) { WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, @@ -394,33 +367,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) } WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); - /* unlock regs and wait for update */ - for (i = 0; i < rdev->num_crtc; i++) { - if (save->crtc_enabled[i]) { - tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; - WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); - if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) { - tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; - WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]); - if (tmp & 1) { - tmp &= ~1; - WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < rdev->usec_timeout; j++) { - tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); - if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } - } - } - if (rdev->family >= CHIP_R600) { /* unblackout the MC */ if (rdev->family >= CHIP_RV770) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 40d766e..ae8b482 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1374,7 +1374,7 @@ static void si_select_se_sh(struct radeon_device *rdev, u32 data = INSTANCE_BROADCAST_WRITES; if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) - data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; + data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; else if (se_num == 0xffffffff) data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); else if (sh_num == 0xffffffff) @@ -1659,7 +1659,6 @@ static void si_gpu_init(struct radeon_device *rdev) WREG32(GB_ADDR_CONFIG, gb_addr_config); WREG32(DMIF_ADDR_CONFIG, gb_addr_config); - WREG32(DMIF_ADDR_CALC, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); @@ -4233,7 +4232,6 @@ int si_resume(struct radeon_device *rdev) int si_suspend(struct radeon_device *rdev) { - radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); cayman_dma_stop(rdev); si_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index e9a01f0..c056aae 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -60,8 +60,6 @@ #define DMIF_ADDR_CONFIG 0xBD4 -#define DMIF_ADDR_CALC 0xC00 - #define SRBM_STATUS 0xE50 #define SRBM_SOFT_RESET 0x0E60 diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index b44d548..fe5cdbc 100644 --- 
a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -61,10 +61,6 @@ static int udl_get_modes(struct drm_connector *connector) int ret; edid = (struct edid *)udl_get_edid(udl); - if (!edid) { - drm_mode_connector_update_edid_property(connector, NULL); - return 0; - } /* * We only read the main block, but if the monitor reports extension diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index cc6d90f..87aa5f5 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -75,8 +75,6 @@ struct udl_framebuffer { struct drm_framebuffer base; struct udl_gem_object *obj; bool active_16; /* active on the 16-bit channel */ - int x1, y1, x2, y2; /* dirty rect */ - spinlock_t dirty_lock; }; #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base) diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 1eb060c..d4ab3be 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -22,9 +22,9 @@ #include -#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */ +#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */ -static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */ +static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */ static int fb_bpp = 16; module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); @@ -153,9 +153,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, struct urb *urb; int aligned_x; int bpp = (fb->base.bits_per_pixel / 8); - int x2, y2; - bool store_for_later = false; - unsigned long flags; if (!fb->active_16) return 0; @@ -172,6 +169,8 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, } } + start_cycles = get_cycles(); + aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long)); width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long)); x = aligned_x; @@ -181,53 +180,19 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, (y + height > fb->base.height)) return -EINVAL; - /* if we are in atomic just store the info - can't test inside spin lock */ - if (in_atomic()) - store_for_later = true; - - x2 = x + width - 1; - y2 = y + height - 1; - - spin_lock_irqsave(&fb->dirty_lock, flags); - - if (fb->y1 < y) - y = fb->y1; - if (fb->y2 > y2) - y2 = fb->y2; - if (fb->x1 < x) - x = fb->x1; - if (fb->x2 > x2) - x2 = fb->x2; - - if (store_for_later) { - fb->x1 = x; - fb->x2 = x2; - fb->y1 = y; - fb->y2 = y2; - spin_unlock_irqrestore(&fb->dirty_lock, flags); - return 0; - } - - fb->x1 = fb->y1 = INT_MAX; - fb->x2 = fb->y2 = 0; - - spin_unlock_irqrestore(&fb->dirty_lock, flags); - start_cycles = get_cycles(); - urb = udl_get_urb(dev); if (!urb) return 0; cmd = urb->transfer_buffer; - for (i = y; i <= y2 ; i++) { + for (i = y; i < y + height ; i++) { const int line_offset = fb->base.pitches[0] * i; const int byte_offset = line_offset + (x * bpp); const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp); if (udl_render_hline(dev, bpp, &urb, (char *) fb->obj->vmapping, &cmd, byte_offset, dev_byte_offset, - (x2 - x + 1) * bpp, + width * bpp, &bytes_identical, &bytes_sent)) goto error; } @@ -469,7 +434,6 @@ udl_framebuffer_init(struct drm_device *dev, { int ret; - spin_lock_init(&ufb->dirty_lock); ufb->obj = obj; ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs); drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 
cf787e1..fa60add 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -25,7 +25,6 @@ #include #include -#include #include #include @@ -338,10 +337,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) if (new_client->fb_info) { struct fb_event event; - console_lock(); event.info = new_client->fb_info; fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event); - console_unlock(); } ret = vgasr_priv.handler->switchto(new_client->id); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index ceb3040..eb2ee11 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1697,7 +1697,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, @@ -2071,7 +2070,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 160a309..34e2547 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -554,9 +554,6 @@ #define USB_VENDOR_ID_MADCATZ 0x0738 #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 -#define USB_VENDOR_ID_MASTERKIT 0x16c0 -#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df - #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a @@ -587,9 +584,6 @@ #define USB_VENDOR_ID_MONTEREY 0x0566 #define USB_DEVICE_ID_GENIUS_KB29E 0x3004 -#define USB_VENDOR_ID_MSI 0x1770 -#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00 - #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400 #define USB_DEVICE_ID_N_S_HARMONY 0xc359 @@ -681,9 +675,6 @@ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 -#define USB_VENDOR_ID_REALTEK 0x0bda -#define USB_DEVICE_ID_REALTEK_READER 0x0152 - #define USB_VENDOR_ID_ROCCAT 0x1e7d #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c @@ -718,7 +709,6 @@ #define USB_VENDOR_ID_SONY 0x054c #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b -#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374 #define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 8758f38c..9500f2f 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -459,25 +459,19 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report) { struct hid_device *hdev = djrcv_dev->hdev; - struct hid_report *report; 
- struct hid_report_enum *output_report_enum; - u8 *data = (u8 *)(&dj_report->device_index); - int i; + int sent_bytes; - output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT]; - report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT]; - - if (!report) { - dev_err(&hdev->dev, "%s: unable to find dj report\n", __func__); + if (!hdev->hid_output_raw_report) { + dev_err(&hdev->dev, "%s:" + "hid_output_raw_report is null\n", __func__); return -ENODEV; } - for (i = 0; i < report->field[0]->report_count; i++) - report->field[0]->value[i] = data[i]; - - usbhid_submit_report(hdev, report, USB_DIR_OUT); + sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report, + sizeof(struct dj_report), + HID_OUTPUT_REPORT); - return 0; + return (sent_bytes < 0) ? sent_bytes : 0; } static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 811062c..25ddf3e 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -462,21 +462,6 @@ static int magicmouse_input_mapping(struct hid_device *hdev, return 0; } -static void magicmouse_input_configured(struct hid_device *hdev, - struct hid_input *hi) - -{ - struct magicmouse_sc *msc = hid_get_drvdata(hdev); - - int ret = magicmouse_setup_input(msc->input, hdev); - if (ret) { - hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); - /* clean msc->input to notify probe() of the failure */ - msc->input = NULL; - } -} - - static int magicmouse_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -508,10 +493,15 @@ static int magicmouse_probe(struct hid_device *hdev, goto err_free; } - if (!msc->input) { - hid_err(hdev, "magicmouse input not registered\n"); - ret = -ENOMEM; - goto err_stop_hw; + /* We do this after hid-input is done parsing reports so that + * hid-input uses the most natural button and axis IDs. + */ + if (msc->input) { + ret = magicmouse_setup_input(msc->input, hdev); + if (ret) { + hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); + goto err_stop_hw; + } } if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) @@ -578,7 +568,6 @@ static struct hid_driver magicmouse_driver = { .remove = magicmouse_remove, .raw_event = magicmouse_raw_event, .input_mapping = magicmouse_input_mapping, - .input_configured = magicmouse_input_configured, }; static int __init magicmouse_init(void) diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 126d6ae..7f33ebf 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -43,19 +43,9 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc, { struct sony_sc *sc = hid_get_drvdata(hdev); - /* - * Some Sony RF receivers wrongly declare the mouse pointer as a - * a constant non-data variable. 
- */ - if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 && - /* usage page: generic desktop controls */ - /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */ - /* usage: mouse */ - rdesc[2] == 0x09 && rdesc[3] == 0x02 && - /* input (usage page for x,y axes): constant, variable, relative */ - rdesc[54] == 0x81 && rdesc[55] == 0x07) { - hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n"); - /* input: data, variable, relative */ + if ((sc->quirks & VAIO_RDESC_CONSTANT) && + *rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) { + hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n"); rdesc[55] = 0x06; } @@ -227,8 +217,6 @@ static const struct hid_device_id sony_devices[] = { .driver_data = SIXAXIS_CONTROLLER_BT }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), .driver_data = VAIO_RDESC_CONSTANT }, - { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE), - .driver_data = VAIO_RDESC_CONSTANT }, { } }; MODULE_DEVICE_TABLE(hid, sony_devices); diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c index 0472191..38ae877 100644 --- a/drivers/hid/hid-wiimote-ext.c +++ b/drivers/hid/hid-wiimote-ext.c @@ -403,14 +403,14 @@ static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload) if (ext->motionp) { input_report_key(ext->input, - wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x04)); + wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04)); input_report_key(ext->input, - wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x08)); + wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08)); } else { input_report_key(ext->input, - wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x01)); + wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01)); input_report_key(ext->input, - wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x02)); + wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02)); } input_sync(ext->input); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 19b8360..e0e6abf 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -73,7 +73,6 @@ static const struct hid_blacklist { { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, - { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, @@ -81,7 +80,6 @@ static const struct hid_blacklist { { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, - { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c index ebbb9f4..41df29f 100644 --- a/drivers/hwmon/lineage-pem.c +++ b/drivers/hwmon/lineage-pem.c @@ -422,7 +422,6 @@ static struct 
attribute *pem_input_attributes[] = { &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, - NULL }; static const struct attribute_group pem_input_group = { @@ -433,7 +432,6 @@ static struct attribute *pem_fan_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, - NULL }; static const struct attribute_group pem_fan_group = { diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c index 6d61307..9652a2c 100644 --- a/drivers/hwmon/pmbus/ltc2978.c +++ b/drivers/hwmon/pmbus/ltc2978.c @@ -59,10 +59,10 @@ enum chips { ltc2978, ltc3880 }; struct ltc2978_data { enum chips id; int vin_min, vin_max; - int temp_min, temp_max[2]; + int temp_min, temp_max; int vout_min[8], vout_max[8]; int iout_max[2]; - int temp2_max; + int temp2_max[2]; struct pmbus_driver_info info; }; @@ -113,10 +113,9 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page, ret = pmbus_read_word_data(client, page, LTC2978_MFR_TEMPERATURE_PEAK); if (ret >= 0) { - if (lin11_to_val(ret) - > lin11_to_val(data->temp_max[page])) - data->temp_max[page] = ret; - ret = data->temp_max[page]; + if (lin11_to_val(ret) > lin11_to_val(data->temp_max)) + data->temp_max = ret; + ret = data->temp_max; } break; case PMBUS_VIRT_RESET_VOUT_HISTORY: @@ -205,9 +204,10 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg) ret = pmbus_read_word_data(client, page, LTC3880_MFR_TEMPERATURE2_PEAK); if (ret >= 0) { - if (lin11_to_val(ret) > lin11_to_val(data->temp2_max)) - data->temp2_max = ret; - ret = data->temp2_max; + if (lin11_to_val(ret) + > lin11_to_val(data->temp2_max[page])) + data->temp2_max[page] = ret; + ret = data->temp2_max[page]; } break; case PMBUS_VIRT_READ_VIN_MIN: @@ -248,11 +248,11 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page, switch (reg) { case PMBUS_VIRT_RESET_IOUT_HISTORY: - data->iout_max[page] = 0x7c00; + data->iout_max[page] = 0x7fff; ret = ltc2978_clear_peaks(client, page, data->id); break; case PMBUS_VIRT_RESET_TEMP2_HISTORY: - data->temp2_max = 0x7c00; + data->temp2_max[page] = 0x7fff; ret = ltc2978_clear_peaks(client, page, data->id); break; case PMBUS_VIRT_RESET_VOUT_HISTORY: @@ -262,12 +262,12 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page, break; case PMBUS_VIRT_RESET_VIN_HISTORY: data->vin_min = 0x7bff; - data->vin_max = 0x7c00; + data->vin_max = 0; ret = ltc2978_clear_peaks(client, page, data->id); break; case PMBUS_VIRT_RESET_TEMP_HISTORY: data->temp_min = 0x7bff; - data->temp_max[page] = 0x7c00; + data->temp_max = 0x7fff; ret = ltc2978_clear_peaks(client, page, data->id); break; default: @@ -321,14 +321,12 @@ static int ltc2978_probe(struct i2c_client *client, info = &data->info; info->write_word_data = ltc2978_write_word_data; + data->vout_min[0] = 0xffff; data->vin_min = 0x7bff; - data->vin_max = 0x7c00; data->temp_min = 0x7bff; - for (i = 0; i < ARRAY_SIZE(data->temp_max); i++) - data->temp_max[i] = 0x7c00; - data->temp2_max = 0x7c00; + data->temp_max = 0x7fff; - switch (data->id) { + switch (id->driver_data) { case ltc2978: info->read_word_data = ltc2978_read_word_data; info->pages = 8; @@ -338,6 +336,7 @@ static int ltc2978_probe(struct i2c_client *client, for (i = 1; i < 8; i++) { info->func[i] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT; + data->vout_min[i] = 0xffff; } break; case ltc3880: @@ -353,14 +352,11 @@ static int 
ltc2978_probe(struct i2c_client *client, | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP; - data->iout_max[0] = 0x7c00; - data->iout_max[1] = 0x7c00; + data->vout_min[1] = 0xffff; break; default: return -ENODEV; } - for (i = 0; i < info->pages; i++) - data->vout_min[i] = 0xffff; return pmbus_do_probe(client, id, info); } diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 8047fed..1c85d39 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -926,13 +926,7 @@ static int sht15_probe(struct platform_device *pdev) if (voltage) data->supply_uV = voltage; - ret = regulator_enable(data->reg); - if (ret != 0) { - dev_err(&pdev->dev, - "failed to enable regulator: %d\n", ret); - return ret; - } - + regulator_enable(data->reg); /* * Setup a notifier block to update this if another device * causes the voltage to change diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index 461a0d7..db713c0 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -416,8 +416,6 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "%s: can't power on device\n", __func__); - pm_runtime_put_noidle(dev); - module_put(dev->driver->owner); return ret; } diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index bb51488..4cc2f05 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -881,12 +881,15 @@ omap_i2c_isr(int irq, void *dev_id) u16 mask; u16 stat; - stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); + spin_lock(&dev->lock); mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); if (stat & mask) ret = IRQ_WAKE_THREAD; + spin_unlock(&dev->lock); + return ret; } diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 9e622b7..7b38877 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -392,11 +392,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) u32 val; int err = 0; - err = tegra_i2c_clock_enable(i2c_dev); - if (err < 0) { - dev_err(i2c_dev->dev, "Clock enable failed %d\n", err); - return err; - } + tegra_i2c_clock_enable(i2c_dev); tegra_periph_reset_assert(i2c_dev->div_clk); udelay(2); @@ -603,12 +599,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], if (i2c_dev->is_suspended) return -EBUSY; - ret = tegra_i2c_clock_enable(i2c_dev); - if (ret < 0) { - dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret); - return ret; - } - + tegra_i2c_clock_enable(i2c_dev); for (i = 0; i < num; i++) { enum msg_end_type end_type = MSG_END_STOP; if (i < (num - 1)) { diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index fd7d66d..f042f6d 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -312,8 +312,10 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c) /* last message in transfer -> STOP */ data |= XIIC_TX_DYN_STOP_MASK; dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__); - } - xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); + + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); + } else + xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data); } } diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c index 394f142f..36f76e2 100644 --- a/drivers/ide/alim15x3.c +++ b/drivers/ide/alim15x3.c @@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev) 
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - local_irq_save_nort(flags); + local_irq_save(flags); if (m5229_revision < 0xC2) { /* @@ -325,7 +325,7 @@ out: } pci_dev_put(north); pci_dev_put(isa_dev); - local_irq_restore_nort(flags); + local_irq_restore(flags); return 0; } diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index 0d0a966..696b6c1 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif, dma_old = inb(base + 2); - local_irq_save_nort(flags); + local_irq_save(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); @@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif, if (dma_new != dma_old) outb(dma_new, base + 2); - local_irq_restore_nort(flags); + local_irq_restore(flags); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c index 4169433..1976397 100644 --- a/drivers/ide/ide-io-std.c +++ b/drivers/ide/ide-io-std.c @@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save_nort(flags); + local_irq_save(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, insl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) - local_irq_restore_nort(flags); + local_irq_restore(flags); if (((len + 1) & 3) < 2) return; @@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save_nort(flags); + local_irq_save(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, outsl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) - local_irq_restore_nort(flags); + local_irq_restore(flags); if (((len + 1) & 3) < 2) return; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 079ae6b..177db6d 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data) /* disable_irq_nosync ?? */ disable_irq(hwif->irq); /* local CPU only, as if we were handling an interrupt */ - local_irq_disable_nort(); + local_irq_disable(); if (hwif->polling) { startstop = handler(drive); } else if (drive_is_ready(drive)) { diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index f014dd1..376f2dc 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, if ((stat & ATA_BUSY) == 0) break; - local_irq_restore_nort(flags); + local_irq_restore(flags); *rstat = stat; return -EBUSY; } } - local_irq_restore_nort(flags); + local_irq_restore(flags); } /* * Allow status to settle, then read it again. 
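Context for the IDE hunks above (and for several later hunks in this patch): the local_irq_save_nort()/local_irq_restore_nort() helpers being replaced come from the RT patchset, where, as that series defines them, they reduce to near no-ops on a PREEMPT_RT_FULL kernel so the section stays preemptible; on a non-RT kernel they are the plain primitives, so this revert simply restores the vanilla spelling. A minimal sketch of the restored pattern follows; the function name and the placeholder body are illustrative only, standing in for the VLB-sync and PIO accesses in the real driver:

    #include <linux/irqflags.h>

    static void pio_critical_section(void)
    {
            unsigned long flags;

            /* mask interrupts on the local CPU, remembering prior state */
            local_irq_save(flags);
            /* ... ata_vlb_sync() and the port I/O would go here ... */
            local_irq_restore(flags);
    }

After the revert such a section always runs with hard IRQs off, which is the behavior the v3.8 IDE code expects.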
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 38e69e1..068cef0 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id) int bswap = 1; /* local CPU only; some systems need this */ - local_irq_save_nort(flags); + local_irq_save(flags); /* read 512 bytes of id info */ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); - local_irq_restore_nort(flags); + local_irq_restore(flags); drive->dev_flags |= IDE_DFLAG_ID_READ; #ifdef DEBUG diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 3a9a1fc..729428e 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, page_is_high = PageHighMem(page); if (page_is_high) - local_irq_save_nort(flags); + local_irq_save(flags); buf = kmap_atomic(page) + offset; @@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, kunmap_atomic(buf); if (page_is_high) - local_irq_restore_nort(flags); + local_irq_restore(flags); len -= nr_bytes; } @@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, } if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) - local_irq_disable_nort(); + local_irq_disable(); ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c index ffeebc7..fa080eb 100644 --- a/drivers/idle/i7300_idle.c +++ b/drivers/idle/i7300_idle.c @@ -75,7 +75,7 @@ static unsigned long past_skip; static struct pci_dev *fbd_dev; -static raw_spinlock_t i7300_idle_lock; +static spinlock_t i7300_idle_lock; static int i7300_idle_active; static u8 i7300_idle_thrtctl_saved; @@ -457,7 +457,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val, idle_begin_time = ktime_get(); } - raw_spin_lock_irqsave(&i7300_idle_lock, flags); + spin_lock_irqsave(&i7300_idle_lock, flags); if (val == IDLE_START) { cpumask_set_cpu(smp_processor_id(), idle_cpumask); @@ -506,7 +506,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val, } } end: - raw_spin_unlock_irqrestore(&i7300_idle_lock, flags); + spin_unlock_irqrestore(&i7300_idle_lock, flags); return 0; } @@ -548,7 +548,7 @@ struct debugfs_file_info { static int __init i7300_idle_init(void) { - raw_spin_lock_init(&i7300_idle_lock); + spin_lock_init(&i7300_idle_lock); total_us = 0; if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload)) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 892cd87..05bfe53 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -100,16 +100,6 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) return 0; } -static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user) -{ - int ret = -ENOSYS; - if (user) - ret = alloc_oc_sq(rdev, sq); - if (ret) - ret = alloc_host_sq(rdev, sq); - return ret; -} - static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct c4iw_dev_ucontext *uctx) { @@ -178,9 +168,18 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, goto free_sw_rq; } - ret = alloc_sq(rdev, &wq->sq, user); - if (ret) - goto free_hwaddr; + if (user) { + ret = alloc_oc_sq(rdev, &wq->sq); + if (ret) + goto free_hwaddr; + + ret = alloc_host_sq(rdev, &wq->sq); + if (ret) + goto free_sq; + } else + ret = alloc_host_sq(rdev, &wq->sq); + if (ret) + goto free_hwaddr; memset(wq->sq.queue, 0, 
wq->sq.memsize); dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 1ef880d..67b0c1d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -758,13 +758,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", tx->qp->qp_num); - netif_stop_queue(dev); - rc = ib_req_notify_cq(priv->send_cq, - IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); - if (rc < 0) + if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) ipoib_warn(priv, "request notify on send CQ failed\n"); - else if (rc) - ipoib_send_comp_handler(priv->send_cq, dev); + netif_stop_queue(dev); } } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 3800ef5..cecb98a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -783,7 +783,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) ipoib_mcast_stop_thread(dev, 0); - local_irq_save_nort(flags); + local_irq_save(flags); netif_addr_lock(dev); spin_lock(&priv->lock); @@ -865,7 +865,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) spin_unlock(&priv->lock); netif_addr_unlock(dev); - local_irq_restore_nort(flags); + local_irq_restore(flags); /* We have to cancel outside of the spinlock */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 7ccf328..d5088ce 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -700,24 +700,23 @@ static int srp_reconnect_target(struct srp_target_port *target) struct Scsi_Host *shost = target->scsi_host; int i, ret; + if (target->state != SRP_TARGET_LIVE) + return -EAGAIN; + scsi_target_block(&shost->shost_gendev); srp_disconnect_target(target); /* - * Now get a new local CM ID so that we avoid confusing the target in - * case things are really fouled up. Doing so also ensures that all CM - * callbacks will have finished before a new QP is allocated. + * Now get a new local CM ID so that we avoid confusing the + * target in case things are really fouled up. */ ret = srp_new_cm_id(target); - /* - * Whether or not creating a new CM ID succeeded, create a new - * QP. This guarantees that all completion callback function - * invocations have finished before request resetting starts. - */ - if (ret == 0) - ret = srp_create_target_ib(target); - else - srp_create_target_ib(target); + if (ret) + goto unblock; + + ret = srp_create_target_ib(target); + if (ret) + goto unblock; for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { struct srp_request *req = &target->req_ring[i]; @@ -729,12 +728,11 @@ static int srp_reconnect_target(struct srp_target_port *target) for (i = 0; i < SRP_SQ_SIZE; ++i) list_add(&target->tx_ring[i]->list, &target->free_tx); - if (ret == 0) - ret = srp_connect_target(target); + ret = srp_connect_target(target); +unblock: scsi_target_unblock(&shost->shost_gendev, ret == 0 ? 
SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE); - target->transport_offline = !!ret; if (ret) goto err; @@ -1354,12 +1352,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) unsigned long flags; int len; - if (unlikely(target->transport_offline)) { - scmnd->result = DID_NO_CONNECT << 16; - scmnd->scsi_done(scmnd); - return 0; - } - spin_lock_irqsave(&target->lock, flags); iu = __srp_get_tx_iu(target, SRP_IU_CMD); if (!iu) @@ -1703,9 +1695,6 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, struct srp_iu *iu; struct srp_tsk_mgmt *tsk_mgmt; - if (!target->connected || target->qp_in_error) - return -1; - init_completion(&target->tsk_mgmt_done); spin_lock_irq(&target->lock); @@ -1747,7 +1736,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); - if (!req || !srp_claim_req(target, req, scmnd)) + if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd)) return FAILED; srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, SRP_TSK_ABORT_TASK); @@ -1765,6 +1754,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); + if (target->qp_in_error) + return FAILED; if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, SRP_TSK_LUN_RESET)) return FAILED; @@ -1981,6 +1972,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) spin_unlock(&host->target_lock); target->state = SRP_TARGET_LIVE; + target->connected = false; scsi_scan_target(&target->scsi_host->shost_gendev, 0, target->scsi_id, SCAN_WILD_CARD, 0); diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 66fbedd..de2d0b3 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -140,7 +140,6 @@ struct srp_target_port { unsigned int cmd_sg_cnt; unsigned int indirect_size; bool allow_ext_sg; - bool transport_offline; /* Everything above this point is used in the hot path of * command processing. Try to keep them packed into cachelines. 
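Context for the ib_srp hunks above: the revert restores the v3.8 reconnect flow, which bails out at the first failure via the unblock: label instead of unconditionally recreating the QP, and it drops the transport_offline flag again. A simplified sketch of the restored control flow, assembled from the restored lines in the hunk, with the request-ring and tx-ring bookkeeping elided and an illustrative function name:

    /* within drivers/infiniband/ulp/srp/ib_srp.c */
    static int srp_reconnect_sketch(struct srp_target_port *target)
    {
            struct Scsi_Host *shost = target->scsi_host;
            int ret;

            if (target->state != SRP_TARGET_LIVE)
                    return -EAGAIN;

            scsi_target_block(&shost->shost_gendev);
            srp_disconnect_target(target);

            /* a fresh CM ID so a confused target cannot match stale state */
            ret = srp_new_cm_id(target);
            if (ret)
                    goto unblock;

            ret = srp_create_target_ib(target);
            if (ret)
                    goto unblock;

            /* ... reset req_ring entries and rebuild the free_tx list ... */

            ret = srp_connect_target(target);
    unblock:
            scsi_target_unblock(&shost->shost_gendev, ret == 0 ?
                                SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);
            return ret;
    }

The sketch condenses the hunk body; the err path taken after unblock when ret is non-zero is omitted here.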
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 18fdafe..da739d9 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c @@ -87,12 +87,12 @@ static int gameport_measure_speed(struct gameport *gameport) tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save_nort(flags); + local_irq_save(flags); GET_TIME(t1); for (t = 0; t < 50; t++) gameport_read(gameport); GET_TIME(t2); GET_TIME(t3); - local_irq_restore_nort(flags); + local_irq_restore(flags); udelay(i * 10); if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; } @@ -111,11 +111,11 @@ static int gameport_measure_speed(struct gameport *gameport) tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save_nort(flags); + local_irq_save(flags); rdtscl(t1); for (t = 0; t < 50; t++) gameport_read(gameport); rdtscl(t2); - local_irq_restore_nort(flags); + local_irq_restore(flags); udelay(i * 10); if (t2 - t1 < tx) tx = t2 - t1; } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 4c867f4..c1c74e0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2466,16 +2466,18 @@ static int device_change_notifier(struct notifier_block *nb, /* allocate a protection domain if a device is added */ dma_domain = find_protection_domain(devid); - if (!dma_domain) { - dma_domain = dma_ops_domain_alloc(); - if (!dma_domain) - goto out; - dma_domain->target_dev = devid; - - spin_lock_irqsave(&iommu_pd_list_lock, flags); - list_add_tail(&dma_domain->list, &iommu_pd_list); - spin_unlock_irqrestore(&iommu_pd_list_lock, flags); - } + if (dma_domain) + goto out; + dma_domain = dma_ops_domain_alloc(); + if (!dma_domain) + goto out; + dma_domain->target_dev = devid; + + spin_lock_irqsave(&iommu_pd_list_lock, flags); + list_add_tail(&dma_domain->list, &iommu_pd_list); + spin_unlock_irqrestore(&iommu_pd_list_lock, flags); + + dev_data = get_dev_data(dev); dev->archdata.dma_ops = &amd_iommu_dma_ops; @@ -3948,9 +3950,6 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) if (!table) goto out; - /* Initialize table spin-lock */ - spin_lock_init(&table->lock); - if (ioapic) /* Keep the first 32 indexes free for IOAPIC interrupts */ table->min_index = 32; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index b6ecddb..faf10ba 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1876,6 +1876,11 @@ static int amd_iommu_init_dma(void) struct amd_iommu *iommu; int ret; + init_device_table_dma(); + + for_each_iommu(iommu) + iommu_flush_all_caches(iommu); + if (iommu_pass_through) ret = amd_iommu_init_passthrough(); else @@ -1884,11 +1889,6 @@ static int amd_iommu_init_dma(void) if (ret) return ret; - init_device_table_dma(); - - for_each_iommu(iommu) - iommu_flush_all_caches(iommu); - amd_iommu_init_api(); amd_iommu_init_notifier(); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f1e7b86..eca2801 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4253,19 +4253,13 @@ static void quirk_iommu_rwbf(struct pci_dev *dev) { /* * Mobile 4 Series Chipset neglects to set RWBF capability, - * but needs it. Same seems to hold for the desktop versions. 
+ * but needs it: */ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); rwbf_quirk = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf); #define GGC 0x52 #define GGC_MEMORY_SIZE_MASK (0xf << 8) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 64eff90..651ca79 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1026,8 +1026,6 @@ void dm_bufio_prefetch(struct dm_bufio_client *c, { struct blk_plug plug; - BUG_ON(dm_bufio_in_request()); - blk_start_plug(&plug); dm_bufio_lock(c); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 2ae151e..f7369f9 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1234,6 +1234,20 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size) return 0; } +/* + * Encode key into its hex representation + */ +static void crypt_encode_key(char *hex, u8 *key, unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + sprintf(hex, "%02x", *key); + hex += 2; + key++; + } +} + static void crypt_free_tfms(struct crypt_config *cc) { unsigned i; @@ -1703,11 +1717,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } -static void crypt_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int crypt_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct crypt_config *cc = ti->private; - unsigned i, sz = 0; + unsigned int sz = 0; switch (type) { case STATUSTYPE_INFO: @@ -1717,11 +1731,17 @@ static void crypt_status(struct dm_target *ti, status_type_t type, case STATUSTYPE_TABLE: DMEMIT("%s ", cc->cipher_string); - if (cc->key_size > 0) - for (i = 0; i < cc->key_size; i++) - DMEMIT("%02x", cc->key[i]); - else - DMEMIT("-"); + if (cc->key_size > 0) { + if ((maxlen - sz) < ((cc->key_size << 1) + 1)) + return -ENOMEM; + + crypt_encode_key(result + sz, cc->key, cc->key_size); + sz += cc->key_size << 1; + } else { + if (sz >= maxlen) + return -ENOMEM; + result[sz++] = '-'; + } DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, cc->dev->name, (unsigned long long)cc->start); @@ -1731,6 +1751,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type, break; } + return 0; } static void crypt_postsuspend(struct dm_target *ti) @@ -1824,7 +1845,7 @@ static int crypt_iterate_devices(struct dm_target *ti, static struct target_type crypt_target = { .name = "crypt", - .version = {1, 12, 1}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index c0d03b0..cc1bd04 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -293,8 +293,8 @@ static int delay_map(struct dm_target *ti, struct bio *bio) return delay_bio(dc, dc->read_delay, bio); } -static void delay_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int delay_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char 
*result, unsigned maxlen) { struct delay_c *dc = ti->private; int sz = 0; @@ -314,6 +314,8 @@ static void delay_status(struct dm_target *ti, status_type_t type, dc->write_delay); break; } + + return 0; } static int delay_iterate_devices(struct dm_target *ti, @@ -335,7 +337,7 @@ out: static struct target_type delay_target = { .name = "delay", - .version = {1, 2, 1}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = delay_ctr, .dtr = delay_dtr, diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 5d6c04c..9721f2f 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -337,8 +337,8 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) return error; } -static void flakey_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int flakey_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { unsigned sz = 0; struct flakey_c *fc = ti->private; @@ -368,6 +368,7 @@ static void flakey_status(struct dm_target *ti, status_type_t type, break; } + return 0; } static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) @@ -410,7 +411,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_ static struct target_type flakey_target = { .name = "flakey", - .version = {1, 3, 1}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = flakey_ctr, .dtr = flakey_dtr, diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index eee353d..0666b5d 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1067,7 +1067,6 @@ static void retrieve_status(struct dm_table *table, num_targets = dm_table_get_num_targets(table); for (i = 0; i < num_targets; i++) { struct dm_target *ti = dm_table_get_target(table, i); - size_t l; remaining = len - (outptr - outbuf); if (remaining <= sizeof(struct dm_target_spec)) { @@ -1094,17 +1093,14 @@ static void retrieve_status(struct dm_table *table, if (ti->type->status) { if (param->flags & DM_NOFLUSH_FLAG) status_flags |= DM_STATUS_NOFLUSH_FLAG; - ti->type->status(ti, type, status_flags, outptr, remaining); + if (ti->type->status(ti, type, status_flags, outptr, remaining)) { + param->flags |= DM_BUFFER_FULL_FLAG; + break; + } } else outptr[0] = '\0'; - l = strlen(outptr) + 1; - if (l == remaining) { - param->flags |= DM_BUFFER_FULL_FLAG; - break; - } - - outptr += l; + outptr += strlen(outptr) + 1; used = param->data_start + (outptr - outbuf); outptr = align_ptr(outptr); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 5be301c..328cad5 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -95,8 +95,8 @@ static int linear_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } -static void linear_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int linear_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -110,6 +110,7 @@ static void linear_status(struct dm_target *ti, status_type_t type, (unsigned long long)lc->start); break; } + return 0; } static int linear_ioctl(struct dm_target *ti, unsigned int cmd, @@ -154,7 +155,7 @@ static int linear_iterate_devices(struct dm_target *ti, static struct target_type linear_target = { .name = "linear", - .version = {1, 2, 1}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = linear_ctr, .dtr = 
linear_dtr, diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index d267bb5..573bd04 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1378,8 +1378,8 @@ static void multipath_resume(struct dm_target *ti) * [priority selector-name num_ps_args [ps_args]* * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+ */ -static void multipath_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int multipath_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { int sz = 0; unsigned long flags; @@ -1485,6 +1485,8 @@ static void multipath_status(struct dm_target *ti, status_type_t type, } spin_unlock_irqrestore(&m->lock, flags); + + return 0; } static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) @@ -1693,7 +1695,7 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 5, 1}, + .version = {1, 5, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5a578d8..9e58dbd 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1201,8 +1201,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } -static void raid_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int raid_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct raid_set *rs = ti->private; unsigned raid_param_cnt = 1; /* at least 1 for chunksize */ @@ -1344,6 +1344,8 @@ static void raid_status(struct dm_target *ti, status_type_t type, DMEMIT(" -"); } } + + return 0; } static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) @@ -1403,7 +1405,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 4, 2}, + .version = {1, 4, 1}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 7f24190..fa51918 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1347,8 +1347,8 @@ static char device_status_char(struct mirror *m) } -static void mirror_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int mirror_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { unsigned int m, sz = 0; struct mirror_set *ms = (struct mirror_set *) ti->private; @@ -1383,6 +1383,8 @@ static void mirror_status(struct dm_target *ti, status_type_t type, if (ms->features & DM_RAID1_HANDLE_ERRORS) DMEMIT(" 1 handle_errors"); } + + return 0; } static int mirror_iterate_devices(struct dm_target *ti, @@ -1401,7 +1403,7 @@ static int mirror_iterate_devices(struct dm_target *ti, static struct target_type mirror_target = { .name = "mirror", - .version = {1, 13, 2}, + .version = {1, 13, 1}, .module = THIS_MODULE, .ctr = mirror_ctr, .dtr = mirror_dtr, diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index df74f9f..59fc18a 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1837,8 +1837,8 @@ static void snapshot_merge_resume(struct dm_target *ti) start_merge(s); } -static void snapshot_status(struct dm_target *ti, status_type_t type, - unsigned 
status_flags, char *result, unsigned maxlen) +static int snapshot_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { unsigned sz = 0; struct dm_snapshot *snap = ti->private; @@ -1884,6 +1884,8 @@ static void snapshot_status(struct dm_target *ti, status_type_t type, maxlen - sz); break; } + + return 0; } static int snapshot_iterate_devices(struct dm_target *ti, @@ -2137,8 +2139,8 @@ static void origin_resume(struct dm_target *ti) ti->max_io_len = get_origin_minimum_chunksize(dev->bdev); } -static void origin_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int origin_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct dm_dev *dev = ti->private; @@ -2151,6 +2153,8 @@ static void origin_status(struct dm_target *ti, status_type_t type, snprintf(result, maxlen, "%s", dev->name); break; } + + return 0; } static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm, @@ -2177,7 +2181,7 @@ static int origin_iterate_devices(struct dm_target *ti, static struct target_type origin_target = { .name = "snapshot-origin", - .version = {1, 8, 1}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = origin_ctr, .dtr = origin_dtr, @@ -2190,7 +2194,7 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 11, 1}, + .version = {1, 11, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, @@ -2303,5 +2307,3 @@ module_exit(dm_snapshot_exit); MODULE_DESCRIPTION(DM_NAME " snapshot target"); MODULE_AUTHOR("Joe Thornber"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("dm-snapshot-origin"); -MODULE_ALIAS("dm-snapshot-merge"); diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index aaecefa..c89cde8 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -312,8 +312,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) * */ -static void stripe_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int stripe_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct stripe_c *sc = (struct stripe_c *) ti->private; char buffer[sc->stripes + 1]; @@ -340,6 +340,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type, (unsigned long long)sc->stripe[i].physical_start); break; } + return 0; } static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error) @@ -427,7 +428,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm, static struct target_type stripe_target = { .name = "striped", - .version = {1, 5, 1}, + .version = {1, 5, 0}, .module = THIS_MODULE, .ctr = stripe_ctr, .dtr = stripe_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2d3a2af..5409607 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2299,8 +2299,8 @@ static void emit_flags(struct pool_features *pf, char *result, * / * / */ -static void pool_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int pool_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { int r; unsigned sz = 0; @@ -2326,41 +2326,32 @@ static void pool_status(struct dm_target *ti, status_type_t type, if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) (void) 
commit_or_fallback(pool); - r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); - if (r) { - DMERR("dm_pool_get_metadata_transaction_id returned %d", r); - goto err; - } + r = dm_pool_get_metadata_transaction_id(pool->pmd, + &transaction_id); + if (r) + return r; - r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); - if (r) { - DMERR("dm_pool_get_free_metadata_block_count returned %d", r); - goto err; - } + r = dm_pool_get_free_metadata_block_count(pool->pmd, + &nr_free_blocks_metadata); + if (r) + return r; r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); - if (r) { - DMERR("dm_pool_get_metadata_dev_size returned %d", r); - goto err; - } + if (r) + return r; - r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); - if (r) { - DMERR("dm_pool_get_free_block_count returned %d", r); - goto err; - } + r = dm_pool_get_free_block_count(pool->pmd, + &nr_free_blocks_data); + if (r) + return r; r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); - if (r) { - DMERR("dm_pool_get_data_dev_size returned %d", r); - goto err; - } + if (r) + return r; r = dm_pool_get_metadata_snap(pool->pmd, &held_root); - if (r) { - DMERR("dm_pool_get_metadata_snap returned %d", r); - goto err; - } + if (r) + return r; DMEMIT("%llu %llu/%llu %llu/%llu ", (unsigned long long)transaction_id, @@ -2397,10 +2388,8 @@ static void pool_status(struct dm_target *ti, status_type_t type, emit_flags(&pt->requested_pf, result, sz, maxlen); break; } - return; -err: - DMEMIT("Error"); + return 0; } static int pool_iterate_devices(struct dm_target *ti, @@ -2479,7 +2468,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 7, 0}, + .version = {1, 6, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2687,8 +2676,8 @@ static void thin_postsuspend(struct dm_target *ti) /* * */ -static void thin_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int thin_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { int r; ssize_t sz = 0; @@ -2698,7 +2687,7 @@ static void thin_status(struct dm_target *ti, status_type_t type, if (get_pool_mode(tc->pool) == PM_FAIL) { DMEMIT("Fail"); - return; + return 0; } if (!tc->td) @@ -2707,16 +2696,12 @@ static void thin_status(struct dm_target *ti, status_type_t type, switch (type) { case STATUSTYPE_INFO: r = dm_thin_get_mapped_count(tc->td, &mapped); - if (r) { - DMERR("dm_thin_get_mapped_count returned %d", r); - goto err; - } + if (r) + return r; r = dm_thin_get_highest_mapped_block(tc->td, &highest); - if (r < 0) { - DMERR("dm_thin_get_highest_mapped_block returned %d", r); - goto err; - } + if (r < 0) + return r; DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); if (r) @@ -2736,10 +2721,7 @@ static void thin_status(struct dm_target *ti, status_type_t type, } } - return; - -err: - DMEMIT("Error"); + return 0; } static int thin_iterate_devices(struct dm_target *ti, @@ -2766,7 +2748,7 @@ static int thin_iterate_devices(struct dm_target *ti, static struct target_type thin_target = { .name = "thin", - .version = {1, 8, 0}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index a746f1d..52cde98 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -93,13 +93,6 @@ struct 
dm_verity_io { */ }; -struct dm_verity_prefetch_work { - struct work_struct work; - struct dm_verity *v; - sector_t block; - unsigned n_blocks; -}; - static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) { return (struct shash_desc *)(io + 1); @@ -431,18 +424,15 @@ static void verity_end_io(struct bio *bio, int error) * The root buffer is not prefetched, it is assumed that it will be cached * all the time. */ -static void verity_prefetch_io(struct work_struct *work) +static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io) { - struct dm_verity_prefetch_work *pw = - container_of(work, struct dm_verity_prefetch_work, work); - struct dm_verity *v = pw->v; int i; for (i = v->levels - 2; i >= 0; i--) { sector_t hash_block_start; sector_t hash_block_end; - verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); - verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); + verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); + verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); if (!i) { unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); @@ -462,25 +452,6 @@ no_prefetch_cluster: dm_bufio_prefetch(v->bufio, hash_block_start, hash_block_end - hash_block_start + 1); } - - kfree(pw); -} - -static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) -{ - struct dm_verity_prefetch_work *pw; - - pw = kmalloc(sizeof(struct dm_verity_prefetch_work), - GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); - - if (!pw) - return; - - INIT_WORK(&pw->work, verity_prefetch_io); - pw->v = v; - pw->block = io->block; - pw->n_blocks = io->n_blocks; - queue_work(v->verify_wq, &pw->work); } /* @@ -527,7 +498,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) memcpy(io->io_vec, bio_iovec(bio), io->io_vec_size * sizeof(struct bio_vec)); - verity_submit_prefetch(v, io); + verity_prefetch_io(v, io); generic_make_request(bio); @@ -537,8 +508,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio) /* * Status: V (valid) or C (corruption found) */ -static void verity_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) +static int verity_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct dm_verity *v = ti->private; unsigned sz = 0; @@ -569,6 +540,8 @@ static void verity_status(struct dm_target *ti, status_type_t type, DMEMIT("%02x", v->salt[x]); break; } + + return 0; } static int verity_ioctl(struct dm_target *ti, unsigned cmd, @@ -887,7 +860,7 @@ bad: static struct target_type verity_target = { .name = "verity", - .version = {1, 2, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0e1699e..314a0e2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1677,14 +1677,14 @@ static void dm_request_fn(struct request_queue *q) if (map_request(ti, clone, md)) goto requeued; - BUG_ON_NONRT(!irqs_disabled()); + BUG_ON(!irqs_disabled()); spin_lock(q->queue_lock); } goto out; requeued: - BUG_ON_NONRT(!irqs_disabled()); + BUG_ON(!irqs_disabled()); spin_lock(q->queue_lock); delay_and_out: @@ -1973,27 +1973,15 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t) { struct dm_md_mempools *p = dm_table_get_md_mempools(t); - if (md->io_pool && md->bs) { - /* The md already has necessary mempools. 
*/ - if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) { - /* - * Reload bioset because front_pad may have changed - * because a different table was loaded. - */ - bioset_free(md->bs); - md->bs = p->bs; - p->bs = NULL; - } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) { - BUG_ON(!md->tio_pool); - /* - * There's no need to reload with request-based dm - * because the size of front_pad doesn't change. - * Note for future: If you are to reload bioset, - * prep-ed requests in the queue may refer - * to bio from the old bioset, so you must walk - * through the queue to unprep. - */ - } + if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) { + /* + * The md already has necessary mempools. Reload just the + * bioset because front_pad may have changed because + * a different table was loaded. + */ + bioset_free(md->bs); + md->bs = p->bs; + p->bs = NULL; goto out; } @@ -2433,7 +2421,7 @@ static void dm_queue_flush(struct mapped_device *md) */ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { - struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); + struct dm_table *live_map, *map = ERR_PTR(-EINVAL); struct queue_limits limits; int r; @@ -2456,12 +2444,10 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) dm_table_put(live_map); } - if (!live_map) { - r = dm_calculate_queue_limits(table, &limits); - if (r) { - map = ERR_PTR(r); - goto out; - } + r = dm_calculate_queue_limits(table, &limits); + if (r) { + map = ERR_PTR(r); + goto out; } map = __bind(md, table, &limits); diff --git a/drivers/md/md.c b/drivers/md/md.c index 0411bde..3db3d1b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -307,10 +307,6 @@ static void md_make_request(struct request_queue *q, struct bio *bio) bio_io_error(bio); return; } - if (mddev->ro == 1 && unlikely(rw == WRITE)) { - bio_endio(bio, bio_sectors(bio) == 0 ? 
0 : -EROFS); - return; - } smp_rmb(); /* Ensure implications of 'active' are visible */ rcu_read_lock(); if (mddev->suspended) { @@ -1564,8 +1560,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ sector, count, 1) == 0) return -EINVAL; } - } else if (sb->bblog_offset != 0) - rdev->badblocks.shift = 0; + } else if (sb->bblog_offset == 0) + rdev->badblocks.shift = -1; if (!refdev) { ret = 1; @@ -2998,9 +2994,6 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) } else if (!sectors) sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - rdev->data_offset; - if (!my_mddev->pers->resize) - /* Cannot change size for RAID0 or Linear etc */ - return -EINVAL; } if (sectors < my_mddev->dev_sectors) return -EINVAL; /* component must fit device */ @@ -3221,7 +3214,7 @@ int md_rdev_init(struct md_rdev *rdev) * be used - I wonder if that matters */ rdev->badblocks.count = 0; - rdev->badblocks.shift = -1; /* disabled until explicitly enabled */ + rdev->badblocks.shift = 0; rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL); seqlock_init(&rdev->badblocks.lock); if (rdev->badblocks.page == NULL) @@ -3293,6 +3286,9 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe goto abort_free; } } + if (super_format == -1) + /* hot-add for 0.90, or non-persistent: so no badblocks */ + rdev->badblocks.shift = -1; return rdev; diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index b88757c..c4f2813 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -139,8 +139,15 @@ struct child { struct btree_node *n; }; -static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt, - struct btree_node *parent, +static struct dm_btree_value_type le64_type = { + .context = NULL, + .size = sizeof(__le64), + .inc = NULL, + .dec = NULL, + .equal = NULL +}; + +static int init_child(struct dm_btree_info *info, struct btree_node *parent, unsigned index, struct child *result) { int r, inc; @@ -157,7 +164,7 @@ static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt result->n = dm_block_data(result->block); if (inc) - inc_children(info->tm, result->n, vt); + inc_children(info->tm, result->n, &le64_type); *((__le64 *) value_ptr(parent, index)) = cpu_to_le64(dm_block_location(result->block)); @@ -229,7 +236,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, } static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, - struct dm_btree_value_type *vt, unsigned left_index) + unsigned left_index) { int r; struct btree_node *parent; @@ -237,11 +244,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, parent = dm_block_data(shadow_current(s)); - r = init_child(info, vt, parent, left_index, &left); + r = init_child(info, parent, left_index, &left); if (r) return r; - r = init_child(info, vt, parent, left_index + 1, &right); + r = init_child(info, parent, left_index + 1, &right); if (r) { exit_child(info, &left); return r; @@ -361,7 +368,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent, } static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, - struct dm_btree_value_type *vt, unsigned left_index) + unsigned left_index) { int r; struct btree_node *parent = dm_block_data(shadow_current(s)); @@ -370,17 +377,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, 
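For reference, the badblocks.shift changes in the md.c hunks above encode both an enable flag and a granularity in one field: a negative shift disables bad-block tracking for the rdev, while a value of N records ranges in units of 2^N sectors (so 0 means plain 512-byte sectors). A minimal standalone sketch of that convention; the struct and helper names here are invented for illustration and are not kernel code:

#include <stdbool.h>

/* Invented stand-in for the rdev->badblocks.shift convention */
struct bb_demo {
	int shift;	/* < 0: disabled; N >= 0: units of 2^N sectors */
};

static bool bb_enabled(const struct bb_demo *bb)
{
	return bb->shift >= 0;
}

static unsigned long long bb_align_down(const struct bb_demo *bb,
					unsigned long long sector)
{
	/* align a sector to the tracking granularity */
	return sector & ~(((unsigned long long)1 << bb->shift) - 1);
}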
/* * FIXME: fill out an array? */ - r = init_child(info, vt, parent, left_index, &left); + r = init_child(info, parent, left_index, &left); if (r) return r; - r = init_child(info, vt, parent, left_index + 1, &center); + r = init_child(info, parent, left_index + 1, &center); if (r) { exit_child(info, &left); return r; } - r = init_child(info, vt, parent, left_index + 2, &right); + r = init_child(info, parent, left_index + 2, &right); if (r) { exit_child(info, &left); exit_child(info, &center); @@ -427,8 +434,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm, } static int rebalance_children(struct shadow_spine *s, - struct dm_btree_info *info, - struct dm_btree_value_type *vt, uint64_t key) + struct dm_btree_info *info, uint64_t key) { int i, r, has_left_sibling, has_right_sibling; uint32_t child_entries; @@ -466,13 +472,13 @@ static int rebalance_children(struct shadow_spine *s, has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); if (!has_left_sibling) - r = rebalance2(s, info, vt, i); + r = rebalance2(s, info, i); else if (!has_right_sibling) - r = rebalance2(s, info, vt, i - 1); + r = rebalance2(s, info, i - 1); else - r = rebalance3(s, info, vt, i - 1); + r = rebalance3(s, info, i - 1); return r; } @@ -523,7 +529,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, if (le32_to_cpu(n->header.flags) & LEAF_NODE) return do_leaf(n, key, index); - r = rebalance_children(s, info, vt, key); + r = rebalance_children(s, info, key); if (r) break; @@ -544,14 +550,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, return r; } -static struct dm_btree_value_type le64_type = { - .context = NULL, - .size = sizeof(__le64), - .inc = NULL, - .dec = NULL, - .equal = NULL -}; - int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, dm_block_t *new_root) { diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index d9babda..24b3597 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -289,7 +289,7 @@ abort: kfree(conf->strip_zone); kfree(conf->devlist); kfree(conf); - *private_conf = ERR_PTR(err); + *private_conf = NULL; return err; } @@ -411,8 +411,7 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks "%s does not support generic reshape\n", __func__); rdev_for_each(rdev, mddev) - array_sectors += (rdev->sectors & - ~(sector_t)(mddev->chunk_sectors-1)); + array_sectors += rdev->sectors; return array_sectors; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 6af167f..d5bddfc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -967,7 +967,6 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) bio_list_merge(&conf->pending_bio_list, &plug->pending); conf->pending_count += plug->pending_cnt; spin_unlock_irq(&conf->device_lock); - wake_up(&conf->wait_barrier); md_wakeup_thread(mddev->thread); kfree(plug); return; @@ -981,12 +980,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; bio->bi_next = NULL; - if (unlikely((bio->bi_rw & REQ_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) - /* Just ignore it */ - bio_endio(bio, 0); - else - generic_make_request(bio); + generic_make_request(bio); bio = next; } kfree(plug); @@ -1006,7 +1000,6 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); const unsigned long do_discard = (bio->bi_rw & (REQ_DISCARD | 
REQ_SECURE)); - const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); struct md_rdev *blocked_rdev; struct blk_plug_cb *cb; struct raid1_plug_cb *plug = NULL; @@ -1308,8 +1301,7 @@ read_again: conf->mirrors[i].rdev->data_offset); mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; - mbio->bi_rw = - WRITE | do_flush_fua | do_sync | do_discard | do_same; + mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard; mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); @@ -2826,9 +2818,6 @@ static int run(struct mddev *mddev) if (IS_ERR(conf)) return PTR_ERR(conf); - if (mddev->queue) - blk_queue_max_write_same_sectors(mddev->queue, - mddev->chunk_sectors); rdev_for_each(rdev, mddev) { if (!mddev->gendisk) continue; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 61ab219..64d4824 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1073,7 +1073,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) bio_list_merge(&conf->pending_bio_list, &plug->pending); conf->pending_count += plug->pending_cnt; spin_unlock_irq(&conf->device_lock); - wake_up(&conf->wait_barrier); md_wakeup_thread(mddev->thread); kfree(plug); return; @@ -1087,12 +1086,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; bio->bi_next = NULL; - if (unlikely((bio->bi_rw & REQ_DISCARD) && - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) - /* Just ignore it */ - bio_endio(bio, 0); - else - generic_make_request(bio); + generic_make_request(bio); bio = next; } kfree(plug); @@ -1111,7 +1105,6 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_fua = (bio->bi_rw & REQ_FUA); const unsigned long do_discard = (bio->bi_rw & (REQ_DISCARD | REQ_SECURE)); - const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); unsigned long flags; struct md_rdev *blocked_rdev; struct blk_plug_cb *cb; @@ -1467,8 +1460,7 @@ retry_write: rdev)); mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = - WRITE | do_sync | do_fua | do_discard | do_same; + mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; mbio->bi_private = r10_bio; atomic_inc(&r10_bio->remaining); @@ -1510,8 +1502,7 @@ retry_write: r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = - WRITE | do_sync | do_fua | do_discard | do_same; + mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; mbio->bi_private = r10_bio; atomic_inc(&r10_bio->remaining); @@ -3578,8 +3569,6 @@ static int run(struct mddev *mddev) if (mddev->queue) { blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); - blk_queue_max_write_same_sectors(mddev->queue, - mddev->chunk_sectors); blk_queue_io_min(mddev->queue, chunk_size); if (conf->geo.raid_disks % conf->geo.near_copies) blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0089cb1..19d77a0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -674,11 +674,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) bi->bi_next = NULL; if (rrdev) set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); - - if (conf->mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), - bi, disk_devt(conf->mddev->gendisk), - sh->dev[i].sector); + trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), + bi, disk_devt(conf->mddev->gendisk), + sh->dev[i].sector); 
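The make_request() hunks in raid1.c and raid10.c above build each clone's bi_rw by masking the attributes of interest out of the incoming bio and OR-ing them back in together with WRITE; the revert simply drops REQ_WRITE_SAME from that set. A minimal sketch of the flag-propagation pattern, with invented flag values standing in for the real REQ_* constants:

/* Invented flag values; the real REQ_* constants live in blk_types.h */
#define DEMO_WRITE	(1UL << 0)
#define DEMO_SYNC	(1UL << 1)
#define DEMO_FLUSH	(1UL << 2)
#define DEMO_FUA	(1UL << 3)
#define DEMO_DISCARD	(1UL << 4)

static unsigned long clone_write_flags(unsigned long bi_rw)
{
	/* keep only the attributes worth propagating to the clone... */
	unsigned long do_sync = bi_rw & DEMO_SYNC;
	unsigned long do_flush_fua = bi_rw & (DEMO_FLUSH | DEMO_FUA);
	unsigned long do_discard = bi_rw & DEMO_DISCARD;

	/* ...and reissue them as a WRITE */
	return DEMO_WRITE | do_flush_fua | do_sync | do_discard;
}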
generic_make_request(bi); } if (rrdev) { @@ -706,10 +704,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) rbi->bi_io_vec[0].bv_offset = 0; rbi->bi_size = STRIPE_SIZE; rbi->bi_next = NULL; - if (conf->mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), - rbi, disk_devt(conf->mddev->gendisk), - sh->dev[i].sector); + trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), + rbi, disk_devt(conf->mddev->gendisk), + sh->dev[i].sector); generic_make_request(rbi); } if (!rdev && !rrdev) { @@ -1418,9 +1415,8 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) struct raid5_percpu *percpu; unsigned long cpu; - cpu = get_cpu_light(); + cpu = get_cpu(); percpu = per_cpu_ptr(conf->percpu, cpu); - spin_lock(&percpu->lock); if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { ops_run_biofill(sh); overlap_clear++; @@ -1472,8 +1468,7 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) if (test_and_clear_bit(R5_Overlap, &dev->flags)) wake_up(&sh->raid_conf->wait_for_overlap); } - spin_unlock(&percpu->lock); - put_cpu_light(); + put_cpu(); } #ifdef CONFIG_MULTICORE_RAID456 @@ -2324,26 +2319,11 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, int level = conf->level; if (rcw) { - - for (i = disks; i--; ) { - struct r5dev *dev = &sh->dev[i]; - - if (dev->towrite) { - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantdrain, &dev->flags); - if (!expand) - clear_bit(R5_UPTODATE, &dev->flags); - s->locked++; - } - } /* if we are not expanding this is a proper write request, and * there will be bios with new data to be drained into the * stripe cache */ if (!expand) { - if (!s->locked) - /* False alarm, nothing to do */ - return; sh->reconstruct_state = reconstruct_state_drain_run; set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); } else @@ -2351,6 +2331,17 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); + for (i = disks; i--; ) { + struct r5dev *dev = &sh->dev[i]; + + if (dev->towrite) { + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantdrain, &dev->flags); + if (!expand) + clear_bit(R5_UPTODATE, &dev->flags); + s->locked++; + } + } if (s->locked + conf->max_degraded == disks) if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) atomic_inc(&conf->pending_full_writes); @@ -2359,6 +2350,11 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); + sh->reconstruct_state = reconstruct_state_prexor_drain_run; + set_bit(STRIPE_OP_PREXOR, &s->ops_request); + set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); + for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if (i == pd_idx) @@ -2373,13 +2369,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, s->locked++; } } - if (!s->locked) - /* False alarm - nothing to do */ - return; - sh->reconstruct_state = reconstruct_state_prexor_drain_run; - set_bit(STRIPE_OP_PREXOR, &s->ops_request); - set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); - set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); } /* keep the parity disk(s) locked while asynchronous operations @@ -2614,8 +2603,6 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, int i; clear_bit(STRIPE_SYNCING, &sh->state); - if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) - 
wake_up(&conf->wait_for_overlap); s->syncing = 0; s->replacing = 0; /* There is nothing more to do for sync/check/repair. @@ -2789,7 +2776,6 @@ static void handle_stripe_clean_event(struct r5conf *conf, { int i; struct r5dev *dev; - int discard_pending = 0; for (i = disks; i--; ) if (sh->dev[i].written) { @@ -2818,23 +2804,9 @@ static void handle_stripe_clean_event(struct r5conf *conf, STRIPE_SECTORS, !test_bit(STRIPE_DEGRADED, &sh->state), 0); - } else if (test_bit(R5_Discard, &dev->flags)) - discard_pending = 1; - } - if (!discard_pending && - test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { - clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); - clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); - if (sh->qd_idx >= 0) { - clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); - clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); - } - /* now that discard is done we can proceed with any sync */ - clear_bit(STRIPE_DISCARD, &sh->state); - if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) - set_bit(STRIPE_HANDLE, &sh->state); - - } + } + } else if (test_bit(R5_Discard, &sh->dev[i].flags)) + clear_bit(R5_Discard, &sh->dev[i].flags); if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -2893,10 +2865,8 @@ static void handle_stripe_dirtying(struct r5conf *conf, set_bit(STRIPE_HANDLE, &sh->state); if (rmw < rcw && rmw > 0) { /* prefer read-modify-write, but need to get some data */ - if (conf->mddev->queue) - blk_add_trace_msg(conf->mddev->queue, - "raid5 rmw %llu %d", - (unsigned long long)sh->sector, rmw); + blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d", + (unsigned long long)sh->sector, rmw); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; if ((dev->towrite || i == sh->pd_idx) && @@ -2946,7 +2916,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, } } } - if (rcw && conf->mddev->queue) + if (rcw) blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", (unsigned long long)sh->sector, rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); @@ -3486,15 +3456,9 @@ static void handle_stripe(struct stripe_head *sh) return; } - if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { - spin_lock(&sh->stripe_lock); - /* Cannot process 'sync' concurrently with 'discard' */ - if (!test_bit(STRIPE_DISCARD, &sh->state) && - test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { - set_bit(STRIPE_SYNCING, &sh->state); - clear_bit(STRIPE_INSYNC, &sh->state); - } - spin_unlock(&sh->stripe_lock); + if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { + set_bit(STRIPE_SYNCING, &sh->state); + clear_bit(STRIPE_INSYNC, &sh->state); } clear_bit(STRIPE_DELAYED, &sh->state); @@ -3654,8 +3618,6 @@ static void handle_stripe(struct stripe_head *sh) test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS, 1); clear_bit(STRIPE_SYNCING, &sh->state); - if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) - wake_up(&conf->wait_for_overlap); } /* If the failed drives are just a ReadError, then we might need @@ -4061,10 +4023,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) atomic_inc(&conf->active_aligned_reads); spin_unlock_irq(&conf->device_lock); - if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), - align_bi, disk_devt(mddev->gendisk), - raid_bio->bi_sector); + trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), + align_bi, disk_devt(mddev->gendisk), + raid_bio->bi_sector); generic_make_request(align_bi); return 1; } else { @@ 
-4158,8 +4119,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) } spin_unlock_irq(&conf->device_lock); } - if (mddev->queue) - trace_block_unplug(mddev->queue, cnt, !from_schedule); + trace_block_unplug(mddev->queue, cnt, !from_schedule); kfree(cb); } @@ -4222,13 +4182,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) sh = get_active_stripe(conf, logical_sector, 0, 0, 0); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); - set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); - if (test_bit(STRIPE_SYNCING, &sh->state)) { - release_stripe(sh); - schedule(); - goto again; - } - clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); spin_lock_irq(&sh->stripe_lock); for (d = 0; d < conf->raid_disks; d++) { if (d == sh->pd_idx || d == sh->qd_idx) @@ -4241,7 +4194,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) goto again; } } - set_bit(STRIPE_DISCARD, &sh->state); finish_wait(&conf->wait_for_overlap, &w); for (d = 0; d < conf->raid_disks; d++) { if (d == sh->pd_idx || d == sh->qd_idx) @@ -5141,7 +5093,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) break; } per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; - spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); } #ifdef CONFIG_HOTPLUG_CPU conf->cpu_notify.notifier_call = raid456_cpu_notify; diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 8a57647..18b2c4a 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -323,7 +323,6 @@ enum { STRIPE_COMPUTE_RUN, STRIPE_OPS_REQ_PENDING, STRIPE_ON_UNPLUG_LIST, - STRIPE_DISCARD, }; /* @@ -429,7 +428,6 @@ struct r5conf { int recovery_disabled; /* per cpu variables */ struct raid5_percpu { - spinlock_t lock; /* Protection for -RT */ struct page *spare_page; /* Used when checking P/Q in raid6 */ void *scribble; /* space for constructing buffer * lists and performing address diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index dd53210..45e5d06 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -250,19 +250,17 @@ static u8 SRAM_Table[][60] = vdelay start of active video in 2 * field lines relative to trailing edge of /VRESET pulse (VDELAY register). sheight height of active video in 2 * field lines. - extraheight Added to sheight for cropcap.bounds.height only videostart0 ITU-R frame line number of the line corresponding to vdelay in the first field. */ #define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \ - vdelay, sheight, extraheight, videostart0) \ + vdelay, sheight, videostart0) \ .cropcap.bounds.left = minhdelayx1, \ /* * 2 because vertically we count field lines times two, */ \ /* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \ .cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \ /* 4 is a safety margin at the end of the line. */ \ .cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \ - .cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) - \ - MIN_VDELAY, \ + .cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY, \ .cropcap.defrect.left = hdelayx1, \ .cropcap.defrect.top = (videostart0) * 2, \ .cropcap.defrect.width = swidth, \ @@ -303,10 +301,9 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* totalwidth */ 1135, /* sqwidth */ 944, /* vdelay */ 0x20, - /* sheight */ 576, - /* bt878 (and bt848?) can capture another - line below active video. */ - /* extraheight */ 2, + /* bt878 (and bt848?) can capture another + line below active video. 
*/ + /* sheight */ (576 + 2) + 0x20 - 2, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR, @@ -333,7 +330,6 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 780, /* vdelay */ 0x1a, /* sheight */ 480, - /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_SECAM, @@ -359,7 +355,6 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 944, /* vdelay */ 0x20, /* sheight */ 576, - /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_PAL_Nc, @@ -385,7 +380,6 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 780, /* vdelay */ 0x1a, /* sheight */ 576, - /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_PAL_M, @@ -411,7 +405,6 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 780, /* vdelay */ 0x1a, /* sheight */ 480, - /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_PAL_N, @@ -437,7 +430,6 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 944, /* vdelay */ 0x20, /* sheight */ 576, - /* extraheight */ 0, /* videostart0 */ 23) },{ .v4l2_id = V4L2_STD_NTSC_M_JP, @@ -463,7 +455,6 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 780, /* vdelay */ 0x16, /* sheight */ 480, - /* extraheight */ 0, /* videostart0 */ 23) },{ /* that one hopefully works with the strange timing @@ -493,7 +484,6 @@ const struct bttv_tvnorm bttv_tvnorms[] = { /* sqwidth */ 944, /* vdelay */ 0x1a, /* sheight */ 480, - /* extraheight */ 0, /* videostart0 */ 23) } }; diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c index b2c8c34..8e971ff 100644 --- a/drivers/media/pci/cx18/cx18-alsa-main.c +++ b/drivers/media/pci/cx18/cx18-alsa-main.c @@ -197,7 +197,7 @@ err_exit: return ret; } -static int cx18_alsa_load(struct cx18 *cx) +static int __init cx18_alsa_load(struct cx18 *cx) { struct v4l2_device *v4l2_dev = &cx->v4l2_dev; struct cx18_stream *s; diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.h b/drivers/media/pci/cx18/cx18-alsa-pcm.h index e2b2c5b..d26e51f 100644 --- a/drivers/media/pci/cx18/cx18-alsa-pcm.h +++ b/drivers/media/pci/cx18/cx18-alsa-pcm.h @@ -20,7 +20,7 @@ * 02111-1307 USA */ -int snd_cx18_pcm_create(struct snd_cx18_card *cxsc); +int __init snd_cx18_pcm_create(struct snd_cx18_card *cxsc); /* Used by cx18-mailbox to announce the PCM data to the module */ void cx18_alsa_announce_pcm_data(struct snd_cx18_card *card, u8 *pcm_data, diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c index e970cfa..4a221c6 100644 --- a/drivers/media/pci/ivtv/ivtv-alsa-main.c +++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c @@ -205,7 +205,7 @@ err_exit: return ret; } -static int ivtv_alsa_load(struct ivtv *itv) +static int __init ivtv_alsa_load(struct ivtv *itv) { struct v4l2_device *v4l2_dev = &itv->v4l2_dev; struct ivtv_stream *s; diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h index 186814e..23dfe0d 100644 --- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h +++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h @@ -20,4 +20,4 @@ * 02111-1307 USA */ -int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc); +int __init snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc); diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index 8e9a668..35cc526 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -205,21 +205,19 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp) struct vm_area_struct *vma; struct 
mm_struct *mm = current->mm; - /* For kernel direct-mapped memory, take the easy way */ - if (virtp >= PAGE_OFFSET) - return virt_to_phys((void *) virtp); - - down_read(&current->mm->mmap_sem); vma = find_vma(mm, virtp); - if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) { + /* For kernel direct-mapped memory, take the easy way */ + if (virtp >= PAGE_OFFSET) { + physp = virt_to_phys((void *) virtp); + } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) { /* this will catch, kernel-allocated, mmaped-to-usermode addresses */ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start); - up_read(&current->mm->mmap_sem); } else { /* otherwise, use get_user_pages() for general userland pages */ int res, nr_pages = 1; struct page *pages; + down_read(&current->mm->mmap_sem); res = get_user_pages(current, current->mm, virtp, nr_pages, 1, 0, &pages, NULL); diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index d593bc6..601d1ac1 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -789,10 +789,8 @@ static ssize_t show_protocols(struct device *device, } else if (dev->raw) { enabled = dev->raw->enabled_protocols; allowed = ir_raw_get_allowed_protocols(); - } else { - mutex_unlock(&dev->lock); + } else return -ENODEV; - } IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n", (long long)allowed, diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c index 98a7f5e..513969f 100644 --- a/drivers/media/v4l2-core/v4l2-device.c +++ b/drivers/media/v4l2-core/v4l2-device.c @@ -159,21 +159,31 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, sd->v4l2_dev = v4l2_dev; if (sd->internal_ops && sd->internal_ops->registered) { err = sd->internal_ops->registered(sd); - if (err) - goto error_module; + if (err) { + module_put(sd->owner); + return err; + } } /* This just returns 0 if either of the two args is NULL */ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL); - if (err) - goto error_unregister; + if (err) { + if (sd->internal_ops && sd->internal_ops->unregistered) + sd->internal_ops->unregistered(sd); + module_put(sd->owner); + return err; + } #if defined(CONFIG_MEDIA_CONTROLLER) /* Register the entity. 
*/ if (v4l2_dev->mdev) { err = media_device_register_entity(v4l2_dev->mdev, entity); - if (err < 0) - goto error_unregister; + if (err < 0) { + if (sd->internal_ops && sd->internal_ops->unregistered) + sd->internal_ops->unregistered(sd); + module_put(sd->owner); + return err; + } } #endif @@ -182,14 +192,6 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, spin_unlock(&v4l2_dev->lock); return 0; - -error_unregister: - if (sd->internal_ops && sd->internal_ops->unregistered) - sd->internal_ops->unregistered(sd); -error_module: - module_put(sd->owner); - sd->v4l2_dev = NULL; - return err; } EXPORT_SYMBOL_GPL(v4l2_device_register_subdev); diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c index 64a779c..f5ddb82 100644 --- a/drivers/memstick/host/rtsx_pci_ms.c +++ b/drivers/memstick/host/rtsx_pci_ms.c @@ -426,9 +426,6 @@ static void rtsx_pci_ms_request(struct memstick_host *msh) dev_dbg(ms_dev(host), "--> %s\n", __func__); - if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD)) - return; - schedule_work(&host->handle_req); } @@ -444,10 +441,6 @@ static int rtsx_pci_ms_set_param(struct memstick_host *msh, dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", __func__, param, value); - err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_MS_CARD); - if (err) - return err; - switch (param) { case MEMSTICK_POWER: if (value == MEMSTICK_POWER_ON) diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c index 6b40e0c..210dd03 100644 --- a/drivers/mfd/adp5520.c +++ b/drivers/mfd/adp5520.c @@ -36,7 +36,6 @@ struct adp5520_chip { struct blocking_notifier_head notifier_list; int irq; unsigned long id; - uint8_t mode; }; static int __adp5520_read(struct i2c_client *client, @@ -327,10 +326,7 @@ static int adp5520_suspend(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct adp5520_chip *chip = dev_get_drvdata(&client->dev); - adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode); - /* All other bits are W1C */ - chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY; - adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0); + adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); return 0; } @@ -339,7 +335,7 @@ static int adp5520_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct adp5520_chip *chip = dev_get_drvdata(&client->dev); - adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode); + adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); return 0; } #endif diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index 1e2d120..9fc5700 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -713,25 +713,6 @@ int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card) } EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off); -int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card) -{ - unsigned int cd_mask[] = { - [RTSX_SD_CARD] = SD_EXIST, - [RTSX_MS_CARD] = MS_EXIST - }; - - if (!pcr->ms_pmos) { - /* When using single PMOS, accessing card is not permitted - * if the existing card is not the designated one. 
- */ - if (pcr->card_exist & (~cd_mask[card])) - return -EIO; - } - - return 0; -} -EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check); - int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) { if (pcr->ops->switch_output_voltage) @@ -777,7 +758,7 @@ static void rtsx_pci_card_detect(struct work_struct *work) struct delayed_work *dwork; struct rtsx_pcr *pcr; unsigned long flags; - unsigned int card_detect = 0, card_inserted, card_removed; + unsigned int card_detect = 0; u32 irq_status; dwork = to_delayed_work(work); @@ -785,35 +766,25 @@ static void rtsx_pci_card_detect(struct work_struct *work) dev_dbg(&(pcr->pci->dev), "--> %s\n", __func__); - mutex_lock(&pcr->pcr_mutex); spin_lock_irqsave(&pcr->lock, flags); irq_status = rtsx_pci_readl(pcr, RTSX_BIPR); dev_dbg(&(pcr->pci->dev), "irq_status: 0x%08x\n", irq_status); - irq_status &= CARD_EXIST; - card_inserted = pcr->card_inserted & irq_status; - card_removed = pcr->card_removed; - pcr->card_inserted = 0; - pcr->card_removed = 0; - - spin_unlock_irqrestore(&pcr->lock, flags); - - if (card_inserted || card_removed) { + if (pcr->card_inserted || pcr->card_removed) { dev_dbg(&(pcr->pci->dev), "card_inserted: 0x%x, card_removed: 0x%x\n", - card_inserted, card_removed); + pcr->card_inserted, pcr->card_removed); if (pcr->ops->cd_deglitch) - card_inserted = pcr->ops->cd_deglitch(pcr); - - card_detect = card_inserted | card_removed; + pcr->card_inserted = pcr->ops->cd_deglitch(pcr); - pcr->card_exist |= card_inserted; - pcr->card_exist &= ~card_removed; + card_detect = pcr->card_inserted | pcr->card_removed; + pcr->card_inserted = 0; + pcr->card_removed = 0; } - mutex_unlock(&pcr->pcr_mutex); + spin_unlock_irqrestore(&pcr->lock, flags); if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event) pcr->slots[RTSX_SD_CARD].card_event( @@ -865,6 +836,10 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) } } + if (pcr->card_inserted || pcr->card_removed) + schedule_delayed_work(&pcr->carddet_work, + msecs_to_jiffies(200)); + if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) { if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) { pcr->trans_result = TRANS_RESULT_FAIL; @@ -877,10 +852,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) } } - if (pcr->card_inserted || pcr->card_removed) - schedule_delayed_work(&pcr->carddet_work, - msecs_to_jiffies(200)); - spin_unlock(&pcr->lock); return IRQ_HANDLED; } @@ -1003,14 +974,6 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) return err; } - /* No CD interrupt if probing driver with card inserted. - * So we need to initialize pcr->card_exist here. - */ - if (pcr->ops->cd_deglitch) - pcr->card_exist = pcr->ops->cd_deglitch(pcr); - else - pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST; - return 0; } diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index bb07512..b151b7c 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -63,7 +63,6 @@ config ATMEL_PWM config ATMEL_TCLIB bool "Atmel AT32/AT91 Timer/Counter Library" depends on (AVR32 || ARCH_AT91) - default y if PREEMPT_RT_FULL help Select this if you want a library to allocate the Timer/Counter blocks found on many Atmel processors. This facilitates using @@ -79,7 +78,8 @@ config ATMEL_TCB_CLKSRC are combined to make a single 32-bit timer. When GENERIC_CLOCKEVENTS is defined, the third timer channel - may be used as a clock event device supporting oneshot mode. + may be used as a clock event device supporting oneshot mode + (delays of up to two seconds) based on the 32 KiHz clock. 
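The "up to two seconds" bound in the ATMEL_TCB_CLKSRC help text restored above follows from the hardware, assuming the 16-bit counter of the AT91-class TC blocks: clocked from the 32 KiHz slow clock, the counter wraps after 65536 / 32768 = 2 s, which caps the longest programmable oneshot delay. A small standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	const double slow_clk_hz = 32768.0;		/* 32 KiHz slow clock */
	const unsigned long counter_max = 1UL << 16;	/* 16-bit TC counter */

	/* longest oneshot delay before the counter wraps: prints 2.0 s */
	printf("max oneshot delay: %.1f s\n", counter_max / slow_clk_hz);
	return 0;
}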
config ATMEL_TCB_CLKSRC_BLOCK int @@ -93,14 +93,6 @@ config ATMEL_TCB_CLKSRC_BLOCK TC can be used for other purposes, such as PWM generation and interval timing. -config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - bool "TC Block use 32 KiHz clock" - depends on ATMEL_TCB_CLKSRC - default y if !PREEMPT_RT_FULL - help - Select this to use 32 KiHz base clock rate as TC block clock - source for clock events. - config IBM_ASM tristate "Device driver for IBM RSA service processor" depends on X86 && PCI && INPUT @@ -122,35 +114,6 @@ config IBM_ASM for information on the specific driver level and support statement for your IBM server. -config HWLAT_DETECTOR - tristate "Testing module to detect hardware-induced latencies" - depends on DEBUG_FS - depends on RING_BUFFER - default m - ---help--- - A simple hardware latency detector. Use this module to detect - large latencies introduced by the behavior of the underlying - system firmware external to Linux. We do this using periodic - use of stop_machine to grab all available CPUs and measure - for unexplainable gaps in the CPU timestamp counter(s). By - default, the module is not enabled until the "enable" file - within the "hwlat_detector" debugfs directory is toggled. - - This module is often used to detect SMI (System Management - Interrupts) on x86 systems, though it is not x86 specific. To - this end, we default to using a sample window of 1 second, - during which we will sample for 0.5 seconds. If an SMI or - similar event occurs during that time, it is recorded - into an 8K samples global ring buffer until retrieved. - - WARNING: This software should never be enabled (it can be built - but should not be turned on after it is loaded) in a production - environment where high latencies are a concern since the - sampling mechanism actually introduces latencies for - regular tasks while the CPU(s) are being held. - - If unsure, say N - config PHANTOM tristate "Sensable PHANToM (PCI)" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index ec6ee3f..2129377 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -49,4 +49,3 @@ obj-y += carma/ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ obj-$(CONFIG_INTEL_MEI) += mei/ -obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c deleted file mode 100644 index b7b7c90..0000000 --- a/drivers/misc/hwlat_detector.c +++ /dev/null @@ -1,1212 +0,0 @@ -/* - * hwlat_detector.c - A simple Hardware Latency detector. - * - * Use this module to detect large system latencies induced by the behavior of - * certain underlying system hardware or firmware, independent of Linux itself. - * The code was developed originally to detect the presence of SMIs on Intel - * and AMD systems, although there is no dependency upon x86 herein. - * - * The classical example usage of this module is in detecting the presence of - * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a - * somewhat special form of hardware interrupt spawned from earlier CPU debug - * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge - * LPC (or other device) to generate a special interrupt under certain - * circumstances, for example, upon expiration of a special SMI timer device, - * due to certain external thermal readings, on certain I/O address accesses, - * and other situations. 
An SMI hits a special CPU pin, triggers a special - * SMI mode (complete with special memory map), and the OS is unaware. - * - * Although certain hardware-inducing latencies are necessary (for example, - * a modern system often requires an SMI handler for correct thermal control - * and remote management) they can wreak havoc upon any OS-level performance - * guarantees toward low-latency, especially when the OS is not even made - * aware of the presence of these interrupts. For this reason, we need a - * somewhat brute force mechanism to detect these interrupts. In this case, - * we do it by hogging all of the CPU(s) for configurable timer intervals, - * sampling the built-in CPU timer, looking for discontiguous readings. - * - * WARNING: This implementation necessarily introduces latencies. Therefore, - * you should NEVER use this module in a production environment - * requiring any kind of low-latency performance guarantee(s). - * - * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. - * - * Includes useful feedback from Clark Williams - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ -#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ -#define U64STR_SIZE 22 /* 20 digits max */ - -#define VERSION "1.0.0" -#define BANNER "hwlat_detector: " -#define DRVNAME "hwlat_detector" -#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ -#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ -#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ - -/* Module metadata */ - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jon Masters "); -MODULE_DESCRIPTION("A simple hardware latency detector"); -MODULE_VERSION(VERSION); - -/* Module parameters */ - -static int debug; -static int enabled; -static int threshold; - -module_param(debug, int, 0); /* enable debug */ -module_param(enabled, int, 0); /* enable detector */ -module_param(threshold, int, 0); /* latency threshold */ - -/* Buffering and sampling */ - -static struct ring_buffer *ring_buffer; /* sample buffer */ -static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ -static unsigned long buf_size = BUF_SIZE_DEFAULT; -static struct task_struct *kthread; /* sampling thread */ - -/* DebugFS filesystem entries */ - -static struct dentry *debug_dir; /* debugfs directory */ -static struct dentry *debug_max; /* maximum TSC delta */ -static struct dentry *debug_count; /* total detect count */ -static struct dentry *debug_sample_width; /* sample width us */ -static struct dentry *debug_sample_window; /* sample window us */ -static struct dentry *debug_sample; /* raw samples us */ -static struct dentry *debug_threshold; /* threshold us */ -static struct dentry *debug_enable; /* enable/disable */ - -/* Individual samples and global state */ - -struct sample; /* latency sample */ -struct data; /* Global state */ - -/* Sampling functions */ -static int __buffer_add_sample(struct sample *sample); -static struct sample *buffer_get_sample(struct sample *sample); -static int get_sample(void *unused); - -/* Threading and state */ -static int kthread_fn(void *unused); -static int start_kthread(void); -static int stop_kthread(void); -static void __reset_stats(void); -static int init_stats(void); - -/* Debugfs interface */ -static ssize_t 
simple_data_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos, const u64 *entry); -static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, - size_t cnt, loff_t *ppos, u64 *entry); -static int debug_sample_fopen(struct inode *inode, struct file *filp); -static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos); -static int debug_sample_release(struct inode *inode, struct file *filp); -static int debug_enable_fopen(struct inode *inode, struct file *filp); -static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos); -static ssize_t debug_enable_fwrite(struct file *file, - const char __user *user_buffer, - size_t user_size, loff_t *offset); - -/* Initialization functions */ -static int init_debugfs(void); -static void free_debugfs(void); -static int detector_init(void); -static void detector_exit(void); - -/* Individual latency samples are stored here when detected and packed into - * the ring_buffer circular buffer, where they are overwritten when - * more than buf_size/sizeof(sample) samples are received. */ -struct sample { - u64 seqnum; /* unique sequence */ - u64 duration; /* ktime delta */ - struct timespec timestamp; /* wall time */ - unsigned long lost; -}; - -/* keep the global state somewhere. Mostly used under stop_machine. */ -static struct data { - - struct mutex lock; /* protect changes */ - - u64 count; /* total since reset */ - u64 max_sample; /* max hardware latency */ - u64 threshold; /* sample threshold level */ - - u64 sample_window; /* total sampling window (on+off) */ - u64 sample_width; /* active sampling portion of window */ - - atomic_t sample_open; /* whether the sample file is open */ - - wait_queue_head_t wq; /* waitqueue for new sample values */ - -} data; - -/** - * __buffer_add_sample - add a new latency sample recording to the ring buffer - * @sample: The new latency sample value - * - * This receives a new latency sample and records it in a global ring buffer. - * No additional locking is used in this case - suited for stop_machine use. - */ -static int __buffer_add_sample(struct sample *sample) -{ - return ring_buffer_write(ring_buffer, - sizeof(struct sample), sample); -} - -/** - * buffer_get_sample - remove a hardware latency sample from the ring buffer - * @sample: Pre-allocated storage for the sample - * - * This retrieves a hardware latency sample from the global circular buffer - */ -static struct sample *buffer_get_sample(struct sample *sample) -{ - struct ring_buffer_event *e = NULL; - struct sample *s = NULL; - unsigned int cpu = 0; - - if (!sample) - return NULL; - - mutex_lock(&ring_buffer_mutex); - for_each_online_cpu(cpu) { - e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); - if (e) - break; - } - - if (e) { - s = ring_buffer_event_data(e); - memcpy(sample, s, sizeof(struct sample)); - } else - sample = NULL; - mutex_unlock(&ring_buffer_mutex); - - return sample; -} - -/** - * get_sample - sample the CPU TSC and look for likely hardware latencies - * @unused: This is not used but is a part of the stop_machine API - * - * Used to repeatedly capture the CPU TSC (or similar), looking for potential - * hardware-induced latency. Called under stop_machine, with data.lock held. 
- */ -static int get_sample(void *unused) -{ - ktime_t start, t1, t2; - s64 diff, total = 0; - u64 sample = 0; - int ret = 1; - - start = ktime_get(); /* start timestamp */ - - do { - - t1 = ktime_get(); /* we'll look for a discontinuity */ - t2 = ktime_get(); - - total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ - diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ - - /* This shouldn't happen */ - if (diff < 0) { - printk(KERN_ERR BANNER "time running backwards\n"); - goto out; - } - - if (diff > sample) - sample = diff; /* only want highest value */ - - } while (total <= data.sample_width); - - /* If we exceed the threshold value, we have found a hardware latency */ - if (sample > data.threshold) { - struct sample s; - - data.count++; - s.seqnum = data.count; - s.duration = sample; - s.timestamp = CURRENT_TIME; - __buffer_add_sample(&s); - - /* Keep a running maximum ever recorded hardware latency */ - if (sample > data.max_sample) - data.max_sample = sample; - } - - ret = 0; -out: - return ret; -} - -/* - * kthread_fn - The CPU time sampling/hardware latency detection kernel thread - * @unused: A required part of the kthread API. - * - * Used to periodically sample the CPU TSC via a call to get_sample. We - * use stop_machine, which does (intentionally) introduce latency since we - * need to ensure nothing else might be running (and thus pre-empting). - * Obviously this should never be used in production environments. - * - * stop_machine will schedule us typically only on CPU0 which is fine for - * almost every real-world hardware latency situation - but we might later - * generalize this if we find there are any actual systems with alternate - * SMI delivery or other non CPU0 hardware latencies. - */ -static int kthread_fn(void *unused) -{ - int err = 0; - u64 interval = 0; - - while (!kthread_should_stop()) { - - mutex_lock(&data.lock); - - err = stop_machine(get_sample, unused, 0); - if (err) { - /* Houston, we have a problem */ - mutex_unlock(&data.lock); - goto err_out; - } - - wake_up(&data.wq); /* wake up reader(s) */ - - interval = data.sample_window - data.sample_width; - do_div(interval, USEC_PER_MSEC); /* modifies interval value */ - - mutex_unlock(&data.lock); - - if (msleep_interruptible(interval)) - goto out; - } - goto out; -err_out: - printk(KERN_ERR BANNER "could not call stop_machine, disabling\n"); - enabled = 0; -out: - return err; - -} - -/** - * start_kthread - Kick off the hardware latency sampling/detector kthread - * - * This starts a kernel thread that will sit and sample the CPU timestamp - * counter (TSC or similar) and look for potential hardware latencies. - */ -static int start_kthread(void) -{ - kthread = kthread_run(kthread_fn, NULL, - DRVNAME); - if (IS_ERR(kthread)) { - printk(KERN_ERR BANNER "could not start sampling thread\n"); - enabled = 0; - return -ENOMEM; - } - - return 0; -} - -/** - * stop_kthread - Inform the hardware latency sampling/detector kthread to stop - * - * This kicks the running hardware latency sampling/detector kernel thread and - * tells it to stop sampling now. Use this on unload and at system shutdown. - */ -static int stop_kthread(void) -{ - int ret; - - ret = kthread_stop(kthread); - - return ret; -} - -/** - * __reset_stats - Reset statistics for the hardware latency detector - * - * We use data to store various statistics and global state. We call this - * function in order to reset those when "enable" is toggled on or off, and - * also at initialization. Should be called with data.lock held. 
- */ -static void __reset_stats(void) -{ - data.count = 0; - data.max_sample = 0; - ring_buffer_reset(ring_buffer); /* flush out old sample entries */ -} - -/** - * init_stats - Set up global state statistics for the hardware latency detector - * - * We use data to store various statistics and global state. We also use - * a global ring buffer (ring_buffer) to keep raw samples of detected hardware - * induced system latencies. This function initializes these structures and - * allocates the global ring buffer also. - */ -static int init_stats(void) -{ - int ret = -ENOMEM; - - mutex_init(&data.lock); - init_waitqueue_head(&data.wq); - atomic_set(&data.sample_open, 0); - - ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); - - if (WARN(!ring_buffer, KERN_ERR BANNER - "failed to allocate ring buffer!\n")) - goto out; - - __reset_stats(); - data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ - data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ - data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ - - ret = 0; - -out: - return ret; - -} - -/* - * simple_data_read - Wrapper read function for global state debugfs entries - * @filp: The active open file structure for the debugfs "file" - * @ubuf: The userspace provided buffer to read value into - * @cnt: The maximum number of bytes to read - * @ppos: The current "file" position - * @entry: The entry to read from - * - * This function provides a generic read implementation for the global state - * "data" structure debugfs filesystem entries. It would be nice to use - * simple_attr_read directly, but we need to make sure that the data.lock - * mutex is held during the actual read (even though we likely won't ever - * actually race here as the updater runs under a stop_machine context). - */ -static ssize_t simple_data_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos, const u64 *entry) -{ - char buf[U64STR_SIZE]; - u64 val = 0; - int len = 0; - - memset(buf, 0, sizeof(buf)); - - if (!entry) - return -EFAULT; - - mutex_lock(&data.lock); - val = *entry; - mutex_unlock(&data.lock); - - len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); - - return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); - -} - -/* - * simple_data_write - Wrapper write function for global state debugfs entries - * @filp: The active open file structure for the debugfs "file" - * @ubuf: The userspace provided buffer to write value from - * @cnt: The maximum number of bytes to write - * @ppos: The current "file" position - * @entry: The entry to write to - * - * This function provides a generic write implementation for the global state - * "data" structure debugfs filesystem entries. It would be nice to use - * simple_attr_write directly, but we need to make sure that the data.lock - * mutex is held during the actual write (even though we likely won't ever - * actually race here as the updater runs under a stop_machine context). 
- */
-static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
-				 size_t cnt, loff_t *ppos, u64 *entry)
-{
-	char buf[U64STR_SIZE];
-	int csize = min(cnt, sizeof(buf));
-	u64 val = 0;
-	int err = 0;
-
-	memset(buf, '\0', sizeof(buf));
-	if (copy_from_user(buf, ubuf, csize))
-		return -EFAULT;
-
-	buf[U64STR_SIZE-1] = '\0';	/* just in case */
-	err = strict_strtoull(buf, 10, &val);
-	if (err)
-		return -EINVAL;
-
-	mutex_lock(&data.lock);
-	*entry = val;
-	mutex_unlock(&data.lock);
-
-	return csize;
-}
-
-/**
- * debug_count_fopen - Open function for "count" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "count" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_count_fopen(struct inode *inode, struct file *filp)
-{
-	return 0;
-}
-
-/**
- * debug_count_fread - Read function for "count" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "count" debugfs
- * interface to the hardware latency detector. Can be used to read the
- * number of latency readings exceeding the configured threshold since
- * the detector was last reset (e.g. by writing a zero into "count").
- */
-static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
-				 size_t cnt, loff_t *ppos)
-{
-	return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
-}
-
-/**
- * debug_count_fwrite - Write function for "count" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "count" debugfs
- * interface to the hardware latency detector. Can be used to write a
- * desired value, especially to zero the total count.
- */
-static ssize_t debug_count_fwrite(struct file *filp,
-				  const char __user *ubuf,
-				  size_t cnt,
-				  loff_t *ppos)
-{
-	return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
-}
-
-/**
- * debug_enable_fopen - Dummy open function for "enable" debugfs interface
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "enable" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_enable_fopen(struct inode *inode, struct file *filp)
-{
-	return 0;
-}
-
-/**
- * debug_enable_fread - Read function for "enable" debugfs interface
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "enable" debugfs
- * interface to the hardware latency detector. Can be used to determine
- * whether the detector is currently enabled ("0\n" or "1\n" returned).
- */
-static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
-				  size_t cnt, loff_t *ppos)
-{
-	char buf[4];
-
-	if ((cnt < sizeof(buf)) || (*ppos))
-		return 0;
-
-	buf[0] = enabled ? '1' : '0';
-	buf[1] = '\n';
-	buf[2] = '\0';
-	if (copy_to_user(ubuf, buf, strlen(buf)))
-		return -EFAULT;
-	return *ppos = strlen(buf);
-}
-
-/**
- * debug_enable_fwrite - Write function for "enable" debugfs interface
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "enable" debugfs
- * interface to the hardware latency detector. Can be used to enable or
- * disable the detector, which will have the side-effect of possibly
- * also resetting the global stats and kicking off the measuring
- * kthread (on an enable) or the converse (upon a disable).
- */
-static ssize_t debug_enable_fwrite(struct file *filp,
-				   const char __user *ubuf,
-				   size_t cnt,
-				   loff_t *ppos)
-{
-	char buf[4];
-	int csize = min(cnt, sizeof(buf));
-	long val = 0;
-	int err = 0;
-
-	memset(buf, '\0', sizeof(buf));
-	if (copy_from_user(buf, ubuf, csize))
-		return -EFAULT;
-
-	buf[sizeof(buf)-1] = '\0';	/* just in case */
-	err = strict_strtoul(buf, 10, &val);
-	if (0 != err)
-		return -EINVAL;
-
-	if (val) {
-		if (enabled)
-			goto unlock;
-		enabled = 1;
-		__reset_stats();
-		if (start_kthread())
-			return -EFAULT;
-	} else {
-		if (!enabled)
-			goto unlock;
-		enabled = 0;
-		err = stop_kthread();
-		if (err) {
-			printk(KERN_ERR BANNER "cannot stop kthread\n");
-			return -EFAULT;
-		}
-		wake_up(&data.wq);	/* reader(s) should return */
-	}
-unlock:
-	return csize;
-}
-
-/**
- * debug_max_fopen - Open function for "max" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "max" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_max_fopen(struct inode *inode, struct file *filp)
-{
-	return 0;
-}
-
-/**
- * debug_max_fread - Read function for "max" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "max" debugfs
- * interface to the hardware latency detector. Can be used to determine
- * the maximum latency value observed since it was last reset.
- */
-static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
-			       size_t cnt, loff_t *ppos)
-{
-	return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
-}
-
-/**
- * debug_max_fwrite - Write function for "max" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "max" debugfs
- * interface to the hardware latency detector. Can be used to reset the
- * maximum or set it to some other desired value - if, then, subsequent
- * measurements exceed this value, the maximum will be updated.
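- *
- * For example, assuming debugfs is mounted at /sys/kernel/debug, the
- * maximum could be reset with:
- *
- *	echo 0 > /sys/kernel/debug/hwlat_detector/max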
- */
-static ssize_t debug_max_fwrite(struct file *filp,
-				const char __user *ubuf,
-				size_t cnt,
-				loff_t *ppos)
-{
-	return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
-}
-
-
-/**
- * debug_sample_fopen - An open function for "sample" debugfs interface
- * @inode: The in-kernel inode representation of this debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function handles opening the "sample" file within the hardware
- * latency detector debugfs directory interface. This file is used to read
- * raw samples from the global ring_buffer and allows the user to see a
- * running latency history. Can be opened blocking or non-blocking,
- * affecting whether it behaves as a buffer read pipe, or does not.
- * Implements simple locking to prevent multiple simultaneous use.
- */
-static int debug_sample_fopen(struct inode *inode, struct file *filp)
-{
-	if (!atomic_add_unless(&data.sample_open, 1, 1))
-		return -EBUSY;
-	else
-		return 0;
-}
-
-/**
- * debug_sample_fread - A read function for "sample" debugfs interface
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that will contain the samples read
- * @cnt: The maximum bytes to read from the debugfs "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function handles reading from the "sample" file within the hardware
- * latency detector debugfs directory interface. This file is used to read
- * raw samples from the global ring_buffer and allows the user to see a
- * running latency history. By default this will block pending a new
- * value written into the sample buffer, unless there are already a
- * number of value(s) waiting in the buffer, or the sample file was
- * previously opened in a non-blocking mode of operation.
- */
-static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
-				  size_t cnt, loff_t *ppos)
-{
-	int len = 0;
-	char buf[64];
-	struct sample *sample = NULL;
-
-	if (!enabled)
-		return 0;
-
-	sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
-	if (!sample)
-		return -ENOMEM;
-
-	while (!buffer_get_sample(sample)) {
-
-		DEFINE_WAIT(wait);
-
-		if (filp->f_flags & O_NONBLOCK) {
-			len = -EAGAIN;
-			goto out;
-		}
-
-		prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
-		schedule();
-		finish_wait(&data.wq, &wait);
-
-		if (signal_pending(current)) {
-			len = -EINTR;
-			goto out;
-		}
-
-		if (!enabled) {		/* enable was toggled */
-			len = 0;
-			goto out;
-		}
-	}
-
-	len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
-		       sample->timestamp.tv_sec,
-		       sample->timestamp.tv_nsec,
-		       sample->duration);
-
-
-	/* handling partial reads is more trouble than it's worth */
-	if (len > cnt)
-		goto out;
-
-	if (copy_to_user(ubuf, buf, len))
-		len = -EFAULT;
-
-out:
-	kfree(sample);
-	return len;
-}
-
-/**
- * debug_sample_release - Release function for "sample" debugfs interface
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function completes the close of the debugfs interface "sample" file.
- * Frees the sample_open "lock" so that other users may open the interface.
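- *
- * A user-space consumer might read the sample pipe with, e.g.:
- *
- *	cat /sys/kernel/debug/hwlat_detector/sample
- *
- * where each record is printed as "<seconds>.<nanoseconds>\t<usecs>".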
- */
-static int debug_sample_release(struct inode *inode, struct file *filp)
-{
-	atomic_dec(&data.sample_open);
-
-	return 0;
-}
-
-/**
- * debug_threshold_fopen - Open function for "threshold" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "threshold" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_threshold_fopen(struct inode *inode, struct file *filp)
-{
-	return 0;
-}
-
-/**
- * debug_threshold_fread - Read function for "threshold" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "threshold" debugfs
- * interface to the hardware latency detector. It can be used to determine
- * the current threshold level at which a latency will be recorded in the
- * global ring buffer, typically on the order of 10us.
- */
-static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
-				     size_t cnt, loff_t *ppos)
-{
-	return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
-}
-
-/**
- * debug_threshold_fwrite - Write function for "threshold" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "threshold" debugfs
- * interface to the hardware latency detector. It can be used to configure
- * the threshold level at which any subsequently detected latencies will
- * be recorded into the global ring buffer.
- */
-static ssize_t debug_threshold_fwrite(struct file *filp,
-				      const char __user *ubuf,
-				      size_t cnt,
-				      loff_t *ppos)
-{
-	int ret;
-
-	ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
-
-	if (enabled)
-		wake_up_process(kthread);
-
-	return ret;
-}
-
-/**
- * debug_width_fopen - Open function for "width" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "width" debugfs
- * interface to the hardware latency detector.
- */
-static int debug_width_fopen(struct inode *inode, struct file *filp)
-{
-	return 0;
-}
-
-/**
- * debug_width_fread - Read function for "width" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "width" debugfs
- * interface to the hardware latency detector. It can be used to determine
- * for how many us of the total window us we will actively sample for any
- * hardware-induced latency periods. Obviously, it is not possible to
- * sample constantly and have the system respond to a sample reader, or,
- * worse, without having the system appear to have gone out to lunch.
- */
-static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
-				 size_t cnt, loff_t *ppos)
-{
-	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
-}
-
-/**
- * debug_width_fwrite - Write function for "width" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "width" debugfs
- * interface to the hardware latency detector. It can be used to configure
- * for how many us of the total window us we will actively sample for any
- * hardware-induced latency periods. Obviously, it is not possible to
- * sample constantly and have the system respond to a sample reader, or,
- * worse, without having the system appear to have gone out to lunch. It
- * is enforced that width is less than the total window size.
- */
-static ssize_t debug_width_fwrite(struct file *filp,
-				  const char __user *ubuf,
-				  size_t cnt,
-				  loff_t *ppos)
-{
-	char buf[U64STR_SIZE];
-	int csize = min(cnt, sizeof(buf));
-	u64 val = 0;
-	int err = 0;
-
-	memset(buf, '\0', sizeof(buf));
-	if (copy_from_user(buf, ubuf, csize))
-		return -EFAULT;
-
-	buf[U64STR_SIZE-1] = '\0';	/* just in case */
-	err = strict_strtoull(buf, 10, &val);
-	if (0 != err)
-		return -EINVAL;
-
-	mutex_lock(&data.lock);
-	if (val < data.sample_window)
-		data.sample_width = val;
-	else {
-		mutex_unlock(&data.lock);
-		return -EINVAL;
-	}
-	mutex_unlock(&data.lock);
-
-	if (enabled)
-		wake_up_process(kthread);
-
-	return csize;
-}
-
-/**
- * debug_window_fopen - Open function for "window" debugfs entry
- * @inode: The in-kernel inode representation of the debugfs "file"
- * @filp: The active open file structure for the debugfs "file"
- *
- * This function provides an open implementation for the "window" debugfs
- * interface to the hardware latency detector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs.
- */
-static int debug_window_fopen(struct inode *inode, struct file *filp)
-{
-	return 0;
-}
-
-/**
- * debug_window_fread - Read function for "window" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a read implementation for the "window" debugfs
- * interface to the hardware latency detector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs. Can be used to read the total window size.
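- *
- * For example, to sample for half of each one-second period (values in
- * usecs; width must stay below window):
- *
- *	echo 1000000 > /sys/kernel/debug/hwlat_detector/window
- *	echo 500000 > /sys/kernel/debug/hwlat_detector/width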
- */
-static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
-				  size_t cnt, loff_t *ppos)
-{
-	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
-}
-
-/**
- * debug_window_fwrite - Write function for "window" debugfs entry
- * @filp: The active open file structure for the debugfs "file"
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in the debugfs "file"
- *
- * This function provides a write implementation for the "window" debugfs
- * interface to the hardware latency detector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs. Can be used to write a new total window size. It
- * is enforced that any value written must be greater than the sample width
- * size, or an error results.
- */
-static ssize_t debug_window_fwrite(struct file *filp,
-				   const char __user *ubuf,
-				   size_t cnt,
-				   loff_t *ppos)
-{
-	char buf[U64STR_SIZE];
-	int csize = min(cnt, sizeof(buf));
-	u64 val = 0;
-	int err = 0;
-
-	memset(buf, '\0', sizeof(buf));
-	if (copy_from_user(buf, ubuf, csize))
-		return -EFAULT;
-
-	buf[U64STR_SIZE-1] = '\0';	/* just in case */
-	err = strict_strtoull(buf, 10, &val);
-	if (0 != err)
-		return -EINVAL;
-
-	mutex_lock(&data.lock);
-	if (data.sample_width < val)
-		data.sample_window = val;
-	else {
-		mutex_unlock(&data.lock);
-		return -EINVAL;
-	}
-	mutex_unlock(&data.lock);
-
-	return csize;
-}
-
-/*
- * Function pointers for the "count" debugfs file operations
- */
-static const struct file_operations count_fops = {
-	.open		= debug_count_fopen,
-	.read		= debug_count_fread,
-	.write		= debug_count_fwrite,
-	.owner		= THIS_MODULE,
-};
-
-/*
- * Function pointers for the "enable" debugfs file operations
- */
-static const struct file_operations enable_fops = {
-	.open		= debug_enable_fopen,
-	.read		= debug_enable_fread,
-	.write		= debug_enable_fwrite,
-	.owner		= THIS_MODULE,
-};
-
-/*
- * Function pointers for the "max" debugfs file operations
- */
-static const struct file_operations max_fops = {
-	.open		= debug_max_fopen,
-	.read		= debug_max_fread,
-	.write		= debug_max_fwrite,
-	.owner		= THIS_MODULE,
-};
-
-/*
- * Function pointers for the "sample" debugfs file operations
- */
-static const struct file_operations sample_fops = {
-	.open		= debug_sample_fopen,
-	.read		= debug_sample_fread,
-	.release	= debug_sample_release,
-	.owner		= THIS_MODULE,
-};
-
-/*
- * Function pointers for the "threshold" debugfs file operations
- */
-static const struct file_operations threshold_fops = {
-	.open		= debug_threshold_fopen,
-	.read		= debug_threshold_fread,
-	.write		= debug_threshold_fwrite,
-	.owner		= THIS_MODULE,
-};
-
-/*
- * Function pointers for the "width" debugfs file operations
- */
-static const struct file_operations width_fops = {
-	.open		= debug_width_fopen,
-	.read		= debug_width_fread,
-	.write		= debug_width_fwrite,
-	.owner		= THIS_MODULE,
-};
-
-/*
- * Function pointers for the "window" debugfs file operations
- */
-static const struct file_operations window_fops = {
-	.open		= debug_window_fopen,
-	.read		= debug_window_fread,
-	.write		= debug_window_fwrite,
-	.owner		= THIS_MODULE,
-};
-
-/**
- * init_debugfs - A function to initialize the debugfs interface files
- *
- * This function creates entries in debugfs for "hwlat_detector", including
- * files to read values from the detector, current samples, and the
- * maximum sample that has been captured since the hardware latency
- * detector was started.
- */
-static int init_debugfs(void)
-{
-	int ret = -ENOMEM;
-
-	debug_dir = debugfs_create_dir(DRVNAME, NULL);
-	if (!debug_dir)
-		goto err_debug_dir;
-
-	debug_sample = debugfs_create_file("sample", 0444,
-					   debug_dir, NULL,
-					   &sample_fops);
-	if (!debug_sample)
-		goto err_sample;
-
-	debug_count = debugfs_create_file("count", 0444,
-					  debug_dir, NULL,
-					  &count_fops);
-	if (!debug_count)
-		goto err_count;
-
-	debug_max = debugfs_create_file("max", 0444,
-					debug_dir, NULL,
-					&max_fops);
-	if (!debug_max)
-		goto err_max;
-
-	debug_sample_window = debugfs_create_file("window", 0644,
-						  debug_dir, NULL,
-						  &window_fops);
-	if (!debug_sample_window)
-		goto err_window;
-
-	debug_sample_width = debugfs_create_file("width", 0644,
-						 debug_dir, NULL,
-						 &width_fops);
-	if (!debug_sample_width)
-		goto err_width;
-
-	debug_threshold = debugfs_create_file("threshold", 0644,
-					      debug_dir, NULL,
-					      &threshold_fops);
-	if (!debug_threshold)
-		goto err_threshold;
-
-	debug_enable = debugfs_create_file("enable", 0644,
-					   debug_dir, &enabled,
-					   &enable_fops);
-	if (!debug_enable)
-		goto err_enable;
-
-	else {
-		ret = 0;
-		goto out;
-	}
-
-err_enable:
-	debugfs_remove(debug_threshold);
-err_threshold:
-	debugfs_remove(debug_sample_width);
-err_width:
-	debugfs_remove(debug_sample_window);
-err_window:
-	debugfs_remove(debug_max);
-err_max:
-	debugfs_remove(debug_count);
-err_count:
-	debugfs_remove(debug_sample);
-err_sample:
-	debugfs_remove(debug_dir);
-err_debug_dir:
-out:
-	return ret;
-}
-
-/**
- * free_debugfs - A function to cleanup the debugfs file interface
- */
-static void free_debugfs(void)
-{
-	/* could also use a debugfs_remove_recursive */
-	debugfs_remove(debug_enable);
-	debugfs_remove(debug_threshold);
-	debugfs_remove(debug_sample_width);
-	debugfs_remove(debug_sample_window);
-	debugfs_remove(debug_max);
-	debugfs_remove(debug_count);
-	debugfs_remove(debug_sample);
-	debugfs_remove(debug_dir);
-}
-
-/**
- * detector_init - Standard module initialization code
- */
-static int detector_init(void)
-{
-	int ret = -ENOMEM;
-
-	printk(KERN_INFO BANNER "version %s\n", VERSION);
-
-	ret = init_stats();
-	if (0 != ret)
-		goto out;
-
-	ret = init_debugfs();
-	if (0 != ret)
-		goto err_stats;
-
-	if (enabled)
-		ret = start_kthread();
-
-	goto out;
-
-err_stats:
-	ring_buffer_free(ring_buffer);
-out:
-	return ret;
-
-}
-
-/**
- * detector_exit - Standard module cleanup code
- */
-static void detector_exit(void)
-{
-	int err;
-
-	if (enabled) {
-		enabled = 0;
-		err = stop_kthread();
-		if (err)
-			printk(KERN_ERR BANNER "cannot stop kthread\n");
-	}
-
-	free_debugfs();
-	ring_buffer_free(ring_buffer);	/* free up the ring buffer */
-
-}
-
-module_init(detector_init);
-module_exit(detector_exit);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2743b7d..e6e3911 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -368,13 +368,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
 		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
 	card->ext_csd.raw_trim_mult =
 		ext_csd[EXT_CSD_TRIM_MULT];
-	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
 	if (card->ext_csd.rev >= 4) {
 		/*
 		 * Enhanced area feature support -- check whether the eMMC
 		 * card has the Enhanced area enabled. If so, export enhanced
 		 * area offset and size to user by adding sysfs interface.
*/ + card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { hc_erase_grp_sz = @@ -496,7 +496,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) * RPMB regions are defined in multiples of 128K. */ card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT]; - if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) { + if (ext_csd[EXT_CSD_RPMB_MULT]) { mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17, EXT_CSD_PART_CONFIG_ACC_RPMB, "rpmb", 0, false, diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 009dabd..8d13c65 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -292,6 +292,16 @@ config MMC_ATMELMCI If unsure, say N. +config MMC_ATMELMCI_DMA + bool "Atmel MCI DMA support" + depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE + help + Say Y here to have the Atmel MCI driver use a DMA engine to + do data transfers and thus increase the throughput and + reduce the CPU utilization. + + If unsure, say N. + config MMC_MSM tristate "Qualcomm SDCC Controller Support" depends on MMC && ARCH_MSM diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index e75774f..722af1d 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -178,7 +178,6 @@ struct atmel_mci { void __iomem *regs; struct scatterlist *sg; - unsigned int sg_len; unsigned int pio_offset; unsigned int *buffer; unsigned int buf_size; @@ -893,7 +892,6 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) data->error = -EINPROGRESS; host->sg = data->sg; - host->sg_len = data->sg_len; host->data = data; host->data_chan = NULL; @@ -1828,8 +1826,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) if (offset == sg->length) { flush_dcache_page(sg_page(sg)); host->sg = sg = sg_next(sg); - host->sg_len--; - if (!sg || !host->sg_len) + if (!sg) goto done; offset = 0; @@ -1842,8 +1839,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) flush_dcache_page(sg_page(sg)); host->sg = sg = sg_next(sg); - host->sg_len--; - if (!sg || !host->sg_len) + if (!sg) goto done; offset = 4 - remaining; @@ -1894,8 +1890,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) nbytes += 4; if (offset == sg->length) { host->sg = sg = sg_next(sg); - host->sg_len--; - if (!sg || !host->sg_len) + if (!sg) goto done; offset = 0; @@ -1909,8 +1904,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) nbytes += remaining; host->sg = sg = sg_next(sg); - host->sg_len--; - if (!sg || !host->sg_len) { + if (!sg) { atmci_writel(host, ATMCI_TDR, value); goto done; } @@ -2493,8 +2487,10 @@ static int __exit atmci_remove(struct platform_device *pdev) atmci_readl(host, ATMCI_SR); clk_disable(host->mck); +#ifdef CONFIG_MMC_ATMELMCI_DMA if (host->dma.chan) dma_release_channel(host->dma.chan); +#endif free_irq(platform_get_irq(pdev, 0), host); iounmap(host->regs); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 724f478..1507723 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -930,12 +930,15 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) struct sg_mapping_iter *sg_miter = &host->sg_miter; struct variant_data *variant = host->variant; void __iomem *base = host->base; + unsigned long flags; u32 status; status = readl(base + MMCISTATUS); dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); + local_irq_save(flags); + do { unsigned int 
remain, len; char *buffer; @@ -975,6 +978,8 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) sg_miter_stop(sg_miter); + local_irq_restore(flags); + /* * If we have less than the fifo 'half-full' threshold to transfer, * trigger a PIO interrupt as soon as any data is available. diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 468c923..f74b5ad 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -678,19 +678,12 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) struct mmc_command *cmd = mrq->cmd; struct mmc_data *data = mrq->data; unsigned int data_size = 0; - int err; if (host->eject) { cmd->error = -ENOMEDIUM; goto finish; } - err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); - if (err) { - cmd->error = err; - goto finish; - } - mutex_lock(&pcr->pcr_mutex); rtsx_pci_start_run(pcr); @@ -908,9 +901,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (host->eject) return; - if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD)) - return; - mutex_lock(&pcr->pcr_mutex); rtsx_pci_start_run(pcr); @@ -1083,10 +1073,6 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) if (host->eject) return -ENOMEDIUM; - err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); - if (err) - return err; - mutex_lock(&pcr->pcr_mutex); rtsx_pci_start_run(pcr); @@ -1136,10 +1122,6 @@ static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) if (host->eject) return -ENOMEDIUM; - err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); - if (err) - return err; - mutex_lock(&pcr->pcr_mutex); rtsx_pci_start_run(pcr); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index b503113..e07df81 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -237,18 +237,15 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct pltfm_imx_data *imx_data = pltfm_host->priv; - if (unlikely(reg == SDHCI_HOST_VERSION)) { - reg ^= 2; - if (is_imx6q_usdhc(imx_data)) { - /* - * The usdhc register returns a wrong host version. - * Correct it here. - */ - return SDHCI_SPEC_300; - } + u16 val = readw(host->ioaddr + (reg ^ 2)); + /* + * uSDHC supports SDHCI v3.0, but it's encoded as value + * 0x3 in host controller version register, which violates + * SDHCI_SPEC_300 definition. Work it around here. + */ + if ((val & SDHCI_SPEC_VER_MASK) == 3) + return --val; } return readw(host->ioaddr + reg); diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 6e3d6dc..82c0616 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -1159,17 +1159,45 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; struct map_info *map = mtd->priv; + resource_size_t start, off; + unsigned long len, vma_len; /* This is broken because it assumes the MTD device is map-based and that mtd->priv is a valid struct map_info. It should be replaced with something that uses the mtd_get_unmapped_area() operation properly. 
*/ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { + off = get_vm_offset(vma); + start = map->phys; + len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); + start &= PAGE_MASK; + vma_len = get_vm_size(vma); + + /* Overflow in off+len? */ + if (vma_len + off < off) + return -EINVAL; + /* Does it fit in the mapping? */ + if (vma_len + off > len) + return -EINVAL; + + off += start; + /* Did that overflow? */ + if (off < start) + return -EINVAL; + if (set_vm_offset(vma, off) < 0) + return -EINVAL; + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + #ifdef pgprot_noncached - if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory)) + if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); #endif - return vm_iomap_memory(vma, map->phys, map->size); + if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + return 0; } return -ENOSYS; #else diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index db04f53..3766682 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1527,14 +1527,6 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, oobreadlen -= toread; } } - - if (chip->options & NAND_NEED_READRDY) { - /* Apply delay or wait for ready/busy pin */ - if (!chip->dev_ready) - udelay(chip->chip_delay); - else - nand_wait_ready(mtd); - } } else { memcpy(buf, chip->buffers->databuf + col, bytes); buf += bytes; @@ -1799,14 +1791,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, len = min(len, readlen); buf = nand_transfer_oob(chip, buf, ops, len); - if (chip->options & NAND_NEED_READRDY) { - /* Apply delay or wait for ready/busy pin */ - if (!chip->dev_ready) - udelay(chip->chip_delay); - else - nand_wait_ready(mtd); - } - readlen -= len; if (!readlen) break; diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 9c61238..e3aa274 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -22,51 +22,49 @@ * 512 512 Byte page size */ struct nand_flash_dev nand_flash_ids[] = { -#define SP_OPTIONS NAND_NEED_READRDY -#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) #ifdef CONFIG_MTD_NAND_MUSEUM_IDS - {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS}, - {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS}, - - {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16}, - {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16}, + {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, + {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, + {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, + {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, + {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, + {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, + {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, + {"NAND 4MiB 3,3V 
8-bit", 0xe3, 512, 4, 0x2000, 0}, + {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, + {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, + + {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, + {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, + {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, + {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, #endif - {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS}, - {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS}, - {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16}, - {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16}, - - {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS}, - {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS}, - {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16}, - {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16}, - - {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS}, - {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16}, - {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16}, - - {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16}, - - {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS}, + {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, + {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, + {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, + {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, + + {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, + {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, + {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, + {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, + + {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, + {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, + {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, + {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, + + {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, + {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, + {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, + {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, + {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, + {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, + {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, + + {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, /* * These are the new chips with large page size. The pagesize and the diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 0052e52..6a70184 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -164,7 +164,6 @@ config VXLAN config NETCONSOLE tristate "Network console logging support" - depends on !PREEMPT_RT_FULL ---help--- If you want to log kernel messages over the network, enable this. See for details. 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 045dc53..b7d45f3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1728,8 +1728,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_compute_features(bond); - bond_update_speed_duplex(new_slave); - read_lock(&bond->lock); new_slave->last_arp_rx = jiffies - @@ -1782,6 +1780,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) new_slave->link == BOND_LINK_DOWN ? "DOWN" : (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); + bond_update_speed_duplex(new_slave); + if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { /* if there is a primary slave, remember it */ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { @@ -1888,7 +1888,6 @@ err_detach: write_unlock_bh(&bond->lock); err_close: - slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); err_unset_master: @@ -1944,6 +1943,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) } block_netpoll_tx(); + call_netdevice_notifiers(NETDEV_RELEASE, bond_dev); write_lock_bh(&bond->lock); slave = bond_get_slave_by_dev(bond, slave_dev); @@ -1956,11 +1956,12 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) return -EINVAL; } - write_unlock_bh(&bond->lock); /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. */ netdev_rx_handler_unregister(slave_dev); + write_unlock_bh(&bond->lock); + synchronize_net(); write_lock_bh(&bond->lock); if (!bond->params.fail_over_mac) { @@ -2046,10 +2047,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) write_unlock_bh(&bond->lock); unblock_netpoll_tx(); - if (bond->slave_cnt == 0) { + if (bond->slave_cnt == 0) call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); - call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); - } bond_compute_features(bond); if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && @@ -2463,6 +2462,8 @@ static void bond_miimon_commit(struct bonding *bond) bond_set_backup_slave(slave); } + bond_update_speed_duplex(slave); + pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", bond->dev->name, slave->dev->name, slave->speed, slave->duplex ? 
"full" : "half"); @@ -3380,22 +3381,20 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) */ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) { - const struct ethhdr *data; - const struct iphdr *iph; - const struct ipv6hdr *ipv6h; + struct ethhdr *data = (struct ethhdr *)skb->data; + struct iphdr *iph; + struct ipv6hdr *ipv6h; u32 v6hash; - const __be32 *s, *d; + __be32 *s, *d; if (skb->protocol == htons(ETH_P_IP) && - pskb_network_may_pull(skb, sizeof(*iph))) { + skb_network_header_len(skb) >= sizeof(*iph)) { iph = ip_hdr(skb); - data = (struct ethhdr *)skb->data; return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ (data->h_dest[5] ^ data->h_source[5])) % count; } else if (skb->protocol == htons(ETH_P_IPV6) && - pskb_network_may_pull(skb, sizeof(*ipv6h))) { + skb_network_header_len(skb) >= sizeof(*ipv6h)) { ipv6h = ipv6_hdr(skb); - data = (struct ethhdr *)skb->data; s = &ipv6h->saddr.s6_addr32[0]; d = &ipv6h->daddr.s6_addr32[0]; v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); @@ -3414,36 +3413,33 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) { u32 layer4_xor = 0; - const struct iphdr *iph; - const struct ipv6hdr *ipv6h; - const __be32 *s, *d; - const __be16 *l4 = NULL; - __be16 _l4[2]; - int noff = skb_network_offset(skb); - int poff; + struct iphdr *iph; + struct ipv6hdr *ipv6h; + __be32 *s, *d; + __be16 *layer4hdr; if (skb->protocol == htons(ETH_P_IP) && - pskb_may_pull(skb, noff + sizeof(*iph))) { + skb_network_header_len(skb) >= sizeof(*iph)) { iph = ip_hdr(skb); - poff = proto_ports_offset(iph->protocol); - - if (!ip_is_fragment(iph) && poff >= 0) { - l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, - sizeof(_l4), &_l4); - if (l4) - layer4_xor = ntohs(l4[0] ^ l4[1]); + if (!ip_is_fragment(iph) && + (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) && + (skb_headlen(skb) - skb_network_offset(skb) >= + iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { + layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); + layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; } else if (skb->protocol == htons(ETH_P_IPV6) && - pskb_may_pull(skb, noff + sizeof(*ipv6h))) { + skb_network_header_len(skb) >= sizeof(*ipv6h)) { ipv6h = ipv6_hdr(skb); - poff = proto_ports_offset(ipv6h->nexthdr); - if (poff >= 0) { - l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff, - sizeof(_l4), &_l4); - if (l4) - layer4_xor = ntohs(l4[0] ^ l4[1]); + if ((ipv6h->nexthdr == IPPROTO_TCP || + ipv6h->nexthdr == IPPROTO_UDP) && + (skb_headlen(skb) - skb_network_offset(skb) >= + sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { + layer4hdr = (__be16 *)(ipv6h + 1); + layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); } s = &ipv6h->saddr.s6_addr32[0]; d = &ipv6h->daddr.s6_addr32[0]; @@ -4925,18 +4921,9 @@ static int __net_init bond_net_init(struct net *net) static void __net_exit bond_net_exit(struct net *net) { struct bond_net *bn = net_generic(net, bond_net_id); - struct bonding *bond, *tmp_bond; - LIST_HEAD(list); bond_destroy_sysfs(bn); bond_destroy_proc_dir(bn); - - /* Kill off any bonds created after unregistering bond rtnl ops */ - rtnl_lock(); - list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) - unregister_netdevice_queue(bond->dev, &list); - unregister_netdevice_many(&list); - rtnl_unlock(); } static struct pernet_operations bond_net_ops = { diff --git 
a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index ea7a388..1c9e09f 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -183,11 +183,6 @@ int bond_create_slave_symlinks(struct net_device *master, sprintf(linkname, "slave_%s", slave->name); ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj), linkname); - - /* free the master link created earlier in case of error */ - if (ret) - sysfs_remove_link(&(slave->dev.kobj), "master"); - return ret; } @@ -527,7 +522,7 @@ static ssize_t bonding_store_arp_interval(struct device *d, goto out; } if (new_value < 0) { - pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n", + pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", bond->dev->name, new_value, INT_MAX); ret = -EINVAL; goto out; @@ -542,15 +537,14 @@ static ssize_t bonding_store_arp_interval(struct device *d, pr_info("%s: Setting ARP monitoring interval to %d.\n", bond->dev->name, new_value); bond->params.arp_interval = new_value; - if (new_value) { - if (bond->params.miimon) { - pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", - bond->dev->name, bond->dev->name); - bond->params.miimon = 0; - } - if (!bond->params.arp_targets[0]) - pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", - bond->dev->name); + if (bond->params.miimon) { + pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", + bond->dev->name, bond->dev->name); + bond->params.miimon = 0; + } + if (!bond->params.arp_targets[0]) { + pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", + bond->dev->name); } if (bond->dev->flags & IFF_UP) { /* If the interface is up, we may need to fire off @@ -558,13 +552,10 @@ static ssize_t bonding_store_arp_interval(struct device *d, * timer will get fired off when the open function * is called. 
*/ - if (!new_value) { - cancel_delayed_work_sync(&bond->arp_work); - } else { - cancel_delayed_work_sync(&bond->mii_work); - queue_delayed_work(bond->wq, &bond->arp_work, 0); - } + cancel_delayed_work_sync(&bond->mii_work); + queue_delayed_work(bond->wq, &bond->arp_work, 0); } + out: rtnl_unlock(); return ret; @@ -706,7 +697,7 @@ static ssize_t bonding_store_downdelay(struct device *d, } if (new_value < 0) { pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 0, INT_MAX); + bond->dev->name, new_value, 1, INT_MAX); ret = -EINVAL; goto out; } else { @@ -761,8 +752,8 @@ static ssize_t bonding_store_updelay(struct device *d, goto out; } if (new_value < 0) { - pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 0, INT_MAX); + pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", + bond->dev->name, new_value, 1, INT_MAX); ret = -EINVAL; goto out; } else { @@ -972,37 +963,37 @@ static ssize_t bonding_store_miimon(struct device *d, } if (new_value < 0) { pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", - bond->dev->name, new_value, 0, INT_MAX); + bond->dev->name, new_value, 1, INT_MAX); ret = -EINVAL; goto out; - } - pr_info("%s: Setting MII monitoring interval to %d.\n", - bond->dev->name, new_value); - bond->params.miimon = new_value; - if (bond->params.updelay) - pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", - bond->dev->name, - bond->params.updelay * bond->params.miimon); - if (bond->params.downdelay) - pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", - bond->dev->name, - bond->params.downdelay * bond->params.miimon); - if (new_value && bond->params.arp_interval) { - pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", - bond->dev->name); - bond->params.arp_interval = 0; - if (bond->params.arp_validate) - bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; - } - if (bond->dev->flags & IFF_UP) { - /* If the interface is up, we may need to fire off - * the MII timer. If the interface is down, the - * timer will get fired off when the open function - * is called. - */ - if (!new_value) { - cancel_delayed_work_sync(&bond->mii_work); - } else { + } else { + pr_info("%s: Setting MII monitoring interval to %d.\n", + bond->dev->name, new_value); + bond->params.miimon = new_value; + if (bond->params.updelay) + pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", + bond->dev->name, + bond->params.updelay * bond->params.miimon); + if (bond->params.downdelay) + pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", + bond->dev->name, + bond->params.downdelay * bond->params.miimon); + if (bond->params.arp_interval) { + pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", + bond->dev->name); + bond->params.arp_interval = 0; + if (bond->params.arp_validate) { + bond->params.arp_validate = + BOND_ARP_VALIDATE_NONE; + } + } + + if (bond->dev->flags & IFF_UP) { + /* If the interface is up, we may need to fire off + * the MII timer. If the interface is down, the + * timer will get fired off when the open function + * is called. 
+ */ cancel_delayed_work_sync(&bond->arp_work); queue_delayed_work(bond->wq, &bond->mii_work, 0); } diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 42b6d69..5eaf47b 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -922,7 +922,6 @@ static int mcp251x_open(struct net_device *net) struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; struct mcp251x_platform_data *pdata = spi->dev.platform_data; - unsigned long flags; int ret; ret = open_candev(net); @@ -939,14 +938,9 @@ static int mcp251x_open(struct net_device *net) priv->tx_skb = NULL; priv->tx_len = 0; - flags = IRQF_ONESHOT; - if (pdata->irq_flags) - flags |= pdata->irq_flags; - else - flags |= IRQF_TRIGGER_FALLING; - ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, - flags, DEVICE_NAME, priv); + pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING, + DEVICE_NAME, priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); if (pdata->transceiver_enable) diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 08a606c..11d1062 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) */ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == REG_CR_BASICCAN_INITIAL && - (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) && + (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) && (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) flag = 1; @@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) * See states on p. 23 of the Datasheet. */ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && - priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL && + priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL && priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) return flag; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 43921f9..83ee11e 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -91,7 +91,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) */ spin_lock_irqsave(&priv->cmdreg_lock, flags); priv->write_reg(priv, REG_CMR, val); - priv->read_reg(priv, SJA1000_REG_SR); + priv->read_reg(priv, REG_SR); spin_unlock_irqrestore(&priv->cmdreg_lock, flags); } @@ -499,7 +499,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { n++; - status = priv->read_reg(priv, SJA1000_REG_SR); + status = priv->read_reg(priv, REG_SR); /* check for absent controller due to hw unplug */ if (status == 0xFF && sja1000_is_absent(priv)) return IRQ_NONE; @@ -526,7 +526,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) /* receive interrupt */ while (status & SR_RBS) { sja1000_rx(dev); - status = priv->read_reg(priv, SJA1000_REG_SR); + status = priv->read_reg(priv, REG_SR); /* check for absent controller */ if (status == 0xFF && sja1000_is_absent(priv)) return IRQ_NONE; diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index aa48e05..afa9984 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h @@ -56,7 +56,7 @@ /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ #define REG_MOD 0x00 #define REG_CMR 0x01 -#define SJA1000_REG_SR 0x02 +#define REG_SR 0x02 
#define REG_IR 0x03 #define REG_IER 0x04 #define REG_ALC 0x0B diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 8e0c4a0..6433b81 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) struct net_device *dev; struct sja1000_priv *priv; struct resource res; - u32 prop; - int err, irq, res_size; + const u32 *prop; + int err, irq, res_size, prop_size; void __iomem *base; err = of_address_to_resource(np, 0, &res); @@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) priv->read_reg = sja1000_ofp_read_reg; priv->write_reg = sja1000_ofp_write_reg; - err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop); - if (!err) - priv->can.clock.freq = prop / 2; + prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size); + if (prop && (prop_size == sizeof(u32))) + priv->can.clock.freq = *prop / 2; else priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ - err = of_property_read_u32(np, "nxp,tx-output-mode", &prop); - if (!err) - priv->ocr |= prop & OCR_MODE_MASK; + prop = of_get_property(np, "nxp,tx-output-mode", &prop_size); + if (prop && (prop_size == sizeof(u32))) + priv->ocr |= *prop & OCR_MODE_MASK; else priv->ocr |= OCR_MODE_NORMAL; /* default */ - err = of_property_read_u32(np, "nxp,tx-output-config", &prop); - if (!err) - priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK; + prop = of_get_property(np, "nxp,tx-output-config", &prop_size); + if (prop && (prop_size == sizeof(u32))) + priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK; else priv->ocr |= OCR_TX0_PULLDOWN; /* default */ - err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop); - if (!err && prop) { - u32 divider = priv->can.clock.freq * 2 / prop; + prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size); + if (prop && (prop_size == sizeof(u32)) && *prop) { + u32 divider = priv->can.clock.freq * 2 / *prop; if (divider > 1) priv->cdr |= divider / 2 - 1; @@ -168,7 +168,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) priv->cdr |= CDR_CLK_OFF; /* default */ } - if (!of_property_read_bool(np, "nxp,no-comparator-bypass")) + prop = of_get_property(np, "nxp,no-comparator-bypass", NULL); + if (!prop) priv->cdr |= CDR_CBP; /* default */ priv->irq_flags = IRQF_SHARED; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 0da3917..ed0feb3 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -843,9 +843,9 @@ static void poll_vortex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); unsigned long flags; - local_irq_save_nort(flags); + local_irq_save(flags); (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); - local_irq_restore_nort(flags); + local_irq_restore(flags); } #endif @@ -1919,12 +1919,12 @@ static void vortex_tx_timeout(struct net_device *dev) * Block interrupts because vortex_interrupt does a bare spin_lock() */ unsigned long flags; - local_irq_save_nort(flags); + local_irq_save(flags); if (vp->full_bus_master_tx) boomerang_interrupt(dev->irq, dev); else vortex_interrupt(dev->irq, dev); - local_irq_restore_nort(flags); + local_irq_restore(flags); } } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index dbe44ba..0035c01 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2075,7 +2075,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter, if (unlikely(pci_dma_mapping_error(adapter->pdev, buffer_info->dma))) goto err_dma; - ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, ATL1C_PCIMAP_TODEVICE); mapped_len += map_len; @@ -2171,7 +2171,11 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, } tpd_req = atl1c_cal_tpd_req(skb); - spin_lock_irqsave(&adapter->tx_lock, flags); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { + if (netif_msg_pktdata(adapter)) + dev_info(&adapter->pdev->dev, "tx locked\n"); + return NETDEV_TX_LOCKED; + } if (atl1c_tpd_avail(adapter, type) < tpd_req) { /* no enough descriptor, just stop queue */ diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index b5fd934..829b5ad 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h @@ -186,7 +186,7 @@ struct atl1e_tpd_desc { /* how about 0x2000 */ #define MAX_TX_BUF_LEN 0x2000 #define MAX_TX_BUF_SHIFT 13 -#define MAX_TSO_SEG_SIZE 0x3c00 +/*#define MAX_TX_BUF_LEN 0x3000 */ /* rrs word 1 bit 0:31 */ #define RRS_RX_CSUM_MASK 0xFFFF @@ -438,6 +438,7 @@ struct atl1e_adapter { struct atl1e_hw hw; struct atl1e_hw_stats hw_stats; + bool have_msi; u32 wol; u16 link_speed; u16 link_duplex; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 7569f68..e4466a3 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1803,7 +1803,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } tpd_req = atl1e_cal_tdp_req(skb); - spin_lock_irqsave(&adapter->tx_lock, flags); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) + return NETDEV_TX_LOCKED; if (atl1e_tpd_avail(adapter) < tpd_req) { /* no enough descriptor, just stop queue */ @@ -1850,19 +1851,34 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter) struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); + + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); } static int atl1e_request_irq(struct atl1e_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; + int flags = 0; int err = 0; - err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, - netdev); + adapter->have_msi = true; + err = pci_enable_msi(pdev); + if (err) { + netdev_dbg(netdev, + "Unable to allocate MSI interrupt Error: %d\n", err); + adapter->have_msi = false; + } + + if (!adapter->have_msi) + flags |= IRQF_SHARED; + err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); if (err) { 
netdev_dbg(adapter->netdev, "Unable to allocate interrupt Error: %d\n", err); + if (adapter->have_msi) + pci_disable_msi(pdev); return err; } netdev_dbg(netdev, "atl1e_request_irq OK\n"); @@ -2331,7 +2347,6 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->reset_task, atl1e_reset_task); INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); - netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); err = register_netdev(netdev); if (err) { netdev_err(netdev, "register netdevice failed\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 3a73bb9..a5edac8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2523,7 +2523,6 @@ load_error2: bp->port.pmf = 0; load_error1: bnx2x_napi_disable(bp); - bnx2x_del_all_napi(bp); /* clear pf_load status, as it was already set */ bnx2x_clear_pf_load(bp); load_error0: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 937f5b5..b4d7b26 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -456,9 +456,8 @@ struct bnx2x_fw_port_stats_old { #define UPDATE_QSTAT(s, t) \ do { \ + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \ qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ - qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ - + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \ } while (0) #define UPDATE_QSTAT_OLD(f) \ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 6917998..bdb0869 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -330,7 +330,6 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -1844,8 +1843,6 @@ static void tg3_link_report(struct tg3 *tp) tg3_ump_link_report(tp); } - - tp->link_up = netif_carrier_ok(tp->dev); } static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) @@ -2499,6 +2496,12 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) return err; } +static void tg3_carrier_on(struct tg3 *tp) +{ + netif_carrier_on(tp->dev); + tp->link_up = true; +} + static void tg3_carrier_off(struct tg3 *tp) { netif_carrier_off(tp->dev); @@ -2524,7 +2527,7 @@ static int tg3_phy_reset(struct tg3 *tp) return -EBUSY; if (netif_running(tp->dev) && tp->link_up) { - netif_carrier_off(tp->dev); + tg3_carrier_off(tp); tg3_link_report(tp); } @@ -4094,14 +4097,6 @@ static void tg3_phy_copper_begin(struct tg3 *tp) tp->link_config.active_speed = tp->link_config.speed; tp->link_config.active_duplex = tp->link_config.duplex; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - /* With autoneg disabled, 5715 only links up when the - * advertisement register has the configured speed - * enabled. 
- */ - tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); - } - bmcr = 0; switch (tp->link_config.speed) { default: @@ -4230,9 +4225,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up) { if (curr_link_up != tp->link_up) { if (curr_link_up) { - netif_carrier_on(tp->dev); + tg3_carrier_on(tp); } else { - netif_carrier_off(tp->dev); + tg3_carrier_off(tp); if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } @@ -9104,14 +9099,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { - u32 grc_mode; - - /* Fix transmit hangs */ - val = tr32(TG3_CPMU_PADRNG_CTL); - val |= TG3_CPMU_PADRNG_CTL_RDIV2; - tw32(TG3_CPMU_PADRNG_CTL, val); - - grc_mode = tr32(GRC_MODE); + u32 grc_mode = tr32(GRC_MODE); /* Access the lower 1K of DL PCIE block registers. */ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; @@ -9421,14 +9409,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tg3_flag(tp, PCI_EXPRESS)) rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) { - tp->dma_limit = 0; - if (tp->dev->mtu <= ETH_DATA_LEN) { - rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; - tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; - } - } - if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) @@ -14419,11 +14399,8 @@ static void tg3_read_vpd(struct tg3 *tp) if (j + len > block_end) goto partno; - if (len >= sizeof(tp->fw_ver)) - len = sizeof(tp->fw_ver) - 1; - memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); - snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, - &vpd_data[j]); + memcpy(tp->fw_ver, &vpd_data[j], len); + strncat(tp->fw_ver, " bc ", vpdlen - len - 1); } partno: diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 6f9b74c..d330e81 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -1159,8 +1159,6 @@ #define CPMU_MUTEX_GNT_DRIVER 0x00001000 #define TG3_CPMU_PHY_STRAP 0x00003664 #define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020 -#define TG3_CPMU_PADRNG_CTL 0x00003668 -#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000 /* 0x3664 --> 0x36b0 unused */ #define TG3_CPMU_EEE_MODE 0x000036b0 diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 1420ea8..d84872e 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1666,7 +1666,8 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, struct cmdQ *q = &sge->cmdQ[qid]; unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - spin_lock(&q->lock); + if (!spin_trylock(&q->lock)) + return NETDEV_TX_LOCKED; reclaim_completed_tx(sge, q); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index d67de83..c73472c 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -257,107 +257,6 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count) tmp = readl(reg); } -/* - * Sleep, either by using msleep() or if we are suspending, then - * use mdelay() to sleep. 
- */ -static void dm9000_msleep(board_info_t *db, unsigned int ms) -{ - if (db->in_suspend) - mdelay(ms); - else - msleep(ms); -} - -/* Read a word from phyxcer */ -static int -dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) -{ - board_info_t *db = netdev_priv(dev); - unsigned long flags; - unsigned int reg_save; - int ret; - - mutex_lock(&db->addr_lock); - - spin_lock_irqsave(&db->lock, flags); - - /* Save previous register address */ - reg_save = readb(db->io_addr); - - /* Fill the phyxcer register into REG_0C */ - iow(db, DM9000_EPAR, DM9000_PHY | reg); - - /* Issue phyxcer read command */ - iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); - - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock, flags); - - dm9000_msleep(db, 1); /* Wait read complete */ - - spin_lock_irqsave(&db->lock, flags); - reg_save = readb(db->io_addr); - - iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ - - /* The read data keeps on REG_0D & REG_0E */ - ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); - - /* restore the previous address */ - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock, flags); - - mutex_unlock(&db->addr_lock); - - dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); - return ret; -} - -/* Write a word to phyxcer */ -static void -dm9000_phy_write(struct net_device *dev, - int phyaddr_unused, int reg, int value) -{ - board_info_t *db = netdev_priv(dev); - unsigned long flags; - unsigned long reg_save; - - dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); - mutex_lock(&db->addr_lock); - - spin_lock_irqsave(&db->lock, flags); - - /* Save previous register address */ - reg_save = readb(db->io_addr); - - /* Fill the phyxcer register into REG_0C */ - iow(db, DM9000_EPAR, DM9000_PHY | reg); - - /* Fill the written data into REG_0D & REG_0E */ - iow(db, DM9000_EPDRL, value); - iow(db, DM9000_EPDRH, value >> 8); - - /* Issue phyxcer write command */ - iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); - - writeb(reg_save, db->io_addr); - spin_unlock_irqrestore(&db->lock, flags); - - dm9000_msleep(db, 1); /* Wait write complete */ - - spin_lock_irqsave(&db->lock, flags); - reg_save = readb(db->io_addr); - - iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ - - /* restore the previous address */ - writeb(reg_save, db->io_addr); - - spin_unlock_irqrestore(&db->lock, flags); - mutex_unlock(&db->addr_lock); -} - /* dm9000_set_io * * select the specified set of io routines to use with the @@ -895,9 +794,6 @@ dm9000_init_dm9000(struct net_device *dev) iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ - dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ - dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ - ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; /* if wol is needed, then always set NCR_WAKEEN otherwise we end @@ -1304,6 +1200,109 @@ dm9000_open(struct net_device *dev) return 0; } +/* + * Sleep, either by using msleep() or if we are suspending, then + * use mdelay() to sleep. 
+ */ +static void dm9000_msleep(board_info_t *db, unsigned int ms) +{ + if (db->in_suspend) + mdelay(ms); + else + msleep(ms); +} + +/* + * Read a word from phyxcer + */ +static int +dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) +{ + board_info_t *db = netdev_priv(dev); + unsigned long flags; + unsigned int reg_save; + int ret; + + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock,flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + /* Fill the phyxcer register into REG_0C */ + iow(db, DM9000_EPAR, DM9000_PHY | reg); + + iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ + + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock,flags); + + dm9000_msleep(db, 1); /* Wait read complete */ + + spin_lock_irqsave(&db->lock,flags); + reg_save = readb(db->io_addr); + + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ + + /* The read data keeps on REG_0D & REG_0E */ + ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); + + /* restore the previous address */ + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock,flags); + + mutex_unlock(&db->addr_lock); + + dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); + return ret; +} + +/* + * Write a word to phyxcer + */ +static void +dm9000_phy_write(struct net_device *dev, + int phyaddr_unused, int reg, int value) +{ + board_info_t *db = netdev_priv(dev); + unsigned long flags; + unsigned long reg_save; + + dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock,flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + /* Fill the phyxcer register into REG_0C */ + iow(db, DM9000_EPAR, DM9000_PHY | reg); + + /* Fill the written data into REG_0D & REG_0E */ + iow(db, DM9000_EPDRL, value); + iow(db, DM9000_EPDRH, value >> 8); + + iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ + + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_msleep(db, 1); /* Wait write complete */ + + spin_lock_irqsave(&db->lock,flags); + reg_save = readb(db->io_addr); + + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ + + /* restore the previous address */ + writeb(reg_save, db->io_addr); + + spin_unlock_irqrestore(&db->lock, flags); + mutex_unlock(&db->addr_lock); +} + static void dm9000_shutdown(struct net_device *dev) { @@ -1502,12 +1501,7 @@ dm9000_probe(struct platform_device *pdev) db->flags |= DM9000_PLATF_SIMPLE_PHY; #endif - /* Fixing bug on dm9000_probe, takeover dm9000_reset(db), - * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo - * while probe stage. - */ - - iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST); + dm9000_reset(db); /* try multiple times, DM9000 sometimes gets the read wrong */ for (i = 0; i < 8; i++) { diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h index 9ce058a..55688bd 100644 --- a/drivers/net/ethernet/davicom/dm9000.h +++ b/drivers/net/ethernet/davicom/dm9000.h @@ -69,9 +69,7 @@ #define NCR_WAKEEN (1<<6) #define NCR_FCOL (1<<4) #define NCR_FDX (1<<3) - -#define NCR_RESERVED (3<<1) -#define NCR_MAC_LBK (1<<1) +#define NCR_LBK (3<<1) #define NCR_RST (1<<0) #define NSR_SPEED (1<<7) @@ -169,12 +167,5 @@ #define ISR_LNKCHNG (1<<5) #define ISR_UNDERRUN (1<<4) -/* Davicom MII registers. 
- */ - -#define MII_DM_DSPCR 0x1b /* DSP Control Register */ - -#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */ - #endif /* _DM9000X_H_ */ diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index d25961b..1e9443d 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1943,7 +1943,6 @@ static void tulip_remove_one(struct pci_dev *pdev) pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); - pci_disable_device (pdev); pci_set_drvdata (pdev, NULL); /* pci_power_off (pdev, -1); */ diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 547c9f1..c40526c 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -128,7 +128,6 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) spin_unlock_irqrestore(&fep->tmreg_lock, flags); } -EXPORT_SYMBOL(fec_ptp_start_cyclecounter); /** * fec_ptp_adjfreq - adjust ptp cycle frequency @@ -319,7 +318,6 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } -EXPORT_SYMBOL(fec_ptp_ioctl); /** * fec_time_keep - call timecounter_read every second to avoid timer overrun @@ -383,4 +381,3 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) pr_info("registered PHC device on %s\n", ndev->name); } } -EXPORT_SYMBOL(fec_ptp_init); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 5c53535..bffb2ed 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1663,7 +1663,7 @@ void stop_gfar(struct net_device *dev) /* Lock it down */ - local_irq_save_nort(flags); + local_irq_save(flags); lock_tx_qs(priv); lock_rx_qs(priv); @@ -1671,7 +1671,7 @@ void stop_gfar(struct net_device *dev) unlock_rx_qs(priv); unlock_tx_qs(priv); - local_irq_restore_nort(flags); + local_irq_restore(flags); /* Free the IRQs */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { @@ -2951,7 +2951,7 @@ static void adjust_link(struct net_device *dev) struct phy_device *phydev = priv->phydev; int new_state = 0; - local_irq_save_nort(flags); + local_irq_save(flags); lock_tx_qs(priv); if (phydev->link) { @@ -3020,7 +3020,7 @@ static void adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); unlock_tx_qs(priv); - local_irq_restore_nort(flags); + local_irq_restore(flags); } /* Update the hash table based on the current list of multicast diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index a3f8a25..2e5daee 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -127,6 +127,7 @@ struct gianfar_ptp_registers { #define DRIVER "gianfar_ptp" #define DEFAULT_CKSEL 1 +#define N_ALARM 1 /* first alarm is used internally to reset fipers */ #define N_EXT_TS 2 #define REG_SIZE sizeof(struct gianfar_ptp_registers) @@ -409,7 +410,7 @@ static struct ptp_clock_info ptp_gianfar_caps = { .owner = THIS_MODULE, .name = "gianfar clock", .max_adj = 512000, - .n_alarm = 0, + .n_alarm = N_ALARM, .n_ext_ts = N_EXT_TS, .n_per_out = 0, .pps = 1, diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 82c63ac..f2fdbb7 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ 
b/drivers/net/ethernet/ibm/ibmveth.c @@ -1326,7 +1326,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) { - int rc, i, mac_len; + int rc, i; struct net_device *netdev; struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; @@ -1336,19 +1336,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) dev->unit_address); mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, - &mac_len); + NULL); if (!mac_addr_p) { dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); return -EINVAL; } - /* Workaround for old/broken pHyp */ - if (mac_len == 8) - mac_addr_p += 2; - else if (mac_len != 6) { - dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", - mac_len); - return -EINVAL; - } mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, NULL); @@ -1373,6 +1365,17 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); + /* + * Some older boxes running PHYP non-natively have an OF that returns + * a 8-byte local-mac-address field (and the first 2 bytes have to be + * ignored) while newer boxes' OF return a 6-byte field. Note that + * IEEE 1275 specifies that local-mac-address must be a 6-byte field. + * The RPA doc specifies that the first byte must be 10b, so we'll + * just look for it to solve this 8 vs. 6 byte field issue + */ + if ((*mac_addr_p & 0x3) != 0x02) + mac_addr_p += 2; + adapter->mac_addr = 0; memcpy(&adapter->mac_addr, mac_addr_p, 6); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 522fb10..fd4772a 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -35,7 +35,6 @@ #include #include #include -#include #include "e1000.h" @@ -2054,19 +2053,7 @@ static int e1000_get_rxnfc(struct net_device *netdev, } } -static int e1000e_ethtool_begin(struct net_device *netdev) -{ - return pm_runtime_get_sync(netdev->dev.parent); -} - -static void e1000e_ethtool_complete(struct net_device *netdev) -{ - pm_runtime_put_sync(netdev->dev.parent); -} - static const struct ethtool_ops e1000_ethtool_ops = { - .begin = e1000e_ethtool_begin, - .complete = e1000e_ethtool_complete, .get_settings = e1000_get_settings, .set_settings = e1000_set_settings, .get_drvinfo = e1000_get_drvinfo, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8692eca..643c883 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3952,7 +3952,6 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); adapter->idle_check = true; - hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); /* fire a link status change interrupt to start the watchdog */ @@ -4313,7 +4312,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) (adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; - pm_runtime_get_sync(&adapter->pdev->dev); ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); @@ -4324,7 +4322,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); - pm_runtime_put_sync(&adapter->pdev->dev); 
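/*
 * Editorial aside, not part of the patch: the e1000e hunks above drop
 * runtime-PM references that were taken around PHY register access
 * (the ethtool .begin/.complete pair and the get_sync/put_sync bracket
 * in e1000_phy_read_status). The general idiom is to hold a runtime-PM
 * reference for the duration of any hardware access, sketched below
 * with illustrative names; this is not code from this tree.
 */
#include <linux/pm_runtime.h>
#include <linux/io.h>

static u32 foo_read_reg_powered(struct device *dev, void __iomem *reg)
{
	u32 val;

	pm_runtime_get_sync(dev);	/* resume the device if suspended */
	val = readl(reg);		/* device is guaranteed in D0 here */
	pm_runtime_put_sync(dev);	/* drop the reference; autosuspend may follow */
	return val;
}
/* End of aside; the patch resumes below. */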
} else { /* Do not read PHY registers if link is not up * Set values to typical power-on defaults @@ -5453,7 +5450,8 @@ release: return retval; } -static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5477,6 +5475,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) } e1000e_reset_interrupt_capability(adapter); + retval = pci_save_state(pdev); + if (retval) + return retval; + status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -5532,6 +5534,13 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) ew32(WUFC, 0); } + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if ((adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + *enable_wake = true; + if (adapter->hw.phy.type == e1000_phy_igp_3) e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); @@ -5540,7 +5549,27 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) */ e1000e_release_hw_control(adapter); - pci_clear_master(pdev); + pci_disable_device(pdev); + + return 0; +} + +static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) +{ + if (sleep && wake) { + pci_prepare_to_sleep(pdev); + return; + } + + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); +} + +static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, + bool wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. 
To @@ -5555,13 +5584,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); - pci_save_state(pdev); - pci_prepare_to_sleep(pdev); + e1000_power_off(pdev, sleep, wake); pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); + } else { + e1000_power_off(pdev, sleep, wake); } - - return 0; } #ifdef CONFIG_PCIEASPM @@ -5612,7 +5640,9 @@ static int __e1000_resume(struct pci_dev *pdev) if (aspm_disable_flag) e1000e_disable_aspm(pdev, aspm_disable_flag); - pci_set_master(pdev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); e1000e_set_interrupt_capability(adapter); if (netif_running(netdev)) { @@ -5678,8 +5708,14 @@ static int __e1000_resume(struct pci_dev *pdev) static int e1000_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake, false); + if (!retval) + e1000_complete_shutdown(pdev, true, wake); - return __e1000_shutdown(pdev, false); + return retval; } static int e1000_resume(struct device *dev) @@ -5702,10 +5738,13 @@ static int e1000_runtime_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - if (!e1000e_pm_ready(adapter)) - return 0; + if (e1000e_pm_ready(adapter)) { + bool wake; - return __e1000_shutdown(pdev, true); + __e1000_shutdown(pdev, &wake, true); + } + + return 0; } static int e1000_idle(struct device *dev) @@ -5743,7 +5782,12 @@ static int e1000_runtime_resume(struct device *dev) static void e1000_shutdown(struct pci_dev *pdev) { - __e1000_shutdown(pdev, false); + bool wake = false; + + __e1000_shutdown(pdev, &wake, false); + + if (system_state == SYSTEM_POWER_OFF) + e1000_complete_shutdown(pdev, false, wake); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -5864,9 +5908,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { + pci_set_master(pdev); pdev->state_saved = true; pci_restore_state(pdev); - pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); @@ -6297,11 +6341,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* initialize the wol settings based on the eeprom settings */ adapter->wol = adapter->eeprom_wol; - - /* make sure adapter isn't asleep if manageability is enabled */ - if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || - (hw->mac.ops.check_mng_mode(hw))) - device_wakeup_enable(&pdev->dev); + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); /* save off EEPROM version number */ e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 911956e..b3e3294 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2407,16 +2407,6 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) * with the write to EICR. */ eicr = IXGBE_READ_REG(hw, IXGBE_EICS); - - /* The lower 16bits of the EICR register are for the queue interrupts - * which should be masked here in order to not accidently clear them if - * the bits are high when ixgbe_msix_other is called. 
There is a race - * condition otherwise which results in possible performance loss - * especially if the ixgbe_msix_other interrupt is triggering - * consistently (as it would when PPS is turned on for the X540 device) - */ - eicr &= 0xFFFF0000; - IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); if (eicr & IXGBE_EICR_LSC) @@ -7868,19 +7858,12 @@ static int __init ixgbe_init_module(void) ixgbe_dbg_init(); #endif /* CONFIG_DEBUG_FS */ - ret = pci_register_driver(&ixgbe_driver); - if (ret) { -#ifdef CONFIG_DEBUG_FS - ixgbe_dbg_exit(); -#endif /* CONFIG_DEBUG_FS */ - return ret; - } - #ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif - return 0; + ret = pci_register_driver(&ixgbe_driver); + return ret; } module_init(ixgbe_init_module); diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 434e33c..edfba93 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -33,7 +33,6 @@ config MV643XX_ETH config MVMDIO tristate "Marvell MDIO interface support" - select PHYLIB ---help--- This driver supports the MDIO interface found in the network interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, @@ -46,6 +45,7 @@ config MVMDIO config MVNETA tristate "Marvell Armada 370/XP network interface support" depends on MACH_ARMADA_370_XP + select PHYLIB select MVMDIO ---help--- This driver supports the network interface units in the diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 84b312ea..b6025c3 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -375,6 +375,7 @@ static int rxq_number = 8; static int txq_number = 8; static int rxq_def; +static int txq_def; #define MVNETA_DRIVER_NAME "mvneta" #define MVNETA_DRIVER_VERSION "1.0" @@ -1475,8 +1476,7 @@ error: static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - u16 txq_id = skb_get_queue_mapping(skb); - struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; + struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; struct mvneta_tx_desc *tx_desc; struct netdev_queue *nq; int frags = 0; @@ -1486,7 +1486,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) goto out; frags = skb_shinfo(skb)->nr_frags + 1; - nq = netdev_get_tx_queue(dev, txq_id); + nq = netdev_get_tx_queue(dev, txq_def); /* Get a descriptor for the first part of the packet */ tx_desc = mvneta_txq_next_desc_get(txq); @@ -2690,7 +2690,7 @@ static int mvneta_probe(struct platform_device *pdev) return -EINVAL; } - dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); + dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); if (!dev) return -ENOMEM; @@ -2844,3 +2844,4 @@ module_param(rxq_number, int, S_IRUGO); module_param(txq_number, int, S_IRUGO); module_param(rxq_def, int, S_IRUGO); +module_param(txq_def, int, S_IRUGO); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index d23dc5e..3269eb3 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); - tp = space - 8192/8; + tp = space - 2048/8; sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); } else { diff --git a/drivers/net/ethernet/marvell/sky2.h 
b/drivers/net/ethernet/marvell/sky2.h index ec6dcd8..615ac63 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2074,7 +2074,7 @@ enum { GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ -#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) +#define GMAC_DEF_MSK GM_IS_TX_FF_UR }; /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bf3f4bc..75a3f46 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1434,11 +1434,12 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) } #ifdef CONFIG_RFS_ACCEL - if (priv->mdev->dev->caps.comp_pool) { - priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); - if (!priv->dev->rx_cpu_rmap) - goto err; - } + priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num); + if (!priv->dev->rx_cpu_rmap) + goto err; + + INIT_LIST_HEAD(&priv->filters); + spin_lock_init(&priv->filters_lock); #endif return 0; @@ -1596,7 +1597,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, err = -ENOMEM; goto out; } - priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS, + priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_RX_RINGS, GFP_KERNEL); if (!priv->tx_cq) { err = -ENOMEM; @@ -1633,11 +1634,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (err) goto out; -#ifdef CONFIG_RFS_ACCEL - INIT_LIST_HEAD(&priv->filters); - spin_lock_init(&priv->filters_lock); -#endif - /* Allocate page for receive rings */ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 1e42882..286816a 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -547,7 +547,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) for (; rxfc != 0; rxfc--) { rxh = ks8851_rdreg32(ks, KS_RXFHSR); rxstat = rxh & 0xffff; - rxlen = (rxh >> 16) & 0xfff; + rxlen = rxh >> 16; netif_dbg(ks, rx_status, ks->netdev, "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 8757a2c..7c94c08 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -4088,7 +4088,12 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) [skb->priority & (MAX_TX_FIFOS - 1)]; fifo = &mac_control->fifos[queue]; - spin_lock_irqsave(&fifo->tx_lock, flags); + if (do_spin_lock) + spin_lock_irqsave(&fifo->tx_lock, flags); + else { + if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) + return NETDEV_TX_LOCKED; + } if (sp->config.multiq) { if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index b3ba6fe..39ab4d0 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, skb->protocol = eth_type_trans(skb, netdev); if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else skb->ip_summed = CHECKSUM_NONE; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; napi_gro_receive(&adapter->napi, skb); 
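/*
 * Editorial aside, not part of the patch: the pch_gbe hunk above, being
 * part of a revert to v3.8, restores logic that marks a frame
 * CHECKSUM_NONE when the hardware reports the TCP/IP checksum as OK and
 * CHECKSUM_UNNECESSARY otherwise; the later fix inverted this. The
 * conventional mapping is sketched below; FOO_RXD_CSUM_OK and the
 * foo_* names are hypothetical, not symbols from this tree.
 */
#include <linux/skbuff.h>
#include <linux/bits.h>

#define FOO_RXD_CSUM_OK	BIT(0)	/* hypothetical RX descriptor status bit */

static void foo_set_rx_csum(struct sk_buff *skb, u32 status, bool rx_csum_on)
{
	if (rx_csum_on && (status & FOO_RXD_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hw verified it */
	else
		skb->ip_summed = CHECKSUM_NONE;		/* let the stack check */
}
/* End of aside; the patch resumes below. */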
(*work_done)++; @@ -2114,8 +2114,10 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; unsigned long flags; - spin_lock_irqsave(&tx_ring->tx_lock, flags); - + if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { + /* Collision - tell upper layer to requeue */ + return NETDEV_TX_LOCKED; + } if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); spin_unlock_irqrestore(&tx_ring->tx_lock, flags); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 3bed27d..5dc1616 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2216,7 +2216,7 @@ static void rtl8139_poll_controller(struct net_device *dev) struct rtl8139_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; - disable_irq_nosync(irq); + disable_irq(irq); rtl8139_interrupt(irq, dev); enable_irq(irq); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2d56d71..998974f 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -3819,30 +3819,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) } } -static void rtl_speed_down(struct rtl8169_private *tp) -{ - u32 adv; - int lpa; - - rtl_writephy(tp, 0x1f, 0x0000); - lpa = rtl_readphy(tp, MII_LPA); - - if (lpa & (LPA_10HALF | LPA_10FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; - else if (lpa & (LPA_100HALF | LPA_100FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; - else - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0); - - rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - adv); -} - static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; @@ -3873,7 +3849,9 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) return false; - rtl_speed_down(tp); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, 0x0000); + rtl_wol_suspend_quirk(tp); return true; @@ -5779,14 +5757,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, goto err_stop_0; } - /* 8168evl does not automatically pad to minimum length. 
*/ - if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 && - skb->len < ETH_ZLEN)) { - if (skb_padto(skb, ETH_ZLEN)) - goto err_update_stats; - skb_put(skb, ETH_ZLEN - skb->len); - } - if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) goto err_stop_0; @@ -5858,7 +5828,6 @@ err_dma_1: rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); err_dma_0: dev_kfree_skb(skb); -err_update_stats: dev->stats.tx_dropped++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0bc0099..bf57b3c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -779,7 +779,6 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) tx_queue->txd.entries); } - efx_device_detach_sync(efx); efx_stop_all(efx); efx_stop_interrupts(efx, true); @@ -833,7 +832,6 @@ out: efx_start_interrupts(efx, true); efx_start_all(efx); - netif_device_attach(efx->net_dev); return rc; rollback: @@ -1643,12 +1641,8 @@ static void efx_stop_all(struct efx_nic *efx) /* Flush efx_mac_work(), refill_workqueue, monitor_work */ efx_flush_all(efx); - /* Stop the kernel transmit interface. This is only valid if - * the device is stopped or detached; otherwise the watchdog - * may fire immediately. - */ - WARN_ON(netif_running(efx->net_dev) && - netif_device_present(efx->net_dev)); + /* Stop the kernel transmit interface late, so the watchdog + * timer isn't ticking over the flush */ netif_tx_disable(efx->net_dev); efx_stop_datapath(efx); @@ -1969,18 +1963,16 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) if (new_mtu > EFX_MAX_MTU) return -EINVAL; - netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); - - efx_device_detach_sync(efx); efx_stop_all(efx); + netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); + mutex_lock(&efx->mac_lock); net_dev->mtu = new_mtu; efx->type->reconfigure_mac(efx); mutex_unlock(&efx->mac_lock); efx_start_all(efx); - netif_device_attach(efx->net_dev); return 0; } diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index d2f790d..50247df 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) * TX scheduler is stopped when we're done and before * netif_device_present() becomes false. */ - netif_tx_lock_bh(dev); + netif_tx_lock(dev); netif_device_detach(dev); - netif_tx_unlock_bh(dev); + netif_tx_unlock(dev); } #endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 0a90abd..2d756c1 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -210,7 +210,6 @@ struct efx_tx_queue { * Will be %NULL if the buffer slot is currently free. * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE. * Will be %NULL if the buffer slot is currently free. - * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE. * @len: Buffer length, in bytes. * @flags: Flags for buffer and packet state. 
*/ @@ -220,8 +219,7 @@ struct efx_rx_buffer { struct sk_buff *skb; struct page *page; } u; - u16 page_offset; - u16 len; + unsigned int len; u16 flags; }; #define EFX_RX_BUF_PAGE 0x0001 diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index eaa8e87..0ad790c 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -376,8 +376,7 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) return false; tx_queue->empty_read_count = 0; - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 - && tx_queue->write_count - write_count == 1; + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; } /* For each entry inserted into the software descriptor ring, create a diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 3f93624..0767043f 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1439,7 +1439,7 @@ static int efx_phc_settime(struct ptp_clock_info *ptp, delta = timespec_sub(*e_ts, time_now); - rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta)); + efx_phc_adjtime(ptp, timespec_to_ns(&delta)); if (rc != 0) return rc; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index bb579a6..d780a0d 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -90,7 +90,11 @@ static unsigned int rx_refill_threshold; static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, struct efx_rx_buffer *buf) { - return buf->page_offset + efx->type->rx_buffer_hash_size; + /* Offset is always within one page, so we don't need to consider + * the page order. + */ + return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) + + efx->type->rx_buffer_hash_size; } static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) { @@ -183,7 +187,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; struct efx_rx_buffer *rx_buf; struct page *page; - unsigned int page_offset; struct efx_rx_page_state *state; dma_addr_t dma_addr; unsigned index, count; @@ -208,14 +211,12 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) state->dma_addr = dma_addr; dma_addr += sizeof(struct efx_rx_page_state); - page_offset = sizeof(struct efx_rx_page_state); split: index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; rx_buf->u.page = page; - rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; rx_buf->flags = EFX_RX_BUF_PAGE; ++rx_queue->added_count; @@ -226,7 +227,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) /* Use the second half of the page */ get_page(page); dma_addr += (PAGE_SIZE >> 1); - page_offset += (PAGE_SIZE >> 1); ++count; goto split; } @@ -236,8 +236,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) } static void efx_unmap_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf, - unsigned int used_len) + struct efx_rx_buffer *rx_buf) { if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { struct efx_rx_page_state *state; @@ -248,10 +247,6 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, state->dma_addr, efx_rx_buf_size(efx), DMA_FROM_DEVICE); - } else if (used_len) { - dma_sync_single_for_cpu(&efx->pci_dev->dev, - rx_buf->dma_addr, used_len, - DMA_FROM_DEVICE); } } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) 
{ dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr, @@ -274,7 +269,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx, static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) { - efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0); + efx_unmap_rx_buffer(rx_queue->efx, rx_buf); efx_free_rx_buffer(rx_queue->efx, rx_buf); } @@ -540,10 +535,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, goto out; } - /* Release and/or sync DMA mapping - assumes all RX buffers - * consumed in-order per RX queue + /* Release card resources - assumes all RX buffers consumed in-order + * per RX queue */ - efx_unmap_rx_buffer(efx, rx_buf, len); + efx_unmap_rx_buffer(efx, rx_buf); /* Prefetch nice and early so data will (hopefully) be in cache by * the time we look at it. diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 9dfd4f5..1e4d743 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1630,8 +1630,13 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, unsigned long flags; ENTER; - - spin_lock_irqsave(&priv->tx_lock, flags); + local_irq_save(flags); + if (!spin_trylock(&priv->tx_lock)) { + local_irq_restore(flags); + DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", + BDX_DRV_NAME, ndev->name); + return NETDEV_TX_LOCKED; + } /* build tx descriptor */ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index de71b1e..4426151 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -88,8 +88,8 @@ config TLAN Please email feedback to . config CPMAC - tristate "TI AR7 CPMAC Ethernet support" - depends on AR7 + tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)" + depends on EXPERIMENTAL && AR7 select PHYLIB ---help--- TI AR7 CPMAC Ethernet support diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 70d1920..d9625f6 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -904,9 +904,10 @@ static int cpmac_set_ringparam(struct net_device *dev, static void cpmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, "cpmac", sizeof(info->driver)); - strlcpy(info->version, CPMAC_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); + strcpy(info->driver, "cpmac"); + strcpy(info->version, CPMAC_VERSION); + info->fw_version[0] = '\0'; + sprintf(info->bus_info, "%s", "cpmac"); info->regdump_len = 0; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1cfde0c..40aff68 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -32,7 +32,6 @@ #include #include #include -#include #include @@ -119,20 +118,6 @@ do { \ #define TX_PRIORITY_MAPPING 0x33221100 #define CPDMA_TX_PRIORITY_MAP 0x76543210 -#define CPSW_VLAN_AWARE BIT(1) -#define CPSW_ALE_VLAN_AWARE 1 - -#define CPSW_FIFO_NORMAL_MODE (0 << 15) -#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) -#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) - -#define CPSW_INTPACEEN (0x3f << 16) -#define CPSW_INTPRESCALE_MASK (0x7FF << 0) -#define CPSW_CMINTMAX_CNT 63 -#define CPSW_CMINTMIN_CNT 2 -#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) -#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) - #define cpsw_enable_irq(priv) \ do { \ u32 i; \ @@ -146,10 +131,6 @@ do { \ 
disable_irq_nosync(priv->irqs_table[i]); \ } while (0); -#define cpsw_slave_index(priv) \ - ((priv->data.dual_emac) ? priv->emac_port : \ - priv->data.active_slave) - static int debug_level; module_param(debug_level, int, 0); MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); @@ -171,15 +152,6 @@ struct cpsw_wr_regs { u32 rx_en; u32 tx_en; u32 misc_en; - u32 mem_allign1[8]; - u32 rx_thresh_stat; - u32 rx_stat; - u32 tx_stat; - u32 misc_stat; - u32 mem_allign2[8]; - u32 rx_imax; - u32 tx_imax; - }; struct cpsw_ss_regs { @@ -278,7 +250,7 @@ struct cpsw_ss_regs { struct cpsw_host_regs { u32 max_blks; u32 blk_cnt; - u32 tx_in_ctl; + u32 flow_thresh; u32 port_vlan; u32 tx_pri_map; u32 cpdma_tx_pri_map; @@ -305,9 +277,6 @@ struct cpsw_slave { u32 mac_control; struct cpsw_slave_data *data; struct phy_device *phy; - struct net_device *ndev; - u32 port_vlan; - u32 open_stat; }; static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) @@ -334,8 +303,6 @@ struct cpsw_priv { struct cpsw_host_regs __iomem *host_port_regs; u32 msg_enable; u32 version; - u32 coal_intvl; - u32 bus_freq_mhz; struct net_device_stats stats; int rx_packet_max; int host_port; @@ -348,69 +315,17 @@ struct cpsw_priv { /* snapshot of IRQ numbers */ u32 irqs_table[4]; u32 num_irqs; - bool irq_enabled; - struct cpts *cpts; - u32 emac_port; + struct cpts cpts; }; #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) -#define for_each_slave(priv, func, arg...) \ - do { \ - struct cpsw_slave *slave; \ - int n; \ - if (priv->data.dual_emac) \ - (func)((priv)->slaves + priv->emac_port, ##arg);\ - else \ - for (n = (priv)->data.slaves, \ - slave = (priv)->slaves; \ - n; n--) \ - (func)(slave++, ##arg); \ - } while (0) -#define cpsw_get_slave_ndev(priv, __slave_no__) \ - (priv->slaves[__slave_no__].ndev) -#define cpsw_get_slave_priv(priv, __slave_no__) \ - ((priv->slaves[__slave_no__].ndev) ? \ - netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ - -#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ - do { \ - if (!priv->data.dual_emac) \ - break; \ - if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ - ndev = cpsw_get_slave_ndev(priv, 0); \ - priv = netdev_priv(ndev); \ - skb->dev = ndev; \ - } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ - ndev = cpsw_get_slave_ndev(priv, 1); \ - priv = netdev_priv(ndev); \ - skb->dev = ndev; \ - } \ - } while (0) -#define cpsw_add_mcast(priv, addr) \ - do { \ - if (priv->data.dual_emac) { \ - struct cpsw_slave *slave = priv->slaves + \ - priv->emac_port; \ - int slave_port = cpsw_get_slave_port(priv, \ - slave->slave_num); \ - cpsw_ale_add_mcast(priv->ale, addr, \ - 1 << slave_port | 1 << priv->host_port, \ - ALE_VLAN, slave->port_vlan, 0); \ - } else { \ - cpsw_ale_add_mcast(priv->ale, addr, \ - ALE_ALL_PORTS << priv->host_port, \ - 0, 0, 0); \ - } \ +#define for_each_slave(priv, func, arg...) 
\ + do { \ + int idx; \ + for (idx = 0; idx < (priv)->data.slaves; idx++) \ + (func)((priv)->slaves + idx, ##arg); \ } while (0) -static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) -{ - if (priv->host_port == 0) - return slave_num + 1; - else - return slave_num; -} - static void cpsw_ndo_set_rx_mode(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -429,7 +344,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* program multicast address list into ALE register */ netdev_for_each_mc_addr(ha, ndev) { - cpsw_add_mcast(priv, (u8 *)ha->addr); + cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr, + ALE_ALL_PORTS << priv->host_port, 0, 0); } } } @@ -458,12 +374,9 @@ void cpsw_tx_handler(void *token, int len, int status) struct net_device *ndev = skb->dev; struct cpsw_priv *priv = netdev_priv(ndev); - /* Check whether the queue is stopped due to stalled tx dma, if the - * queue is stopped then start the queue as we have free desc for tx - */ if (unlikely(netif_queue_stopped(ndev))) - netif_wake_queue(ndev); - cpts_tx_timestamp(priv->cpts, skb); + netif_start_queue(ndev); + cpts_tx_timestamp(&priv->cpts, skb); priv->stats.tx_packets++; priv->stats.tx_bytes += len; dev_kfree_skb_any(skb); @@ -472,106 +385,82 @@ void cpsw_tx_handler(void *token, int len, int status) void cpsw_rx_handler(void *token, int len, int status) { struct sk_buff *skb = token; - struct sk_buff *new_skb; struct net_device *ndev = skb->dev; struct cpsw_priv *priv = netdev_priv(ndev); int ret = 0; - cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); - - if (unlikely(status < 0)) { - /* the interface is going down, skbs are purged */ + /* free and bail if we are shutting down */ + if (unlikely(!netif_running(ndev)) || + unlikely(!netif_carrier_ok(ndev))) { dev_kfree_skb_any(skb); return; } - - new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); - if (new_skb) { + if (likely(status >= 0)) { skb_put(skb, len); - cpts_rx_timestamp(priv->cpts, skb); + cpts_rx_timestamp(&priv->cpts, skb); skb->protocol = eth_type_trans(skb, ndev); - netif_rx(skb); + netif_receive_skb(skb); priv->stats.rx_bytes += len; priv->stats.rx_packets++; - } else { - priv->stats.rx_dropped++; - new_skb = skb; + skb = NULL; } - ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, - skb_tailroom(new_skb), 0); - if (WARN_ON(ret < 0)) - dev_kfree_skb_any(new_skb); + if (unlikely(!netif_running(ndev))) { + if (skb) + dev_kfree_skb_any(skb); + return; + } + + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); + if (WARN_ON(!skb)) + return; + + ret = cpdma_chan_submit(priv->rxch, skb, skb->data, + skb_tailroom(skb), GFP_KERNEL); + } + WARN_ON(ret < 0); } static irqreturn_t cpsw_interrupt(int irq, void *dev_id) { struct cpsw_priv *priv = dev_id; - unsigned long flags; - u32 rx, tx, rx_thresh; - - spin_lock_irqsave(&priv->lock, flags); - rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat); - rx = __raw_readl(&priv->wr_regs->rx_stat); - tx = __raw_readl(&priv->wr_regs->tx_stat); - if (!rx_thresh && !rx && !tx) { - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_NONE; - } - cpsw_intr_disable(priv); - if (priv->irq_enabled == true) { + if (likely(netif_running(priv->ndev))) { + cpsw_intr_disable(priv); cpsw_disable_irq(priv); - priv->irq_enabled = false; - } - spin_unlock_irqrestore(&priv->lock, flags); - - if (netif_running(priv->ndev)) { napi_schedule(&priv->napi); - return IRQ_HANDLED; } + return IRQ_HANDLED; +} - priv = cpsw_get_slave_priv(priv, 
1); - if (!priv) - return IRQ_NONE; - - if (netif_running(priv->ndev)) { - napi_schedule(&priv->napi); - return IRQ_HANDLED; - } - return IRQ_NONE; +static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) +{ + if (priv->host_port == 0) + return slave_num + 1; + else + return slave_num; } static int cpsw_poll(struct napi_struct *napi, int budget) { struct cpsw_priv *priv = napi_to_priv(napi); int num_tx, num_rx; - unsigned long flags; - spin_lock_irqsave(&priv->lock, flags); num_tx = cpdma_chan_process(priv->txch, 128); - if (num_tx) - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - num_rx = cpdma_chan_process(priv->rxch, budget); - if (num_rx < budget) { - struct cpsw_priv *prim_cpsw; - - napi_complete(napi); - cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - prim_cpsw = cpsw_get_slave_priv(priv, 0); - if (prim_cpsw->irq_enabled == false) { - cpsw_enable_irq(priv); - prim_cpsw->irq_enabled = true; - } - } - spin_unlock_irqrestore(&priv->lock, flags); if (num_rx || num_tx) cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", num_rx, num_tx); + if (num_rx < budget) { + napi_complete(napi); + cpsw_intr_enable(priv); + cpdma_ctlr_eoi(priv->dma); + cpsw_enable_irq(priv); + } + return num_rx; } @@ -659,77 +548,6 @@ static void cpsw_adjust_link(struct net_device *ndev) } } -static int cpsw_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - - coal->rx_coalesce_usecs = priv->coal_intvl; - return 0; -} - -static int cpsw_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - u32 int_ctrl; - u32 num_interrupts = 0; - u32 prescale = 0; - u32 addnl_dvdr = 1; - u32 coal_intvl = 0; - - if (!coal->rx_coalesce_usecs) - return -EINVAL; - - coal_intvl = coal->rx_coalesce_usecs; - - int_ctrl = readl(&priv->wr_regs->int_control); - prescale = priv->bus_freq_mhz * 4; - - if (coal_intvl < CPSW_CMINTMIN_INTVL) - coal_intvl = CPSW_CMINTMIN_INTVL; - - if (coal_intvl > CPSW_CMINTMAX_INTVL) { - /* Interrupt pacer works with 4us Pulse, we can - * throttle further by dilating the 4us pulse. 
- */ - addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; - - if (addnl_dvdr > 1) { - prescale *= addnl_dvdr; - if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) - coal_intvl = (CPSW_CMINTMAX_INTVL - * addnl_dvdr); - } else { - addnl_dvdr = 1; - coal_intvl = CPSW_CMINTMAX_INTVL; - } - } - - num_interrupts = (1000 * addnl_dvdr) / coal_intvl; - writel(num_interrupts, &priv->wr_regs->rx_imax); - writel(num_interrupts, &priv->wr_regs->tx_imax); - - int_ctrl |= CPSW_INTPACEEN; - int_ctrl &= (~CPSW_INTPRESCALE_MASK); - int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); - writel(int_ctrl, &priv->wr_regs->int_control); - - cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); - if (priv->data.dual_emac) { - int i; - - for (i = 0; i < priv->data.slaves; i++) { - priv = netdev_priv(priv->slaves[i].ndev); - priv->coal_intvl = coal_intvl; - } - } else { - priv->coal_intvl = coal_intvl; - } - - return 0; -} - static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) { static char *leader = "........................................"; @@ -741,54 +559,6 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) leader + strlen(name), val); } -static int cpsw_common_res_usage_state(struct cpsw_priv *priv) -{ - u32 i; - u32 usage_count = 0; - - if (!priv->data.dual_emac) - return 0; - - for (i = 0; i < priv->data.slaves; i++) - if (priv->slaves[i].open_stat) - usage_count++; - - return usage_count; -} - -static inline int cpsw_tx_packet_submit(struct net_device *ndev, - struct cpsw_priv *priv, struct sk_buff *skb) -{ - if (!priv->data.dual_emac) - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 0); - - if (ndev == cpsw_get_slave_ndev(priv, 0)) - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 1); - else - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 2); -} - -static inline void cpsw_add_dual_emac_def_ale_entries( - struct cpsw_priv *priv, struct cpsw_slave *slave, - u32 slave_port) -{ - u32 port_mask = 1 << slave_port | 1 << priv->host_port; - - if (priv->version == CPSW_VERSION_1) - slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); - else - slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN); - cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask, - port_mask, port_mask, 0); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - port_mask, ALE_VLAN, slave->port_vlan, 0); - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, slave->port_vlan); -} - static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) { char name[32]; @@ -818,11 +588,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave_port = cpsw_get_slave_port(priv, slave->slave_num); - if (priv->data.dual_emac) - cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port); - else - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + 1 << slave_port, 0, ALE_MCAST_FWD_2); slave->phy = phy_connect(priv->ndev, slave->data->phy_id, &cpsw_adjust_link, 0, slave->data->phy_if); @@ -837,44 +604,14 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } } -static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) -{ - const int vlan = priv->data.default_vlan; - const int port = priv->host_port; - u32 reg; - int i; - - reg = (priv->version == CPSW_VERSION_1) ? 
CPSW1_PORT_VLAN : - CPSW2_PORT_VLAN; - - writel(vlan, &priv->host_port_regs->port_vlan); - - for (i = 0; i < priv->data.slaves; i++) - slave_write(priv->slaves + i, vlan, reg); - - cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, - ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, - (ALE_PORT_1 | ALE_PORT_2) << port); -} - static void cpsw_init_host_port(struct cpsw_priv *priv) { - u32 control_reg; - u32 fifo_mode; - /* soft reset the controller and initialize ale */ soft_reset("cpsw", &priv->regs->soft_reset); cpsw_ale_start(priv->ale); /* switch to vlan unaware mode */ - cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE, - CPSW_ALE_VLAN_AWARE); - control_reg = readl(&priv->regs->control); - control_reg |= CPSW_VLAN_AWARE; - writel(control_reg, &priv->regs->control); - fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE : - CPSW_FIFO_NORMAL_MODE; - writel(fifo_mode, &priv->host_port_regs->tx_in_ctl); + cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0); /* setup host port priority mapping */ __raw_writel(CPDMA_TX_PRIORITY_MAP, @@ -884,32 +621,18 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) cpsw_ale_control_set(priv->ale, priv->host_port, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); - if (!priv->data.dual_emac) { - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, - 0, 0); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2); - } -} - -static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) -{ - if (!slave->phy) - return; - phy_stop(slave->phy); - phy_disconnect(slave->phy); - slave->phy = NULL; + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0); + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + 1 << priv->host_port, 0, ALE_MCAST_FWD_2); } static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_priv *prim_cpsw; int i, ret; u32 reg; - if (!cpsw_common_res_usage_state(priv)) - cpsw_intr_disable(priv); + cpsw_intr_disable(priv); netif_carrier_off(ndev); pm_runtime_get_sync(&priv->pdev->dev); @@ -921,81 +644,53 @@ static int cpsw_ndo_open(struct net_device *ndev) CPSW_RTL_VERSION(reg)); /* initialize host and slave ports */ - if (!cpsw_common_res_usage_state(priv)) - cpsw_init_host_port(priv); + cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); - /* Add default VLAN */ - if (!priv->data.dual_emac) - cpsw_add_default_vlan(priv); - - if (!cpsw_common_res_usage_state(priv)) { - /* setup tx dma to fixed prio and zero offset */ - cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); - cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); - - /* disable priority elevation */ - __raw_writel(0, &priv->regs->ptype); - - /* enable statistics collection only on all ports */ - __raw_writel(0x7, &priv->regs->stat_port_en); - - if (WARN_ON(!priv->data.rx_descs)) - priv->data.rx_descs = 128; - - for (i = 0; i < priv->data.rx_descs; i++) { - struct sk_buff *skb; - - ret = -ENOMEM; - skb = __netdev_alloc_skb_ip_align(priv->ndev, - priv->rx_packet_max, GFP_KERNEL); - if (!skb) - goto err_cleanup; - ret = cpdma_chan_submit(priv->rxch, skb, skb->data, - skb_tailroom(skb), 0); - if (ret < 0) { - kfree_skb(skb); - goto err_cleanup; - } - } - /* continue even if we didn't manage to submit all - * receive descs - */ - cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); - } + /* setup tx dma to fixed prio and zero offset */ + cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); + 
cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); - /* Enable Interrupt pacing if configured */ - if (priv->coal_intvl != 0) { - struct ethtool_coalesce coal; + /* disable priority elevation and enable statistics on all ports */ + __raw_writel(0, &priv->regs->ptype); - coal.rx_coalesce_usecs = (priv->coal_intvl << 4); - cpsw_set_coalesce(ndev, &coal); - } + /* enable statistics collection only on the host port */ + __raw_writel(0x7, &priv->regs->stat_port_en); - prim_cpsw = cpsw_get_slave_priv(priv, 0); - if (prim_cpsw->irq_enabled == false) { - if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { - prim_cpsw->irq_enabled = true; - cpsw_enable_irq(prim_cpsw); - } + if (WARN_ON(!priv->data.rx_descs)) + priv->data.rx_descs = 128; + + for (i = 0; i < priv->data.rx_descs; i++) { + struct sk_buff *skb; + + ret = -ENOMEM; + skb = netdev_alloc_skb_ip_align(priv->ndev, + priv->rx_packet_max); + if (!skb) + break; + ret = cpdma_chan_submit(priv->rxch, skb, skb->data, + skb_tailroom(skb), GFP_KERNEL); + if (WARN_ON(ret < 0)) + break; } + /* continue even if we didn't manage to submit all receive descs */ + cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); napi_enable(&priv->napi); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + cpdma_ctlr_eoi(priv->dma); - if (priv->data.dual_emac) - priv->slaves[priv->emac_port].open_stat = true; return 0; +} -err_cleanup: - cpdma_ctlr_stop(priv->dma); - for_each_slave(priv, cpsw_slave_stop, priv); - pm_runtime_put_sync(&priv->pdev->dev); - netif_carrier_off(priv->ndev); - return ret; +static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + if (!slave->phy) + return; + phy_stop(slave->phy); + phy_disconnect(slave->phy); + slave->phy = NULL; } static int cpsw_ndo_stop(struct net_device *ndev) @@ -1006,17 +701,12 @@ static int cpsw_ndo_stop(struct net_device *ndev) netif_stop_queue(priv->ndev); napi_disable(&priv->napi); netif_carrier_off(priv->ndev); - - if (cpsw_common_res_usage_state(priv) <= 1) { - cpsw_intr_disable(priv); - cpdma_ctlr_int_ctrl(priv->dma, false); - cpdma_ctlr_stop(priv->dma); - cpsw_ale_stop(priv->ale); - } + cpsw_intr_disable(priv); + cpdma_ctlr_int_ctrl(priv->dma, false); + cpdma_ctlr_stop(priv->dma); + cpsw_ale_stop(priv->ale); for_each_slave(priv, cpsw_slave_stop, priv); pm_runtime_put_sync(&priv->pdev->dev); - if (priv->data.dual_emac) - priv->slaves[priv->emac_port].open_stat = false; return 0; } @@ -1034,24 +724,18 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - priv->cpts->tx_enable) + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_tx_timestamp(skb); - ret = cpsw_tx_packet_submit(ndev, priv, skb); + ret = cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, GFP_KERNEL); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); goto fail; } - /* If there is no more tx desc left free then we need to - * tell the kernel to stop sending us tx frames. 
- */ - if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) - netif_stop_queue(ndev); - return NETDEV_TX_OK; fail: priv->stats.tx_dropped++; @@ -1086,10 +770,10 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags) static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) { - struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; + struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; u32 ts_en, seq_id; - if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { + if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) { slave_write(slave, 0, CPSW1_TS_CTL); return; } @@ -1097,10 +781,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; - if (priv->cpts->tx_enable) + if (priv->cpts.tx_enable) ts_en |= CPSW_V1_TS_TX_EN; - if (priv->cpts->rx_enable) + if (priv->cpts.rx_enable) ts_en |= CPSW_V1_TS_RX_EN; slave_write(slave, ts_en, CPSW1_TS_CTL); @@ -1109,21 +793,16 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) { - struct cpsw_slave *slave; + struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; u32 ctrl, mtype; - if (priv->data.dual_emac) - slave = &priv->slaves[priv->emac_port]; - else - slave = &priv->slaves[priv->data.active_slave]; - ctrl = slave_read(slave, CPSW2_CONTROL); ctrl &= ~CTRL_ALL_TS_MASK; - if (priv->cpts->tx_enable) + if (priv->cpts.tx_enable) ctrl |= CTRL_TX_TS_BITS; - if (priv->cpts->rx_enable) + if (priv->cpts.rx_enable) ctrl |= CTRL_RX_TS_BITS; mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; @@ -1136,7 +815,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) { struct cpsw_priv *priv = netdev_priv(dev); - struct cpts *cpts = priv->cpts; + struct cpts *cpts = &priv->cpts; struct hwtstamp_config cfg; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) @@ -1200,26 +879,14 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { - struct cpsw_priv *priv = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(req); - int slave_no = cpsw_slave_index(priv); - if (!netif_running(dev)) return -EINVAL; - switch (cmd) { #ifdef CONFIG_TI_CPTS - case SIOCSHWTSTAMP: + if (cmd == SIOCSHWTSTAMP) return cpsw_hwtstamp_ioctl(dev, req); #endif - case SIOCGMIIPHY: - data->phy_id = priv->slaves[slave_no].phy->addr; - break; - default: - return -ENOTSUPP; - } - - return 0; + return -ENOTSUPP; } static void cpsw_ndo_tx_timeout(struct net_device *ndev) @@ -1234,9 +901,7 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) cpdma_chan_start(priv->txch); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - + cpdma_ctlr_eoi(priv->dma); } static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) @@ -1255,79 +920,10 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev) cpsw_interrupt(ndev->irq, priv); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - + cpdma_ctlr_eoi(priv->dma); } #endif -static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, - unsigned short vid) -{ - int ret; - - ret = cpsw_ale_add_vlan(priv->ale, vid, - ALE_ALL_PORTS << 
priv->host_port, - 0, ALE_ALL_PORTS << priv->host_port, - (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); - if (ret != 0) - return ret; - - ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); - if (ret != 0) - goto clean_vid; - - ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - ALE_ALL_PORTS << priv->host_port, - ALE_VLAN, vid, 0); - if (ret != 0) - goto clean_vlan_ucast; - return 0; - -clean_vlan_ucast: - cpsw_ale_del_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); -clean_vid: - cpsw_ale_del_vlan(priv->ale, vid, 0); - return ret; -} - -static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, - unsigned short vid) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - - if (vid == priv->data.default_vlan) - return 0; - - dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); - return cpsw_add_vlan_ale_entry(priv, vid); -} - -static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, - unsigned short vid) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - int ret; - - if (vid == priv->data.default_vlan) - return 0; - - dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); - ret = cpsw_ale_del_vlan(priv->ale, vid, 0); - if (ret != 0) - return ret; - - ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); - if (ret != 0) - return ret; - - return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, - 0, ALE_VLAN, vid); -} - static const struct net_device_ops cpsw_netdev_ops = { .ndo_open = cpsw_ndo_open, .ndo_stop = cpsw_ndo_stop, @@ -1342,18 +938,15 @@ static const struct net_device_ops cpsw_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cpsw_ndo_poll_controller, #endif - .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, }; static void cpsw_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct cpsw_priv *priv = netdev_priv(ndev); - - strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver)); - strlcpy(info->version, "1.0", sizeof(info->version)); - strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info)); + strcpy(info->driver, "TI CPSW Driver v1.0"); + strcpy(info->version, "1.0"); + strcpy(info->bus_info, priv->pdev->name); } static u32 cpsw_get_msglevel(struct net_device *ndev) @@ -1381,7 +974,7 @@ static int cpsw_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = priv->cpts->phc_index; + info->phc_index = priv->cpts.phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1400,39 +993,12 @@ static int cpsw_get_ts_info(struct net_device *ndev, return 0; } -static int cpsw_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); - - if (priv->slaves[slave_no].phy) - return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd); - else - return -EOPNOTSUPP; -} - -static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); - - if (priv->slaves[slave_no].phy) - return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd); - else - return -EOPNOTSUPP; -} - static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, .set_msglevel = cpsw_set_msglevel, .get_link = ethtool_op_get_link, 
.get_ts_info = cpsw_get_ts_info, - .get_settings = cpsw_get_settings, - .set_settings = cpsw_set_settings, - .get_coalesce = cpsw_get_coalesce, - .set_coalesce = cpsw_set_coalesce, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@ -1445,7 +1011,6 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, slave->data = data; slave->regs = regs + slave_reg_ofs; slave->sliver = regs + sliver_reg_ofs; - slave->port_vlan = data->dual_emac_res_vlan; } static int cpsw_probe_dt(struct cpsw_platform_data *data, @@ -1465,16 +1030,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->slaves = prop; - if (of_property_read_u32(node, "active_slave", &prop)) { - pr_err("Missing active_slave property in the DT.\n"); - if (of_property_read_u32(node, "cpts_active_slave", &prop)) { - ret = -EINVAL; - goto error_ret; - } else { - pr_err("Using old cpts_active_slave as fallback.\n"); - } + if (of_property_read_u32(node, "cpts_active_slave", &prop)) { + pr_err("Missing cpts_active_slave property in the DT.\n"); + ret = -EINVAL; + goto error_ret; } - data->active_slave = prop; + data->cpts_active_slave = prop; if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { pr_err("Missing cpts_clock_mult property in the DT.\n"); @@ -1490,10 +1051,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->cpts_clock_shift = prop; - data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data), - GFP_KERNEL); - if (!data->slave_data) + data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * + data->slaves, GFP_KERNEL); + if (!data->slave_data) { + pr_err("Could not allocate slave memory.\n"); return -EINVAL; + } if (of_property_read_u32(node, "cpdma_channels", &prop)) { pr_err("Missing cpdma_channels property in the DT.\n"); @@ -1530,9 +1093,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->mac_control = prop; - if (!of_property_read_u32(node, "dual_emac", &prop)) - data->dual_emac = prop; - /* * Populate all the child nodes here... 
*/ @@ -1551,7 +1111,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, struct platform_device *mdio; parp = of_get_property(slave_node, "phy_id", &lenp); - if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { + if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { pr_err("Missing slave[%d] phy_id property\n", i); ret = -EINVAL; goto error_ret; @@ -1566,18 +1126,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (mac_addr) memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); - if (data->dual_emac) { - if (of_property_read_u32(slave_node, "dual_emac_res_vlan", - &prop)) { - pr_err("Missing dual_emac_res_vlan in DT.\n"); - slave_data->dual_emac_res_vlan = i+1; - pr_err("Using %d as Reserved VLAN for %d slave\n", - slave_data->dual_emac_res_vlan, i); - } else { - slave_data->dual_emac_res_vlan = prop; - } - } - i++; } @@ -1588,84 +1136,9 @@ error_ret: return ret; } -static int cpsw_probe_dual_emac(struct platform_device *pdev, - struct cpsw_priv *priv) -{ - struct cpsw_platform_data *data = &priv->data; - struct net_device *ndev; - struct cpsw_priv *priv_sl2; - int ret = 0, i; - - ndev = alloc_etherdev(sizeof(struct cpsw_priv)); - if (!ndev) { - pr_err("cpsw: error allocating net_device\n"); - return -ENOMEM; - } - - priv_sl2 = netdev_priv(ndev); - spin_lock_init(&priv_sl2->lock); - priv_sl2->data = *data; - priv_sl2->pdev = pdev; - priv_sl2->ndev = ndev; - priv_sl2->dev = &ndev->dev; - priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); - priv_sl2->rx_packet_max = max(rx_packet_max, 128); - - if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { - memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, - ETH_ALEN); - pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); - } else { - random_ether_addr(priv_sl2->mac_addr); - pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); - } - memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); - - priv_sl2->slaves = priv->slaves; - priv_sl2->clk = priv->clk; - - priv_sl2->coal_intvl = 0; - priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; - - priv_sl2->cpsw_res = priv->cpsw_res; - priv_sl2->regs = priv->regs; - priv_sl2->host_port = priv->host_port; - priv_sl2->host_port_regs = priv->host_port_regs; - priv_sl2->wr_regs = priv->wr_regs; - priv_sl2->dma = priv->dma; - priv_sl2->txch = priv->txch; - priv_sl2->rxch = priv->rxch; - priv_sl2->ale = priv->ale; - priv_sl2->emac_port = 1; - priv->slaves[1].ndev = ndev; - priv_sl2->cpts = priv->cpts; - priv_sl2->version = priv->version; - - for (i = 0; i < priv->num_irqs; i++) { - priv_sl2->irqs_table[i] = priv->irqs_table[i]; - priv_sl2->num_irqs = priv->num_irqs; - } - ndev->features |= NETIF_F_HW_VLAN_FILTER; - - ndev->netdev_ops = &cpsw_netdev_ops; - SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); - netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); - - /* register the network device */ - SET_NETDEV_DEV(ndev, &pdev->dev); - ret = register_netdev(ndev); - if (ret) { - pr_err("cpsw: error registering net device\n"); - free_netdev(ndev); - ret = -ENODEV; - } - - return ret; -} - static int cpsw_probe(struct platform_device *pdev) { - struct cpsw_platform_data *data; + struct cpsw_platform_data *data = pdev->dev.platform_data; struct net_device *ndev; struct cpsw_priv *priv; struct cpdma_params dma_params; @@ -1689,11 +1162,6 @@ static int cpsw_probe(struct platform_device *pdev) priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); - priv->cpts = devm_kzalloc(&pdev->dev, 
sizeof(struct cpts), GFP_KERNEL); - if (!ndev) { - pr_err("error allocating cpts\n"); - goto clean_ndev_ret; - } /* * This may be required here for child devices. @@ -1726,17 +1194,12 @@ static int cpsw_probe(struct platform_device *pdev) for (i = 0; i < data->slaves; i++) priv->slaves[i].slave_num = i; - priv->slaves[0].ndev = ndev; - priv->emac_port = 0; - priv->clk = clk_get(&pdev->dev, "fck"); if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "fck is not found\n"); ret = -ENODEV; goto clean_slave_ret; } - priv->coal_intvl = 0; - priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!priv->cpsw_res) { @@ -1785,7 +1248,7 @@ static int cpsw_probe(struct platform_device *pdev) switch (priv->version) { case CPSW_VERSION_1: priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET; dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; @@ -1796,7 +1259,7 @@ static int cpsw_probe(struct platform_device *pdev) break; case CPSW_VERSION_2: priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET; dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; @@ -1878,13 +1341,12 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } priv->irqs_table[k] = i; - priv->num_irqs = k + 1; + priv->num_irqs = k; } k++; } - priv->irq_enabled = true; - ndev->features |= NETIF_F_HW_VLAN_FILTER; + ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */ ndev->netdev_ops = &cpsw_netdev_ops; SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); @@ -1899,26 +1361,17 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_irq_ret; } - if (cpts_register(&pdev->dev, priv->cpts, + if (cpts_register(&pdev->dev, &priv->cpts, data->cpts_clock_mult, data->cpts_clock_shift)) dev_err(priv->dev, "error registering cpts device\n"); cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", priv->cpsw_res->start, ndev->irq); - if (priv->data.dual_emac) { - ret = cpsw_probe_dual_emac(pdev, priv); - if (ret) { - cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); - goto clean_irq_ret; - } - } - return 0; clean_irq_ret: - for (i = 0; i < priv->num_irqs; i++) - free_irq(priv->irqs_table[i], priv); + free_irq(ndev->irq, priv); clean_ale_ret: cpsw_ale_destroy(priv->ale); clean_dma_ret: @@ -1941,8 +1394,7 @@ clean_slave_ret: pm_runtime_disable(&pdev->dev); kfree(priv->slaves); clean_ndev_ret: - kfree(priv->data.slave_data); - free_netdev(priv->ndev); + free_netdev(ndev); return ret; } @@ -1950,17 +1402,12 @@ static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); - int i; + pr_info("removing device"); platform_set_drvdata(pdev, NULL); - if (priv->data.dual_emac) - unregister_netdev(cpsw_get_slave_ndev(priv, 1)); - unregister_netdev(ndev); - - cpts_unregister(priv->cpts); - for (i = 0; i < priv->num_irqs; i++) - free_irq(priv->irqs_table[i], priv); + cpts_unregister(&priv->cpts); + free_irq(ndev->irq, priv); cpsw_ale_destroy(priv->ale); cpdma_chan_destroy(priv->txch); cpdma_chan_destroy(priv->rxch); @@ -1974,10 +1421,8 @@ static int cpsw_remove(struct 
platform_device *pdev) pm_runtime_disable(&pdev->dev); clk_put(priv->clk); kfree(priv->slaves); - kfree(priv->data.slave_data); - if (priv->data.dual_emac) - free_netdev(cpsw_get_slave_ndev(priv, 1)); free_netdev(ndev); + return 0; } @@ -2013,7 +1458,6 @@ static const struct of_device_id cpsw_of_mtable[] = { { .compatible = "ti,cpsw", }, { /* sentinel */ }, }; -MODULE_DEVICE_TABLE(of, cpsw_of_mtable); static struct platform_driver cpsw_driver = { .driver = { diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 7fa60d6..0e9ccc2 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry) return idx; } -int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) +static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; @@ -160,8 +160,6 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) type = cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) continue; - if (cpsw_ale_get_vlan_id(ale_entry) != vid) - continue; cpsw_ale_get_addr(ale_entry, entry_addr); if (memcmp(entry_addr, addr, 6) == 0) return idx; @@ -169,22 +167,6 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) return -ENOENT; } -int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid) -{ - u32 ale_entry[ALE_ENTRY_WORDS]; - int type, idx; - - for (idx = 0; idx < ale->params.ale_entries; idx++) { - cpsw_ale_read(ale, idx, ale_entry); - type = cpsw_ale_get_entry_type(ale_entry); - if (type != ALE_TYPE_VLAN) - continue; - if (cpsw_ale_get_vlan_id(ale_entry) == vid) - return idx; - } - return -ENOENT; -} - static int cpsw_ale_match_free(struct cpsw_ale *ale) { u32 ale_entry[ALE_ENTRY_WORDS]; @@ -292,32 +274,19 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask) return 0; } -static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, - int flags, u16 vid) -{ - if (flags & ALE_VLAN) { - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR); - cpsw_ale_set_vlan_id(ale_entry, vid); - } else { - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); - } -} - -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, - int flags, u16 vid) +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); - + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); cpsw_ale_set_addr(ale_entry, addr); cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_port_num(ale_entry, port); - idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); + idx = cpsw_ale_match_addr(ale, addr); if (idx < 0) idx = cpsw_ale_match_free(ale); if (idx < 0) @@ -329,13 +298,12 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, return 0; } -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, - int flags, u16 vid) +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? 
vid : 0); + idx = cpsw_ale_match_addr(ale, addr); if (idx < 0) return -ENOENT; @@ -345,19 +313,18 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, } int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int flags, u16 vid, int mcast_state) + int super, int mcast_state) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx, mask; - idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); + idx = cpsw_ale_match_addr(ale, addr); if (idx >= 0) cpsw_ale_read(ale, idx, ale_entry); - cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); - + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); cpsw_ale_set_addr(ale_entry, addr); - cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); + cpsw_ale_set_super(ale_entry, super); cpsw_ale_set_mcast_state(ale_entry, mcast_state); mask = cpsw_ale_get_port_mask(ale_entry); @@ -375,13 +342,12 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, return 0; } -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int flags, u16 vid) +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); + idx = cpsw_ale_match_addr(ale, addr); if (idx < 0) return -EINVAL; @@ -396,55 +362,6 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, return 0; } -int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, - int reg_mcast, int unreg_mcast) -{ - u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; - int idx; - - idx = cpsw_ale_match_vlan(ale, vid); - if (idx >= 0) - cpsw_ale_read(ale, idx, ale_entry); - - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); - cpsw_ale_set_vlan_id(ale_entry, vid); - - cpsw_ale_set_vlan_untag_force(ale_entry, untag); - cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); - cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); - cpsw_ale_set_vlan_member_list(ale_entry, port); - - if (idx < 0) - idx = cpsw_ale_match_free(ale); - if (idx < 0) - idx = cpsw_ale_find_ageable(ale); - if (idx < 0) - return -ENOMEM; - - cpsw_ale_write(ale, idx, ale_entry); - return 0; -} - -int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) -{ - u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; - int idx; - - idx = cpsw_ale_match_vlan(ale, vid); - if (idx < 0) - return -ENOENT; - - cpsw_ale_read(ale, idx, ale_entry); - - if (port_mask) - cpsw_ale_set_vlan_member_list(ale_entry, port_mask); - else - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); - - cpsw_ale_write(ale, idx, ale_entry); - return 0; -} - struct ale_control_info { const char *name; int offset, port_offset; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 30daa12..2bd09cb 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -64,14 +64,8 @@ enum cpsw_ale_port_state { }; /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */ -#define ALE_SECURE BIT(0) -#define ALE_BLOCKED BIT(1) -#define ALE_SUPER BIT(2) -#define ALE_VLAN BIT(3) - -#define ALE_PORT_HOST BIT(0) -#define ALE_PORT_1 BIT(1) -#define ALE_PORT_2 BIT(2) +#define ALE_SECURE 1 +#define ALE_BLOCKED 2 #define ALE_MCAST_FWD 0 #define ALE_MCAST_BLOCK_LEARN_FWD 1 @@ -87,17 +81,11 @@ void cpsw_ale_stop(struct cpsw_ale *ale); int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int 
port_mask); -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, - int flags, u16 vid); -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, - int flags, u16 vid); +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags); +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port); int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int flags, u16 vid, int mcast_state); -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int flags, u16 vid); -int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, - int reg_mcast, int unreg_mcast); -int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); + int super, int mcast_state); +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 49dfd59..4995673 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "davinci_cpdma.h" @@ -61,9 +60,6 @@ #define CPDMA_DESC_EOQ BIT(28) #define CPDMA_DESC_TD_COMPLETE BIT(27) #define CPDMA_DESC_PASS_CRC BIT(26) -#define CPDMA_DESC_TO_PORT_EN BIT(20) -#define CPDMA_TO_PORT_SHIFT 16 -#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16)) #define CPDMA_TEARDOWN_VALUE 0xfffffffc @@ -109,13 +105,13 @@ struct cpdma_ctlr { }; struct cpdma_chan { - struct cpdma_desc __iomem *head, *tail; - void __iomem *hdp, *cp, *rxfree; enum cpdma_state state; struct cpdma_ctlr *ctlr; int chan_num; spinlock_t lock; + struct cpdma_desc __iomem *head, *tail; int count; + void __iomem *hdp, *cp, *rxfree; u32 mask; cpdma_handler_fn handler; enum dma_data_direction dir; @@ -136,14 +132,6 @@ struct cpdma_chan { #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) -#define cpdma_desc_to_port(chan, mode, directed) \ - do { \ - if (!is_rx_chan(chan) && ((directed == 1) || \ - (directed == 2))) \ - mode |= (CPDMA_DESC_TO_PORT_EN | \ - (directed << CPDMA_TO_PORT_SHIFT)); \ - } while (0) - /* * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci * emac) have dedicated on-chip memory for these descriptors. 
Some other @@ -229,27 +217,17 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) } static struct cpdma_desc __iomem * -cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) +cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc) { unsigned long flags; int index; - int desc_start; - int desc_end; struct cpdma_desc __iomem *desc = NULL; spin_lock_irqsave(&pool->lock, flags); - if (is_rx) { - desc_start = 0; - desc_end = pool->num_desc/2; - } else { - desc_start = pool->num_desc/2; - desc_end = pool->num_desc; - } - - index = bitmap_find_next_zero_area(pool->bitmap, - desc_end, desc_start, num_desc, 0); - if (index < desc_end) { + index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0, + num_desc, 0); + if (index < pool->num_desc) { bitmap_set(pool->bitmap, index, num_desc); desc = pool->iomap + pool->desc_size * index; pool->used_desc++; @@ -313,16 +291,14 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) } if (ctlr->params.has_soft_reset) { - unsigned timeout = 10 * 100; + unsigned long timeout = jiffies + HZ/10; dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); - while (timeout) { + while (time_before(jiffies, timeout)) { if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) break; - udelay(10); - timeout--; } - WARN_ON(!timeout); + WARN_ON(!time_before(jiffies, timeout)); } for (i = 0; i < ctlr->num_chan; i++) { @@ -463,8 +439,10 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) if (ctlr->state != CPDMA_STATE_IDLE) cpdma_ctlr_stop(ctlr); - for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) - cpdma_chan_destroy(ctlr->channels[i]); + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { + if (ctlr->channels[i]) + cpdma_chan_destroy(ctlr->channels[i]); + } cpdma_desc_pool_destroy(ctlr->pool); spin_unlock_irqrestore(&ctlr->lock, flags); @@ -495,13 +473,11 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } -EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl); -void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr) { - dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); + dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0); } -EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, cpdma_handler_fn handler) @@ -676,7 +652,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan, } int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, - int len, int directed) + int len, gfp_t gfp_mask) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; @@ -692,7 +668,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, goto unlock_ret; } - desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); + desc = cpdma_desc_alloc(ctlr->pool, 1); if (!desc) { chan->stats.desc_alloc_fail++; ret = -ENOMEM; @@ -706,7 +682,6 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, buffer = dma_map_single(ctlr->dev, data, len, chan->dir); mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; - cpdma_desc_to_port(chan, mode, directed); desc_write(desc, hw_next, 0); desc_write(desc, hw_buffer, buffer); @@ -729,29 +704,6 @@ unlock_ret: } EXPORT_SYMBOL_GPL(cpdma_chan_submit); -bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) -{ - unsigned long flags; - int index; - bool ret; - struct cpdma_ctlr *ctlr = chan->ctlr; - struct cpdma_desc_pool *pool = ctlr->pool; - - spin_lock_irqsave(&pool->lock, flags); - - index = bitmap_find_next_zero_area(pool->bitmap, - 
pool->num_desc, pool->num_desc/2, 1, 0); - - if (index < pool->num_desc) - ret = true; - else - ret = false; - - spin_unlock_irqrestore(&pool->lock, flags); - return ret; -} -EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); - static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc, int outlen, int status) @@ -776,7 +728,6 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; int status, outlen; - int cb_status = 0; struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t desc_dma; unsigned long flags; @@ -798,8 +749,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) status = -EBUSY; goto unlock_ret; } - status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | - CPDMA_DESC_PORT_MASK); + status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE); chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); chan_write(chan, cp, desc_dma); @@ -812,12 +762,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) } spin_unlock_irqrestore(&chan->lock, flags); - if (unlikely(status & CPDMA_DESC_TD_COMPLETE)) - cb_status = -ENOSYS; - else - cb_status = status; - __cpdma_chan_free(chan, desc, outlen, cb_status); + __cpdma_chan_free(chan, desc, outlen, status); return status; unlock_ret: @@ -876,7 +822,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) struct cpdma_desc_pool *pool = ctlr->pool; unsigned long flags; int ret; - unsigned timeout; + unsigned long timeout; spin_lock_irqsave(&chan->lock, flags); if (chan->state != CPDMA_STATE_ACTIVE) { @@ -891,15 +837,14 @@ int cpdma_chan_stop(struct cpdma_chan *chan) dma_reg_write(ctlr, chan->td, chan_linear(chan)); /* wait for teardown complete */ - timeout = 100 * 100; /* 100 ms */ - while (timeout) { + timeout = jiffies + HZ/10; /* 100 msec */ + while (time_before(jiffies, timeout)) { u32 cp = chan_read(chan, cp); if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE) break; - udelay(10); - timeout--; + cpu_relax(); } - WARN_ON(!timeout); + WARN_ON(!time_before(jiffies, timeout)); chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); /* handle completed packets */ @@ -1039,6 +984,3 @@ unlock_ret: spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } -EXPORT_SYMBOL_GPL(cpdma_control_set); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index 86dee48..afa19a0 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -24,13 +24,6 @@ #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) #define chan_linear(chan) __chan_linear((chan)->chan_num) -#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7) - -#define CPDMA_EOI_RX_THRESH 0x0 -#define CPDMA_EOI_RX 0x1 -#define CPDMA_EOI_TX 0x2 -#define CPDMA_EOI_MISC 0x3 - struct cpdma_params { struct device *dev; void __iomem *dmaregs; @@ -89,13 +82,12 @@ int cpdma_chan_dump(struct cpdma_chan *chan); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats); int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, - int len, int directed); + int len, gfp_t gfp_mask); int cpdma_chan_process(struct cpdma_chan *chan, int quota); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); -void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value); +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable); -bool cpdma_check_free_tx_desc(struct cpdma_chan *chan); enum 
cpdma_control { CPDMA_CMD_IDLE, /* write-only */ diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 5aa9e4d..2a3e2c5 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -120,6 +120,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DEF_TX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_NUM_DESC (128) +#define EMAC_DEF_TX_NUM_DESC (128) #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ @@ -341,6 +342,7 @@ struct emac_priv { u32 mac_hash2; u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; u32 rx_addr_type; + atomic_t cur_tx; const char *phy_id; #ifdef CONFIG_OF struct device_node *phy_node; @@ -478,8 +480,8 @@ static void emac_dump_regs(struct emac_priv *priv) static void emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, emac_version_string, sizeof(info->driver)); - strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version)); + strcpy(info->driver, emac_version_string); + strcpy(info->version, EMAC_MODULE_VERSION); } /** @@ -1037,7 +1039,7 @@ static void emac_rx_handler(void *token, int len, int status) recycle: ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, - skb_tailroom(skb), 0); + skb_tailroom(skb), GFP_KERNEL); WARN_ON(ret == -ENOMEM); if (unlikely(ret < 0)) @@ -1048,12 +1050,12 @@ static void emac_tx_handler(void *token, int len, int status) { struct sk_buff *skb = token; struct net_device *ndev = skb->dev; + struct emac_priv *priv = netdev_priv(ndev); + + atomic_dec(&priv->cur_tx); - /* Check whether the queue is stopped due to stalled tx dma, if the - * queue is stopped then start the queue as we have free desc for tx - */ if (unlikely(netif_queue_stopped(ndev))) - netif_wake_queue(ndev); + netif_start_queue(ndev); ndev->stats.tx_packets++; ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); @@ -1092,17 +1094,14 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len, - 0); + GFP_KERNEL); if (unlikely(ret_code != 0)) { if (netif_msg_tx_err(priv) && net_ratelimit()) dev_err(emac_dev, "DaVinci EMAC: desc submit failed"); goto fail_tx; } - /* If there is no more tx desc left free then we need to - * tell the kernel to stop sending us tx frames. - */ - if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) + if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC) netif_stop_queue(ndev); return NETDEV_TX_OK; @@ -1265,6 +1264,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) /* Store mac addr in priv and rx channel and set it in EMAC hw */ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); + ndev->addr_assign_type &= ~NET_ADDR_RANDOM; /* MAC address is configured only after the interface is enabled. 
*/ if (netif_running(ndev)) { @@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget) * Polled functionality used by netconsole and others in non interrupt mode * */ -static void emac_poll_controller(struct net_device *ndev) +void emac_poll_controller(struct net_device *ndev) { struct emac_priv *priv = netdev_priv(ndev); @@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev) break; ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, - skb_tailroom(skb), 0); + skb_tailroom(skb), GFP_KERNEL); if (WARN_ON(ret < 0)) break; } @@ -1865,18 +1865,21 @@ static int davinci_emac_probe(struct platform_device *pdev) /* obtain emac clock from kernel */ - emac_clk = devm_clk_get(&pdev->dev, NULL); + emac_clk = clk_get(&pdev->dev, NULL); if (IS_ERR(emac_clk)) { dev_err(&pdev->dev, "failed to get EMAC clock\n"); return -EBUSY; } emac_bus_frequency = clk_get_rate(emac_clk); + clk_put(emac_clk); /* TODO: Probe PHY here if possible */ ndev = alloc_etherdev(sizeof(struct emac_priv)); - if (!ndev) - return -ENOMEM; + if (!ndev) { + rc = -ENOMEM; + goto no_ndev; + } platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); @@ -1890,7 +1893,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); rc = -ENODEV; - goto no_pdata; + goto probe_quit; } /* MAC addr and PHY mask , RMII enable info from platform_data */ @@ -1910,23 +1913,23 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!res) { dev_err(&pdev->dev,"error getting res\n"); rc = -ENOENT; - goto no_pdata; + goto probe_quit; } priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; size = resource_size(res); - if (!devm_request_mem_region(&pdev->dev, res->start, - size, ndev->name)) { + if (!request_mem_region(res->start, size, ndev->name)) { dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); rc = -ENXIO; - goto no_pdata; + goto probe_quit; } - priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size); + priv->remap_addr = ioremap(res->start, size); if (!priv->remap_addr) { dev_err(&pdev->dev, "unable to map IO\n"); rc = -ENOMEM; - goto no_pdata; + release_mem_region(res->start, size); + goto probe_quit; } priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; ndev->base_addr = (unsigned long)priv->remap_addr; @@ -1959,7 +1962,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!priv->dma) { dev_err(&pdev->dev, "error initializing DMA\n"); rc = -ENOMEM; - goto no_pdata; + goto no_dma; } priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), @@ -1968,14 +1971,14 @@ static int davinci_emac_probe(struct platform_device *pdev) emac_rx_handler); if (WARN_ON(!priv->txchan || !priv->rxchan)) { rc = -ENOMEM; - goto no_cpdma_chan; + goto no_irq_res; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "error getting irq res\n"); rc = -ENOENT; - goto no_cpdma_chan; + goto no_irq_res; } ndev->irq = res->start; @@ -1997,7 +2000,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (rc) { dev_err(&pdev->dev, "error in register_netdev\n"); rc = -ENODEV; - goto no_cpdma_chan; + goto no_irq_res; } @@ -2012,14 +2015,20 @@ static int davinci_emac_probe(struct platform_device *pdev) return 0; -no_cpdma_chan: +no_irq_res: if (priv->txchan) cpdma_chan_destroy(priv->txchan); if (priv->rxchan) cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); -no_pdata: +no_dma: + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + 
release_mem_region(res->start, resource_size(res)); + iounmap(priv->remap_addr); + +probe_quit: free_netdev(ndev); +no_ndev: return rc; } @@ -2032,12 +2041,14 @@ no_pdata: */ static int davinci_emac_remove(struct platform_device *pdev) { + struct resource *res; struct net_device *ndev = platform_get_drvdata(pdev); struct emac_priv *priv = netdev_priv(ndev); dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); platform_set_drvdata(pdev, NULL); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (priv->txchan) cpdma_chan_destroy(priv->txchan); @@ -2045,7 +2056,10 @@ static int davinci_emac_remove(struct platform_device *pdev) cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); + release_mem_region(res->start, resource_size(res)); + unregister_netdev(ndev); + iounmap(priv->remap_addr); free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 12aec17..cca2550 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -320,8 +320,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) int ret, addr; data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) + if (!data) { + dev_err(dev, "failed to alloc device data\n"); return -ENOMEM; + } data->bus = mdiobus_alloc(); if (!data->bus) { @@ -485,7 +487,6 @@ static const struct of_device_id davinci_mdio_of_mtable[] = { { .compatible = "ti,davinci_mdio", }, { /* sentinel */ }, }; -MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); static struct platform_driver davinci_mdio_driver = { .driver = { diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 60c400f..2272538 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -320,7 +320,6 @@ static void tlan_remove_one(struct pci_dev *pdev) free_netdev(dev); pci_set_drvdata(pdev, NULL); - cancel_work_sync(&priv->tlan_tqueue); } static void tlan_start(struct net_device *dev) @@ -1912,8 +1911,10 @@ static void tlan_reset_lists(struct net_device *dev) list->frame_size = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if (!skb) + if (!skb) { + netdev_err(dev, "Out of memory for received data\n"); break; + } list->buffer[0].address = pci_map_single(priv->pci_dev, skb->data, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index e5cb723..d3fb97d 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -628,7 +628,6 @@ void macvlan_common_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); - dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; dev->destructor = free_netdev; dev->header_ops = &macvlan_hard_header_ops, diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 21a942c..6989ebe 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -630,7 +630,6 @@ static int netconsole_netdev_event(struct notifier_block *this, goto done; spin_lock_irqsave(&target_list_lock, flags); -restart: list_for_each_entry(nt, &target_list, list) { netconsole_target_get(nt); if (nt->np.dev == dev) { @@ -643,17 +642,15 @@ restart: case NETDEV_UNREGISTER: /* * rtnl_lock already held - * we might sleep in __netpoll_cleanup() */ - spin_unlock_irqrestore(&target_list_lock, flags); - __netpoll_cleanup(&nt->np); - spin_lock_irqsave(&target_list_lock, flags); - dev_put(nt->np.dev); - nt->np.dev = 
NULL; + if (nt->np.dev) { + __netpoll_cleanup(&nt->np); + dev_put(nt->np.dev); + nt->np.dev = NULL; + } nt->enabled = 0; stopped = true; - netconsole_target_put(nt); - goto restart; + break; } } netconsole_target_put(nt); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 508570e..0b2706a 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1058,15 +1058,7 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) return stats64; } -static struct lock_class_key ppp_tx_busylock; -static int ppp_dev_init(struct net_device *dev) -{ - dev->qdisc_tx_busylock = &ppp_tx_busylock; - return 0; -} - static const struct net_device_ops ppp_netdev_ops = { - .ndo_init = ppp_dev_init, .ndo_start_xmit = ppp_start_xmit, .ndo_do_ioctl = ppp_net_ioctl, .ndo_get_stats64 = ppp_get_stats64, diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 7ae06a7..d8b9b1e 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -174,7 +174,11 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) unsigned long flags; int add_num = 1; - spin_lock_irqsave(&rnet->tx_lock, flags); + local_irq_save(flags); + if (!spin_trylock(&rnet->tx_lock)) { + local_irq_restore(flags); + return NETDEV_TX_LOCKED; + } if (is_multicast_ether_addr(eth->h_dest)) add_num = nets[rnet->mport->id].nact; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 8efe47a..ad86660 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1139,8 +1139,6 @@ static int team_port_del(struct team *team, struct net_device *port_dev) netdev_set_master(port_dev, NULL); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); dev_close(port_dev); team_port_leave(team, port); team_port_set_orig_dev_addr(port); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cb95fe5..2917a86 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -748,8 +748,6 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; skb_orphan(skb); - nf_reset(skb); - /* Enqueue packet */ skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 6bd9167..248d2dc 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -68,9 +68,18 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) struct cdc_ncm_ctx *ctx; struct usb_driver *subdriver = ERR_PTR(-ENODEV); int ret = -ENODEV; - u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf); + u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM; struct cdc_mbim_state *info = (void *)&dev->data; + /* see if interface supports MBIM alternate setting */ + if (intf->num_altsetting == 2) { + if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) + usb_set_interface(dev->udev, + intf->cur_altsetting->desc.bInterfaceNumber, + CDC_NCM_COMM_ALTSETTING_MBIM); + data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM; + } + /* Probably NCM, defer for cdc_ncm_bind */ if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) goto err; @@ -134,7 +143,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb goto error; if (skb) { - if (skb->len <= ETH_HLEN) + if (skb->len <= sizeof(ETH_HLEN)) goto error; /* mapping VLANs to MBIM sessions: diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 70fb846..00d3b2d 100644 --- a/drivers/net/usb/cdc_ncm.c +++ 
b/drivers/net/usb/cdc_ncm.c @@ -55,14 +55,6 @@ #define DRIVER_VERSION "14-Mar-2012" -#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) -static bool prefer_mbim = true; -#else -static bool prefer_mbim; -#endif -module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions"); - static void cdc_ncm_txpath_bh(unsigned long param); static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); @@ -558,12 +550,9 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) } EXPORT_SYMBOL_GPL(cdc_ncm_unbind); -/* Select the MBIM altsetting iff it is preferred and available, - * returning the number of the corresponding data interface altsetting - */ -u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) +static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) { - struct usb_host_interface *alt; + int ret; /* The MBIM spec defines a NCM compatible default altsetting, * which we may have matched: @@ -579,27 +568,18 @@ u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) * endpoint descriptors, shall be constructed according to * the rules given in section 6 (USB Device Model) of this * specification." + * + * Do not bind to such interfaces, allowing cdc_mbim to handle + * them */ - if (prefer_mbim && intf->num_altsetting == 2) { - alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); - if (alt && cdc_ncm_comm_intf_is_mbim(alt) && - !usb_set_interface(dev->udev, - intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_MBIM)) - return CDC_NCM_DATA_ALTSETTING_MBIM; - } - return CDC_NCM_DATA_ALTSETTING_NCM; -} -EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); - -static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) -{ - int ret; - - /* MBIM backwards compatible function? */ - cdc_ncm_select_altsetting(dev, intf); - if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) +#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) + if ((intf->num_altsetting == 2) && + !usb_set_interface(dev->udev, + intf->cur_altsetting->desc.bInterfaceNumber, + CDC_NCM_COMM_ALTSETTING_MBIM) && + cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) return -ENODEV; +#endif /* NCM data altsetting is always 1 */ ret = cdc_ncm_bind_common(dev, intf, 1); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index b69ca0f..19d9035 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -139,9 +139,16 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); - /* set up initial state */ - info->control = intf; - info->data = intf; + /* control and data is shared? 
*/ + if (intf->cur_altsetting->desc.bNumEndpoints == 3) { + info->control = intf; + info->data = intf; + goto shared; + } + + /* else require a single interrupt status endpoint on control intf */ + if (intf->cur_altsetting->desc.bNumEndpoints != 1) + goto err; /* and a number of CDC descriptors */ while (len > 3) { @@ -200,14 +207,25 @@ next_desc: buf += h->bLength; } - /* Use separate control and data interfaces if we found a CDC Union */ - if (cdc_union) { - info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); - if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) { - dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n", - cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0); - goto err; - } + /* did we find all the required ones? */ + if (!(found & (1 << USB_CDC_HEADER_TYPE)) || + !(found & (1 << USB_CDC_UNION_TYPE))) { + dev_err(&intf->dev, "CDC functional descriptors missing\n"); + goto err; + } + + /* verify CDC Union */ + if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) { + dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0); + goto err; + } + + /* need to save these for unbind */ + info->control = intf; + info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); + if (!info->data) { + dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0); + goto err; } /* errors aren't fatal - we can live with the dynamic address */ @@ -217,12 +235,11 @@ next_desc: } /* claim data interface and set it up */ - if (info->control != info->data) { - status = usb_driver_claim_interface(driver, info->data, dev); - if (status < 0) - goto err; - } + status = usb_driver_claim_interface(driver, info->data, dev); + if (status < 0) + goto err; +shared: status = qmi_wwan_register_subdriver(dev); if (status < 0 && info->control != info->data) { usb_set_intfdata(info->data, NULL); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 937c09d..251a335 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -914,12 +914,8 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) { struct usbnet *dev = netdev_priv(netdev); - int ret; - - if (new_mtu > MAX_SINGLE_PACKET_SIZE) - return -EINVAL; - ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); + int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac rx frame length\n"); return ret; @@ -1328,7 +1324,7 @@ static int smsc75xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); - ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); + ret = smsc75xx_set_rx_max_frame_length(dev, 1514); if (ret < 0) { netdev_warn(dev->net, "Failed to set max rx frame length\n"); return ret; @@ -2140,8 +2136,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) dev->net->stats.rx_frame_errors++; } else { - /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ - if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { + /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ + if (unlikely(size > (ETH_FRAME_LEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 6214181..9b73670 100644 --- a/drivers/net/usb/smsc95xx.c +++ 
b/drivers/net/usb/smsc95xx.c @@ -1340,8 +1340,6 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev) ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); if (ret < 0) netdev_warn(dev->net, "Error reading PM_CTRL\n"); - else - ret = 0; return ret; } @@ -1394,8 +1392,6 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev) ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); if (ret < 0) netdev_warn(dev->net, "Error writing PM_CTRL\n"); - else - ret = 0; return ret; } @@ -1417,8 +1413,6 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev) ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); if (ret < 0) netdev_warn(dev->net, "Error writing PM_CTRL\n"); - else - ret = 0; return ret; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6993bfa..656230e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1491,15 +1491,6 @@ static __net_init int vxlan_init_net(struct net *net) static __net_exit void vxlan_exit_net(struct net *net) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); - struct vxlan_dev *vxlan; - struct hlist_node *pos; - unsigned h; - - rtnl_lock(); - for (h = 0; h < VNI_HASH_SIZE; ++h) - hlist_for_each_entry(vxlan, pos, &vn->vni_list[h], hlist) - dev_close(vxlan->dev); - rtnl_unlock(); if (vn->sock) { sk_release_kernel(vn->sock->sk); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index e99f481..56317b0 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -976,7 +976,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, AR_PHY_CL_TAB_1, AR_PHY_CL_TAB_2 }; - /* Use chip chainmask only for calibration */ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); if (rtt) { @@ -1132,9 +1131,6 @@ skip_tx_iqcal: ar9003_hw_rtt_disable(ah); } - /* Revert chainmask to runtime parameters */ - ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); - /* Initialize list pointers */ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; ah->supp_cals = IQ_MISMATCH_CAL; diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index c00c13a..6e1915a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = { {0x00008258, 0x00000000}, {0x0000825c, 0x40000000}, {0x00008260, 0x00080922}, - {0x00008264, 0x9d400010}, + {0x00008264, 0x9bc00010}, {0x00008268, 0xffffffff}, {0x0000826c, 0x0000ffff}, {0x00008270, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index 050ca4a..5f845be 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -27,7 +27,7 @@ #define WME_MAX_BA WME_BA_BMP_SIZE #define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) -#define ATH_RSSI_DUMMY_MARKER 127 +#define ATH_RSSI_DUMMY_MARKER 0x127 #define ATH_RSSI_LPF_LEN 10 #define RSSI_LPF_THRESHOLD -20 #define ATH_RSSI_EP_MULTIPLIER (1<<7) diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index d3b099d..96bfb18 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 0663653..05d5ba6 100644 --- 
a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) * required version. */ if (priv->fw_version_major != MAJOR_VERSION_REQ || - priv->fw_version_minor < MINOR_VERSION_REQ) { + priv->fw_version_minor != MINOR_VERSION_REQ) { dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", MAJOR_VERSION_REQ, MINOR_VERSION_REQ); return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 8788621..b6a5a08 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -1067,19 +1067,15 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, last_rssi = priv->rx.last_rssi; - if (ieee80211_is_beacon(hdr->frame_control) && - !is_zero_ether_addr(common->curbssid) && - ether_addr_equal(hdr->addr3, common->curbssid)) { - s8 rssi = rxbuf->rxstatus.rs_rssi; + if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) + rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi, + ATH_RSSI_EP_MULTIPLIER); - if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) - rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); + if (rxbuf->rxstatus.rs_rssi < 0) + rxbuf->rxstatus.rs_rssi = 0; - if (rssi < 0) - rssi = 0; - - priv->ah->stats.avgbrssi = rssi; - } + if (ieee80211_is_beacon(fc)) + priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi; rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp); rx_status->band = hw->conf.channel->band; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index e26f92d..7cb7870 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1480,9 +1480,7 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah, reset_type = ATH9K_RESET_POWER_ON; else reset_type = ATH9K_RESET_COLD; - } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) || - (REG_READ(ah, AR_CR) & AR_CR_RXE)) - reset_type = ATH9K_RESET_COLD; + } if (!ath9k_hw_set_reset_reg(ah, reset_type)) return false; diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 7fdac6c..ade3afb 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -28,21 +28,21 @@ void ath_tx_complete_poll_work(struct work_struct *work) int i; bool needreset = false; - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - txq = sc->tx.txq_map[i]; - - ath_txq_lock(sc, txq); - if (txq->axq_depth) { - if (txq->axq_tx_inprogress) { - needreset = true; - ath_txq_unlock(sc, txq); - break; - } else { - txq->axq_tx_inprogress = true; + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) + if (ATH_TXQ_SETUP(sc, i)) { + txq = &sc->tx.txq[i]; + ath_txq_lock(sc, txq); + if (txq->axq_depth) { + if (txq->axq_tx_inprogress) { + needreset = true; + ath_txq_unlock(sc, txq); + break; + } else { + txq->axq_tx_inprogress = true; + } } + ath_txq_unlock_complete(sc, txq); } - ath_txq_unlock_complete(sc, txq); - } if (needreset) { ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, @@ -170,8 +170,7 @@ void ath_rx_poll(unsigned long data) { struct ath_softc *sc = (struct ath_softc *)data; - if (!test_bit(SC_OP_INVALID, &sc->sc_flags)) - ieee80211_queue_work(sc->hw, &sc->hw_check_work); + ieee80211_queue_work(sc->hw, &sc->hw_check_work); } /* diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 1221469..38bc5a7 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c 
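The b43/dma.c hunk that follows reverts the out-of-order TX-status recovery: the removed lines noticed when a status report pointed exactly two slots past the first used slot, assumed a single header/data descriptor pair had been missed, and reclaimed the missed pair with a zeroed (not-sent) status instead of discarding the report outright. A minimal standalone sketch of that idea; the names here (tx_ring, classify_report) are hypothetical, not the driver's own:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_SLOTS 256

struct txstat {
        int frame_count;        /* 0 means "frame was never sent" */
        bool acked;
};

struct tx_ring {
        int current_slot;       /* last slot handed to hardware */
        int used_slots;         /* descriptors not yet reclaimed */
};

static int next_slot(int slot)
{
        return (slot + 1) % NR_SLOTS;
}

/*
 * Decide how to treat a status report for *slot: 0 = drop it,
 * 1 = in order, 2 = one header/data pair was skipped; in that case
 * rewind *slot to the first used slot and report the pair as unsent.
 */
static int classify_report(const struct tx_ring *ring, int *slot,
                           struct txstat *stat)
{
        int firstused = ring->current_slot - ring->used_slots + 1;

        if (firstused < 0)
                firstused += NR_SLOTS;          /* ring wrapped around */

        if (*slot == firstused)
                return 1;

        if (*slot == next_slot(next_slot(firstused))) {
                memset(stat, 0, sizeof(*stat)); /* zeroed status: not sent */
                *slot = firstused;
                return 2;
        }

        return 0;       /* further out of order: firmware bug, drop */
}

int main(void)
{
        struct tx_ring ring = { .current_slot = 10, .used_slots = 3 };
        struct txstat st = { .frame_count = 1, .acked = true };
        int slot = 10;  /* two past firstused (8): triggers the skip path */

        printf("action=%d slot=%d frame_count=%d\n",
               classify_report(&ring, &slot, &st), slot, st.frame_count);
        return 0;
}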
@@ -1487,12 +1487,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_dma_ops *ops; struct b43_dmaring *ring; struct b43_dmadesc_meta *meta; - static const struct b43_txstatus fake; /* filled with 0 */ - const struct b43_txstatus *txstat; int slot, firstused; bool frame_succeed; - int skip; - static u8 err_out1, err_out2; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) @@ -1505,36 +1501,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, firstused = ring->current_slot - ring->used_slots + 1; if (firstused < 0) firstused = ring->nr_slots + firstused; - - skip = 0; if (unlikely(slot != firstused)) { /* This possibly is a firmware bug and will result in - * malfunction, memory leaks and/or stall of DMA functionality. - */ - if (slot == next_slot(ring, next_slot(ring, firstused))) { - /* If a single header/data pair was missed, skip over - * the first two slots in an attempt to recover. - */ - slot = firstused; - skip = 2; - if (!err_out1) { - /* Report the error once. */ - b43dbg(dev->wl, - "Skip on DMA ring %d slot %d.\n", - ring->index, slot); - err_out1 = 1; - } - } else { - /* More than a single header/data pair were missed. - * Report this error once. - */ - if (!err_out2) - b43dbg(dev->wl, - "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", - ring->index, firstused, slot); - err_out2 = 1; - return; - } + * malfunction, memory leaks and/or stall of DMA functionality. */ + b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " + "Expected %d, but got %d\n", + ring->index, firstused, slot); + return; } ops = ring->ops; @@ -1549,13 +1522,11 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, slot, firstused, ring->index); break; } - if (meta->skb) { struct b43_private_tx_info *priv_info = - b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); + b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); - unmap_descbuffer(ring, meta->dmaaddr, - meta->skb->len, 1); + unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); kfree(priv_info->bouncebuffer); priv_info->bouncebuffer = NULL; } else { @@ -1567,9 +1538,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, struct ieee80211_tx_info *info; if (unlikely(!meta->skb)) { - /* This is a scatter-gather fragment of a frame, - * so the skb pointer must not be NULL. - */ + /* This is a scatter-gather fragment of a frame, so + * the skb pointer must not be NULL. */ b43dbg(dev->wl, "TX status unexpected NULL skb " "at slot %d (first=%d) on ring %d\n", slot, firstused, ring->index); @@ -1580,18 +1550,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, /* * Call back to inform the ieee80211 subsystem about - * the status of the transmission. When skipping over - * a missed TX status report, use a status structure - * filled with zeros to indicate that the frame was not - * sent (frame_count 0) and not acknowledged + * the status of the transmission. */ - if (unlikely(skip)) - txstat = &fake; - else - txstat = status; - - frame_succeed = b43_fill_txstatus_report(dev, info, - txstat); + frame_succeed = b43_fill_txstatus_report(dev, info, status); #ifdef CONFIG_B43_DEBUG if (frame_succeed) ring->nr_succeed_tx_packets++; @@ -1619,14 +1580,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, /* Everything unmapped and free'd. So it's not used anymore. */ ring->used_slots--; - if (meta->is_last_fragment && !skip) { + if (meta->is_last_fragment) { /* This is the last scatter-gather * fragment of the frame. We are done. 
*/ break; } slot = next_slot(ring, slot); - if (skip > 0) - --skip; } if (ring->stopped) { B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index 9fdd198..315b96e 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h @@ -169,7 +169,7 @@ struct b43_dmadesc_generic { /* DMA engine tuning knobs */ #define B43_TXRING_SLOTS 256 -#define B43_RXRING_SLOTS 256 +#define B43_RXRING_SLOTS 64 #define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN) #define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN) diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 0568273..806e34c 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -4214,6 +4214,7 @@ redo: mutex_unlock(&wl->mutex); cancel_delayed_work_sync(&dev->periodic_work); cancel_work_sync(&wl->tx_work); + cancel_work_sync(&wl->firmware_load); mutex_lock(&wl->mutex); dev = wl->current_dev; if (!dev || b43_status(dev) < B43_STAT_STARTED) { @@ -5433,7 +5434,6 @@ static void b43_bcma_remove(struct bcma_device *core) /* We must cancel any work here before unregistering from ieee80211, * as the ieee80211 unreg will destroy the workqueue. */ cancel_work_sync(&wldev->restart_work); - cancel_work_sync(&wl->firmware_load); B43_WARN_ON(!wl); if (!wldev->fw.ucode.data) @@ -5510,7 +5510,6 @@ static void b43_ssb_remove(struct ssb_device *sdev) /* We must cancel any work here before unregistering from ieee80211, * as the ieee80211 unreg will destroy the workqueue. */ cancel_work_sync(&wldev->restart_work); - cancel_work_sync(&wl->firmware_load); B43_WARN_ON(!wl); if (!wldev->fw.ucode.data) diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index b70f220..3c35382 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) u16 clip_off[2] = { 0xFFFF, 0xFFFF }; u8 vcm_final = 0; - s32 offset[4]; + s8 offset[4]; s32 results[8][4] = { }; s32 results_min[4] = { }; s32 poll_results[4] = { }; @@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) } for (i = 0; i < 4; i += 2) { s32 curr; - s32 mind = 0x100000; + s32 mind = 40; s32 minpoll = 249; u8 minvcm = 0; if (2 * core != i) @@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) u8 regs_save_radio[2]; u16 regs_save_phy[2]; - s32 offset[4]; + s8 offset[4]; u8 core; u8 rail; @@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) } for (i = 0; i < 4; i++) { - s32 mind = 0x100000; + s32 mind = 40; u8 minvcm = 0; s32 minpoll = 249; s32 curr; @@ -5165,8 +5165,7 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: - ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco, - avoid); + /* FIXME */ break; #endif } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 18d3764..21a8242 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, gain0_15 = ((biq1 & 0xf) << 12) | ((tia & 0xf) << 8) | ((lna2 & 0x3) << 6) | - ((lna2 & - 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); + ((lna2 & 
0x3) << 4) | + ((lna1 & 0x3) << 2) | + ((lna1 & 0x3) << 0); mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); @@ -1156,6 +1157,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, } mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); + mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); + mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3); } @@ -1328,6 +1331,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples) return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; } +static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, + u16 tia_gain, u16 lna2_gain) +{ + u32 i_thresh_l, q_thresh_l; + u32 i_thresh_h, q_thresh_h; + struct lcnphy_iq_est iq_est_h, iq_est_l; + + wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, + lna2_gain, 0); + + wlc_lcnphy_rx_gain_override_enable(pi, true); + wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0); + udelay(500); + write_radio_reg(pi, RADIO_2064_REG112, 0); + if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) + return false; + + wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); + udelay(500); + write_radio_reg(pi, RADIO_2064_REG112, 0); + if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) + return false; + + i_thresh_l = (iq_est_l.i_pwr << 1); + i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; + + q_thresh_l = (iq_est_l.q_pwr << 1); + q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; + if ((iq_est_h.i_pwr > i_thresh_l) && + (iq_est_h.i_pwr < i_thresh_h) && + (iq_est_h.q_pwr > q_thresh_l) && + (iq_est_h.q_pwr < q_thresh_h)) + return true; + + return false; +} + static bool wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, const struct lcnphy_rx_iqcomp *iqcomp, @@ -1342,8 +1382,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, rfoverride3_old, rfoverride3val_old, rfoverride4_old, rfoverride4val_old, afectrlovr_old, afectrlovrval_old; - int tia_gain; - u32 received_power, rx_pwr_threshold; + int tia_gain, lna2_gain, biq1_gain; + bool set_gain; u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; u16 values_to_save[11]; s16 *ptr; @@ -1368,126 +1408,134 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, goto cal_done; } - if (module == 1) { + WARN_ON(module != 1); + tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); + wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); - tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); - wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); + for (i = 0; i < 11; i++) + values_to_save[i] = + read_radio_reg(pi, rxiq_cal_rf_reg[i]); + Core1TxControl_old = read_phy_reg(pi, 0x631); + + or_phy_reg(pi, 0x631, 0x0015); + + RFOverride0_old = read_phy_reg(pi, 0x44c); + RFOverrideVal0_old = read_phy_reg(pi, 0x44d); + rfoverride2_old = read_phy_reg(pi, 0x4b0); + rfoverride2val_old = read_phy_reg(pi, 0x4b1); + rfoverride3_old = read_phy_reg(pi, 0x4f9); + rfoverride3val_old = read_phy_reg(pi, 0x4fa); + rfoverride4_old = read_phy_reg(pi, 0x938); + rfoverride4val_old = read_phy_reg(pi, 0x939); + afectrlovr_old = read_phy_reg(pi, 0x43b); + afectrlovrval_old = read_phy_reg(pi, 0x43c); + old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); + old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); - for (i = 0; i < 11; i++) - values_to_save[i] = - read_radio_reg(pi, rxiq_cal_rf_reg[i]); - Core1TxControl_old = read_phy_reg(pi, 0x631); - - or_phy_reg(pi, 0x631, 0x0015); - - RFOverride0_old = read_phy_reg(pi, 0x44c); - RFOverrideVal0_old = read_phy_reg(pi, 0x44d); - rfoverride2_old = 
read_phy_reg(pi, 0x4b0); - rfoverride2val_old = read_phy_reg(pi, 0x4b1); - rfoverride3_old = read_phy_reg(pi, 0x4f9); - rfoverride3val_old = read_phy_reg(pi, 0x4fa); - rfoverride4_old = read_phy_reg(pi, 0x938); - rfoverride4val_old = read_phy_reg(pi, 0x939); - afectrlovr_old = read_phy_reg(pi, 0x43b); - afectrlovrval_old = read_phy_reg(pi, 0x43c); - old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); - old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); - - tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); - if (tx_gain_override_old) { - wlc_lcnphy_get_tx_gain(pi, &old_gains); - tx_gain_index_old = pi_lcn->lcnphy_current_index; - } + tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); + if (tx_gain_override_old) { + wlc_lcnphy_get_tx_gain(pi, &old_gains); + tx_gain_index_old = pi_lcn->lcnphy_current_index; + } - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); - mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); + mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); - mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); + mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); - write_radio_reg(pi, RADIO_2064_REG116, 0x06); - write_radio_reg(pi, RADIO_2064_REG12C, 0x07); - write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); - write_radio_reg(pi, RADIO_2064_REG098, 0x03); - write_radio_reg(pi, RADIO_2064_REG00B, 0x7); - mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); - write_radio_reg(pi, RADIO_2064_REG01D, 0x01); - write_radio_reg(pi, RADIO_2064_REG114, 0x01); - write_radio_reg(pi, RADIO_2064_REG02E, 0x10); - write_radio_reg(pi, RADIO_2064_REG12A, 0x08); - - mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); - mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); - mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); - mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); - mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); - mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); - mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); - mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); - - mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); - mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); - - wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); - write_phy_reg(pi, 0x6da, 0xffff); - or_phy_reg(pi, 0x6db, 0x3); - wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); - wlc_lcnphy_rx_gain_override_enable(pi, true); - - tia_gain = 8; - rx_pwr_threshold = 950; - while (tia_gain > 0) { - tia_gain -= 1; - wlc_lcnphy_set_rx_gain_by_distribution(pi, - 0, 0, 2, 2, - (u16) - tia_gain, 1, 0); - udelay(500); + write_radio_reg(pi, RADIO_2064_REG116, 0x06); + write_radio_reg(pi, RADIO_2064_REG12C, 0x07); + write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); + write_radio_reg(pi, RADIO_2064_REG098, 0x03); + write_radio_reg(pi, RADIO_2064_REG00B, 0x7); + mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); + write_radio_reg(pi, RADIO_2064_REG01D, 0x01); + write_radio_reg(pi, RADIO_2064_REG114, 0x01); + write_radio_reg(pi, RADIO_2064_REG02E, 0x10); + write_radio_reg(pi, RADIO_2064_REG12A, 0x08); + + mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); + mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); + mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); + mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); + mod_phy_reg(pi, 0x938, 
(0x1 << 3), 1 << 3); + mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); + mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); + mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); - received_power = - wlc_lcnphy_measure_digital_power(pi, 2000); - if (received_power < rx_pwr_threshold) - break; + mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); + mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); + + write_phy_reg(pi, 0x6da, 0xffff); + or_phy_reg(pi, 0x6db, 0x3); + + wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); + set_gain = false; + + lna2_gain = 3; + while ((lna2_gain >= 0) && !set_gain) { + tia_gain = 4; + + while ((tia_gain >= 0) && !set_gain) { + biq1_gain = 6; + + while ((biq1_gain >= 0) && !set_gain) { + set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, + (u16) + biq1_gain, + (u16) + tia_gain, + (u16) + lna2_gain); + biq1_gain -= 1; + } + tia_gain -= 1; } - result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); + lna2_gain -= 1; + } - wlc_lcnphy_stop_tx_tone(pi); + if (set_gain) + result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024); + else + result = false; - write_phy_reg(pi, 0x631, Core1TxControl_old); + wlc_lcnphy_stop_tx_tone(pi); - write_phy_reg(pi, 0x44c, RFOverrideVal0_old); - write_phy_reg(pi, 0x44d, RFOverrideVal0_old); - write_phy_reg(pi, 0x4b0, rfoverride2_old); - write_phy_reg(pi, 0x4b1, rfoverride2val_old); - write_phy_reg(pi, 0x4f9, rfoverride3_old); - write_phy_reg(pi, 0x4fa, rfoverride3val_old); - write_phy_reg(pi, 0x938, rfoverride4_old); - write_phy_reg(pi, 0x939, rfoverride4val_old); - write_phy_reg(pi, 0x43b, afectrlovr_old); - write_phy_reg(pi, 0x43c, afectrlovrval_old); - write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); - write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); + write_phy_reg(pi, 0x631, Core1TxControl_old); + + write_phy_reg(pi, 0x44c, RFOverrideVal0_old); + write_phy_reg(pi, 0x44d, RFOverrideVal0_old); + write_phy_reg(pi, 0x4b0, rfoverride2_old); + write_phy_reg(pi, 0x4b1, rfoverride2val_old); + write_phy_reg(pi, 0x4f9, rfoverride3_old); + write_phy_reg(pi, 0x4fa, rfoverride3val_old); + write_phy_reg(pi, 0x938, rfoverride4_old); + write_phy_reg(pi, 0x939, rfoverride4val_old); + write_phy_reg(pi, 0x43b, afectrlovr_old); + write_phy_reg(pi, 0x43c, afectrlovrval_old); + write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); + write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); - wlc_lcnphy_clear_trsw_override(pi); + wlc_lcnphy_clear_trsw_override(pi); - mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); + mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); - for (i = 0; i < 11; i++) - write_radio_reg(pi, rxiq_cal_rf_reg[i], - values_to_save[i]); + for (i = 0; i < 11; i++) + write_radio_reg(pi, rxiq_cal_rf_reg[i], + values_to_save[i]); - if (tx_gain_override_old) - wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); - else - wlc_lcnphy_disable_tx_gain_override(pi); + if (tx_gain_override_old) + wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); + else + wlc_lcnphy_disable_tx_gain_override(pi); - wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); - wlc_lcnphy_rx_gain_override_enable(pi, false); - } + wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); + wlc_lcnphy_rx_gain_override_enable(pi, false); cal_done: kfree(ptr); @@ -1781,6 +1829,17 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel) write_radio_reg(pi, RADIO_2064_REG038, 3); write_radio_reg(pi, RADIO_2064_REG091, 7); } + + if (!(pi->sh->boardflags & BFL_FEM)) { + u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc, + 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0}; + + write_radio_reg(pi, RADIO_2064_REG02A, 0xf); + write_radio_reg(pi, RADIO_2064_REG091, 
0x3); + write_radio_reg(pi, RADIO_2064_REG038, 0x3); + + write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); + } } static int @@ -1975,6 +2034,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos) } else { mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); + mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); + mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); + mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); + mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); + mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); + mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); + mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); + mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); + mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); + mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4); } } else { mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); @@ -2061,12 +2130,14 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi) (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); + mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0)); } static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) { struct phytbl_info tab; u32 rfseq, ind; + u8 tssi_sel; tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; @@ -2088,7 +2159,13 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); - wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); + if (pi->sh->boardflags & BFL_FEM) { + tssi_sel = 0x1; + wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); + } else { + tssi_sel = 0xe; + wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA); + } mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); @@ -2124,9 +2201,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { - mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); + mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); } else { + mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1); mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); } @@ -2173,6 +2251,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); + mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); + mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); + mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); + wlc_lcnphy_pwrctrl_rssiparams(pi); } @@ -2791,6 +2873,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) read_radio_reg(pi, RADIO_2064_REG007) & 1; u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; + u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); + idleTssi = read_phy_reg(pi, 0x4ab); suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)); @@ -2808,6 +2892,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); wlc_lcnphy_tssi_setup(pi); + + mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); + mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); + + wlc_lcnphy_set_bbmult(pi, 0x0); + wlc_phy_do_dummy_tx(pi, true, OFF); idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) >> 0); @@ -2829,6 +2919,7 @@ static void wlc_lcnphy_idle_tssi_est(struct 
brcms_phy_pub *ppi) mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); + wlc_lcnphy_set_bbmult(pi, SAVE_bbmult); wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); wlc_lcnphy_set_tx_gain(pi, &old_gains); wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); @@ -3042,6 +3133,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) wlc_lcnphy_write_table(pi, &tab); tab.tbl_offset++; } + mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); + mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); + mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); + mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); + mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2); mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); @@ -3843,7 +3939,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) target_gains.pad_gain = 21; target_gains.dac_gain = 0; wlc_lcnphy_set_tx_gain(pi, &target_gains); - wlc_lcnphy_set_tx_pwr_by_index(pi, 16); if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { @@ -3854,6 +3949,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) lcnphy_recal ? LCNPHY_CAL_RECAL : LCNPHY_CAL_FULL), false); } else { + wlc_lcnphy_set_tx_pwr_by_index(pi, 16); wlc_lcnphy_tx_iqlo_soft_cal_full(pi); } @@ -4278,17 +4374,22 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi, if (CHSPEC_IS5G(pi->radio_chanspec)) pa_gain = 0x70; else - pa_gain = 0x70; + pa_gain = 0x60; if (pi->sh->boardflags & BFL_FEM) pa_gain = 0x10; + tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; tab.tbl_width = 32; tab.tbl_len = 1; tab.tbl_ptr = &val; for (j = 0; j < 128; j++) { - gm_gain = gain_table[j].gm; + if (pi->sh->boardflags & BFL_FEM) + gm_gain = gain_table[j].gm; + else + gm_gain = 15; + val = (((u32) pa_gain << 24) | (gain_table[j].pad << 16) | (gain_table[j].pga << 8) | gm_gain); @@ -4499,7 +4600,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) write_phy_reg(pi, 0x4ea, 0x4688); - mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); + if (pi->sh->boardflags & BFL_FEM) + mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); + else + mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); @@ -4510,6 +4614,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) wlc_lcnphy_rcal(pi); wlc_lcnphy_rc_cal(pi); + + if (!(pi->sh->boardflags & BFL_FEM)) { + write_radio_reg(pi, RADIO_2064_REG032, 0x6f); + write_radio_reg(pi, RADIO_2064_REG033, 0x19); + write_radio_reg(pi, RADIO_2064_REG039, 0xe); + } + } static void wlc_lcnphy_radio_init(struct brcms_phy *pi) @@ -4539,22 +4650,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) wlc_lcnphy_write_table(pi, &tab); } - tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; - tab.tbl_width = 16; - tab.tbl_ptr = &val; - tab.tbl_len = 1; - - val = 114; - tab.tbl_offset = 0; - wlc_lcnphy_write_table(pi, &tab); + if (!(pi->sh->boardflags & BFL_FEM)) { + tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; + tab.tbl_width = 16; + tab.tbl_ptr = &val; + tab.tbl_len = 1; - val = 130; - tab.tbl_offset = 1; - wlc_lcnphy_write_table(pi, &tab); + val = 150; + tab.tbl_offset = 0; + wlc_lcnphy_write_table(pi, &tab); - val = 6; - tab.tbl_offset = 8; - wlc_lcnphy_write_table(pi, &tab); + val = 220; + tab.tbl_offset = 1; + wlc_lcnphy_write_table(pi, &tab); + } if (CHSPEC_IS2G(pi->radio_chanspec)) { if (pi->sh->boardflags & BFL_FEM) @@ -4946,6 +5055,7 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec) wlc_lcnphy_load_tx_iir_filter(pi, true, 3); mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); + wlc_lcnphy_tssi_setup(pi); } void wlc_phy_detach_lcnphy(struct brcms_phy *pi) @@ -4984,8 +5094,7 @@ 
bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) return false; - if ((pi->sh->boardflags & BFL_FEM) && - (LCNREV_IS(pi->pubpi.phy_rev, 1))) { + if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { if (pi_lcn->lcnphy_tempsense_option == 3) { pi->hwpwrctrl = true; pi->hwpwrctrl_capable = true; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index 622c01c..b7e95ac 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c @@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = { }; static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { - 0x000a, 0x0009, - 0x0006, - 0x0005, 0x000a, - 0x0009, - 0x0006, 0x0005, - 0x000a, - 0x0009, 0x0006, - 0x0005, - 0x000a, 0x0009, - 0x0006, - 0x0005, 0x000a, - 0x0009, - 0x0006, 0x0005, - 0x000a, - 0x0009, 0x0006, - 0x0005, - 0x000a, 0x0009, - 0x0006, - 0x0005, 0x000a, - 0x0009, - 0x0006, 0x0005, - 0x000a, - 0x0009, 0x0006, - 0x0005, - 0x000a, 0x0009, - 0x0006, - 0x0005, 0x000a, - 0x0009, - 0x0006, 0x0005, - 0x000a, - 0x0009, 0x0006, - 0x0005, + 0x0009, 0x000a, + 0x0005, + 0x0006, 0x0009, + 0x000a, + 0x0005, 0x0006, + 0x0009, + 0x000a, 0x0005, + 0x0006, + 0x0009, 0x000a, + 0x0005, + 0x0006, 0x0009, + 0x000a, + 0x0005, 0x0006, + 0x0009, + 0x000a, 0x0005, + 0x0006, + 0x0009, 0x000a, + 0x0005, + 0x0006, 0x0009, + 0x000a, + 0x0005, 0x0006, + 0x0009, + 0x000a, 0x0005, + 0x0006, + 0x0009, 0x000a, + 0x0005, + 0x0006, 0x0009, + 0x000a, + 0x0005, 0x0006, + 0x0009, + 0x000a, 0x0005, + 0x0006, }; static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c index 2c056b1..5b9533e 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -2237,15 +2237,15 @@ static ssize_t iwl_dbgfs_log_event_read(struct file *file, size_t count, loff_t *ppos) { struct iwl_priv *priv = file->private_data; - char *buf = NULL; - ssize_t ret; + char *buf; + int pos = 0; + ssize_t ret = -ENOMEM; - ret = iwl_dump_nic_event_log(priv, true, &buf, true); - if (ret < 0) - goto err; - ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); -err: - kfree(buf); + ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + } return ret; } diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 0348f42..6ff4660 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1262,15 +1262,6 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) } /* - * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag - * in iwl_down but cancel the workers only later. - */ - if (!priv->ucode_loaded) { - IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); - return -EIO; - } - - /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. 
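The iwlwifi debugfs change above (iwl_dbgfs_log_event_read) moves between two shapes of the same read handler: a dump helper allocates a buffer and returns its length (or a negative error), and the handler copies a window of that buffer out and must free it on every path. A hedged userspace sketch of the goto-style variant being reverted away from; read_from_buffer() stands in for the kernel's simple_read_from_buffer(), and dump_event_log() is a hypothetical stand-in for iwl_dump_nic_event_log():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for simple_read_from_buffer(): copy a window of 'from'. */
static long read_from_buffer(char *to, size_t count, long *ppos,
                             const char *from, size_t available)
{
        size_t pos = (size_t)*ppos;

        if (pos >= available)
                return 0;
        if (count > available - pos)
                count = available - pos;
        memcpy(to, from + pos, count);
        *ppos = (long)(pos + count);
        return (long)count;
}

/* Hypothetical dump helper: allocates *buf, returns length or negative. */
static long dump_event_log(char **buf)
{
        const char sample[] = "event log: 3 entries\n";

        *buf = malloc(sizeof(sample));
        if (!*buf)
                return -12;     /* -ENOMEM */
        memcpy(*buf, sample, sizeof(sample));
        return (long)(sizeof(sample) - 1);
}

/* The read handler: a single exit path, buffer freed no matter what. */
static long log_event_read(char *user_buf, size_t count, long *ppos)
{
        char *buf = NULL;
        long ret;

        ret = dump_event_log(&buf);
        if (ret < 0)
                goto err;       /* buf is still NULL here */
        ret = read_from_buffer(user_buf, count, ppos, buf, (size_t)ret);
err:
        free(buf);              /* free(NULL) is a defined no-op */
        return ret;
}

int main(void)
{
        char out[64];
        long pos = 0;
        long n = log_event_read(out, sizeof(out) - 1, &pos);

        if (n >= 0) {
                out[n] = '\0';
                printf("%s", out);
        }
        return 0;
}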
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index a8632a4..bdba954 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -707,7 +707,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv, void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { struct iwl_addsta_cmd sta_cmd; - static const struct iwl_link_quality_cmd zero_lq = {}; struct iwl_link_quality_cmd lq; int i; bool found = false; @@ -746,9 +745,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) else memcpy(&lq, priv->stations[i].lq, sizeof(struct iwl_link_quality_cmd)); - - if (!memcmp(&lq, &zero_lq, sizeof(lq))) - send_lq = true; + send_lq = true; } spin_unlock_bh(&priv->sta_lock); ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 9b138b8..c6467e5 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -450,8 +450,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, return -EIO; } - priv->ucode_loaded = true; - /* * This step takes a long time (60-80ms!!) and * WoWLAN image should be loaded quickly, so @@ -476,6 +474,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, return ret; } + priv->ucode_loaded = true; + return 0; } diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index c85eb37..dc7e26b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -349,23 +349,25 @@ TRACE_EVENT(iwlwifi_dev_rx_data, TRACE_EVENT(iwlwifi_dev_hcmd, TP_PROTO(const struct device *dev, struct iwl_host_cmd *cmd, u16 total_size, - struct iwl_cmd_header *hdr), - TP_ARGS(dev, cmd, total_size, hdr), + const void *hdr, size_t hdr_len), + TP_ARGS(dev, cmd, total_size, hdr, hdr_len), TP_STRUCT__entry( DEV_ENTRY __dynamic_array(u8, hcmd, total_size) __field(u32, flags) ), TP_fast_assign( - int i, offset = sizeof(*hdr); + int i, offset = hdr_len; DEV_ASSIGN; __entry->flags = cmd->flags; - memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); + memcpy(__get_dynamic_array(hcmd), hdr, hdr_len); for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { if (!cmd->len[i]) continue; + if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) + continue; memcpy((u8 *)__get_dynamic_array(hcmd) + offset, cmd->data[i], cmd->len[i]); offset += cmd->len[i]; diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index bc5e9ec..d91d2e8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -182,15 +182,6 @@ struct iwl_queue { #define TFD_TX_CMD_SLOTS 256 #define TFD_CMD_SLOTS 32 -/* - * The FH will write back to the first TB only, so we need - * to copy some data into the buffer regardless of whether - * it should be mapped or not. This indicates how much to - * copy, even for HCMDs it must be big enough to fit the - * DRAM scratch from the TX cmd, at least 16 bytes. 
- */ -#define IWL_HCMD_MIN_COPY_SIZE 16 - struct iwl_pcie_txq_entry { struct iwl_device_cmd *cmd; struct iwl_device_cmd *copy_cmd; diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index d760da9..6c5b867 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1131,12 +1131,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, void *dup_buf = NULL; dma_addr_t phys_addr; int idx; - u16 copy_size, cmd_size, dma_size; + u16 copy_size, cmd_size; bool had_nocopy = false; int i; u32 cmd_pos; - const u8 *cmddata[IWL_MAX_CMD_TFDS]; - u16 cmdlen[IWL_MAX_CMD_TFDS]; copy_size = sizeof(out_cmd->hdr); cmd_size = sizeof(out_cmd->hdr); @@ -1145,23 +1143,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { - cmddata[i] = cmd->data[i]; - cmdlen[i] = cmd->len[i]; - if (!cmd->len[i]) continue; - - /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ - if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { - int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; - - if (copy > cmdlen[i]) - copy = cmdlen[i]; - cmdlen[i] -= copy; - cmddata[i] += copy; - copy_size += copy; - } - if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { had_nocopy = true; if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { @@ -1181,7 +1164,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - dup_buf = kmemdup(cmddata[i], cmdlen[i], + dup_buf = kmemdup(cmd->data[i], cmd->len[i], GFP_ATOMIC); if (!dup_buf) return -ENOMEM; @@ -1191,7 +1174,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, idx = -EINVAL; goto free_dup_buf; } - copy_size += cmdlen[i]; + copy_size += cmd->len[i]; } cmd_size += cmd->len[i]; } @@ -1238,31 +1221,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* and copy the data that needs to be copied */ cmd_pos = offsetof(struct iwl_device_cmd, payload); - copy_size = sizeof(out_cmd->hdr); for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { - int copy = 0; - if (!cmd->len[i]) continue; - - /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ - if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { - copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; - - if (copy > cmd->len[i]) - copy = cmd->len[i]; - } - - /* copy everything if not nocopy/dup */ - if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | - IWL_HCMD_DFL_DUP))) - copy = cmd->len[i]; - - if (copy) { - memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); - cmd_pos += copy; - copy_size += copy; - } + if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | + IWL_HCMD_DFL_DUP)) + break; + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]); + cmd_pos += cmd->len[i]; } WARN_ON_ONCE(txq->entries[idx].copy_cmd); @@ -1288,14 +1254,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); - /* - * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must - * still map at least that many bytes for the hardware to write back to. - * We have enough space, so that's not a problem. 
- */ - dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE); - - phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size, + phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { idx = -ENOMEM; @@ -1303,15 +1262,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } dma_unmap_addr_set(out_meta, mapping, phys_addr); - dma_unmap_len_set(out_meta, len, dma_size); + dma_unmap_len_set(out_meta, len, copy_size); iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); - /* map the remaining (adjusted) nocopy/dup fragments */ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { - const void *data = cmddata[i]; + const void *data = cmd->data[i]; - if (!cmdlen[i]) + if (!cmd->len[i]) continue; if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | IWL_HCMD_DFL_DUP))) @@ -1319,7 +1277,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) data = dup_buf; phys_addr = dma_map_single(trans->dev, (void *)data, - cmdlen[i], DMA_BIDIRECTIONAL); + cmd->len[i], DMA_BIDIRECTIONAL); if (dma_mapping_error(trans->dev, phys_addr)) { iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr], @@ -1328,7 +1286,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, goto out; } - iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0); + iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0); } out_meta->flags = cmd->flags; @@ -1338,7 +1296,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, txq->need_update = 1; - trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, + &out_cmd->hdr, copy_size); /* start timer if queue currently empty */ if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 4557833..739309e 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -825,11 +825,6 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card) sdio_release_host(func); - /* Set fw_ready before queuing any commands so that - * lbs_thread won't block from sending them to firmware. 
- */ - priv->fw_ready = 1; - /* * FUNC_INIT is required for SD8688 WLAN/BT multiple functions */ @@ -844,6 +839,7 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card) netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n"); } + priv->fw_ready = 1; wake_up(&card->pwron_waitq); if (!card->started) { diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 3eca710..cdb11b3 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1846,8 +1846,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, } } - for (i = 0; i < min_t(u32, request->n_channels, - MWIFIEX_USER_SCAN_CHAN_MAX); i++) { + for (i = 0; i < request->n_channels; i++) { chan = request->channels[i]; priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; priv->user_scan_cfg->chan_list[i].radio_type = chan->band; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index bc9a402..5f438e6 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -156,20 +156,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, return -1; } - cmd_code = le16_to_cpu(host_cmd->command); - cmd_size = le16_to_cpu(host_cmd->size); - - if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET && - cmd_code != HostCmd_CMD_FUNC_SHUTDOWN && - cmd_code != HostCmd_CMD_FUNC_INIT) { - dev_err(adapter->dev, - "DNLD_CMD: FW in reset state, ignore cmd %#x\n", - cmd_code); - mwifiex_complete_cmd(adapter, cmd_node); - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - return -1; - } - /* Set command sequence number */ adapter->seq_num++; host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO @@ -181,6 +167,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, adapter->curr_cmd = cmd_node; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); + cmd_code = le16_to_cpu(host_cmd->command); + cmd_size = le16_to_cpu(host_cmd->size); + /* Adjust skb length */ if (cmd_node->cmd_skb->len > cmd_size) /* @@ -499,6 +488,8 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid, data_buf); + if (!ret) + ret = mwifiex_wait_queue_complete(adapter); return ret; } @@ -601,10 +592,9 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no, if (cmd_no == HostCmd_CMD_802_11_SCAN) { mwifiex_queue_scan_cmd(priv, cmd_node); } else { + adapter->cmd_queued = cmd_node; mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); queue_work(adapter->workqueue, &adapter->main_work); - if (cmd_node->wait_q_enabled) - ret = mwifiex_wait_queue_complete(adapter, cmd_node); } return ret; diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 78c3aa6..39f03ce 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -707,14 +707,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) return ret; } - /* cancel current command */ - if (adapter->curr_cmd) { - dev_warn(adapter->dev, "curr_cmd is still in processing\n"); - del_timer(&adapter->cmd_timer); - mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); - adapter->curr_cmd = NULL; - } - /* shut down mwifiex */ dev_dbg(adapter->dev, "info: shutdown mwifiex...\n"); diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 3473876..88664ae 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ 
-1092,9 +1092,10 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, adhoc_join->bss_descriptor.bssid, adhoc_join->bss_descriptor.ssid); - for (i = 0; i < MWIFIEX_SUPPORTED_RATES && - bss_desc->supported_rates[i]; i++) - ; + for (i = 0; bss_desc->supported_rates[i] && + i < MWIFIEX_SUPPORTED_RATES; + i++) + ; rates_size = i; /* Copy Data Rates from the Rates recorded in scan response */ diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index db39449..1b3cfc8 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -714,6 +714,7 @@ struct mwifiex_adapter { u16 cmd_wait_q_required; struct mwifiex_wait_queue cmd_wait_q; u8 scan_wait_q_woken; + struct cmd_ctrl_node *cmd_queued; spinlock_t queue_lock; /* lock for tx queues */ struct completion fw_load; u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; @@ -993,8 +994,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, struct mwifiex_multicast_list *mcast_list); int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, struct net_device *dev); -int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_queued); +int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter); int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, struct cfg80211_ssid *req_ssid); int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index b7a5387..b879e13 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -291,7 +291,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) i++; usleep_range(10, 20); /* 50ms max wait */ - if (i == 5000) + if (i == 50000) break; } @@ -1831,9 +1831,9 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) if (pdev) { pci_iounmap(pdev, card->pci_mmap); pci_iounmap(pdev, card->pci_mmap1); + + pci_release_regions(pdev); pci_disable_device(pdev); - pci_release_region(pdev, 2); - pci_release_region(pdev, 0); pci_set_drvdata(pdev, NULL); } } diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 771be26..973a9d9 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1366,15 +1366,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + adapter->cmd_queued = cmd_node; mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); queue_work(adapter->workqueue, &adapter->main_work); - - /* Perform internal scan synchronously */ - if (!priv->scan_request) { - dev_dbg(adapter->dev, "wait internal scan\n"); - mwifiex_wait_queue_complete(adapter, cmd_node); - } } else { spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); @@ -1770,12 +1765,7 @@ check_next_scan: /* Need to indicate IOCTL complete */ if (adapter->curr_cmd->wait_q_enabled) { adapter->cmd_wait_q.status = 0; - if (!priv->scan_request) { - dev_dbg(adapter->dev, - "complete internal scan\n"); - mwifiex_complete_cmd(adapter, - adapter->curr_cmd); - } + mwifiex_complete_cmd(adapter, adapter->curr_cmd); } if (priv->report_scan_result) priv->report_scan_result = false; @@ -1933,6 +1923,9 @@ int mwifiex_request_scan(struct mwifiex_private *priv, /* Normal scan */ ret = mwifiex_scan_networks(priv, NULL); + if (!ret) + ret = mwifiex_wait_queue_complete(priv->adapter); + 
up(&priv->async_sem); return ret; diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 1798bc7..f542bb8 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -54,10 +54,16 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, * This function waits on a cmd wait queue. It also cancels the pending * request after waking up, in case of errors. */ -int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_queued) +int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter) { int status; + struct cmd_ctrl_node *cmd_queued; + + if (!adapter->cmd_queued) + return 0; + + cmd_queued = adapter->cmd_queued; + adapter->cmd_queued = NULL; dev_dbg(adapter->dev, "cmd pending\n"); atomic_inc(&adapter->cmd_pending); diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 1f78585..800a165 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = { {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ + {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ - {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 0b55706..197b446 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -4386,8 +4386,6 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) if (!rt2x00_rt(rt2x00dev, RT5390) && !rt2x00_rt(rt2x00dev, RT5392)) { - u8 min_gain = rt2x00_rt(rt2x00dev, RT3070) ? 
1 : 2; - rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); if (rt2x00_rt(rt2x00dev, RT3070) || @@ -4398,10 +4396,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) &rt2x00dev->cap_flags)) rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); } - if (drv_data->txmixer_gain_24g >= min_gain) { - rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, - drv_data->txmixer_gain_24g); - } + rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, + drv_data->txmixer_gain_24g); rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); } diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index cdbfc30..44f8b3f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -1209,9 +1209,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) rt2x00dev->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | -#endif BIT(NL80211_IFTYPE_WDS); rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index b1c673e..a0c8cae 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -52,8 +52,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, udelay(REGISTER_BUSY_DELAY); } - printk_once(KERN_ERR "%s() Indirect register access failed: " - "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); + ERROR(rt2x00dev, "Indirect register access failed: " "offset=0x%.08x, value=0x%.08x\n", offset, *reg); *reg = ~0; return 0; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index c08d0f4..b1ccff4 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1377,57 +1377,74 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw) void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) { + /* dummy routine needed for callback from rtl_op_configure_filter() */ +} + +/*========================================================================== */ + +static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 filterout_non_associated_bssid = false; - if (rtlpriv->psc.rfpwr_state != ERFON) - return; - - if (check_bssid) { - u8 tmp; + switch (type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + filterout_non_associated_bssid = true; + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_AP: + default: + break; + } + if (filterout_non_associated_bssid) { if (IS_NORMAL_CHIP(rtlhal->version)) { - reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); - tmp = BIT(4); + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_RCR, (u8 *)(&reg_rcr)); + /* enable update TSF */ + _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); + break; + case IO_CMD_PAUSE_DM_BY_SCAN: + reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_RCR, (u8 *)(&reg_rcr)); + /* disable update TSF */ + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); + break; + } } else { - reg_rcr |= RCR_CBSSID; - tmp = BIT(4) | BIT(5);
+ reg_rcr |= (RCR_CBSSID); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(&reg_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5))); } - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, - (u8 *) (&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp); - } else { - u8 tmp; + } else if (filterout_non_associated_bssid == false) { if (IS_NORMAL_CHIP(rtlhal->version)) { - reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); - tmp = BIT(4); + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(&reg_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); } else { - reg_rcr &= ~RCR_CBSSID; - tmp = BIT(4) | BIT(5); + reg_rcr &= (~RCR_CBSSID); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(&reg_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0); } - reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_RCR, (u8 *) (&reg_rcr)); - _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0); } } -/*========================================================================== */ - int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) { - struct rtl_priv *rtlpriv = rtl_priv(hw); - if (_rtl92cu_set_media_status(hw, type)) return -EOPNOTSUPP; - - if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { - if (type != NL80211_IFTYPE_AP) - rtl92cu_set_check_bssid(hw, true); - } else { - rtl92cu_set_check_bssid(hw, false); - } - + _rtl92cu_set_check_bssid(hw, type); return 0; } @@ -2041,6 +2058,8 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, (shortgi_rate << 4) | (shortgi_rate); } rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", + rtl_read_dword(rtlpriv, REG_ARFR0)); } void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index b450931..b7e6607 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -285,7 +285,6 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)}, /* RTL8188CUS-VL */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)}, - {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)}, /* 8188 Combo for BC4 */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, @@ -364,15 +363,9 @@ static struct usb_device_id rtl8192c_usb_ids[] = { MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids); -static int rtl8192cu_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - return rtl_usb_probe(intf, id, &rtl92cu_hal_cfg); -} - static struct usb_driver rtl8192cu_driver = { .name = "rtl8192cu", - .probe = rtl8192cu_probe, + .probe = rtl_usb_probe, .disconnect = rtl_usb_disconnect, .id_table = rtl8192c_usb_ids, diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 82bc684..1535efd 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -42,12 +42,8 @@ static void usbctrl_async_callback(struct urb *urb) { - if (urb) { - /* free dr */ - kfree(urb->setup_packet); - /* free databuf */ - kfree(urb->transfer_buffer); - } + if (urb) + kfree(urb->context); } static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, @@ -59,47 +55,39 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, u8 reqtype; struct usb_ctrlrequest *dr; struct urb *urb; -
const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE; - u8 *databuf; - - if (WARN_ON_ONCE(len > databuf_maxlen)) - len = databuf_maxlen; + struct rtl819x_async_write_data { + u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE]; + struct usb_ctrlrequest dr; + } *buf; pipe = usb_sndctrlpipe(udev, 0); /* write_out */ reqtype = REALTEK_USB_VENQT_WRITE; - dr = kmalloc(sizeof(*dr), GFP_ATOMIC); - if (!dr) - return -ENOMEM; - - databuf = kmalloc(databuf_maxlen, GFP_ATOMIC); - if (!databuf) { - kfree(dr); + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) return -ENOMEM; - } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { - kfree(databuf); - kfree(dr); + kfree(buf); return -ENOMEM; } + dr = &buf->dr; + dr->bRequestType = reqtype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(len); /* data are already in little-endian order */ - memcpy(databuf, pdata, len); + memcpy(buf, pdata, len); usb_fill_control_urb(urb, udev, pipe, - (unsigned char *)dr, databuf, len, - usbctrl_async_callback, NULL); + (unsigned char *)dr, buf, len, + usbctrl_async_callback, buf); rc = usb_submit_urb(urb, GFP_ATOMIC); - if (rc < 0) { - kfree(databuf); - kfree(dr); - } + if (rc < 0) + kfree(buf); usb_free_urb(urb); return rc; } @@ -854,7 +842,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, if (unlikely(!_urb)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't allocate urb. Drop skb!\n"); - kfree_skb(skb); return; } urb_list = &rtlusb->tx_pending[ep_num]; @@ -954,8 +941,7 @@ static struct rtl_intf_ops rtl_usb_ops = { }; int rtl_usb_probe(struct usb_interface *intf, - const struct usb_device_id *id, - struct rtl_hal_cfg *rtl_hal_cfg) + const struct usb_device_id *id) { int err; struct ieee80211_hw *hw = NULL; @@ -990,7 +976,7 @@ int rtl_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, hw); /* init cfg & intf_ops */ rtlpriv->rtlhal.interface = INTF_USB; - rtlpriv->cfg = rtl_hal_cfg; + rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info); rtlpriv->intf_ops = &rtl_usb_ops; rtl_dbgp_flag_init(hw); /* Init IO handler */ diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h index fb986f9..5235136 100644 --- a/drivers/net/wireless/rtlwifi/usb.h +++ b/drivers/net/wireless/rtlwifi/usb.h @@ -157,8 +157,7 @@ struct rtl_usb_priv { int rtl_usb_probe(struct usb_interface *intf, - const struct usb_device_id *id, - struct rtl_hal_cfg *rtl92cu_hal_cfg); + const struct usb_device_id *id); void rtl_usb_disconnect(struct usb_interface *intf); int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message); int rtl_usb_resume(struct usb_interface *pusb_intf); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 221f426..b8c5193 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -132,7 +132,6 @@ static void xenvif_up(struct xenvif *vif) static void xenvif_down(struct xenvif *vif) { disable_irq(vif->irq); - del_timer_sync(&vif->credit_timeout); xen_netbk_deschedule_xenvif(vif); xen_netbk_remove_xenvif(vif); } @@ -364,6 +363,8 @@ void xenvif_disconnect(struct xenvif *vif) atomic_dec(&vif->refcnt); wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); + del_timer_sync(&vif->credit_timeout); + if (vif->irq) unbind_from_irqhandler(vif->irq, vif); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index cd49ba9..2b9520c 100644 --- a/drivers/net/xen-netback/netback.c +++ 
b/drivers/net/xen-netback/netback.c @@ -911,13 +911,13 @@ static int netbk_count_requests(struct xenvif *vif, if (frags >= work_to_do) { netdev_err(vif->dev, "Need more frags\n"); netbk_fatal_tx_err(vif); - return -ENODATA; + return -frags; } if (unlikely(frags >= MAX_SKB_FRAGS)) { netdev_err(vif->dev, "Too many frags\n"); netbk_fatal_tx_err(vif); - return -E2BIG; + return -frags; } memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), @@ -925,7 +925,7 @@ static int netbk_count_requests(struct xenvif *vif, if (txp->size > first->size) { netdev_err(vif->dev, "Frag is bigger than frame.\n"); netbk_fatal_tx_err(vif); - return -EIO; + return -frags; } first->size -= txp->size; @@ -935,7 +935,7 @@ static int netbk_count_requests(struct xenvif *vif, netdev_err(vif->dev, "txp->offset: %x, size: %u\n", txp->offset, txp->size); netbk_fatal_tx_err(vif); - return -EINVAL; + return -frags; } } while ((txp++)->flags & XEN_NETTXF_more_data); return frags; diff --git a/drivers/of/base.c b/drivers/of/base.c index ec2fd1f..2390ddb 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -55,7 +55,7 @@ static DEFINE_MUTEX(of_aliases_mutex); /* use when traversing tree through the allnext, child, sibling, * or parent members of struct device_node. */ -DEFINE_RAW_SPINLOCK(devtree_lock); +DEFINE_RWLOCK(devtree_lock); int of_n_addr_cells(struct device_node *np) { @@ -164,14 +164,16 @@ void of_node_put(struct device_node *node) EXPORT_SYMBOL(of_node_put); #endif /* CONFIG_OF_DYNAMIC */ -static struct property *__of_find_property(const struct device_node *np, - const char *name, int *lenp) +struct property *of_find_property(const struct device_node *np, + const char *name, + int *lenp) { struct property *pp; if (!np) return NULL; + read_lock(&devtree_lock); for (pp = np->properties; pp; pp = pp->next) { if (of_prop_cmp(pp->name, name) == 0) { if (lenp) @@ -179,20 +181,7 @@ static struct property *__of_find_property(const struct device_node *np, break; } } - - return pp; -} - -struct property *of_find_property(const struct device_node *np, - const char *name, - int *lenp) -{ - struct property *pp; - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); - pp = __of_find_property(np, name, lenp); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return pp; } @@ -210,13 +199,13 @@ struct device_node *of_find_all_nodes(struct device_node *prev) { struct device_node *np; - raw_spin_lock(&devtree_lock); + read_lock(&devtree_lock); np = prev ? prev->allnext : of_allnodes; for (; np != NULL; np = np->allnext) if (of_node_get(np)) break; of_node_put(prev); - raw_spin_unlock(&devtree_lock); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_all_nodes); @@ -225,20 +214,8 @@ EXPORT_SYMBOL(of_find_all_nodes); * Find a property with a given name for a given node * and return the value. */ -static const void *__of_get_property(const struct device_node *np, - const char *name, int *lenp) -{ - struct property *pp = __of_find_property(np, name, lenp); - - return pp ? pp->value : NULL; -} - -/* - * Find a property with a given name for a given node - * and return the value. 
- */ const void *of_get_property(const struct device_node *np, const char *name, - int *lenp) + int *lenp) { struct property *pp = of_find_property(np, name, lenp); @@ -249,13 +226,13 @@ EXPORT_SYMBOL(of_get_property); /** Checks if the given "compat" string matches one of the strings in * the device's "compatible" property */ -static int __of_device_is_compatible(const struct device_node *device, - const char *compat) +int of_device_is_compatible(const struct device_node *device, + const char *compat) { const char* cp; int cplen, l; - cp = __of_get_property(device, "compatible", &cplen); + cp = of_get_property(device, "compatible", &cplen); if (cp == NULL) return 0; while (cplen > 0) { @@ -268,21 +245,6 @@ static int __of_device_is_compatible(const struct device_node *device, return 0; } - -/** Checks if the given "compat" string matches one of the strings in - * the device's "compatible" property - */ -int of_device_is_compatible(const struct device_node *device, - const char *compat) -{ - unsigned long flags; - int res; - - raw_spin_lock_irqsave(&devtree_lock, flags); - res = __of_device_is_compatible(device, compat); - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return res; -} EXPORT_SYMBOL(of_device_is_compatible); /** @@ -307,19 +269,19 @@ int of_machine_is_compatible(const char *compat) EXPORT_SYMBOL(of_machine_is_compatible); /** - * __of_device_is_available - check if a device is available for use + * of_device_is_available - check if a device is available for use * - * @device: Node to check for availability, with locks already held + * @device: Node to check for availability * * Returns 1 if the status property is absent or set to "okay" or "ok", * 0 otherwise */ -static int __of_device_is_available(const struct device_node *device) +int of_device_is_available(const struct device_node *device) { const char *status; int statlen; - status = __of_get_property(device, "status", &statlen); + status = of_get_property(device, "status", &statlen); if (status == NULL) return 1; @@ -330,26 +292,6 @@ static int __of_device_is_available(const struct device_node *device) return 0; } - -/** - * of_device_is_available - check if a device is available for use - * - * @device: Node to check for availability - * - * Returns 1 if the status property is absent or set to "okay" or "ok", - * 0 otherwise - */ -int of_device_is_available(const struct device_node *device) -{ - unsigned long flags; - int res; - - raw_spin_lock_irqsave(&devtree_lock, flags); - res = __of_device_is_available(device); - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return res; - -} EXPORT_SYMBOL(of_device_is_available); /** @@ -362,14 +304,13 @@ EXPORT_SYMBOL(of_device_is_available); struct device_node *of_get_parent(const struct device_node *node) { struct device_node *np; - unsigned long flags; if (!node) return NULL; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); np = of_node_get(node->parent); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_get_parent); @@ -388,15 +329,14 @@ EXPORT_SYMBOL(of_get_parent); struct device_node *of_get_next_parent(struct device_node *node) { struct device_node *parent; - unsigned long flags; if (!node) return NULL; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); parent = of_node_get(node->parent); of_node_put(node); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return parent; } @@ -412,15 +352,14 @@ struct device_node 
*of_get_next_child(const struct device_node *node, struct device_node *prev) { struct device_node *next; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); next = prev ? prev->sibling : node->child; for (; next; next = next->sibling) if (of_node_get(next)) break; of_node_put(prev); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return next; } EXPORT_SYMBOL(of_get_next_child); @@ -438,16 +377,16 @@ struct device_node *of_get_next_available_child(const struct device_node *node, { struct device_node *next; - raw_spin_lock(&devtree_lock); + read_lock(&devtree_lock); next = prev ? prev->sibling : node->child; for (; next; next = next->sibling) { - if (!__of_device_is_available(next)) + if (!of_device_is_available(next)) continue; if (of_node_get(next)) break; } of_node_put(prev); - raw_spin_unlock(&devtree_lock); + read_unlock(&devtree_lock); return next; } EXPORT_SYMBOL(of_get_next_available_child); @@ -485,15 +424,14 @@ EXPORT_SYMBOL(of_get_child_by_name); struct device_node *of_find_node_by_path(const char *path) { struct device_node *np = of_allnodes; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); for (; np; np = np->allnext) { if (np->full_name && (of_node_cmp(np->full_name, path) == 0) && of_node_get(np)) break; } - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_path); @@ -513,16 +451,15 @@ struct device_node *of_find_node_by_name(struct device_node *from, const char *name) { struct device_node *np; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); np = from ? from->allnext : of_allnodes; for (; np; np = np->allnext) if (np->name && (of_node_cmp(np->name, name) == 0) && of_node_get(np)) break; of_node_put(from); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_name); @@ -543,16 +480,15 @@ struct device_node *of_find_node_by_type(struct device_node *from, const char *type) { struct device_node *np; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); np = from ? from->allnext : of_allnodes; for (; np; np = np->allnext) if (np->type && (of_node_cmp(np->type, type) == 0) && of_node_get(np)) break; of_node_put(from); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_type); @@ -575,20 +511,18 @@ struct device_node *of_find_compatible_node(struct device_node *from, const char *type, const char *compatible) { struct device_node *np; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); np = from ? from->allnext : of_allnodes; for (; np; np = np->allnext) { if (type && !(np->type && (of_node_cmp(np->type, type) == 0))) continue; - if (__of_device_is_compatible(np, compatible) && - of_node_get(np)) + if (of_device_is_compatible(np, compatible) && of_node_get(np)) break; } of_node_put(from); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_compatible_node); @@ -610,9 +544,8 @@ struct device_node *of_find_node_with_property(struct device_node *from, { struct device_node *np; struct property *pp; - unsigned long flags; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); np = from ? 
from->allnext : of_allnodes; for (; np; np = np->allnext) { for (pp = np->properties; pp; pp = pp->next) { @@ -624,14 +557,20 @@ struct device_node *of_find_node_with_property(struct device_node *from, } out: of_node_put(from); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_with_property); -static -const struct of_device_id *__of_match_node(const struct of_device_id *matches, - const struct device_node *node) +/** + * of_match_node - Tell if an device_node has a matching of_match structure + * @matches: array of of device match structures to search in + * @node: the of device structure to match against + * + * Low level utility function used by device matching. + */ +const struct of_device_id *of_match_node(const struct of_device_id *matches, + const struct device_node *node) { if (!matches) return NULL; @@ -645,33 +584,14 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches, match &= node->type && !strcmp(matches->type, node->type); if (matches->compatible[0]) - match &= __of_device_is_compatible(node, - matches->compatible); + match &= of_device_is_compatible(node, + matches->compatible); if (match) return matches; matches++; } return NULL; } - -/** - * of_match_node - Tell if an device_node has a matching of_match structure - * @matches: array of of device match structures to search in - * @node: the of device structure to match against - * - * Low level utility function used by device matching. - */ -const struct of_device_id *of_match_node(const struct of_device_id *matches, - const struct device_node *node) -{ - const struct of_device_id *match; - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); - match = __of_match_node(matches, node); - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return match; -} EXPORT_SYMBOL(of_match_node); /** @@ -692,22 +612,21 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from, const struct of_device_id **match) { struct device_node *np; - unsigned long flags; if (match) *match = NULL; - raw_spin_lock_irqsave(&devtree_lock, flags); + read_lock(&devtree_lock); np = from ? from->allnext : of_allnodes; for (; np; np = np->allnext) { - if (__of_match_node(matches, np) && of_node_get(np)) { + if (of_match_node(matches, np) && of_node_get(np)) { if (match) *match = matches; break; } } of_node_put(from); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_matching_node_and_match); @@ -750,12 +669,12 @@ struct device_node *of_find_node_by_phandle(phandle handle) { struct device_node *np; - raw_spin_lock(&devtree_lock); + read_lock(&devtree_lock); for (np = of_allnodes; np; np = np->allnext) if (np->phandle == handle) break; of_node_get(np); - raw_spin_unlock(&devtree_lock); + read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_phandle); @@ -1227,18 +1146,18 @@ int of_add_property(struct device_node *np, struct property *prop) return rc; prop->next = NULL; - raw_spin_lock_irqsave(&devtree_lock, flags); + write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (strcmp(prop->name, (*next)->name) == 0) { /* duplicate ! 
don't insert it */ - raw_spin_unlock_irqrestore(&devtree_lock, flags); + write_unlock_irqrestore(&devtree_lock, flags); return -1; } next = &(*next)->next; } *next = prop; - raw_spin_unlock_irqrestore(&devtree_lock, flags); + write_unlock_irqrestore(&devtree_lock, flags); #ifdef CONFIG_PROC_DEVICETREE /* try to add to proc as well if it was initialized */ @@ -1268,7 +1187,7 @@ int of_remove_property(struct device_node *np, struct property *prop) if (rc) return rc; - raw_spin_lock_irqsave(&devtree_lock, flags); + write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == prop) { @@ -1281,7 +1200,7 @@ int of_remove_property(struct device_node *np, struct property *prop) } next = &(*next)->next; } - raw_spin_unlock_irqrestore(&devtree_lock, flags); + write_unlock_irqrestore(&devtree_lock, flags); if (!found) return -ENODEV; @@ -1321,7 +1240,7 @@ int of_update_property(struct device_node *np, struct property *newprop) if (!oldprop) return of_add_property(np, newprop); - raw_spin_lock_irqsave(&devtree_lock, flags); + write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == oldprop) { @@ -1335,7 +1254,7 @@ int of_update_property(struct device_node *np, struct property *newprop) } next = &(*next)->next; } - raw_spin_unlock_irqrestore(&devtree_lock, flags); + write_unlock_irqrestore(&devtree_lock, flags); if (!found) return -ENODEV; @@ -1408,12 +1327,12 @@ int of_attach_node(struct device_node *np) if (rc) return rc; - raw_spin_lock_irqsave(&devtree_lock, flags); + write_lock_irqsave(&devtree_lock, flags); np->sibling = np->parent->child; np->allnext = of_allnodes; np->parent->child = np; of_allnodes = np; - raw_spin_unlock_irqrestore(&devtree_lock, flags); + write_unlock_irqrestore(&devtree_lock, flags); of_add_proc_dt_entry(np); return 0; @@ -1456,17 +1375,17 @@ int of_detach_node(struct device_node *np) if (rc) return rc; - raw_spin_lock_irqsave(&devtree_lock, flags); + write_lock_irqsave(&devtree_lock, flags); if (of_node_check_flag(np, OF_DETACHED)) { /* someone already detached it */ - raw_spin_unlock_irqrestore(&devtree_lock, flags); + write_unlock_irqrestore(&devtree_lock, flags); return rc; } parent = np->parent; if (!parent) { - raw_spin_unlock_irqrestore(&devtree_lock, flags); + write_unlock_irqrestore(&devtree_lock, flags); return rc; } @@ -1493,7 +1412,7 @@ int of_detach_node(struct device_node *np) } of_node_set_flag(np, OF_DETACHED); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + write_unlock_irqrestore(&devtree_lock, flags); of_remove_proc_dt_entry(np); return rc; diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 0941838..3af0478 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -465,7 +465,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev) WARN_ON(!dev->block_cfg_access); dev->block_cfg_access = 0; - wake_up_all_locked(&pci_cfg_wait); + wake_up_all(&pci_cfg_wait); raw_spin_unlock_irqrestore(&pci_lock, flags); } EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); @@ -515,7 +515,7 @@ static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) return false; switch (pos) { - case PCI_EXP_FLAGS: + case PCI_EXP_FLAGS_TYPE: return true; case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 21354bf..1af4008 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -53,15 +53,14 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) return; } - /* Clear PME Status if set. 
*/ - if (pci_dev->pme_support) - pci_check_pme_status(pci_dev); + if (!pci_dev->pm_cap || !pci_dev->pme_support + || pci_check_pme_status(pci_dev)) { + if (pci_dev->pme_poll) + pci_dev->pme_poll = false; - if (pci_dev->pme_poll) - pci_dev->pme_poll = false; - - pci_wakeup_event(pci_dev); - pm_runtime_resume(&pci_dev->dev); + pci_wakeup_event(pci_dev); + pm_runtime_resume(&pci_dev->dev); + } if (pci_dev->subordinate) pci_pme_wakeup_bus(pci_dev->subordinate); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 8c1ecc5..f79cbcd 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -628,7 +628,6 @@ static int pci_pm_suspend(struct device *dev) goto Fixup; } - pci_dev->state_saved = false; if (pm->suspend) { pci_power_t prev = pci_dev->current_state; int error; @@ -775,7 +774,6 @@ static int pci_pm_freeze(struct device *dev) return 0; } - pci_dev->state_saved = false; if (pm->freeze) { int error; @@ -864,7 +862,6 @@ static int pci_pm_poweroff(struct device *dev) goto Fixup; } - pci_dev->state_saved = false; if (pm->poweroff) { int error; @@ -990,7 +987,6 @@ static int pci_pm_runtime_suspend(struct device *dev) if (!pm || !pm->runtime_suspend) return -ENOSYS; - pci_dev->state_saved = false; pci_dev->no_d3cold = false; error = pm->runtime_suspend(dev); suspend_report_result(pm->runtime_suspend, error); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d1b4e00..5cb5820 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -651,11 +651,15 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) error = platform_pci_set_power_state(dev, state); if (!error) pci_update_current_state(dev, state); - } else + /* Fall back to PCI_D0 if native PM is not supported */ + if (!dev->pm_cap) + dev->current_state = PCI_D0; + } else { error = -ENODEV; - - if (error && !dev->pm_cap) /* Fall back to PCI_D0 */ - dev->current_state = PCI_D0; + /* Fall back to PCI_D0 if native PM is not supported */ + if (!dev->pm_cap) + dev->current_state = PCI_D0; + } return error; } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index ed4d094..08c243a 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -185,6 +185,14 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { #endif /* !PM */ /* + * PCIe port runtime suspend is broken for some chipsets, so use a + * black list to disable runtime PM for these chipsets. + */ +static const struct pci_device_id port_runtime_pm_black_list[] = { + { /* end: all zeroes */ } +}; + +/* * pcie_portdrv_probe - Probe PCI-Express port devices * @dev: PCI-Express port device being probed * @@ -217,11 +225,16 @@ static int pcie_portdrv_probe(struct pci_dev *dev, * it by default. 
*/ dev->d3cold_allowed = false; + if (!pci_match_id(port_runtime_pm_black_list, dev)) + pm_runtime_put_noidle(&dev->dev); + return 0; } static void pcie_portdrv_remove(struct pci_dev *dev) { + if (!pci_match_id(port_runtime_pm_black_list, dev)) + pm_runtime_get_noresume(&dev->dev); pcie_port_device_remove(dev); pci_disable_device(dev); } diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c index d98a086..75806be 100644 --- a/drivers/pcmcia/vrc4171_card.c +++ b/drivers/pcmcia/vrc4171_card.c @@ -246,7 +246,6 @@ static int pccard_init(struct pcmcia_socket *sock) socket = &vrc4171_sockets[slot]; socket->csc_irq = search_nonuse_irq(); socket->io_irq = search_nonuse_irq(); - spin_lock_init(&socket->lock); return 0; } diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 684ce75..afed701 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -1204,9 +1204,6 @@ static acpi_status WMID_set_capabilities(void) devices = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { devices = (u32) obj->integer.value; - } else { - kfree(out.pointer); - return AE_ERROR; } } else { kfree(out.pointer); diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index b96766b..2264331 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -176,7 +176,7 @@ static void msi_wmi_notify(u32 value, void *context) pr_debug("Suppressed key event 0x%X - " "Last press was %lld us ago\n", key->code, ktime_to_us(diff)); - goto msi_wmi_notify_exit; + return; } last_pressed[key->code - SCANCODE_BASE] = cur; @@ -195,8 +195,6 @@ static void msi_wmi_notify(u32 value, void *context) pr_info("Unknown key pressed - %x\n", eventcode); } else pr_info("Unknown event received\n"); - -msi_wmi_notify_exit: kfree(response.pointer); } diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 0fe987f..b8ad71f 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1534,7 +1534,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked) int argument = sony_rfkill_address[(long) data] + 0x100; if (!blocked) - argument |= 0x070000; + argument |= 0x030000; return sony_call_snc_handle(sony_rfkill_handle, argument, &result); } diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c index 056222e..20e2a7d 100644 --- a/drivers/power/ab8500_btemp.c +++ b/drivers/power/ab8500_btemp.c @@ -1123,7 +1123,7 @@ static void __exit ab8500_btemp_exit(void) platform_driver_unregister(&ab8500_btemp_driver); } -device_initcall(ab8500_btemp_init); +subsys_initcall_sync(ab8500_btemp_init); module_exit(ab8500_btemp_exit); MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c index eb7b4a6..2970891 100644 --- a/drivers/power/abx500_chargalg.c +++ b/drivers/power/abx500_chargalg.c @@ -1698,7 +1698,7 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj, static struct attribute abx500_chargalg_en_charger = \ { .name = "chargalg", - .mode = S_IWUSR, + .mode = S_IWUGO, }; static struct attribute *abx500_chargalg_chg[] = { diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c index 7087d0d..36b34ef 100644 --- a/drivers/power/bq27x00_battery.c +++ b/drivers/power/bq27x00_battery.c @@ -448,6 +448,7 @@ static void bq27x00_update(struct bq27x00_device_info *di) cache.temperature = bq27x00_battery_read_temperature(di); if (!is_bq27425) 
cache.cycle_count = bq27x00_battery_read_cyct(di); + cache.cycle_count = bq27x00_battery_read_cyct(di); cache.power_avg = bq27x00_battery_read_pwr_avg(di, BQ27x00_POWER_AVG); @@ -695,6 +696,7 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di) int ret; di->bat.type = POWER_SUPPLY_TYPE_BATTERY; + di->chip = BQ27425; if (di->chip == BQ27425) { di->bat.properties = bq27425_battery_props; di->bat.num_properties = ARRAY_SIZE(bq27425_battery_props); diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c index 60cee9e..79451f2 100644 --- a/drivers/pps/clients/pps-ldisc.c +++ b/drivers/pps/clients/pps-ldisc.c @@ -31,7 +31,7 @@ static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status, struct pps_event_time *ts) { - struct pps_device *pps = pps_lookup_dev(tty); + struct pps_device *pps = (struct pps_device *)tty->disc_data; BUG_ON(pps == NULL); @@ -67,9 +67,9 @@ static int pps_tty_open(struct tty_struct *tty) pr_err("cannot register PPS source \"%s\"\n", info.path); return -ENOMEM; } - pps->lookup_cookie = tty; + tty->disc_data = pps; - /* Now open the base class N_TTY ldisc */ + /* Should open N_TTY ldisc too */ ret = alias_n_tty_open(tty); if (ret < 0) { pr_err("cannot open tty ldisc \"%s\"\n", info.path); @@ -81,6 +81,7 @@ static int pps_tty_open(struct tty_struct *tty) return 0; err_unregister: + tty->disc_data = NULL; pps_unregister_source(pps); return ret; } @@ -89,10 +90,11 @@ static void (*alias_n_tty_close)(struct tty_struct *tty); static void pps_tty_close(struct tty_struct *tty) { - struct pps_device *pps = pps_lookup_dev(tty); + struct pps_device *pps = (struct pps_device *)tty->disc_data; alias_n_tty_close(tty); + tty->disc_data = NULL; dev_info(pps->dev, "removed\n"); pps_unregister_source(pps); } diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 6437703..2420d5a 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -247,15 +247,12 @@ static int pps_cdev_open(struct inode *inode, struct file *file) struct pps_device *pps = container_of(inode->i_cdev, struct pps_device, cdev); file->private_data = pps; - kobject_get(&pps->dev->kobj); + return 0; } static int pps_cdev_release(struct inode *inode, struct file *file) { - struct pps_device *pps = container_of(inode->i_cdev, - struct pps_device, cdev); - kobject_put(&pps->dev->kobj); return 0; } @@ -277,10 +274,8 @@ static void pps_device_destruct(struct device *dev) { struct pps_device *pps = dev_get_drvdata(dev); - cdev_del(&pps->cdev); - - /* Now we can release the ID for re-use */ - pr_debug("deallocating pps%d\n", pps->id); + /* release id here to protect others from using it while it's + * still in use */ mutex_lock(&pps_idr_lock); idr_remove(&pps_idr, pps->id); mutex_unlock(&pps_idr_lock); @@ -337,7 +332,6 @@ int pps_register_cdev(struct pps_device *pps) goto del_cdev; } - /* Override the release function with our own */ pps->dev->release = pps_device_destruct; pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, @@ -358,44 +352,11 @@ free_idr: void pps_unregister_cdev(struct pps_device *pps) { - pr_debug("unregistering pps%d\n", pps->id); - pps->lookup_cookie = NULL; device_destroy(pps_class, pps->dev->devt); + cdev_del(&pps->cdev); } /* - * Look up a pps device by magic cookie. - * The cookie is usually a pointer to some enclosing device, but this - * code doesn't care; you should never be dereferencing it. - * - * This is a bit of a kludge that is currently used only by the PPS - * serial line discipline. 
It may need to be tweaked when a second user - * is found. - * - * There is no function interface for setting the lookup_cookie field. - * It's initialized to NULL when the pps device is created, and if a - * client wants to use it, just fill it in afterward. - * - * The cookie is automatically set to NULL in pps_unregister_source() - * so that it will not be used again, even if the pps device cannot - * be removed from the idr due to pending references holding the minor - * number in use. - */ -struct pps_device *pps_lookup_dev(void const *cookie) -{ - struct pps_device *pps; - unsigned id; - - rcu_read_lock(); - idr_for_each_entry(&pps_idr, pps, id) - if (cookie == pps->lookup_cookie) - break; - rcu_read_unlock(); - return pps; -} -EXPORT_SYMBOL(pps_lookup_dev); - -/* * Module stuff */ diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c index 0c644e7..83b21d9 100644 --- a/drivers/pwm/pwm-spear.c +++ b/drivers/pwm/pwm-spear.c @@ -143,7 +143,7 @@ static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) u32 val; rc = clk_enable(pc->clk); - if (rc) + if (!rc) return rc; val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR); @@ -209,12 +209,12 @@ static int spear_pwm_probe(struct platform_device *pdev) pc->chip.npwm = NUM_PWM; ret = clk_prepare(pc->clk); - if (ret) + if (!ret) return ret; if (of_device_is_compatible(np, "st,spear1340-pwm")) { ret = clk_enable(pc->clk); - if (ret) { + if (!ret) { clk_unprepare(pc->clk); return ret; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 5a0f54a..2785843 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -200,8 +200,8 @@ static int regulator_check_consumers(struct regulator_dev *rdev, } if (*min_uV > *max_uV) { - rdev_err(rdev, "Restricting voltage, %u-%uuV\n", - *min_uV, *max_uV); + dev_err(regulator->dev, "Restricting voltage, %u-%uuV\n", + regulator->min_uV, regulator->max_uV); return -EINVAL; } diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index a936efb..96ce101 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -5,7 +5,7 @@ config REMOTEPROC tristate depends on EXPERIMENTAL depends on HAS_DMA - select FW_LOADER + select FW_CONFIG select VIRTIO config OMAP_REMOTEPROC diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 752b507..dd3bfaf 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -370,12 +370,10 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, /* it is now safe to add the virtio device */ ret = rproc_add_virtio_dev(rvdev, rsc->id); if (ret) - goto remove_rvdev; + goto free_rvdev; return 0; -remove_rvdev: - list_del(&rvdev->node); free_rvdev: kfree(rvdev); return ret; diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c index fb95c42..a7743c0 100644 --- a/drivers/remoteproc/ste_modem_rproc.c +++ b/drivers/remoteproc/ste_modem_rproc.c @@ -240,8 +240,6 @@ static int sproc_drv_remove(struct platform_device *pdev) /* Unregister as remoteproc device */ rproc_del(sproc->rproc); - dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, - sproc->fw_addr, sproc->fw_dma_addr); rproc_put(sproc->rproc); mdev->drv_data = NULL; @@ -299,13 +297,10 @@ static int sproc_probe(struct platform_device *pdev) /* Register as a remoteproc device */ err = rproc_add(rproc); if (err) - goto free_mem; + goto free_rproc; return 0; -free_mem: - dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, - 
sproc->fw_addr, sproc->fw_dma_addr); free_rproc: /* Reset device data upon error */ mdev->drv_data = NULL; diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 1c77423..16630aa 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -805,8 +805,9 @@ static int cmos_suspend(struct device *dev) mask = RTC_IRQMASK; tmp &= ~mask; CMOS_WRITE(tmp, RTC_CONTROL); - hpet_mask_rtc_irq_bit(mask); + /* shut down hpet emulation - we don't need it for alarm */ + hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE); cmos_checkintr(cmos, tmp); } spin_unlock_irq(&rtc_lock); @@ -871,7 +872,6 @@ static int cmos_resume(struct device *dev) rtc_update_irq(cmos->rtc, 1, mask); tmp &= ~RTC_AIE; hpet_mask_rtc_irq_bit(RTC_AIE); - hpet_rtc_timer_init(); } while (mask & RTC_AIE); spin_unlock_irq(&rtc_lock); } diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c index 8f87fec..57233c8 100644 --- a/drivers/rtc/rtc-mv.c +++ b/drivers/rtc/rtc-mv.c @@ -14,7 +14,6 @@ #include #include #include -#include <linux/clk.h> #include #include @@ -42,7 +41,6 @@ struct rtc_plat_data { struct rtc_device *rtc; void __iomem *ioaddr; int irq; - struct clk *clk; }; static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm) @@ -223,7 +221,6 @@ static int mv_rtc_probe(struct platform_device *pdev) struct rtc_plat_data *pdata; resource_size_t size; u32 rtc_time; - int ret = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) @@ -242,17 +239,11 @@ static int mv_rtc_probe(struct platform_device *pdev) if (!pdata->ioaddr) return -ENOMEM; - pdata->clk = devm_clk_get(&pdev->dev, NULL); - /* Not all SoCs require a clock.*/ - if (!IS_ERR(pdata->clk)) - clk_prepare_enable(pdata->clk); - /* make sure the 24 hours mode is enabled */ rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS); if (rtc_time & RTC_HOURS_12H_MODE) { dev_err(&pdev->dev, "24 Hours mode not supported.\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } /* make sure it is actually functional */ @@ -261,8 +252,7 @@ static int mv_rtc_probe(struct platform_device *pdev) rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS); if (rtc_time == 0x01000000) { dev_err(&pdev->dev, "internal RTC not ticking\n"); - ret = -ENODEV; - goto out; + return -ENODEV; } } @@ -278,10 +268,8 @@ static int mv_rtc_probe(struct platform_device *pdev) } else pdata->rtc = rtc_device_register(pdev->name, &pdev->dev, &mv_rtc_ops, THIS_MODULE); - if (IS_ERR(pdata->rtc)) { - ret = PTR_ERR(pdata->rtc); - goto out; - } + if (IS_ERR(pdata->rtc)) return PTR_ERR(pdata->rtc); if (pdata->irq >= 0) { writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); @@ -294,11 +282,6 @@ static int mv_rtc_probe(struct platform_device *pdev) } return 0; -out: - if (!IS_ERR(pdata->clk)) - clk_disable_unprepare(pdata->clk); - - return ret; } static int __exit mv_rtc_remove(struct platform_device *pdev) @@ -309,9 +292,6 @@ static int __exit mv_rtc_remove(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 0); rtc_device_unregister(pdata->rtc); - if (!IS_ERR(pdata->clk)) - clk_disable_unprepare(pdata->clk); - return 0; } diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 56dcd7c..c44d13f 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -567,8 +567,6 @@ static void __init sclp_add_standby_memory(void) add_memory_merged(0); } -#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS) - static void __init
insert_increment(u16 rn, int standby, int assigned) new_incr->rn = rn; new_incr->standby = standby; if (!standby) - new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1; + new_incr->usecount = 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 03a15e0..8491111 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -422,26 +422,6 @@ static void kvm_extint_handler(struct ext_code ext_code, } /* - * For s390-virtio, we expect a page above main storage containing - * the virtio configuration. Try to actually load from this area - * in order to figure out if the host provides this page. - */ -static int __init test_devices_support(unsigned long addr) -{ - int ret = -EIO; - - asm volatile( - "0: lura 0,%1\n" - "1: xgr %0,%0\n" - "2:\n" - EX_TABLE(0b,2b) - EX_TABLE(1b,2b) - : "+d" (ret) - : "a" (addr) - : "0", "cc"); - return ret; -} -/* * Init function for virtio * devices are in a single page above top of "normal" mem */ @@ -452,23 +432,21 @@ static int __init kvm_devices_init(void) if (!MACHINE_IS_KVM) return -ENODEV; - if (test_devices_support(real_memory_size) < 0) - return -ENODEV; - - rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); - if (rc) - return rc; - - kvm_devices = (void *) real_memory_size; - kvm_root = root_device_register("kvm_s390"); if (IS_ERR(kvm_root)) { rc = PTR_ERR(kvm_root); printk(KERN_ERR "Could not register kvm_s390 root device"); - vmem_remove_mapping(real_memory_size, PAGE_SIZE); return rc; } + rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); + if (rc) { + root_device_unregister(kvm_root); + return rc; + } + + kvm_devices = (void *) real_memory_size; + INIT_WORK(&hotplug_work, hotplug_devices); service_subclass_irq_register(); diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index fed486bf..865c64f 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, dcb->max_command = 1; dcb->target_id = target; dcb->target_lun = lun; - dcb->dev_mode = eeprom->target[target].cfg0; #ifndef DC395x_NO_DISCONNECT dcb->identify_msg = IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun); #else dcb->identify_msg = IDENTIFY(0, lun); #endif + dcb->dev_mode = eeprom->target[target].cfg0; dcb->inquiry7 = 0; dcb->sync_mode = 0; dcb->min_nego_period = clock_period[period_index]; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 61c1d2a..666b7ac 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1272,7 +1272,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) struct sk_buff *skb; #ifdef CONFIG_SMP struct fcoe_percpu_s *p0; - unsigned targ_cpu = get_cpu_light(); + unsigned targ_cpu = get_cpu(); #endif /* CONFIG_SMP */ FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); @@ -1328,7 +1328,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) kfree_skb(skb); spin_unlock_bh(&p->fcoe_rx_list.lock); } - put_cpu_light(); + put_cpu(); #else /* * This a non-SMP scenario where the singular Rx thread is @@ -1546,11 +1546,11 @@ err2: static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) { struct fcoe_percpu_s *fps; - int rc, cpu = get_cpu_light(); + int rc; - fps = &per_cpu(fcoe_percpu, cpu); + fps = &get_cpu_var(fcoe_percpu); rc = fcoe_get_paged_crc_eof(skb, tlen, fps); - put_cpu_light(); + put_cpu_var(fcoe_percpu); return rc; } @@ -1745,11 +1745,11 @@ static inline 
int fcoe_filter_frames(struct fc_lport *lport, return 0; } - stats = per_cpu_ptr(lport->stats, get_cpu_light()); + stats = per_cpu_ptr(lport->stats, get_cpu()); stats->InvalidCRCCount++; if (stats->InvalidCRCCount < 5) printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); - put_cpu_light(); + put_cpu(); return -EINVAL; } @@ -1825,13 +1825,13 @@ static void fcoe_recv_frame(struct sk_buff *skb) goto drop; if (!fcoe_filter_frames(lport, fp)) { - put_cpu_light(); + put_cpu(); fc_exch_recv(lport, fp); return; } drop: stats->ErrorFrames++; - put_cpu_light(); + put_cpu(); kfree_skb(skb); } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 69d36cc..4a909d7 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -792,7 +792,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) INIT_LIST_HEAD(&del_list); - stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); + stats = per_cpu_ptr(fip->lp->stats, get_cpu()); list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; @@ -828,7 +828,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) sel_time = fcf->time; } } - put_cpu_light(); + put_cpu(); list_for_each_entry_safe(fcf, next, &del_list, list) { /* Removes fcf from current list */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 67ca13a..c772d8d 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -730,10 +730,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, } memset(ep, 0, sizeof(*ep)); - cpu = get_cpu_light(); + cpu = get_cpu(); pool = per_cpu_ptr(mp->pool, cpu); spin_lock_bh(&pool->lock); - put_cpu_light(); + put_cpu(); /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 1924d8b..aec2e0d 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -235,17 +235,6 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) linkrate = phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); - /* Handle vacant phy - rest of dr data is not valid so skip it */ - if (phy->phy_state == PHY_VACANT) { - memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); - phy->attached_dev_type = NO_DEVICE; - if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { - phy->phy_id = phy_id; - goto skip; - } else - goto out; - } - phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; @@ -283,7 +272,6 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; - skip: if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 41192aa..c0462c0 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -36,12 +36,12 @@ qla2x00_poll(struct rsp_que *rsp) { unsigned long flags; struct qla_hw_data *ha = rsp->hw; - local_irq_save_nort(flags); + local_irq_save(flags); if (IS_QLA82XX(ha)) qla82xx_poll(0, rsp); else ha->isp_ops->intr_handler(0, rsp); - local_irq_restore_nort(flags); + local_irq_restore(flags); } static inline uint8_t * diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 9f4e560..0144078 100644 --- a/drivers/scsi/storvsc_drv.c +++ 
b/drivers/scsi/storvsc_drv.c @@ -467,7 +467,6 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl, if (!bounce_sgl) return NULL; - sg_init_table(bounce_sgl, num_pages); for (i = 0; i < num_pages; i++) { page_buf = alloc_page(GFP_ATOMIC); if (!page_buf) diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 41e21bf..cb3a310 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c @@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, for (i = count; i > 0; i--) { data = tx_buf ? *tx_buf++ : 0; - if (len == EOFBYTE && t->cs_change) + if (len == EOFBYTE) setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); out_8(&fifo->txdata_8, data); len--; diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 56c4166..b610f52 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -285,12 +285,8 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) timeout = jiffies + msecs_to_jiffies(1000); while (!(__raw_readl(reg) & bit)) { - if (time_after(jiffies, timeout)) { - if (!(__raw_readl(reg) & bit)) - return -ETIMEDOUT; - else - return 0; - } + if (time_after(jiffies, timeout)) + return -1; cpu_relax(); } return 0; diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 6796a25..ad93231 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -997,30 +997,25 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data) { struct s3c64xx_spi_driver_data *sdd = data; struct spi_master *spi = sdd->master; - unsigned int val, clr = 0; + unsigned int val; - val = readl(sdd->regs + S3C64XX_SPI_STATUS); + val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); - if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) { - clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR; + val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | + S3C64XX_SPI_PND_RX_UNDERRUN_CLR | + S3C64XX_SPI_PND_TX_OVERRUN_CLR | + S3C64XX_SPI_PND_TX_UNDERRUN_CLR; + + writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR); + + if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR) dev_err(&spi->dev, "RX overrun\n"); - } - if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { - clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR; + if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) dev_err(&spi->dev, "RX underrun\n"); - } - if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { - clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR; + if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) dev_err(&spi->dev, "TX overrun\n"); - } - if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { - clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR; + if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) dev_err(&spi->dev, "TX underrun\n"); - } - - /* Clear the pending irq by setting and then clearing it */ - writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); - writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR); return IRQ_HANDLED; } @@ -1044,13 +1039,9 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) writel(0, regs + S3C64XX_SPI_MODE_CFG); writel(0, regs + S3C64XX_SPI_PACKET_CNT); - /* Clear any irq pending bits, should set and clear the bits */ - val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | - S3C64XX_SPI_PND_RX_UNDERRUN_CLR | - S3C64XX_SPI_PND_TX_OVERRUN_CLR | - S3C64XX_SPI_PND_TX_UNDERRUN_CLR; - writel(val, regs + S3C64XX_SPI_PENDING_CLR); - writel(0, regs + S3C64XX_SPI_PENDING_CLR); + /* Clear any irq pending bits */ + writel(readl(regs + S3C64XX_SPI_PENDING_CLR), + regs + S3C64XX_SPI_PENDING_CLR); writel(0, regs + S3C64XX_SPI_SWAP_CFG); diff --git a/drivers/ssb/driver_chipcommon_pmu.c 
b/drivers/ssb/driver_chipcommon_pmu.c index bc75528..a43415a 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -675,32 +675,3 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc) return 0; } } - -void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) -{ - u32 pmu_ctl = 0; - - switch (cc->dev->bus->chip_id) { - case 0x4322: - ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070); - ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a); - ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854); - if (spuravoid == 1) - ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828); - else - ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828); - pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; - break; - case 43222: - /* TODO: BCM43222 requires updating PLLs too */ - return; - default: - ssb_printk(KERN_ERR PFX - "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", - cc->dev->bus->chip_id); - return; - } - - chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl); -} -EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate); diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 6894b3e..9b038e4 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -1547,11 +1547,6 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, /* Device config is special, because it must work on * an unconfigured device. */ if (cmd == COMEDI_DEVCONFIG) { - if (minor >= COMEDI_NUM_BOARD_MINORS) { - /* Device config not appropriate on non-board minors. */ - rc = -ENOTTY; - goto done; - } rc = do_devconfig_ioctl(dev, (struct comedi_devconfig __user *)arg); if (rc == 0) @@ -1779,7 +1774,7 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait) mask = 0; read_subdev = comedi_get_read_subdevice(dev_file_info); - if (read_subdev && read_subdev->async) { + if (read_subdev) { poll_wait(file, &read_subdev->async->wait_head, wait); if (!read_subdev->busy || comedi_buf_read_n_available(read_subdev->async) > 0 @@ -1789,7 +1784,7 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait) } } write_subdev = comedi_get_write_subdevice(dev_file_info); - if (write_subdev && write_subdev->async) { + if (write_subdev) { poll_wait(file, &write_subdev->async->wait_head, wait); comedi_buf_write_alloc(write_subdev->async, write_subdev->async->prealloc_bufsz); @@ -1831,7 +1826,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, } s = comedi_get_write_subdevice(dev_file_info); - if (s == NULL || s->async == NULL) { + if (s == NULL) { retval = -EIO; goto done; } @@ -1942,7 +1937,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, } s = comedi_get_read_subdevice(dev_file_info); - if (s == NULL || s->async == NULL) { + if (s == NULL) { retval = -EIO; goto done; } diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c index 3e7f961..1767998 100644 --- a/drivers/staging/comedi/drivers/dt9812.c +++ b/drivers/staging/comedi/drivers/dt9812.c @@ -948,13 +948,12 @@ static int dt9812_di_rinsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; - unsigned int channel = CR_CHAN(insn->chanspec); int n; u8 bits = 0; dt9812_digital_in(devpriv->slot, &bits); for (n = 0; n < insn->n; n++) - data[n] = ((1 << channel) & bits) != 0; + data[n] = ((1 << insn->chanspec) & bits) != 0; return n; } @@ -963,13 +962,12 @@ static int dt9812_do_winsn(struct 
comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; - unsigned int channel = CR_CHAN(insn->chanspec); int n; u8 bits = 0; dt9812_digital_out_shadow(devpriv->slot, &bits); for (n = 0; n < insn->n; n++) { - u8 mask = 1 << channel; + u8 mask = 1 << insn->chanspec; bits &= ~mask; if (data[n]) @@ -984,13 +982,13 @@ static int dt9812_ai_rinsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; - unsigned int channel = CR_CHAN(insn->chanspec); int n; for (n = 0; n < insn->n; n++) { u16 value = 0; - dt9812_analog_in(devpriv->slot, channel, &value, DT9812_GAIN_1); + dt9812_analog_in(devpriv->slot, insn->chanspec, &value, + DT9812_GAIN_1); data[n] = value; } return n; @@ -1001,13 +999,12 @@ static int dt9812_ao_rinsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; - unsigned int channel = CR_CHAN(insn->chanspec); int n; u16 value; for (n = 0; n < insn->n; n++) { value = 0; - dt9812_analog_out_shadow(devpriv->slot, channel, &value); + dt9812_analog_out_shadow(devpriv->slot, insn->chanspec, &value); data[n] = value; } return n; @@ -1018,11 +1015,10 @@ static int dt9812_ao_winsn(struct comedi_device *dev, unsigned int *data) { struct comedi_dt9812 *devpriv = dev->private; - unsigned int channel = CR_CHAN(insn->chanspec); int n; for (n = 0; n < insn->n; n++) - dt9812_analog_out(devpriv->slot, channel, data[n]); + dt9812_analog_out(devpriv->slot, insn->chanspec, data[n]); return n; } diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index d999053..d29c4d7 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -1202,8 +1202,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) else channel = CR_CHAN(cmd->chanlist[0]); /* munge channel bits for differential / scan disabled mode */ - if ((mode == MODE_SINGLE_CHAN || mode == MODE_SINGLE_CHAN_INTERVAL) && - aref == AREF_DIFF) + if (mode != MODE_SINGLE_CHAN && aref == AREF_DIFF) channel *= 2; devpriv->command1_bits |= ADC_CHAN_BITS(channel); devpriv->command1_bits |= thisboard->ai_range_code[range]; @@ -1218,6 +1217,21 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->write_byte(devpriv->command1_bits, dev->iobase + COMMAND1_REG); } + /* setup any external triggering/pacing (command4 register) */ + devpriv->command4_bits = 0; + if (cmd->convert_src != TRIG_EXT) + devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT; + /* XXX should discard first scan when using interval scanning + * since manual says it is not synced with scan clock */ + if (labpc_use_continuous_mode(cmd, mode) == 0) { + devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT; + if (cmd->scan_begin_src == TRIG_EXT) + devpriv->command4_bits |= EXT_SCAN_EN_BIT; + } + /* single-ended/differential */ + if (aref == AREF_DIFF) + devpriv->command4_bits |= ADC_DIFF_BIT; + devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); devpriv->write_byte(cmd->chanlist_len, dev->iobase + INTERVAL_COUNT_REG); @@ -1297,22 +1311,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->command3_bits &= ~ADC_FNE_INTR_EN_BIT; devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG); - /* setup any external triggering/pacing (command4 register) */ - devpriv->command4_bits = 0; - if (cmd->convert_src != TRIG_EXT) - devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT; - /* 
XXX should discard first scan when using interval scanning - * since manual says it is not synced with scan clock */ - if (labpc_use_continuous_mode(cmd, mode) == 0) { - devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT; - if (cmd->scan_begin_src == TRIG_EXT) - devpriv->command4_bits |= EXT_SCAN_EN_BIT; - } - /* single-ended/differential */ - if (aref == AREF_DIFF) - devpriv->command4_bits |= ADC_DIFF_BIT; - devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); - /* startup acquisition */ /* command2 reg */ diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index 5b65b52..6dc1d28 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -1482,7 +1482,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) case TRIG_NONE: /* continous acquisition */ devpriv->ai_continous = 1; - devpriv->ai_sample_count = 1; + devpriv->ai_sample_count = 0; break; } diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c index d409e14..e94f6a1 100644 --- a/drivers/staging/vt6656/dpc.c +++ b/drivers/staging/vt6656/dpc.c @@ -1190,7 +1190,7 @@ static BOOL s_bHandleRxEncryption ( if (byDecMode == KEY_CTL_WEP) { // handle WEP if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || - (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == TRUE)) { + (((PSKeyTable)(&pKey->pvKeyTable))->bSoftWEP == TRUE)) { // Software WEP // 1. 3253A // 2. WEP 256 @@ -1299,7 +1299,7 @@ static BOOL s_bHostWepRxEncryption ( // handle WEP DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"byDecMode == KEY_CTL_WEP\n"); if ((pDevice->byLocalID <= REV_ID_VT3253_A1) || - (((PSKeyTable)(pKey->pvKeyTable))->bSoftWEP == TRUE) || + (((PSKeyTable)(&pKey->pvKeyTable))->bSoftWEP == TRUE) || (bOnFly == FALSE)) { // Software WEP // 1. 
3253A diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index f726970..f33086d 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -644,6 +644,8 @@ static int vt6656_suspend(struct usb_interface *intf, pm_message_t message) if (device->flags & DEVICE_FLAGS_OPENED) device_close(device->dev); + usb_put_dev(interface_to_usbdev(intf)); + return 0; } @@ -654,6 +656,8 @@ static int vt6656_resume(struct usb_interface *intf) if (!device || !device->dev) return -ENODEV; + usb_get_dev(interface_to_usbdev(intf)); + if (!(device->flags & DEVICE_FLAGS_OPENED)) device_open(device->dev); diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index 9213d69..83c04e1 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -1454,7 +1454,7 @@ s_bPacketToWirelessUsb( pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL; if (bNeedEncryption && pTransmitKey->pvKeyTable) { - if (((PSKeyTable)pTransmitKey->pvKeyTable)->bSoftWEP == TRUE) + if (((PSKeyTable)&pTransmitKey->pvKeyTable)->bSoftWEP == TRUE) bSoftWEP = TRUE; /* WEP 256 */ } diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index 87815c5..fc68518 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -165,11 +165,6 @@ int PIPEnsControlOut( if (pDevice->Flags & fMP_CONTROL_WRITES) return STATUS_FAILURE; - if (pDevice->Flags & fMP_CONTROL_READS) - return STATUS_FAILURE; - - MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); - pDevice->sUsbCtlRequest.bRequestType = 0x40; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); @@ -184,13 +179,12 @@ int PIPEnsControlOut( ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO - "control send request submission failed: %d\n", - ntStatus); - MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus); return STATUS_FAILURE; } - + else { + MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); + } spin_unlock_irq(&pDevice->lock); for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { @@ -230,11 +224,6 @@ int PIPEnsControlIn( if (pDevice->Flags & fMP_CONTROL_READS) return STATUS_FAILURE; - if (pDevice->Flags & fMP_CONTROL_WRITES) - return STATUS_FAILURE; - - MP_SET_FLAG(pDevice, fMP_CONTROL_READS); - pDevice->sUsbCtlRequest.bRequestType = 0xC0; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); @@ -248,11 +237,10 @@ int PIPEnsControlIn( ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO - "control request submission failed: %d\n", ntStatus); - MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS); - return STATUS_FAILURE; - } + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus); + }else { + MP_SET_FLAG(pDevice, fMP_CONTROL_READS); + } spin_unlock_irq(&pDevice->lock); for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 071e058..f2a73bd 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -228,12 +228,11 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, return 0; } + user_mem = kmap_atomic(page); if (is_partial_io(bvec)) /* Use a temporary buffer to decompress the page */ - uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); - - 
user_mem = kmap_atomic(page); - if (!is_partial_io(bvec)) + uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL); + else uncmem = user_mem; if (!uncmem) { @@ -280,7 +279,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, * This is a partial IO. We need to read the full page * before to write the changes. */ - uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); + uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!uncmem) { pr_info("Error allocating temp memory!\n"); ret = -ENOMEM; diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig index 7fab032..9084565 100644 --- a/drivers/staging/zsmalloc/Kconfig +++ b/drivers/staging/zsmalloc/Kconfig @@ -1,5 +1,5 @@ config ZSMALLOC - bool "Memory allocator for compressed pages" + tristate "Memory allocator for compressed pages" default n help zsmalloc is a slab-based memory allocator designed to store diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c index 851a2ff..09a9d35 100644 --- a/drivers/staging/zsmalloc/zsmalloc-main.c +++ b/drivers/staging/zsmalloc/zsmalloc-main.c @@ -222,9 +222,11 @@ struct zs_pool { /* * By default, zsmalloc uses a copy-based object mapping method to access * allocations that span two pages. However, if a particular architecture - * performs VM mapping faster than copying, then it should be added here - * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use - * page table mapping rather than copying for object mapping. + * 1) Implements local_flush_tlb_kernel_range() and 2) Performs VM mapping + * faster than copying, then it should be added here so that + * USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use page table + * mapping rather than copying + * for object mapping. */ #if defined(CONFIG_ARM) #define USE_PGTABLE_MAPPING @@ -657,8 +659,11 @@ static inline void __zs_unmap_object(struct mapping_area *area, struct page *pages[2], int off, int size) { unsigned long addr = (unsigned long)area->vm_addr; + unsigned long end = addr + (PAGE_SIZE * 2); - unmap_kernel_range(addr, PAGE_SIZE * 2); + flush_cache_vunmap(addr, end); + unmap_kernel_range_noflush(addr, PAGE_SIZE * 2); + local_flush_tlb_kernel_range(addr, end); } #else /* USE_PGTABLE_MAPPING */ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 42a2bf7..339f97f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3570,10 +3570,6 @@ check_rsp_state: spin_lock_bh(&cmd->istate_lock); cmd->i_state = ISTATE_SENT_STATUS; spin_unlock_bh(&cmd->istate_lock); - - if (atomic_read(&conn->check_immediate_queue)) - return 1; - continue; } else if (ret == 2) { /* Still must send status, @@ -3663,7 +3659,7 @@ check_rsp_state: } if (atomic_read(&conn->check_immediate_queue)) - return 1; + break; } return 0; @@ -3707,15 +3703,12 @@ restart: signal_pending(current)) goto transport_err; -get_immediate: ret = handle_immediate_queue(conn); if (ret < 0) goto transport_err; ret = handle_response_queue(conn); - if (ret == 1) - goto get_immediate; - else if (ret == -EAGAIN) + if (ret == -EAGAIN) goto restart; else if (ret < 0) goto transport_err; diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index a0fc7b9..db0cf7c 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -166,7 +166,6 @@ static int chap_server_compute_md5( { char *endptr; unsigned long id; - unsigned char id_as_uchar; unsigned char 
digest[MD5_SIGNATURE_SIZE]; unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; unsigned char identifier[10], *challenge = NULL; @@ -356,9 +355,7 @@ static int chap_server_compute_md5( goto out; } - /* To handle both endiannesses */ - id_as_uchar = id; - sg_init_one(&sg, &id_as_uchar, 1); + sg_init_one(&sg, &id, 1); ret = crypto_hash_update(&desc, &sg, 1); if (ret < 0) { pr_err("crypto_hash_update() failed for id\n"); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fea564c..7d4ec02 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -408,7 +408,6 @@ static inline int core_alua_state_standby( case REPORT_LUNS: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: - return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: @@ -451,7 +450,6 @@ static inline int core_alua_state_unavailable( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: - return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: @@ -492,7 +490,6 @@ static inline int core_alua_state_transition( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: - return 0; case MAINTENANCE_IN: switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 96f4981..f2aa754 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1182,18 +1182,24 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked struct se_lun_acl *core_dev_init_initiator_node_lun_acl( struct se_portal_group *tpg, - struct se_node_acl *nacl, u32 mapped_lun, + char *initiatorname, int *ret) { struct se_lun_acl *lacl; + struct se_node_acl *nacl; - if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) { + if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { pr_err("%s InitiatorName exceeds maximum size.\n", tpg->se_tpg_tfo->get_fabric_name()); *ret = -EOVERFLOW; return NULL; } + nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); + if (!nacl) { + *ret = -EINVAL; + return NULL; + } lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); if (!lacl) { pr_err("Unable to allocate memory for struct se_lun_acl.\n"); @@ -1204,8 +1210,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( INIT_LIST_HEAD(&lacl->lacl_list); lacl->mapped_lun = mapped_lun; lacl->se_lun_nacl = nacl; - snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", - nacl->initiatorname); + snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); return lacl; } diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 04c775c..c57bbbc 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -354,17 +354,9 @@ static struct config_group *target_fabric_make_mappedlun( ret = -EINVAL; goto out; } - if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG" - "-1: %u for Target Portal Group: %u\n", mapped_lun, - TRANSPORT_MAX_LUNS_PER_TPG-1, - se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); - ret = -EINVAL; - goto out; - } - lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl, - mapped_lun, &ret); + lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, + config_item_name(acl_ci), &ret); if (!lacl) { ret = -EINVAL; goto out; diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 37ffc5b..bc02b01 100644 --- 
a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -7,7 +7,7 @@ #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 -#define FD_MAX_SECTORS 2048 +#define FD_MAX_SECTORS 1024 #define RRF_EMULATE_CDB 0x01 #define RRF_GOT_LBA 0x02 diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 396e1eb..93e9c1f 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -45,7 +45,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u3 int core_dev_del_lun(struct se_portal_group *, u32); struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, - struct se_node_acl *, u32, int *); + u32, char *, int *); int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, struct se_lun_acl *, u32, u32); int core_dev_del_initiator_node_lun_acl(struct se_portal_group *, diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 55b9530..2bcfd79 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -940,6 +940,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, bio = NULL; } + page++; len -= bytes; data_len -= bytes; off = 0; diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 9169d6a..5192ac0 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -111,10 +111,16 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( struct se_node_acl *acl; spin_lock_irq(&tpg->acl_node_lock); - acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); + list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { + if (!strcmp(acl->initiatorname, initiatorname) && + !acl->dynamic_node_acl) { + spin_unlock_irq(&tpg->acl_node_lock); + return acl; + } + } spin_unlock_irq(&tpg->acl_node_lock); - return acl; + return NULL; } /* core_tpg_add_node_to_devs(): diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index fcf880f..bd587b7 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1136,10 +1136,8 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) return ret; ret = target_check_reservation(cmd); - if (ret) { - cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; + if (ret) return ret; - } ret = dev->transport->parse_cdb(cmd); if (ret) diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index bfbf9fb..8c8ce80 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -1807,7 +1807,6 @@ static int __init thermal_init(void) idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); - return result; } result = genetlink_init(); return result; diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index bfd6771..dcc0430 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1689,8 +1689,6 @@ static inline void dlci_put(struct gsm_dlci *dlci) tty_port_put(&dlci->port); } -static void gsm_destroy_network(struct gsm_dlci *dlci); - /** * gsm_dlci_release - release DLCI * @dlci: DLCI to destroy @@ -1704,19 +1702,9 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) { struct tty_struct *tty = tty_port_tty_get(&dlci->port); if (tty) { - mutex_lock(&dlci->mutex); - gsm_destroy_network(dlci); - 
mutex_unlock(&dlci->mutex); - - /* tty_vhangup needs the tty_lock, so unlock and - relock after doing the hangup. */ - tty_unlock(tty); tty_vhangup(tty); - tty_lock(tty); - tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); } - dlci->state = DLCI_CLOSED; dlci_put(dlci); } @@ -2959,8 +2947,6 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp) if (dlci == NULL) return; - if (dlci->state == DLCI_CLOSED) - return; mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); @@ -2979,8 +2965,6 @@ out: static void gsmtty_hangup(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return; tty_port_hangup(&dlci->port); gsm_dlci_begin_close(dlci); } @@ -2988,12 +2972,9 @@ static void gsmtty_hangup(struct tty_struct *tty) static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, int len) { - int sent; struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return -EINVAL; /* Stuff the bytes into the fifo queue */ - sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock); + int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock); /* Need to kick the channel */ gsm_dlci_data_kick(dlci); return sent; @@ -3002,24 +2983,18 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, static int gsmtty_write_room(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return -EINVAL; return TX_SIZE - kfifo_len(dlci->fifo); } static int gsmtty_chars_in_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return -EINVAL; return kfifo_len(dlci->fifo); } static void gsmtty_flush_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return; /* Caution needed: If we implement reliable transport classes then the data being transmitted can't simply be junked once it has first hit the stack. Until then we can just blow it @@ -3038,8 +3013,6 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout) static int gsmtty_tiocmget(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return -EINVAL; return dlci->modem_rx; } @@ -3049,8 +3022,6 @@ static int gsmtty_tiocmset(struct tty_struct *tty, struct gsm_dlci *dlci = tty->driver_data; unsigned int modem_tx = dlci->modem_tx; - if (dlci->state == DLCI_CLOSED) - return -EINVAL; modem_tx &= ~clear; modem_tx |= set; @@ -3069,8 +3040,6 @@ static int gsmtty_ioctl(struct tty_struct *tty, struct gsm_netconfig nc; int index; - if (dlci->state == DLCI_CLOSED) - return -EINVAL; switch (cmd) { case GSMIOC_ENABLE_NET: if (copy_from_user(&nc, (void __user *)arg, sizeof(nc))) @@ -3097,9 +3066,6 @@ static int gsmtty_ioctl(struct tty_struct *tty, static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old) { - struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return; /* For the moment its fixed. In actual fact the speed information for the virtual channel can be propogated in both directions by the RPN control message. 
This however rapidly gets nasty as we @@ -3111,8 +3077,6 @@ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old) static void gsmtty_throttle(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return; if (tty->termios.c_cflag & CRTSCTS) dlci->modem_tx &= ~TIOCM_DTR; dlci->throttled = 1; @@ -3123,8 +3087,6 @@ static void gsmtty_throttle(struct tty_struct *tty) static void gsmtty_unthrottle(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; - if (dlci->state == DLCI_CLOSED) - return; if (tty->termios.c_cflag & CRTSCTS) dlci->modem_tx |= TIOCM_DTR; dlci->throttled = 0; @@ -3136,8 +3098,6 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state) { struct gsm_dlci *dlci = tty->driver_data; int encode = 0; /* Off */ - if (dlci->state == DLCI_CLOSED) - return -EINVAL; if (state == -1) /* "On indefinitely" - we can't encode this properly */ diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index c830b60..79ff3a5 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -47,6 +47,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp) /* Review - krefs on tty_link ?? */ if (!tty->link) return; + tty->link->packet = 0; set_bit(TTY_OTHER_CLOSED, &tty->link->flags); wake_up_interruptible(&tty->link->read_wait); wake_up_interruptible(&tty->link->write_wait); @@ -675,9 +676,6 @@ static int ptmx_open(struct inode *inode, struct file *filp) nonseekable_open(inode, filp); - /* We refuse fsnotify events on ptmx, since it's a shared resource */ - filp->f_mode |= FMODE_NONOTIFY; - retval = tty_alloc_file(filp); if (retval) return retval; diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c index f318c96..f932043 100644 --- a/drivers/tty/serial/8250/8250.c +++ b/drivers/tty/serial/8250/8250.c @@ -38,7 +38,6 @@ #include #include #include -#include #ifdef CONFIG_SPARC #include #endif @@ -81,16 +80,7 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ #define DEBUG_INTR(fmt...) 
do { } while (0) #endif -/* - * On -rt we can have a more delays, and legitimately - * so - so don't drop work spuriously and spam the - * syslog: - */ -#ifdef CONFIG_PREEMPT_RT_FULL -# define PASS_LIMIT 1000000 -#else -# define PASS_LIMIT 512 -#endif +#define PASS_LIMIT 512 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) @@ -318,28 +308,7 @@ static const struct serial8250_config uart_config[] = { }, [PORT_8250_CIR] = { .name = "CIR port" - }, - [PORT_ALTR_16550_F32] = { - .name = "Altera 16550 FIFO32", - .fifo_size = 32, - .tx_loadsz = 32, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, - .flags = UART_CAP_FIFO | UART_CAP_AFE, - }, - [PORT_ALTR_16550_F64] = { - .name = "Altera 16550 FIFO64", - .fifo_size = 64, - .tx_loadsz = 64, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, - .flags = UART_CAP_FIFO | UART_CAP_AFE, - }, - [PORT_ALTR_16550_F128] = { - .name = "Altera 16550 FIFO128", - .fifo_size = 128, - .tx_loadsz = 128, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, - .flags = UART_CAP_FIFO | UART_CAP_AFE, - }, + } }; /* Uart divisor latch read */ @@ -2910,10 +2879,14 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) touch_nmi_watchdog(); - if (port->sysrq || oops_in_progress || in_kdb_printk()) - locked = spin_trylock_irqsave(&port->lock, flags); - else - spin_lock_irqsave(&port->lock, flags); + local_irq_save(flags); + if (port->sysrq) { + /* serial8250_handle_irq() already took the lock */ + locked = 0; + } else if (oops_in_progress) { + locked = spin_trylock(&port->lock); + } else + spin_lock(&port->lock); /* * First save the IER then disable the interrupts @@ -2945,7 +2918,8 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) serial8250_modem_status(up); if (locked) - spin_unlock_irqrestore(&port->lock, flags); + spin_unlock(&port->lock); + local_irq_restore(flags); } static int __init serial8250_console_setup(struct console *co, char *options) @@ -3456,32 +3430,3 @@ module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444); MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); #endif MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); - -#ifndef MODULE -/* This module was renamed to 8250_core in 3.7. Keep the old "8250" name - * working as well for the module options so we don't break people. We - * need to keep the names identical and the convenient macros will happily - * refuse to let us do that by failing the build with redefinition errors - * of global variables. So we stick them inside a dummy function to avoid - * those conflicts. The options still get parsed, and the redefined - * MODULE_PARAM_PREFIX lets us keep the "8250." syntax alive. - * - * This is hacky. I'm sorry. - */ -static void __used s8250_options(void) -{ -#undef MODULE_PARAM_PREFIX -#define MODULE_PARAM_PREFIX "8250." 
- - module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644); - module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644); - module_param_cb(skip_txen_test, &param_ops_uint, &skip_txen_test, 0644); -#ifdef CONFIG_SERIAL_8250_RSA - __module_param_call(MODULE_PARAM_PREFIX, probe_rsa, - &param_array_ops, .arr = &__param_arr_probe_rsa, - 0444, -1); -#endif -} -#else -MODULE_ALIAS("8250"); -#endif diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 5cdb092..a27a98e 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1321,7 +1321,6 @@ pci_wch_ch353_setup(struct serial_private *priv, /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 -#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 /* * Master list of serial port init/setup/exit quirks. */ @@ -1593,6 +1592,15 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { }, { .vendor = PCI_VENDOR_ID_PLX, + .device = PCI_DEVICE_ID_PLX_9050, + .subvendor = PCI_VENDOR_ID_PLX, + .subdevice = PCI_SUBDEVICE_ID_UNKNOWN_0x1584, + .init = pci_plx9050_init, + .setup = pci_default_setup, + .exit = pci_plx9050_exit, + }, + { + .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_ROMULUS, .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_ROMULUS, @@ -3448,12 +3456,7 @@ static struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0, - pbn_b2_4_115200 }, - /* Unknown card - subdevice 0x1588 */ - { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, - PCI_VENDOR_ID_PLX, - PCI_SUBDEVICE_ID_UNKNOWN_0x1588, 0, 0, - pbn_b2_8_115200 }, + pbn_b0_4_115200 }, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_SUBVENDOR_ID_KEYSPAN, PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0, @@ -4446,10 +4449,6 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_VENDOR_ID_IBM, 0x0299, 0, 0, pbn_b0_bt_2_115200 }, - { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835, 0x1000, 0x0012, 0, 0, pbn_b0_bt_2_115200 }, - { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, 0xA000, 0x1000, 0, 0, pbn_b0_1_115200 }, diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 02e706e..59c23d0 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -209,14 +209,14 @@ config SERIAL_SAMSUNG config SERIAL_SAMSUNG_UARTS_4 bool depends on PLAT_SAMSUNG - default y if !(CPU_S3C2410 || CPU_S3C2412 || CPU_S3C2440 || CPU_S3C2442) + default y if !(CPU_S3C2410 || SERIAL_S3C2412 || CPU_S3C2440 || CPU_S3C2442) help Internal node for the common case of 4 Samsung compatible UARTs config SERIAL_SAMSUNG_UARTS int depends on PLAT_SAMSUNG - default 6 if CPU_S5P6450 + default 6 if ARCH_S5P6450 default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416 default 3 help diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index d7294f7..7fca402 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1779,19 +1779,13 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_enable(uap->clk); - /* - * local_irq_save(flags); - * - * This local_irq_save() is nonsense. If we come in via sysrq - * handling then interrupts are already disabled. Aside of - * that the port.sysrq check is racy on SMP regardless. 
- */ + local_irq_save(flags); if (uap->port.sysrq) locked = 0; else if (oops_in_progress) - locked = spin_trylock_irqsave(&uap->port.lock, flags); + locked = spin_trylock(&uap->port.lock); else - spin_lock_irqsave(&uap->port.lock, flags); + spin_lock(&uap->port.lock); /* * First save the CR then disable the interrupts @@ -1813,7 +1807,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) writew(old_cr, uap->port.membase + UART011_CR); if (locked) - spin_unlock_irqrestore(&uap->port.lock, flags); + spin_unlock(&uap->port.lock); + local_irq_restore(flags); clk_disable(uap->clk); } diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 2d2288d..922e85a 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -158,7 +158,7 @@ struct atmel_uart_port { }; static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; -static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART); +static unsigned long atmel_ports_in_use; #ifdef SUPPORT_SYSRQ static struct console atmel_console; @@ -1768,14 +1768,15 @@ static int atmel_serial_probe(struct platform_device *pdev) if (ret < 0) /* port id not found in platform data nor device-tree aliases: * auto-enumerate it */ - ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); + ret = find_first_zero_bit(&atmel_ports_in_use, + sizeof(atmel_ports_in_use)); - if (ret >= ATMEL_MAX_UART) { + if (ret > ATMEL_MAX_UART) { ret = -ENODEV; goto err; } - if (test_and_set_bit(ret, atmel_ports_in_use)) { + if (test_and_set_bit(ret, &atmel_ports_in_use)) { /* port already in use */ ret = -EBUSY; goto err; @@ -1855,7 +1856,7 @@ static int atmel_serial_remove(struct platform_device *pdev) /* "port" is allocated statically, so we shouldn't free it */ - clear_bit(port->line, atmel_ports_in_use); + clear_bit(port->line, &atmel_ports_in_use); clk_put(atmel_port->clk); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 5c110c8..5981912 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1213,14 +1213,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) struct imx_port_ucrs old_ucr; unsigned int ucr1; unsigned long flags; - int locked = 1; - if (sport->port.sysrq) - locked = 0; - else if (oops_in_progress) - locked = spin_trylock_irqsave(&sport->port.lock, flags); - else - spin_lock_irqsave(&sport->port.lock, flags); + spin_lock_irqsave(&sport->port.lock, flags); /* * First, save UCR1/2/3 and then disable interrupts @@ -1247,8 +1241,7 @@ imx_console_write(struct console *co, const char *s, unsigned int count) imx_port_ucrs_restore(&sport->port, &old_ucr); - if (locked) - spin_unlock_irqrestore(&sport->port.lock, flags); + spin_unlock_irqrestore(&sport->port.lock, flags); } /* diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 3490629..e7cae1c 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -240,12 +240,6 @@ static struct of_device_id of_platform_serial_table[] = { { .compatible = "ns16850", .data = (void *)PORT_16850, }, { .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, }, { .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, }, - { .compatible = "altr,16550-FIFO32", - .data = (void *)PORT_ALTR_16550_F32, }, - { .compatible = "altr,16550-FIFO64", - .data = (void *)PORT_ALTR_16550_F64, }, - { .compatible = "altr,16550-FIFO128", - .data = (void *)PORT_ALTR_16550_F128, }, #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL { .compatible = 
"ibm,qpace-nwp-serial", .data = (void *)PORT_NWPSERIAL, }, diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index f33fa96..57d6b29 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1166,10 +1166,13 @@ serial_omap_console_write(struct console *co, const char *s, pm_runtime_get_sync(up->dev); - if (up->port.sysrq || oops_in_progress) - locked = spin_trylock_irqsave(&up->port.lock, flags); + local_irq_save(flags); + if (up->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&up->port.lock); else - spin_lock_irqsave(&up->port.lock, flags); + spin_lock(&up->port.lock); /* * First save the IER then disable the interrupts @@ -1198,7 +1201,8 @@ serial_omap_console_write(struct console *co, const char *s, pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); if (locked) - spin_unlock_irqrestore(&up->port.lock, flags); + spin_unlock(&up->port.lock); + local_irq_restore(flags); } static int __init diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 4293a3e..2c7230a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1940,8 +1940,6 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) mutex_unlock(&port->mutex); return 0; } - put_device(tty_dev); - if (console_suspend_enabled || !uart_console(uport)) uport->suspended = 1; @@ -2007,11 +2005,9 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) disable_irq_wake(uport->irq); uport->irq_wake = 0; } - put_device(tty_dev); mutex_unlock(&port->mutex); return 0; } - put_device(tty_dev); uport->suspended = 0; /* diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 94b0ad7..220da3f 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -974,7 +974,6 @@ static struct uart_ops sunsu_pops = { #define UART_NR 4 static struct uart_sunsu_port sunsu_ports[UART_NR]; -static int nr_inst; /* Number of already registered ports */ #ifdef CONFIG_SERIO @@ -1344,8 +1343,13 @@ static int __init sunsu_console_setup(struct console *co, char *options) printk("Console: ttyS%d (SU)\n", (sunsu_reg.minor - 64) + co->index); - if (co->index > nr_inst) - return -ENODEV; + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. 
+ */ + if (co->index >= UART_NR) + co->index = 0; port = &sunsu_ports[co->index].port; /* @@ -1410,6 +1414,7 @@ static enum su_type su_get_type(struct device_node *dp) static int su_probe(struct platform_device *op) { + static int inst; struct device_node *dp = op->dev.of_node; struct uart_sunsu_port *up; struct resource *rp; @@ -1419,16 +1424,16 @@ static int su_probe(struct platform_device *op) type = su_get_type(dp); if (type == SU_PORT_PORT) { - if (nr_inst >= UART_NR) + if (inst >= UART_NR) return -EINVAL; - up = &sunsu_ports[nr_inst]; + up = &sunsu_ports[inst]; } else { up = kzalloc(sizeof(*up), GFP_KERNEL); if (!up) return -ENOMEM; } - up->port.line = nr_inst; + up->port.line = inst; spin_lock_init(&up->port.lock); @@ -1462,8 +1467,6 @@ static int su_probe(struct platform_device *op) } dev_set_drvdata(&op->dev, up); - nr_inst++; - return 0; } @@ -1491,7 +1494,7 @@ static int su_probe(struct platform_device *op) dev_set_drvdata(&op->dev, up); - nr_inst++; + inst++; return 0; diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 7c021eb..45d9161 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -473,7 +473,7 @@ static void flush_to_ldisc(struct work_struct *work) struct tty_ldisc *disc; tty = port->itty; - if (tty == NULL) + if (WARN_RATELIMIT(tty == NULL, "tty is NULL\n")) return; disc = tty_ldisc_ref(tty); @@ -566,15 +566,10 @@ void tty_flip_buffer_push(struct tty_struct *tty) buf->tail->commit = buf->tail->used; spin_unlock_irqrestore(&buf->lock, flags); -#ifndef CONFIG_PREEMPT_RT_FULL if (tty->low_latency) flush_to_ldisc(&buf->work); else schedule_work(&buf->work); -#else - flush_to_ldisc(&buf->work); -#endif - } EXPORT_SYMBOL(tty_flip_buffer_push); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index f34f98d..da9fde8 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -941,14 +941,6 @@ void start_tty(struct tty_struct *tty) EXPORT_SYMBOL(start_tty); -/* We limit tty time update visibility to every 8 seconds or so. 
*/ -static void tty_update_time(struct timespec *time) -{ - unsigned long sec = get_seconds() & ~7; - if ((long)(sec - time->tv_sec) > 0) - time->tv_sec = sec; -} - /** * tty_read - read method for tty device files * @file: pointer to tty file @@ -985,10 +977,8 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, else i = -EIO; tty_ldisc_deref(ld); - if (i > 0) - tty_update_time(&inode->i_atime); - + inode->i_atime = current_fs_time(inode->i_sb); return i; } @@ -1091,7 +1081,7 @@ static inline ssize_t do_tty_write( } if (written) { struct inode *inode = file->f_path.dentry->d_inode; - tty_update_time(&inode->i_mtime); + inode->i_mtime = current_fs_time(inode->i_sb); ret = written; } out: diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index e4455e0..8481b29 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) if (opt & TERMIOS_WAIT) { tty_wait_until_sent(tty, 0); if (signal_pending(current)) - return -ERESTARTSYS; + return -EINTR; } tty_set_termios(tty, &tmp_termios); @@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt) if (opt & TERMIOS_WAIT) { tty_wait_until_sent(tty, 0); if (signal_pending(current)) - return -ERESTARTSYS; + return -EINTR; } mutex_lock(&tty->termios_mutex); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 78f1be2..c578229 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -934,17 +934,17 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty) * race with the set_ldisc code path. */ + tty_lock_pair(tty, o_tty); tty_ldisc_halt(tty); - if (o_tty) - tty_ldisc_halt(o_tty); - tty_ldisc_flush_works(tty); - if (o_tty) + if (o_tty) { + tty_ldisc_halt(o_tty); tty_ldisc_flush_works(o_tty); + } - tty_lock_pair(tty, o_tty); /* This will need doing differently if we need to lock */ tty_ldisc_kill(tty); + if (o_tty) tty_ldisc_kill(o_tty); diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 6abb92c..fa7268a 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll) static struct vcs_poll_data * vcs_poll_data_get(struct file *file) { - struct vcs_poll_data *poll = file->private_data, *kill = NULL; + struct vcs_poll_data *poll = file->private_data; if (poll) return poll; @@ -122,12 +122,10 @@ vcs_poll_data_get(struct file *file) file->private_data = poll; } else { /* someone else raced ahead of us */ - kill = poll; + vcs_poll_data_free(poll); poll = file->private_data; } spin_unlock(&file->f_lock); - if (kill) - vcs_poll_data_free(kill); return poll; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 3b1d6bf..8fd8968 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -539,7 +539,7 @@ static void insert_char(struct vc_data *vc, unsigned int nr) { unsigned short *p = (unsigned short *) vc->vc_pos; - scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2); + scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x) * 2); scr_memsetw(p, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; if (DO_UPDATE(vc)) @@ -638,7 +638,7 @@ static inline void save_screen(struct vc_data *vc) * Redrawing of screen */ -void clear_buffer_attributes(struct vc_data *vc) +static void clear_buffer_attributes(struct vc_data *vc) { unsigned short *p = (unsigned short *)vc->vc_origin; int count = vc->vc_screenbuf_size / 2; @@ -2987,7 +2987,7 @@ int 
__init vty_init(const struct file_operations *console_fops) static struct class *vtconsole_class; -static int do_bind_con_driver(const struct consw *csw, int first, int last, +static int bind_con_driver(const struct consw *csw, int first, int last, int deflt) { struct module *owner = csw->owner; @@ -2998,7 +2998,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last, if (!try_module_get(owner)) return -ENODEV; - WARN_CONSOLE_UNLOCKED(); + console_lock(); /* check if driver is registered */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { @@ -3083,22 +3083,11 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last, retval = 0; err: + console_unlock(); module_put(owner); return retval; }; - -static int bind_con_driver(const struct consw *csw, int first, int last, - int deflt) -{ - int ret; - - console_lock(); - ret = do_bind_con_driver(csw, first, last, deflt); - console_unlock(); - return ret; -} - #ifdef CONFIG_VT_HW_CONSOLE_BINDING static int con_is_graphics(const struct consw *csw, int first, int last) { @@ -3135,18 +3124,6 @@ static int con_is_graphics(const struct consw *csw, int first, int last) */ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) { - int retval; - - console_lock(); - retval = do_unbind_con_driver(csw, first, last, deflt); - console_unlock(); - return retval; -} -EXPORT_SYMBOL(unbind_con_driver); - -/* unlocked version of unbind_con_driver() */ -int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt) -{ struct module *owner = csw->owner; const struct consw *defcsw = NULL; struct con_driver *con_driver = NULL, *con_back = NULL; @@ -3155,7 +3132,7 @@ int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt if (!try_module_get(owner)) return -ENODEV; - WARN_CONSOLE_UNLOCKED(); + console_lock(); /* check if driver is registered and if it is unbindable */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { @@ -3168,8 +3145,10 @@ int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt } } - if (retval) + if (retval) { + console_unlock(); goto err; + } retval = -ENODEV; @@ -3185,11 +3164,15 @@ int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt } } - if (retval) + if (retval) { + console_unlock(); goto err; + } - if (!con_is_bound(csw)) + if (!con_is_bound(csw)) { + console_unlock(); goto err; + } first = max(first, con_driver->first); last = min(last, con_driver->last); @@ -3216,14 +3199,15 @@ int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt if (!con_is_bound(csw)) con_driver->flag &= ~CON_DRIVER_FLAG_INIT; + console_unlock(); /* ignore return value, binding should not fail */ - do_bind_con_driver(defcsw, first, last, deflt); + bind_con_driver(defcsw, first, last, deflt); err: module_put(owner); return retval; } -EXPORT_SYMBOL_GPL(do_unbind_con_driver); +EXPORT_SYMBOL(unbind_con_driver); static int vt_bind(struct con_driver *con) { @@ -3508,18 +3492,28 @@ int con_debug_leave(void) } EXPORT_SYMBOL_GPL(con_debug_leave); -static int do_register_con_driver(const struct consw *csw, int first, int last) +/** + * register_con_driver - register console driver to console layer + * @csw: console driver + * @first: the first console to take over, minimum value is 0 + * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1 + * + * DESCRIPTION: This function registers a console driver which can later + * bind to a range of consoles specified by @first and @last. 
It will + * also initialize the console driver by calling con_startup(). + */ +int register_con_driver(const struct consw *csw, int first, int last) { struct module *owner = csw->owner; struct con_driver *con_driver; const char *desc; int i, retval = 0; - WARN_CONSOLE_UNLOCKED(); - if (!try_module_get(owner)) return -ENODEV; + console_lock(); + for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_driver = &registered_con_driver[i]; @@ -3572,27 +3566,8 @@ static int do_register_con_driver(const struct consw *csw, int first, int last) } err: - module_put(owner); - return retval; -} - -/** - * register_con_driver - register console driver to console layer - * @csw: console driver - * @first: the first console to take over, minimum value is 0 - * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1 - * - * DESCRIPTION: This function registers a console driver which can later - * bind to a range of consoles specified by @first and @last. It will - * also initialize the console driver by calling con_startup(). - */ -int register_con_driver(const struct consw *csw, int first, int last) -{ - int retval; - - console_lock(); - retval = do_register_con_driver(csw, first, last); console_unlock(); + module_put(owner); return retval; } EXPORT_SYMBOL(register_con_driver); @@ -3610,18 +3585,9 @@ EXPORT_SYMBOL(register_con_driver); */ int unregister_con_driver(const struct consw *csw) { - int retval; + int i, retval = -ENODEV; console_lock(); - retval = do_unregister_con_driver(csw); - console_unlock(); - return retval; -} -EXPORT_SYMBOL(unregister_con_driver); - -int do_unregister_con_driver(const struct consw *csw) -{ - int i, retval = -ENODEV; /* cannot unregister a bound driver */ if (con_is_bound(csw)) @@ -3647,53 +3613,27 @@ int do_unregister_con_driver(const struct consw *csw) } } err: + console_unlock(); return retval; } -EXPORT_SYMBOL_GPL(do_unregister_con_driver); - -/* - * If we support more console drivers, this function is used - * when a driver wants to take over some existing consoles - * and become default driver for newly opened ones. - * - * take_over_console is basically a register followed by unbind - */ -int do_take_over_console(const struct consw *csw, int first, int last, int deflt) -{ - int err; - - err = do_register_con_driver(csw, first, last); - /* - * If we get an busy error we still want to bind the console driver - * and return success, as we may have unbound the console driver - * but not unregistered it. - */ - if (err == -EBUSY) - err = 0; - if (!err) - do_bind_con_driver(csw, first, last, deflt); - - return err; -} -EXPORT_SYMBOL_GPL(do_take_over_console); +EXPORT_SYMBOL(unregister_con_driver); /* * If we support more console drivers, this function is used * when a driver wants to take over some existing consoles * and become default driver for newly opened ones. * - * take_over_console is basically a register followed by unbind + * take_over_console is basically a register followed by unbind */ int take_over_console(const struct consw *csw, int first, int last, int deflt) { int err; err = register_con_driver(csw, first, last); - /* - * If we get an busy error we still want to bind the console driver + /* if we get an busy error we still want to bind the console driver * and return success, as we may have unbound the console driver - * but not unregistered it. - */ +  * but not unregistered it. 
+ */ if (err == -EBUSY) err = 0; if (!err) diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c index a62c4a4..3bc244d 100644 --- a/drivers/usb/chipidea/debug.c +++ b/drivers/usb/chipidea/debug.c @@ -222,7 +222,7 @@ static struct { } dbg_data = { .idx = 0, .tty = 0, - .lck = __RW_LOCK_UNLOCKED(dbg_data.lck) + .lck = __RW_LOCK_UNLOCKED(lck) }; /** diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index c0f4066..2f45bba 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -461,8 +461,6 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) mReq->ptr->page[i] = (mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK; - wmb(); - if (!list_empty(&mEp->qh.queue)) { struct ci13xxx_req *mReqPrev; int n = hw_ep_bit(mEp->num, mEp->dir); @@ -563,12 +561,6 @@ __acquires(mEp->lock) struct ci13xxx_req *mReq = \ list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue); - - if (mReq->zptr) { - dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma); - mReq->zptr = NULL; - } - list_del_init(&mReq->queue); mReq->req.status = -ESHUTDOWN; diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h index d12e8b5..4ff2384d 100644 --- a/drivers/usb/chipidea/udc.h +++ b/drivers/usb/chipidea/udc.h @@ -40,7 +40,7 @@ struct ci13xxx_td { #define TD_CURR_OFFSET (0x0FFFUL << 0) #define TD_FRAME_NUM (0x07FFUL << 0) #define TD_RESERVED_MASK (0x0FFFUL << 0) -} __attribute__ ((packed, aligned(4))); +} __attribute__ ((packed)); /* DMA layout of queue heads */ struct ci13xxx_qh { @@ -57,7 +57,7 @@ struct ci13xxx_qh { /* 9 */ u32 RESERVED; struct usb_ctrlrequest setup; -} __attribute__ ((packed, aligned(4))); +} __attribute__ ((packed)); /** * struct ci13xxx_req - usb request representation diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 35d2cf1..2d92cce 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -600,6 +600,7 @@ static void acm_port_destruct(struct tty_port *port) dev_dbg(&acm->control->dev, "%s\n", __func__); + tty_unregister_device(acm_tty_driver, acm->minor); acm_release_minor(acm); usb_put_intf(acm->control); kfree(acm->country_codes); @@ -1417,8 +1418,6 @@ static void acm_disconnect(struct usb_interface *intf) stop_data_traffic(acm); - tty_unregister_device(acm_tty_driver, acm->minor); - usb_free_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_free_urb(acm->wb[i].urb); diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 122d056..5f0cb41 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -56,7 +56,6 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_RESPONDING 7 #define WDM_SUSPENDING 8 #define WDM_RESETTING 9 -#define WDM_OVERFLOW 10 #define WDM_MAX 16 @@ -156,7 +155,6 @@ static void wdm_in_callback(struct urb *urb) { struct wdm_device *desc = urb->context; int status = urb->status; - int length = urb->actual_length; spin_lock(&desc->iuspin); clear_bit(WDM_RESPONDING, &desc->flags); @@ -187,17 +185,9 @@ static void wdm_in_callback(struct urb *urb) } desc->rerr = status; - if (length + desc->length > desc->wMaxCommand) { - /* The buffer would overflow */ - set_bit(WDM_OVERFLOW, &desc->flags); - } else { - /* we may already be in overflow */ - if (!test_bit(WDM_OVERFLOW, &desc->flags)) { - memmove(desc->ubuf + desc->length, desc->inbuf, length); - desc->length += length; - desc->reslength = length; - } - } + desc->reslength = urb->actual_length; + memmove(desc->ubuf + desc->length, desc->inbuf, 
desc->reslength); + desc->length += desc->reslength; skip_error: wake_up(&desc->wait); @@ -445,11 +435,6 @@ retry: rv = -ENODEV; goto err; } - if (test_bit(WDM_OVERFLOW, &desc->flags)) { - clear_bit(WDM_OVERFLOW, &desc->flags); - rv = -ENOBUFS; - goto err; - } i++; if (file->f_flags & O_NONBLOCK) { if (!test_bit(WDM_READ, &desc->flags)) { @@ -493,7 +478,6 @@ retry: spin_unlock_irq(&desc->iuspin); goto retry; } - if (!desc->reslength) { /* zero length read */ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__); clear_bit(WDM_READ, &desc->flags); @@ -1020,7 +1004,6 @@ static int wdm_post_reset(struct usb_interface *intf) struct wdm_device *desc = wdm_find_device(intf); int rv; - clear_bit(WDM_OVERFLOW, &desc->flags); clear_bit(WDM_RESETTING, &desc->flags); rv = recover_from_urb_loss(desc); mutex_unlock(&desc->wlock); diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index ea0a9a1..b78fbe2 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -738,8 +738,6 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, index &= 0xff; switch (requesttype & USB_RECIP_MASK) { case USB_RECIP_ENDPOINT: - if ((index & ~USB_DIR_IN) == 0) - return 0; ret = findintfep(ps->dev, index); if (ret >= 0) ret = checkintf(ps, ret); diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 2b487d4..622b4a4 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -173,7 +173,6 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) struct hc_driver *driver; struct usb_hcd *hcd; int retval; - int hcd_irq = 0; if (usb_disabled()) return -ENODEV; @@ -188,19 +187,15 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; dev->current_state = PCI_D0; - /* - * The xHCI driver has its own irq management - * make sure irq setup is not touched for xhci in generic hcd code + /* The xHCI driver supports MSI and MSI-X, + * so don't fail if the BIOS doesn't provide a legacy IRQ. */ - if ((driver->flags & HCD_MASK) != HCD_USB3) { - if (!dev->irq) { - dev_err(&dev->dev, - "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", - pci_name(dev)); - retval = -ENODEV; - goto disable_pci; - } - hcd_irq = dev->irq; + if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) { + dev_err(&dev->dev, + "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", + pci_name(dev)); + retval = -ENODEV; + goto disable_pci; } hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); @@ -250,7 +245,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_master(dev); - retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED); + retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED); if (retval != 0) goto unmap_registers; set_hs_companion(dev, hcd); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 59c4d3c..8e64adf 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2217,7 +2217,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) * when the first handler doesn't use it. So let's just * assume it's never used. 
*/ - local_irq_save_nort(flags); + local_irq_save(flags); if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) rc = IRQ_NONE; @@ -2226,7 +2226,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) else rc = IRQ_HANDLED; - local_irq_restore_nort(flags); + local_irq_restore(flags); return rc; } EXPORT_SYMBOL_GPL(usb_hcd_irq); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 2a89588..cbf7168 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2538,35 +2538,70 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, if ((portstatus & USB_PORT_STAT_RESET)) goto delay; - if (hub_port_warm_reset_required(hub, portstatus)) - return -ENOTCONN; - - /* Device went away? */ - if (!(portstatus & USB_PORT_STAT_CONNECTION)) - return -ENOTCONN; - - /* bomb out completely if the connection bounced. A USB 3.0 - * connection may bounce if multiple warm resets were issued, - * but the device may have successfully re-connected. Ignore it. + /* + * Some buggy devices require a warm reset to be issued even + * when the port appears not to be connected. */ - if (!hub_is_superspeed(hub->hdev) && - (portchange & USB_PORT_STAT_C_CONNECTION)) - return -ENOTCONN; - - if ((portstatus & USB_PORT_STAT_ENABLE)) { - if (!udev) + if (!warm) { + /* + * Some buggy devices can cause an NEC host controller + * to transition to the "Error" state after a hot port + * reset. This will show up as the port state in + * "Inactive", and the port may also report a + * disconnect. Forcing a warm port reset seems to make + * the device work. + * + * See https://bugzilla.kernel.org/show_bug.cgi?id=41752 + */ + if (hub_port_warm_reset_required(hub, portstatus)) { + int ret; + + if ((portchange & USB_PORT_STAT_C_CONNECTION)) + clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_CONNECTION); + if (portchange & USB_PORT_STAT_C_LINK_STATE) + clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_PORT_LINK_STATE); + if (portchange & USB_PORT_STAT_C_RESET) + clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_RESET); + dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n", + port1); + ret = hub_port_reset(hub, port1, + udev, HUB_BH_RESET_TIME, + true); + if ((portchange & USB_PORT_STAT_C_CONNECTION)) + clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_CONNECTION); + return ret; + } + /* Device went away? 
*/ + if (!(portstatus & USB_PORT_STAT_CONNECTION)) + return -ENOTCONN; + + /* bomb out completely if the connection bounced */ + if ((portchange & USB_PORT_STAT_C_CONNECTION)) + return -ENOTCONN; + + if ((portstatus & USB_PORT_STAT_ENABLE)) { + if (hub_is_wusb(hub)) + udev->speed = USB_SPEED_WIRELESS; + else if (hub_is_superspeed(hub->hdev)) + udev->speed = USB_SPEED_SUPER; + else if (portstatus & USB_PORT_STAT_HIGH_SPEED) + udev->speed = USB_SPEED_HIGH; + else if (portstatus & USB_PORT_STAT_LOW_SPEED) + udev->speed = USB_SPEED_LOW; + else + udev->speed = USB_SPEED_FULL; return 0; + } + } else { + if (!(portstatus & USB_PORT_STAT_CONNECTION) || + hub_port_warm_reset_required(hub, + portstatus)) + return -ENOTCONN; - if (hub_is_wusb(hub)) - udev->speed = USB_SPEED_WIRELESS; - else if (hub_is_superspeed(hub->hdev)) - udev->speed = USB_SPEED_SUPER; - else if (portstatus & USB_PORT_STAT_HIGH_SPEED) - udev->speed = USB_SPEED_HIGH; - else if (portstatus & USB_PORT_STAT_LOW_SPEED) - udev->speed = USB_SPEED_LOW; - else - udev->speed = USB_SPEED_FULL; return 0; } @@ -2584,16 +2619,16 @@ delay: } static void hub_port_finish_reset(struct usb_hub *hub, int port1, - struct usb_device *udev, int *status) + struct usb_device *udev, int *status, bool warm) { switch (*status) { case 0: - /* TRSTRCY = 10 ms; plus some extra */ - msleep(10 + 40); - if (udev) { - struct usb_hcd *hcd = bus_to_hcd(udev->bus); - + if (!warm) { + struct usb_hcd *hcd; + /* TRSTRCY = 10 ms; plus some extra */ + msleep(10 + 40); update_devnum(udev, 0); + hcd = bus_to_hcd(udev->bus); /* The xHC may think the device is already reset, * so ignore the status. */ @@ -2605,15 +2640,14 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1, case -ENODEV: clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_RESET); + /* FIXME need disconnect() for NOTATTACHED device */ if (hub_is_superspeed(hub->hdev)) { clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_BH_PORT_RESET); clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_PORT_LINK_STATE); - clear_port_feature(hub->hdev, port1, - USB_PORT_FEAT_C_CONNECTION); } - if (udev) + if (!warm) usb_set_device_state(udev, *status ? USB_STATE_NOTATTACHED : USB_STATE_DEFAULT); @@ -2626,30 +2660,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1, struct usb_device *udev, unsigned int delay, bool warm) { int i, status; - u16 portchange, portstatus; - if (!hub_is_superspeed(hub->hdev)) { - if (warm) { - dev_err(hub->intfdev, "only USB3 hub support " - "warm reset\n"); - return -EINVAL; - } + if (!warm) { /* Block EHCI CF initialization during the port reset. * Some companion controllers don't like it when they mix. */ down_read(&ehci_cf_port_reset_rwsem); - } else if (!warm) { - /* - * If the caller hasn't explicitly requested a warm reset, - * double check and see if one is needed. 
- */ - status = hub_port_status(hub, port1, - &portstatus, &portchange); - if (status < 0) - goto done; - - if (hub_port_warm_reset_required(hub, portstatus)) - warm = true; + } else { + if (!hub_is_superspeed(hub->hdev)) { + dev_err(hub->intfdev, "only USB3 hub support " + "warm reset\n"); + return -EINVAL; + } } /* Reset the port */ @@ -2670,33 +2692,10 @@ static int hub_port_reset(struct usb_hub *hub, int port1, status); } - /* Check for disconnect or reset */ + /* return on disconnect or reset */ if (status == 0 || status == -ENOTCONN || status == -ENODEV) { - hub_port_finish_reset(hub, port1, udev, &status); - - if (!hub_is_superspeed(hub->hdev)) - goto done; - - /* - * If a USB 3.0 device migrates from reset to an error - * state, re-issue the warm reset. - */ - if (hub_port_status(hub, port1, - &portstatus, &portchange) < 0) - goto done; - - if (!hub_port_warm_reset_required(hub, portstatus)) - goto done; - - /* - * If the port is in SS.Inactive or Compliance Mode, the - * hot or warm reset failed. Try another warm reset. - */ - if (!warm) { - dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n", - port1); - warm = true; - } + hub_port_finish_reset(hub, port1, udev, &status, warm); + goto done; } dev_dbg (hub->intfdev, @@ -2710,7 +2709,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1, port1); done: - if (!hub_is_superspeed(hub->hdev)) + if (!warm) up_read(&ehci_cf_port_reset_rwsem); return status; @@ -4741,21 +4740,12 @@ static void hub_events(void) */ if (hub_port_warm_reset_required(hub, portstatus)) { int status; - struct usb_device *udev = - hub->ports[i - 1]->child; dev_dbg(hub_dev, "warm reset port %d\n", i); - if (!udev) { - status = hub_port_reset(hub, i, - NULL, HUB_BH_RESET_TIME, - true); - if (status < 0) - hub_port_disable(hub, i, 1); - } else { - usb_lock_device(udev); - status = usb_reset_device(udev); - usb_unlock_device(udev); - } + status = hub_port_reset(hub, i, NULL, + HUB_BH_RESET_TIME, true); + if (status < 0) + hub_port_disable(hub, i, 1); connect_change = 0; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index f00c749..3a4004a 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -575,7 +575,6 @@ static int dwc3_remove(struct platform_device *pdev) break; } - dwc3_free_event_buffers(dwc); dwc3_core_exit(dwc); return 0; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 1dae91d..4999563 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -405,6 +405,7 @@ struct dwc3_event_buffer { * @number: endpoint number (1 - 15) * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK * @resource_index: Resource transfer index + * @current_uf: Current uf received through last event parameter * @interval: the intervall on which the ISOC transfer is started * @name: a human readable name e.g. ep1out-bulk * @direction: true for TX, false for RX @@ -438,6 +439,7 @@ struct dwc3_ep { u8 number; u8 type; u8 resource_index; + u16 current_uf; u32 interval; char name[20]; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 09835b6..2fdd767 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -754,19 +754,22 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; + unsigned int cur_slot; + dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n", dep->name, req, (unsigned long long) dma, length, last ? " last" : "", chain ? 
" chain" : ""); - /* Skip the LINK-TRB on ISOC */ - if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && - usb_endpoint_xfer_isoc(dep->endpoint.desc)) - dep->free_slot++; - trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; + cur_slot = dep->free_slot; dep->free_slot++; + /* Skip the LINK-TRB on ISOC */ + if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) && + usb_endpoint_xfer_isoc(dep->endpoint.desc)) + return; + if (!req->trb) { dwc3_gadget_move_request_queued(req); req->trb = trb; @@ -1088,10 +1091,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) * notion of current microframe. */ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - if (list_empty(&dep->req_queued)) { - dwc3_stop_active_transfer(dwc, dep->number); - dep->flags = DWC3_EP_ENABLED; - } + dwc3_stop_active_transfer(dwc, dep->number); return 0; } @@ -1117,6 +1117,16 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) dep->name); } + /* + * 3. Missed ISOC Handling. We need to start isoc transfer on the saved + * uframe number. + */ + if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && + (dep->flags & DWC3_EP_MISSED_ISOC)) { + __dwc3_gadget_start_isoc(dwc, dep, dep->current_uf); + dep->flags &= ~DWC3_EP_MISSED_ISOC; + } + return 0; } @@ -1679,29 +1689,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { dev_dbg(dwc->dev, "incomplete IN transfer %s\n", dep->name); - /* - * If missed isoc occurred and there is - * no request queued then issue END - * TRANSFER, so that core generates - * next xfernotready and we will issue - * a fresh START TRANSFER. - * If there are still queued request - * then wait, do not issue either END - * or UPDATE TRANSFER, just attach next - * request in request_list during - * giveback.If any future queued request - * is successfully transferred then we - * will issue UPDATE TRANSFER for all - * request in the request_list. - */ + dep->current_uf = event->parameters & + ~(dep->interval - 1); dep->flags |= DWC3_EP_MISSED_ISOC; } else { dev_err(dwc->dev, "incomplete IN transfer %s\n", dep->name); status = -ECONNRESET; } - } else { - dep->flags &= ~DWC3_EP_MISSED_ISOC; } } else { if (count && (event->status & DEPEVT_STATUS_SHORT)) @@ -1728,23 +1723,6 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, break; } while (1); - if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && - list_empty(&dep->req_queued)) { - if (list_empty(&dep->request_list)) { - /* - * If there is no entry in request list then do - * not issue END TRANSFER now. Just set PENDING - * flag, so that END TRANSFER is issued when an - * entry is added into request list. - */ - dep->flags = DWC3_EP_PENDING_REQUEST; - } else { - dwc3_stop_active_transfer(dwc, dep->number); - dep->flags = DWC3_EP_ENABLED; - } - return 1; - } - if ((event->status & DEPEVT_STATUS_IOC) && (trb->ctrl & DWC3_TRB_CTRL_IOC)) return 0; @@ -2179,26 +2157,6 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) break; } - /* Enable USB2 LPM Capability */ - - if ((dwc->revision > DWC3_REVISION_194A) - && (speed != DWC3_DCFG_SUPERSPEED)) { - reg = dwc3_readl(dwc->regs, DWC3_DCFG); - reg |= DWC3_DCFG_LPM_CAP; - dwc3_writel(dwc->regs, DWC3_DCFG, reg); - - reg = dwc3_readl(dwc->regs, DWC3_DCTL); - reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); - - /* - * TODO: This should be configurable. 
For now using - * maximum allowed HIRD threshold value of 0b1100 - */ - reg |= DWC3_DCTL_HIRD_THRES(12); - - dwc3_writel(dwc->regs, DWC3_DCTL, reg); - } - /* Recent versions support automatic phy suspend and don't need this */ if (dwc->revision < DWC3_REVISION_194A) { /* Suspend unneeded PHY */ @@ -2505,8 +2463,20 @@ int dwc3_gadget_init(struct dwc3 *dwc) DWC3_DEVTEN_DISCONNEVTEN); dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); - /* automatic phy suspend only on recent versions */ + /* Enable USB2 LPM and automatic phy suspend only on recent versions */ if (dwc->revision >= DWC3_REVISION_194A) { + reg = dwc3_readl(dwc->regs, DWC3_DCFG); + reg |= DWC3_DCFG_LPM_CAP; + dwc3_writel(dwc->regs, DWC3_DCFG, reg); + + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN); + + /* TODO: This should be configurable */ + reg |= DWC3_DCTL_HIRD_THRES(28); + + dwc3_writel(dwc->regs, DWC3_DCTL, reg); + dwc3_gadget_usb2_phy_suspend(dwc, false); dwc3_gadget_usb3_phy_suspend(dwc, false); } diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c index 3b343b2..3953dd4 100644 --- a/drivers/usb/gadget/g_ffs.c +++ b/drivers/usb/gadget/g_ffs.c @@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite_dev *cdev) goto error; gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id; - for (i = func_num; i--; ) { + for (i = func_num; --i; ) { ret = functionfs_bind(ffs_tab[i].ffs_data, cdev); if (unlikely(ret < 0)) { while (++i < func_num) @@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev) gether_cleanup(); gfs_ether_setup = false; - for (i = func_num; i--; ) + for (i = func_num; --i; ) if (ffs_tab[i].ffs_data) functionfs_unbind(ffs_tab[i].ffs_data); diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 34a3907..4d90a80 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c @@ -265,7 +265,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) usb_gadget_disconnect(udc->gadget); udc->driver->disconnect(udc->gadget); udc->driver->unbind(udc->gadget); - usb_gadget_udc_stop(udc->gadget, NULL); + usb_gadget_udc_stop(udc->gadget, udc->driver); } else { usb_gadget_stop(udc->gadget, udc->driver); } diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 83b5a172..b416a3f 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -302,7 +302,6 @@ static void ehci_quiesce (struct ehci_hcd *ehci) static void end_unlink_async(struct ehci_hcd *ehci); static void unlink_empty_async(struct ehci_hcd *ehci); -static void unlink_empty_async_suspended(struct ehci_hcd *ehci); static void ehci_work(struct ehci_hcd *ehci); static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); @@ -670,6 +669,9 @@ int ehci_setup(struct usb_hcd *hcd) if (retval) return retval; + if (ehci_is_TDI(ehci)) + tdi_reset(ehci); + ehci_reset(ehci); return 0; @@ -746,9 +748,11 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) /* guard against (alleged) silicon errata */ if (cmd & CMD_IAAD) ehci_dbg(ehci, "IAA with IAAD still set?\n"); - if (ehci->async_iaa) + if (ehci->async_iaa) { COUNT(ehci->stats.iaa); - end_unlink_async(ehci); + end_unlink_async(ehci); + } else + ehci_dbg(ehci, "IAA with nothing unlinked?\n"); } /* remote wakeup [4.3.1] */ diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 7d06e77..4d3b294 100644 --- a/drivers/usb/host/ehci-hub.c +++ 
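Note the loop-bound change in the g_ffs.c hunks above: `for (i = func_num; i--; )` visits indices func_num-1 down to 0, while the reverted-to `for (i = func_num; --i; )` tests the decremented value, so index 0 is never visited and the loop body runs not at all when func_num is 1. A standalone worked example of the difference (not driver code):

#include <stdio.h>

int main(void)
{
	int func_num = 3, i;

	/* post-decrement: the old value is tested, so i == 0 is visited */
	for (i = func_num; i--; )
		printf("i-- visits %d\n", i);	/* 2, 1, 0 */

	/* pre-decrement: the new value is tested, so the i == 0 pass is lost */
	for (i = func_num; --i; )
		printf("--i visits %d\n", i);	/* 2, 1 only */

	return 0;
}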
b/drivers/usb/host/ehci-hub.c @@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) ehci->rh_state = EHCI_RH_SUSPENDED; end_unlink_async(ehci); - unlink_empty_async_suspended(ehci); + unlink_empty_async(ehci); ehci_handle_intr_unlinks(ehci); end_free_itds(ehci); diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 99899e8..ac17a7c 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -288,6 +288,7 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct usb_hcd *hcd = dev_get_drvdata(dev); + struct ehci_hcd_omap_platform_data *pdata = dev->platform_data; usb_remove_hcd(hcd); disable_put_regulator(dev->platform_data); @@ -297,6 +298,13 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) pm_runtime_put_sync(dev); pm_runtime_disable(dev); + if (pdata->phy_reset) { + if (gpio_is_valid(pdata->reset_gpio_port[0])) + gpio_free(pdata->reset_gpio_port[0]); + + if (gpio_is_valid(pdata->reset_gpio_port[1])) + gpio_free(pdata->reset_gpio_port[1]); + } return 0; } @@ -364,7 +372,7 @@ static const struct hc_driver ehci_omap_hc_driver = { .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; -MODULE_ALIAS("platform:ehci-omap"); +MODULE_ALIAS("platform:omap-ehci"); MODULE_AUTHOR("Texas Instruments, Inc."); MODULE_AUTHOR("Felipe Balbi "); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 23d1369..fd252f0 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -135,7 +135,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) * qtd is updated in qh_completions(). Update the QH * overlay here. */ - if (qh->hw->hw_token & ACTIVE_BIT(ehci)) { + if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) { qh->hw->hw_qtd_next = qtd->hw_next; qtd = NULL; } @@ -449,19 +449,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) else if (last_status == -EINPROGRESS && !urb->unlinked) continue; - /* - * If this was the active qtd when the qh was unlinked - * and the overlay's token is active, then the overlay - * hasn't been written back to the qtd yet so use its - * token instead of the qtd's. After the qtd is - * processed and removed, the overlay won't be valid - * any more. - */ - if (state == QH_STATE_IDLE && - qh->qtd_list.next == &qtd->qtd_list && - (hw->hw_token & ACTIVE_BIT(ehci))) { + /* qh unlinked; token in overlay may be most current */ + if (state == QH_STATE_IDLE + && cpu_to_hc32(ehci, qtd->qtd_dma) + == hw->hw_current) { token = hc32_to_cpu(ehci, hw->hw_token); - hw->hw_token &= ~ACTIVE_BIT(ehci); /* An unlink may leave an incomplete * async transaction in the TT buffer. @@ -1178,7 +1170,7 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) struct ehci_qh *prev; /* Add to the end of the list of QHs waiting for the next IAAD */ - qh->qh_state = QH_STATE_UNLINK_WAIT; + qh->qh_state = QH_STATE_UNLINK; if (ehci->async_unlink) ehci->async_unlink_last->unlink_next = qh; else @@ -1221,19 +1213,9 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested) /* Do only the first waiting QH (nVidia bug?) */ qh = ehci->async_unlink; - - /* - * Intel (?) bug: The HC can write back the overlay region - * even after the IAA interrupt occurs. In self-defense, - * always go through two IAA cycles for each QH. 
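single_unlink_async() in the ehci-q.c hunks above appends the QH to a singly linked wait list through the async_unlink_last tail pointer, the usual O(1) tail-append idiom. A minimal sketch of that idiom with a generic node type (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *unlink_next;
};

static struct node *unlink_head, *unlink_last;

/* O(1) append: keep a tail pointer instead of walking the list. */
static void append(struct node *n)
{
	n->unlink_next = NULL;
	if (unlink_head)
		unlink_last->unlink_next = n;
	else
		unlink_head = n;
	unlink_last = n;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));
		n->id = i;
		append(n);
	}
	while (unlink_head) {			/* drain in FIFO order */
		struct node *n = unlink_head;
		printf("node %d\n", n->id);
		unlink_head = n->unlink_next;
		free(n);
	}
	return 0;
}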
- */ - if (qh->qh_state == QH_STATE_UNLINK_WAIT) { - qh->qh_state = QH_STATE_UNLINK; - } else { - ehci->async_iaa = qh; - ehci->async_unlink = qh->unlink_next; - qh->unlink_next = NULL; - } + ehci->async_iaa = qh; + ehci->async_unlink = qh->unlink_next; + qh->unlink_next = NULL; /* Make sure the unlinks are all visible to the hardware */ wmb(); @@ -1316,19 +1298,6 @@ static void unlink_empty_async(struct ehci_hcd *ehci) } } -/* The root hub is suspended; unlink all the async QHs */ -static void unlink_empty_async_suspended(struct ehci_hcd *ehci) -{ - struct ehci_qh *qh; - - while (ehci->async->qh_next.qh) { - qh = ehci->async->qh_next.qh; - WARN_ON(!list_empty(&qh->qtd_list)); - single_unlink_async(ehci, qh); - } - start_iaa_cycle(ehci, false); -} - /* makes sure the async qh will become idle */ /* caller must own ehci->lock */ diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 010f686..b476daf 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1214,7 +1214,6 @@ itd_urb_transaction ( memset (itd, 0, sizeof *itd); itd->itd_dma = itd_dma; - itd->frame = 9999; /* an invalid value */ list_add (&itd->itd_list, &sched->td_list); } spin_unlock_irqrestore (&ehci->lock, flags); @@ -1916,7 +1915,6 @@ sitd_urb_transaction ( memset (sitd, 0, sizeof *sitd); sitd->sitd_dma = sitd_dma; - sitd->frame = 9999; /* an invalid value */ list_add (&sitd->sitd_list, &iso_sched->td_list); } diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index c3fa130..f904071 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c @@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci) if (want != actual) { - /* Poll again later, but give up after about 20 ms */ - if (ehci->ASS_poll_count++ < 20) { - ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); - return; - } - ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n", - want, actual); + /* Poll again later */ + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); + ++ehci->ASS_poll_count; + return; } + + if (ehci->ASS_poll_count > 20) + ehci_dbg(ehci, "ASS poll count reached %d\n", + ehci->ASS_poll_count); ehci->ASS_poll_count = 0; /* The status is up-to-date; restart or stop the schedule as needed */ @@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci) if (want != actual) { - /* Poll again later, but give up after about 20 ms */ - if (ehci->PSS_poll_count++ < 20) { - ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); - return; - } - ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", - want, actual); + /* Poll again later */ + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); + return; } + + if (ehci->PSS_poll_count > 20) + ehci_dbg(ehci, "PSS poll count reached %d\n", + ehci->PSS_poll_count); ehci->PSS_poll_count = 0; /* The status is up-to-date; restart or stop the schedule as needed */ @@ -304,7 +305,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci) * (a) SMP races against real IAA firing and retriggering, and * (b) clean HC shutdown, when IAA watchdog was pending. */ - if (1) { + if (ehci->async_iaa) { u32 cmd, status; /* If we get here, IAA is *REALLY* late. 
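The ehci-timer.c hunks above trade a bounded poll ("give up after about 20 ms") for an unbounded one that merely logs a high poll count. The bounded form is the more defensive pattern when hardware may never reach the wanted state; a minimal sketch, with a fake register read standing in for the schedule-status check:

#include <stdio.h>
#include <stdbool.h>

#define MAX_POLLS 20

static int reads;

/* Stand-in for reading a schedule-status bit from a register. */
static bool status_ready(void)
{
	return ++reads > 5;	/* pretend the bit settles on the 6th read */
}

/* Bounded poll: re-check a limited number of times, then give up
 * instead of spinning forever on hardware that never settles. */
static int poll_until_ready(void)
{
	for (int tries = 0; tries < MAX_POLLS; tries++)
		if (status_ready())
			return 0;
	return -1;
}

int main(void)
{
	printf("result %d after %d reads\n", poll_until_ready(), reads);
	return 0;
}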
It's barely diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 1a3e81a..180a2b0 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -857,13 +857,9 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) } if (ints & OHCI_INTR_WDH) { - if (ohci->hcca->done_head == 0) { - ints &= ~OHCI_INTR_WDH; - } else { - spin_lock (&ohci->lock); - dl_done_list (ohci); - spin_unlock (&ohci->lock); - } + spin_lock (&ohci->lock); + dl_done_list (ohci); + spin_unlock (&ohci->lock); } if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) { diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 2573cf4..7f76a49 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2027,8 +2027,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + td->urb->transfer_buffer_length + - TRB_LEN(le32_to_cpu(event->transfer_len)); else td->urb->actual_length = 0; @@ -2060,7 +2060,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, /* Maybe the event was for the data stage? */ td->urb->actual_length = td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + TRB_LEN(le32_to_cpu(event->transfer_len)); xhci_dbg(xhci, "Waiting for status " "stage event\n"); return 0; @@ -2096,7 +2096,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, /* handle completion code */ switch (trb_comp_code) { case COMP_SUCCESS: - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { + if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { frame->status = 0; break; } @@ -2141,7 +2141,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); } len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + TRB_LEN(le32_to_cpu(event->transfer_len)); if (trb_comp_code != COMP_STOP_INVAL) { frame->actual_length = len; @@ -2199,7 +2199,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, case COMP_SUCCESS: /* Double check that the HW transferred everything. */ if (event_trb != td->last_trb || - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { + TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { xhci_warn(xhci, "WARN Successful completion " "on short TX\n"); if (td->urb->transfer_flags & URB_SHORT_NOT_OK) @@ -2227,18 +2227,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, "%d bytes untransferred\n", td->urb->ep->desc.bEndpointAddress, td->urb->transfer_buffer_length, - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); + TRB_LEN(le32_to_cpu(event->transfer_len))); /* Fast path - was this the last TRB in the TD for this URB? 
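The xhci-ring.c hunks replace EVENT_TRB_LEN() with TRB_LEN() throughout. The distinction matters: the Transfer Length field of a transfer TRB is 17 bits (bits 0:16), while the residual length reported in a transfer *event* TRB is 24 bits (bits 0:23), so masking an event with TRB_LEN() silently truncates any residue of 128 KiB or more. A worked example using the two masks as defined in xhci.h:

#include <stdio.h>

#define TRB_LEN(p)       ((p) & 0x1ffff)	/* transfer TRB: bits 0:16 */
#define EVENT_TRB_LEN(p) ((p) & 0xffffff)	/* event TRB:    bits 0:23 */

int main(void)
{
	unsigned int residue = 0x20000;	/* 128 KiB left untransferred */

	printf("EVENT_TRB_LEN: %u bytes\n", EVENT_TRB_LEN(residue)); /* 131072 */
	printf("TRB_LEN:       %u bytes\n", TRB_LEN(residue));       /* 0 */
	return 0;
}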
*/ if (event_trb == td->last_trb) { - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { + if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { td->urb->actual_length = td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + TRB_LEN(le32_to_cpu(event->transfer_len)); if (td->urb->transfer_buffer_length < td->urb->actual_length) { xhci_warn(xhci, "HC gave bad length " "of %d bytes left\n", - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); + TRB_LEN(le32_to_cpu(event->transfer_len))); td->urb->actual_length = 0; if (td->urb->transfer_flags & URB_SHORT_NOT_OK) *status = -EREMOTEIO; @@ -2280,7 +2280,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, if (trb_comp_code != COMP_STOP_INVAL) td->urb->actual_length += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + TRB_LEN(le32_to_cpu(event->transfer_len)); } return finish_td(xhci, td, event_trb, event, ep, status, false); @@ -2368,7 +2368,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, * transfer type */ case COMP_SUCCESS: - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) + if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) break; if (xhci->quirks & XHCI_TRUST_TX_LENGTH) trb_comp_code = COMP_SHORT_TX; @@ -2461,21 +2461,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, * TD list. */ if (list_empty(&ep_ring->td_list)) { - /* - * A stopped endpoint may generate an extra completion - * event if the device was suspended. Don't print - * warnings. - */ - if (!(trb_comp_code == COMP_STOP || - trb_comp_code == COMP_STOP_INVAL)) { - xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", - TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), - ep_index); - xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", - (le32_to_cpu(event->flags) & - TRB_TYPE_BITMASK)>>10); - xhci_print_trb_offsets(xhci, (union xhci_trb *) event); - } + xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " + "with no TDs queued?\n", + TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), + ep_index); + xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", + (le32_to_cpu(event->flags) & + TRB_TYPE_BITMASK)>>10); + xhci_print_trb_offsets(xhci, (union xhci_trb *) event); if (ep->skip) { ep->skip = false; xhci_dbg(xhci, "td_list is empty while skip " diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 849470b..f1f01a8 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) * generate interrupts. Don't even try to enable MSI. */ if (xhci->quirks & XHCI_BROKEN_MSI) - goto legacy_irq; + return 0; /* unregister the legacy interrupt */ if (hcd->irq) @@ -371,7 +371,6 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) return -EINVAL; } - legacy_irq: /* fall back to legacy interrupt*/ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, hcd->irq_descr, hcd); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 6a563ef..f791bd0 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -206,8 +206,8 @@ struct xhci_op_regs { /* bits 12:31 are reserved (and should be preserved on writes). */ /* IMAN - Interrupt Management Register */ -#define IMAN_IE (1 << 1) -#define IMAN_IP (1 << 0) +#define IMAN_IP (1 << 1) +#define IMAN_IE (1 << 0) /* USBSTS - USB status - status bitmasks */ /* HC not running - set to 1 when run/stop bit is cleared. 
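The xhci.h hunk above swaps the IMAN_IE and IMAN_IP definitions back to the pre-fix values. Per the xHCI specification, Interrupt Pending is bit 0 (write-1-to-clear) and Interrupt Enable is bit 1, which is the layout being reverted away from. A sketch of acknowledging an interrupt under the spec layout (spec values, not the reverted header's):

#include <stdio.h>
#include <stdint.h>

#define IMAN_IP (1U << 0)	/* interrupt pending, write 1 to clear */
#define IMAN_IE (1U << 1)	/* interrupt enable */

/* Compute the write-back value that acks a pending interrupt while
 * preserving the enable bit. */
static uint32_t iman_ack(uint32_t iman)
{
	return iman | IMAN_IP;	/* writing IP back as 1 clears it */
}

int main(void)
{
	uint32_t iman = IMAN_IE | IMAN_IP;	/* enabled, interrupt pending */

	printf("write back 0x%x to ack\n", iman_ack(iman));
	return 0;
}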
*/ @@ -972,10 +972,6 @@ struct xhci_transfer_event { __le32 flags; }; -/* Transfer event TRB length bit mask */ -/* bits 0:23 */ -#define EVENT_TRB_LEN(p) ((p) & 0xffffff) - /** Transfer Event bit fields **/ #define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index ba6a5d6..0fc6e5f 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -63,7 +63,6 @@ static const struct usb_device_id appledisplay_table[] = { { APPLEDISPLAY_DEVICE(0x9219) }, { APPLEDISPLAY_DEVICE(0x921c) }, { APPLEDISPLAY_DEVICE(0x921d) }, - { APPLEDISPLAY_DEVICE(0x9236) }, /* Terminating entry */ { } diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c index 59eea21..c107d7c 100644 --- a/drivers/usb/musb/am35x.c +++ b/drivers/usb/musb/am35x.c @@ -365,7 +365,7 @@ static int am35x_musb_init(struct musb *musb) usb_nop_xceiv_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) - return -EPROBE_DEFER; + return -ENODEV; setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index dbb31b3..14dab9f 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -406,7 +406,7 @@ static int bfin_musb_init(struct musb *musb) musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) { gpio_free(musb->config->gpio_vrsel); - return -EPROBE_DEFER; + return -ENODEV; } bfin_musb_reg_init(musb); diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 41613a2..97996af 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) u8 devctl = musb_readb(mregs, MUSB_DEVCTL); int err; - err = musb->int_usb & MUSB_INTR_VBUSERROR; + err = musb->int_usb & USB_INTR_VBUSERROR; if (err) { /* * The Mentor core doesn't debounce VBUS as needed @@ -410,7 +410,6 @@ static int da8xx_musb_init(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; u32 rev; - int ret = -ENODEV; musb->mregs += DA8XX_MENTOR_CORE_OFFSET; @@ -421,10 +420,8 @@ static int da8xx_musb_init(struct musb *musb) usb_nop_xceiv_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); - if (IS_ERR_OR_NULL(musb->xceiv)) { - ret = -EPROBE_DEFER; + if (IS_ERR_OR_NULL(musb->xceiv)) goto fail; - } setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); @@ -444,7 +441,7 @@ static int da8xx_musb_init(struct musb *musb) musb->isr = da8xx_musb_interrupt; return 0; fail: - return ret; + return -ENODEV; } static int da8xx_musb_exit(struct musb *musb) diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index e040d91..b1c01ca 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c @@ -380,14 +380,11 @@ static int davinci_musb_init(struct musb *musb) { void __iomem *tibase = musb->ctrl_base; u32 revision; - int ret = -ENODEV; usb_nop_xceiv_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); - if (IS_ERR_OR_NULL(musb->xceiv)) { - ret = -EPROBE_DEFER; + if (IS_ERR_OR_NULL(musb->xceiv)) goto unregister; - } musb->mregs += DAVINCI_BASE_OFFSET; @@ -441,7 +438,7 @@ fail: usb_put_phy(musb->xceiv); unregister: usb_nop_xceiv_unregister(); - return ret; + return -ENODEV; } static int davinci_musb_exit(struct musb *musb) diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 60b41cc..fd34867 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ 
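Across the musb glue layers above, the revert turns -EPROBE_DEFER back into -ENODEV when usb_get_phy() fails. The difference is behavioral: -EPROBE_DEFER asks the driver core to retry the probe later (for example, once the PHY driver has bound), while -ENODEV fails the bind permanently. A minimal platform-driver-shaped sketch of the deferral pattern; the driver name is hypothetical and the PHY lookup is the only assumed dependency:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/err.h>

/* Hypothetical probe: defer instead of failing hard when the PHY
 * driver has not bound yet, so the core re-probes us later. */
static int demo_probe(struct platform_device *pdev)
{
	struct usb_phy *phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);

	if (IS_ERR_OR_NULL(phy))
		return -EPROBE_DEFER;	/* retried; -ENODEV would be final */

	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= { .name = "demo-phy-consumer" },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");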
-1993,7 +1993,6 @@ fail2: musb_platform_exit(musb); fail1: - pm_runtime_disable(musb->controller); dev_err(musb->controller, "musb_init_controller failed with status %d\n", status); diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 341a4b5..f7d764d 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -419,7 +419,7 @@ static int dsps_musb_init(struct musb *musb) usb_nop_xceiv_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) - return -EPROBE_DEFER; + return -ENODEV; /* Returns zero if e.g. not clocked */ rev = dsps_readl(reg_base, wrp->revision); diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index d7772856..da00af4 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -369,7 +369,7 @@ static int omap2430_musb_init(struct musb *musb) musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) { pr_err("HS USB OTG: no transceiver configured\n"); - return -EPROBE_DEFER; + return -ENODEV; } musb->isr = omap2430_musb_interrupt; diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 464bd23..3969813 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -1069,7 +1069,7 @@ static int tusb_musb_init(struct musb *musb) usb_nop_xceiv_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) - return -EPROBE_DEFER; + return -ENODEV; pdev = to_platform_device(musb->controller); diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index 13a3929..a27ca1a 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -61,7 +61,7 @@ static int ux500_musb_init(struct musb *musb) musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) { pr_err("HS USB OTG: no transceiver configured\n"); - return -EPROBE_DEFER; + return -ENODEV; } musb->isr = ux500_musb_interrupt; @@ -108,7 +108,7 @@ static int ux500_probe(struct platform_device *pdev) goto err3; } - ret = clk_prepare_enable(clk); + ret = clk_enable(clk); if (ret) { dev_err(&pdev->dev, "failed to enable clock\n"); goto err4; @@ -148,7 +148,7 @@ static int ux500_probe(struct platform_device *pdev) return 0; err5: - clk_disable_unprepare(clk); + clk_disable(clk); err4: clk_put(clk); @@ -168,7 +168,7 @@ static int ux500_remove(struct platform_device *pdev) struct ux500_glue *glue = platform_get_drvdata(pdev); platform_device_unregister(glue->musb); - clk_disable_unprepare(glue->clk); + clk_disable(glue->clk); clk_put(glue->clk); kfree(glue); @@ -182,7 +182,7 @@ static int ux500_suspend(struct device *dev) struct musb *musb = glue_to_musb(glue); usb_phy_set_suspend(musb->xceiv, 1); - clk_disable_unprepare(glue->clk); + clk_disable(glue->clk); return 0; } @@ -193,7 +193,7 @@ static int ux500_resume(struct device *dev) struct musb *musb = glue_to_musb(glue); int ret; - ret = clk_prepare_enable(glue->clk); + ret = clk_enable(glue->clk); if (ret) { dev_err(dev, "failed to enable clock\n"); return ret; diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 0b44e45..a88882c 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -62,6 +62,7 @@ static int is_irda(struct usb_serial *serial) } struct ark3116_private { + wait_queue_head_t delta_msr_wait; struct async_icount icount; int irda; /* 1 for irda device */ @@ -145,6 +146,7 @@ static int ark3116_port_probe(struct usb_serial_port *port) if (!priv) return -ENOMEM; + 
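The ux500 hunks above replace clk_prepare_enable()/clk_disable_unprepare() with bare clk_enable()/clk_disable(). Under the common clock framework, clk_enable() may only be called on a clock that has already been prepared; clk_prepare_enable() is shorthand for the prepare-then-enable pair, with clk_disable_unprepare() as the matching teardown. A kernel-style sketch of the paired calls; the "usb" clock name is illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* clk_prepare_enable() == clk_prepare() then clk_enable(); undo with
 * clk_disable_unprepare() == clk_disable() then clk_unprepare(). */
static int demo_clock_on(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, "usb");	/* "usb" is illustrative */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*out = clk;
	return 0;
}

static void demo_clock_off(struct clk *clk)
{
	clk_disable_unprepare(clk);
	clk_put(clk);
}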
init_waitqueue_head(&priv->delta_msr_wait); mutex_init(&priv->hw_lock); spin_lock_init(&priv->status_lock); @@ -454,14 +456,10 @@ static int ark3116_ioctl(struct tty_struct *tty, case TIOCMIWAIT: for (;;) { struct async_icount prev = priv->icount; - interruptible_sleep_on(&port->delta_msr_wait); + interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - - if (port->serial->disconnected) - return -EIO; - if ((prev.rng == priv->icount.rng) && (prev.dsr == priv->icount.dsr) && (prev.dcd == priv->icount.dcd) && @@ -582,7 +580,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr) priv->icount.dcd++; if (msr & UART_MSR_TERI) priv->icount.rng++; - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); } } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 07d4650..d255f66 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -80,6 +80,7 @@ MODULE_DEVICE_TABLE(usb, id_table); struct ch341_private { spinlock_t lock; /* access lock */ + wait_queue_head_t delta_msr_wait; /* wait queue for modem status */ unsigned baud_rate; /* set baud rate */ u8 line_control; /* set line control value RTS/DTR */ u8 line_status; /* active status of modem control inputs */ @@ -251,6 +252,7 @@ static int ch341_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); + init_waitqueue_head(&priv->delta_msr_wait); priv->baud_rate = DEFAULT_BAUD_RATE; priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; @@ -296,7 +298,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on) priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR); spin_unlock_irqrestore(&priv->lock, flags); ch341_set_handshake(port->serial->dev, priv->line_control); - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); } static void ch341_close(struct usb_serial_port *port) @@ -489,7 +491,7 @@ static void ch341_read_int_callback(struct urb *urb) tty_kref_put(tty); } - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); } exit: @@ -515,14 +517,11 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (!multi_change) { - interruptible_sleep_on(&port->delta_msr_wait); + interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - if (port->serial->disconnected) - return -EIO; - spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; multi_change = priv->multi_status_change; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 4747d1c..edc0f0d 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -85,7 +85,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ - { USB_DEVICE(0x2405, 0x0003) }, /* West Mountain Radio RIGblaster Advantage */ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ @@ -151,25 +150,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1E29, 
0x0102) }, /* Festo CPX-USB */ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ - { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ - { USB_DEVICE(0x1FB9, 0x0200) }, /* Lake Shore Model 218A Temperature Monitor */ - { USB_DEVICE(0x1FB9, 0x0201) }, /* Lake Shore Model 219 Temperature Monitor */ - { USB_DEVICE(0x1FB9, 0x0202) }, /* Lake Shore Model 233 Temperature Transmitter */ - { USB_DEVICE(0x1FB9, 0x0203) }, /* Lake Shore Model 235 Temperature Transmitter */ - { USB_DEVICE(0x1FB9, 0x0300) }, /* Lake Shore Model 335 Temperature Controller */ - { USB_DEVICE(0x1FB9, 0x0301) }, /* Lake Shore Model 336 Temperature Controller */ - { USB_DEVICE(0x1FB9, 0x0302) }, /* Lake Shore Model 350 Temperature Controller */ - { USB_DEVICE(0x1FB9, 0x0303) }, /* Lake Shore Model 371 AC Bridge */ - { USB_DEVICE(0x1FB9, 0x0400) }, /* Lake Shore Model 411 Handheld Gaussmeter */ - { USB_DEVICE(0x1FB9, 0x0401) }, /* Lake Shore Model 425 Gaussmeter */ - { USB_DEVICE(0x1FB9, 0x0402) }, /* Lake Shore Model 455A Gaussmeter */ - { USB_DEVICE(0x1FB9, 0x0403) }, /* Lake Shore Model 475A Gaussmeter */ - { USB_DEVICE(0x1FB9, 0x0404) }, /* Lake Shore Model 465 Three Axis Gaussmeter */ - { USB_DEVICE(0x1FB9, 0x0600) }, /* Lake Shore Model 625A Superconducting MPS */ - { USB_DEVICE(0x1FB9, 0x0601) }, /* Lake Shore Model 642A Magnet Power Supply */ - { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ - { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ - { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index a06076f..fd8c35f 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -111,6 +111,7 @@ struct cypress_private { int baud_rate; /* stores current baud rate in integer form */ int isthrottled; /* if throttled, discard reads */ + wait_queue_head_t delta_msr_wait; /* used for TIOCMIWAIT */ char prev_status, diff_status; /* used for TIOCMIWAIT */ /* we pass a pointer to this as the argument sent to cypress_set_termios old_termios */ @@ -448,6 +449,7 @@ static int cypress_generic_port_probe(struct usb_serial_port *port) kfree(priv); return -ENOMEM; } + init_waitqueue_head(&priv->delta_msr_wait); usb_reset_configuration(serial->dev); @@ -866,16 +868,12 @@ static int cypress_ioctl(struct tty_struct *tty, switch (cmd) { /* This code comes from drivers/char/serial.c and ftdi_sio.c */ case TIOCMIWAIT: - for (;;) { - interruptible_sleep_on(&port->delta_msr_wait); + while (priv != NULL) { + interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - - if (port->serial->disconnected) - return -EIO; - - { + else { char diff = priv->diff_status; if (diff == 0) return -EIO; /* no change => error */ @@ -1189,7 +1187,7 @@ static void cypress_read_int_callback(struct urb *urb) if (priv->current_status != priv->prev_status) { priv->diff_status |= priv->current_status ^ priv->prev_status; - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); priv->prev_status = priv->current_status; } spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index 1e64343..6e4eb57 100644 --- 
a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -47,6 +47,7 @@ MODULE_DEVICE_TABLE(usb, id_table); struct f81232_private { spinlock_t lock; + wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -111,7 +112,7 @@ static void f81232_process_read_urb(struct urb *urb) line_status = priv->line_status; priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); if (!urb->actual_length) return; @@ -260,14 +261,11 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (1) { - interruptible_sleep_on(&port->delta_msr_wait); + interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - if (port->serial->disconnected) - return -EIO; - spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); @@ -329,6 +327,7 @@ static int f81232_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); + init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 77f78ad..90ceef1 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -69,7 +69,9 @@ struct ftdi_private { int flags; /* some ASYNC_xxxx flags are supported */ unsigned long last_dtr_rts; /* saved modem control outputs */ struct async_icount icount; + wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ char prev_status; /* Used for TIOCMIWAIT */ + bool dev_gone; /* Used to abort TIOCMIWAIT */ char transmit_empty; /* If transmitter is empty or not */ __u16 interface; /* FT2232C, FT2232H or FT4232H port interface (0 for FT232/245) */ @@ -189,7 +191,6 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, - { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, @@ -641,7 +642,6 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, - { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, @@ -871,9 +871,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, - { USB_DEVICE(ST_VID, ST_STMCLT_2232_PID), - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, - { USB_DEVICE(ST_VID, ST_STMCLT_4232_PID), + { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, { USB_DEVICE(FTDI_VID, FTDI_RF_R106) }, { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID), @@ -1693,8 +1691,10 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) kref_init(&priv->kref); mutex_init(&priv->cfg_lock); + init_waitqueue_head(&priv->delta_msr_wait); priv->flags = 
ASYNC_LOW_LATENCY; + priv->dev_gone = false; if (quirk && quirk->port_probe) quirk->port_probe(priv); @@ -1795,24 +1795,20 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) } /* - * First two ports on JTAG adaptors using an FT4232 such as STMicroelectronics's - * ST Micro Connect Lite are reserved for JTAG or other non-UART interfaces and - * can be accessed from userspace. - * The next two ports are enabled as UARTs by default, where port 2 is - * a conventional RS-232 UART. + * First and second port on STMCLiteadaptors is reserved for JTAG interface + * and the forth port for pio */ static int ftdi_stmclite_probe(struct usb_serial *serial) { struct usb_device *udev = serial->dev; struct usb_interface *interface = serial->interface; - if (interface == udev->actconfig->interface[0] || - interface == udev->actconfig->interface[1]) { - dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n"); - return -ENODEV; - } + if (interface == udev->actconfig->interface[2]) + return 0; - return 0; + dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n"); + + return -ENODEV; } /* @@ -1844,7 +1840,8 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); - wake_up_interruptible(&port->delta_msr_wait); + priv->dev_gone = true; + wake_up_interruptible_all(&priv->delta_msr_wait); remove_sysfs_attrs(port); @@ -1889,22 +1886,24 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on) { struct ftdi_private *priv = usb_get_serial_port_data(port); - /* Disable flow control */ - if (!on) { - if (usb_control_msg(port->serial->dev, + mutex_lock(&port->serial->disc_mutex); + if (!port->serial->disconnected) { + /* Disable flow control */ + if (!on && usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->interface, NULL, 0, WDR_TIMEOUT) < 0) { - dev_err(&port->dev, "error from flowcontrol urb\n"); + dev_err(&port->dev, "error from flowcontrol urb\n"); } + /* drop RTS and DTR */ + if (on) + set_mctrl(port, TIOCM_DTR | TIOCM_RTS); + else + clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } - /* drop RTS and DTR */ - if (on) - set_mctrl(port, TIOCM_DTR | TIOCM_RTS); - else - clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); + mutex_unlock(&port->serial->disc_mutex); } /* @@ -1993,7 +1992,7 @@ static int ftdi_process_packet(struct tty_struct *tty, if (diff_status & FTDI_RS0_RLSD) priv->icount.dcd++; - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible_all(&priv->delta_msr_wait); priv->prev_status = status; } @@ -2450,15 +2449,11 @@ static int ftdi_ioctl(struct tty_struct *tty, */ case TIOCMIWAIT: cprev = priv->icount; - for (;;) { - interruptible_sleep_on(&port->delta_msr_wait); + while (!priv->dev_gone) { + interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - - if (port->serial->disconnected) - return -EIO; - cnow = priv->icount; if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || @@ -2468,6 +2463,8 @@ static int ftdi_ioctl(struct tty_struct *tty, } cprev = cnow; } + return -EIO; + break; case TIOCSERGETLSR: return get_lsr_info(port, (struct serial_struct __user *)arg); break; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 9852827..9d359e1 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -74,7 +74,6 @@ 
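The TIOCMIWAIT loops above go back to interruptible_sleep_on(), which is inherently racy: a wakeup arriving between reading the counters and going to sleep is lost, and there is no disconnect check. The race-free shape, visible in the hunks being reverted (oti6858, mos7840, quatech2), is wait_event_interruptible() with the change expressed in the condition. A minimal sketch with hypothetical fields:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

struct demo_port {
	wait_queue_head_t delta_msr_wait;
	int msr_events;		/* bumped by the completion handler */
	bool disconnected;	/* set at disconnect time */
};

/* Sleep until a modem-status change or disconnect; the condition is
 * re-tested after the task is queued on the waitqueue, so a wakeup
 * cannot slip in between the test and the sleep. */
static int demo_tiocmiwait(struct demo_port *p)
{
	int prev = p->msr_events;

	if (wait_event_interruptible(p->delta_msr_wait,
				     p->disconnected ||
				     p->msr_events != prev))
		return -ERESTARTSYS;

	return p->disconnected ? -EIO : 0;
}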
#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB #define FTDI_OPENDCC_GBM_PID 0xBFDC -#define FTDI_OPENDCC_GBM_BOOST_PID 0xBFDD /* NZR SEM 16+ USB (http://www.nzr.de) */ #define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */ @@ -585,13 +584,6 @@ #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ /* - * Mitsubishi Electric Corp. (http://www.meau.com) - * Submitted by Konstantin Holoborodko - */ -#define MITSUBISHI_VID 0x06D3 -#define MITSUBISHI_FXUSB_PID 0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */ - -/* * Definitions for B&B Electronics products. */ #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ @@ -1151,8 +1143,7 @@ * STMicroelectonics */ #define ST_VID 0x0483 -#define ST_STMCLT_2232_PID 0x3746 -#define ST_STMCLT_4232_PID 0x3747 +#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */ /* * Papouch products (http://www.papouch.com/) diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 34e702b..203358d 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -959,7 +959,10 @@ static void garmin_close(struct usb_serial_port *port) if (!serial) return; - garmin_clear(garmin_data_p); + mutex_lock(&port->serial->disc_mutex); + + if (!port->serial->disconnected) + garmin_clear(garmin_data_p); /* shutdown our urbs */ usb_kill_urb(port->read_urb); @@ -968,6 +971,8 @@ static void garmin_close(struct usb_serial_port *port) /* keep reset state so we know that we must start a new session */ if (garmin_data_p->state != STATE_RESET) garmin_data_p->state = STATE_DISCONNECTED; + + mutex_unlock(&port->serial->disc_mutex); } diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index adfd73d..7b770c7 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -110,6 +110,7 @@ struct edgeport_port { wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ + wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ struct async_icount icount; struct usb_serial_port *port; /* loop back to the owner of this object */ @@ -883,6 +884,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) /* initialize our wait queues */ init_waitqueue_head(&edge_port->wait_open); init_waitqueue_head(&edge_port->wait_chase); + init_waitqueue_head(&edge_port->delta_msr_wait); init_waitqueue_head(&edge_port->wait_command); /* initialize our icount structure */ @@ -1667,17 +1669,13 @@ static int edge_ioctl(struct tty_struct *tty, dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number); cprev = edge_port->icount; while (1) { - prepare_to_wait(&port->delta_msr_wait, + prepare_to_wait(&edge_port->delta_msr_wait, &wait, TASK_INTERRUPTIBLE); schedule(); - finish_wait(&port->delta_msr_wait, &wait); + finish_wait(&edge_port->delta_msr_wait, &wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - - if (port->serial->disconnected) - return -EIO; - cnow = edge_port->icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -2057,7 +2055,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr) icount->dcd++; if (newMsr & EDGEPORT_MSR_DELTA_RI) icount->rng++; - 
wake_up_interruptible(&edge_port->port->delta_msr_wait); + wake_up_interruptible(&edge_port->delta_msr_wait); } /* Save the new modem status */ diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 1db782d..82afc4d 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -87,6 +87,9 @@ struct edgeport_port { int close_pending; int lsr_event; struct async_icount icount; + wait_queue_head_t delta_msr_wait; /* for handling sleeping while + waiting for msr change to + happen */ struct edgeport_serial *edge_serial; struct usb_serial_port *port; __u8 bUartMode; /* Port type, 0: RS232, etc. */ @@ -1515,7 +1518,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr) icount->dcd++; if (msr & EDGEPORT_MSR_DELTA_RI) icount->rng++; - wake_up_interruptible(&edge_port->port->delta_msr_wait); + wake_up_interruptible(&edge_port->delta_msr_wait); } /* Save the new modem status */ @@ -1818,6 +1821,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) dev = port->serial->dev; memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); + init_waitqueue_head(&edge_port->delta_msr_wait); /* turn off loopback */ status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); @@ -2484,14 +2488,10 @@ static int edge_ioctl(struct tty_struct *tty, dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); cprev = edge_port->icount; while (1) { - interruptible_sleep_on(&port->delta_msr_wait); + interruptible_sleep_on(&edge_port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - - if (port->serial->disconnected) - return -EIO; - cnow = edge_port->icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -2702,7 +2702,6 @@ static struct usb_serial_driver edgeport_2port_device = { .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, - .get_icount = edge_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index 3b9f834..b691175 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -114,6 +114,8 @@ struct mct_u232_private { unsigned char last_msr; /* Modem Status Register */ unsigned int rx_flags; /* Throttling flags */ struct async_icount icount; + wait_queue_head_t msr_wait; /* for handling sleeping while waiting + for msr change to happen */ }; #define THROTTLED 0x01 @@ -407,6 +409,7 @@ static int mct_u232_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); + init_waitqueue_head(&priv->msr_wait); usb_set_serial_port_data(port, priv); @@ -496,15 +499,19 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on) unsigned int control_state; struct mct_u232_private *priv = usb_get_serial_port_data(port); - spin_lock_irq(&priv->lock); - if (on) - priv->control_state |= TIOCM_DTR | TIOCM_RTS; - else - priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); - control_state = priv->control_state; - spin_unlock_irq(&priv->lock); - - mct_u232_set_modem_ctrl(port, control_state); + mutex_lock(&port->serial->disc_mutex); + if (!port->serial->disconnected) { + /* drop DTR and RTS */ + spin_lock_irq(&priv->lock); + if (on) + priv->control_state |= TIOCM_DTR | TIOCM_RTS; + else + priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); + control_state = priv->control_state; + spin_unlock_irq(&priv->lock); + mct_u232_set_modem_ctrl(port, 
control_state); + } + mutex_unlock(&port->serial->disc_mutex); } static void mct_u232_close(struct usb_serial_port *port) @@ -603,7 +610,7 @@ static void mct_u232_read_int_callback(struct urb *urb) tty_kref_put(tty); } #endif - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->msr_wait); spin_unlock_irqrestore(&priv->lock, flags); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); @@ -812,17 +819,13 @@ static int mct_u232_ioctl(struct tty_struct *tty, cprev = mct_u232_port->icount; spin_unlock_irqrestore(&mct_u232_port->lock, flags); for ( ; ; ) { - prepare_to_wait(&port->delta_msr_wait, + prepare_to_wait(&mct_u232_port->msr_wait, &wait, TASK_INTERRUPTIBLE); schedule(); - finish_wait(&port->delta_msr_wait, &wait); + finish_wait(&mct_u232_port->msr_wait, &wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - - if (port->serial->disconnected) - return -EIO; - spin_lock_irqsave(&mct_u232_port->lock, flags); cnow = mct_u232_port->icount; spin_unlock_irqrestore(&mct_u232_port->lock, flags); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 3b909e0..66d9e08 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -219,6 +219,7 @@ struct moschip_port { char open; char open_ports; wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ + wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ int delta_msr_cond; struct async_icount icount; struct usb_serial_port *port; /* loop back to the owner of this object */ @@ -422,9 +423,6 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr) icount->rng++; smp_wmb(); } - - mos7840_port->delta_msr_cond = 1; - wake_up_interruptible(&port->port->delta_msr_wait); } } @@ -1133,6 +1131,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) /* initialize our wait queues */ init_waitqueue_head(&mos7840_port->wait_chase); + init_waitqueue_head(&mos7840_port->delta_msr_wait); /* initialize our icount structure */ memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount)); @@ -2022,6 +2021,8 @@ static void mos7840_change_port_settings(struct tty_struct *tty, mos7840_port->read_urb_busy = false; } } + wake_up(&mos7840_port->delta_msr_wait); + mos7840_port->delta_msr_cond = 1; dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, mos7840_port->shadowLCR); } @@ -2222,18 +2223,13 @@ static int mos7840_ioctl(struct tty_struct *tty, while (1) { /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */ mos7840_port->delta_msr_cond = 0; - wait_event_interruptible(port->delta_msr_wait, - (port->serial->disconnected || - mos7840_port-> + wait_event_interruptible(mos7840_port->delta_msr_wait, + (mos7840_port-> delta_msr_cond == 1)); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - - if (port->serial->disconnected) - return -EIO; - cnow = mos7840_port->icount; smp_rmb(); if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index bff059a..567bc77 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -341,13 +341,10 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 -#define CINTERION_PRODUCT_AH6 0x0055 -#define CINTERION_PRODUCT_PLS8 0x0060 /* Olivetti products */ 
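The handle_new_msr()-style callbacks above translate the delta bits of the modem status register into icount increments before waking any TIOCMIWAIT sleeper. A sketch of that decode using the standard 16550 MSR delta bits from include/uapi/linux/serial_reg.h; the counter struct is a local stand-in for the drivers' async_icount:

#include <linux/serial_reg.h>

struct demo_icount {
	unsigned int cts, dsr, rng, dcd;
};

/* MSR bits 0-3 are "delta" flags: they latch a change on CTS, DSR,
 * RI (trailing edge) and DCD since the register was last read. */
static void demo_update_icount(struct demo_icount *ic, unsigned char msr)
{
	if (msr & UART_MSR_DCTS)
		ic->cts++;
	if (msr & UART_MSR_DDSR)
		ic->dsr++;
	if (msr & UART_MSR_TERI)
		ic->rng++;
	if (msr & UART_MSR_DDCD)
		ic->dcd++;
}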
#define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 -#define OLIVETTI_PRODUCT_OLICARD145 0xc003 /* Celot products */ #define CELOT_VENDOR_ID 0x211f @@ -482,7 +479,6 @@ static const struct option_blacklist_info four_g_w14_blacklist = { static const struct option_blacklist_info alcatel_x200_blacklist = { .sendsetup = BIT(0) | BIT(1), - .reserved = BIT(4), }; static const struct option_blacklist_info zte_0037_blacklist = { @@ -579,15 +575,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, - { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), @@ -1226,14 +1215,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist }, - { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, - { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, - { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, - { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, @@ -1264,8 +1246,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, @@ -1274,7 +1254,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, - { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 
SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ @@ -1352,12 +1331,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, - { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */ - { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */ - { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x02, 0x01) }, - { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, - { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, - { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index ae4495a..d217fd6 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c @@ -188,6 +188,7 @@ struct oti6858_private { u8 setup_done; struct delayed_work delayed_setup_work; + wait_queue_head_t intr_wait; struct usb_serial_port *port; /* USB port with which associated */ }; @@ -338,6 +339,7 @@ static int oti6858_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); + init_waitqueue_head(&priv->intr_wait); priv->port = port; INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line); INIT_DELAYED_WORK(&priv->delayed_write_work, send_data); @@ -662,15 +664,11 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (1) { - wait_event_interruptible(port->delta_msr_wait, - port->serial->disconnected || + wait_event_interruptible(priv->intr_wait, priv->status.pin_state != prev); if (signal_pending(current)) return -ERESTARTSYS; - if (port->serial->disconnected) - return -EIO; - spin_lock_irqsave(&priv->lock, flags); status = priv->status.pin_state & PIN_MASK; spin_unlock_irqrestore(&priv->lock, flags); @@ -765,7 +763,7 @@ static void oti6858_read_int_callback(struct urb *urb) if (!priv->transient) { if (xs->pin_state != priv->status.pin_state) - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->intr_wait); memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE); } diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index bb056a1..6002419 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -139,6 +139,7 @@ struct pl2303_serial_private { struct pl2303_private { spinlock_t lock; + wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -232,6 +233,7 @@ static int pl2303_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); + init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); @@ -605,14 +607,11 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (1) { - interruptible_sleep_on(&port->delta_msr_wait); + interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - if (port->serial->disconnected) - return -EIO; - spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); @@ -720,7 +719,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port, 
spin_unlock_irqrestore(&priv->lock, flags); if (priv->line_status & UART_BREAK_ERROR) usb_serial_handle_break(port); - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); tty = tty_port_tty_get(&port->port); if (!tty) @@ -785,7 +784,7 @@ static void pl2303_process_read_urb(struct urb *urb) line_status = priv->line_status; priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); if (!urb->actual_length) return; diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 31f81c3..9b1b96f 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c @@ -69,7 +69,6 @@ static struct usb_device_id id_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */ - { USB_DEVICE_AND_INTERFACE_INFO(0x1fac, 0x0151, 0xff, 0xff, 0xff) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 59b32b7..2466254 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -197,15 +197,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) if (is_gobi1k) { /* Gobi 1K USB layout: - * 0: DM/DIAG (use libqcdm from ModemManager for communication) + * 0: serial port (doesn't respond) * 1: serial port (doesn't respond) * 2: AT-capable modem port * 3: QMI/net */ - if (ifnum == 0) { - dev_dbg(dev, "Gobi 1K DM/DIAG interface found\n"); - altsetting = 1; - } else if (ifnum == 2) + if (ifnum == 2) dev_dbg(dev, "Modem port found\n"); else altsetting = -1; diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index 9f34c99..d152be9 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -128,6 +128,7 @@ struct qt2_port_private { u8 shadowLSR; u8 shadowMSR; + wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ struct async_icount icount; struct usb_serial_port *port; @@ -505,9 +506,8 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) spin_unlock_irqrestore(&priv->lock, flags); while (1) { - wait_event_interruptible(port->delta_msr_wait, - (port->serial->disconnected || - (priv->icount.rng != prev.rng) || + wait_event_interruptible(priv->delta_msr_wait, + ((priv->icount.rng != prev.rng) || (priv->icount.dsr != prev.dsr) || (priv->icount.dcd != prev.dcd) || (priv->icount.cts != prev.cts))); @@ -515,9 +515,6 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) if (signal_pending(current)) return -ERESTARTSYS; - if (port->serial->disconnected) - return -EIO; - spin_lock_irqsave(&priv->lock, flags); cur = priv->icount; spin_unlock_irqrestore(&priv->lock, flags); @@ -844,6 +841,7 @@ static int qt2_port_probe(struct usb_serial_port *port) spin_lock_init(&port_priv->lock); spin_lock_init(&port_priv->urb_lock); + init_waitqueue_head(&port_priv->delta_msr_wait); port_priv->port = port; port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); @@ -947,17 +945,19 @@ static void qt2_dtr_rts(struct usb_serial_port *port, int on) struct usb_device *dev = port->serial->dev; struct qt2_port_private *port_priv = usb_get_serial_port_data(port); - /* Disable flow control */ - if (!on) { - if (qt2_setregister(dev, 
port_priv->device_port, + mutex_lock(&port->serial->disc_mutex); + if (!port->serial->disconnected) { + /* Disable flow control */ + if (!on && qt2_setregister(dev, port_priv->device_port, UART_MCR, 0) < 0) dev_warn(&port->dev, "error from flowcontrol urb\n"); + /* drop RTS and DTR */ + if (on) + update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0); + else + update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS); } - /* drop RTS and DTR */ - if (on) - update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0); - else - update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS); + mutex_unlock(&port->serial->disc_mutex); } static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) @@ -986,7 +986,7 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) if (newMSR & UART_MSR_TERI) port_priv->icount.rng++; - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&port_priv->delta_msr_wait); } } diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index d4426c0..af06f2f 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -861,13 +861,19 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port) static void sierra_dtr_rts(struct usb_serial_port *port, int on) { + struct usb_serial *serial = port->serial; struct sierra_port_private *portdata; portdata = usb_get_serial_port_data(port); portdata->rts_state = on; portdata->dtr_state = on; - sierra_send_setup(port); + if (serial->dev) { + mutex_lock(&serial->disc_mutex); + if (!serial->disconnected) + sierra_send_setup(port); + mutex_unlock(&serial->disc_mutex); + } } static int sierra_startup(struct usb_serial *serial) diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 85de44d..a42536a 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -149,6 +149,7 @@ enum spcp8x5_type { struct spcp8x5_private { spinlock_t lock; enum spcp8x5_type type; + wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; @@ -178,6 +179,7 @@ static int spcp8x5_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->lock); + init_waitqueue_head(&priv->delta_msr_wait); priv->type = type; usb_set_serial_port_data(port , priv); @@ -474,7 +476,7 @@ static void spcp8x5_process_read_urb(struct urb *urb) priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); /* wake up the wait for termios */ - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); if (!urb->actual_length) return; @@ -524,15 +526,12 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port, while (1) { /* wake up in bulk read */ - interruptible_sleep_on(&port->delta_msr_wait); + interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; - if (port->serial->disconnected) - return -EIO; - spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 44d5949..4543ea3 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c @@ -61,6 +61,7 @@ struct ssu100_port_private { spinlock_t status_lock; u8 shadowLSR; u8 shadowMSR; + wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ struct async_icount icount; }; @@ -354,9 +355,8 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) 
spin_unlock_irqrestore(&priv->status_lock, flags); while (1) { - wait_event_interruptible(port->delta_msr_wait, - (port->serial->disconnected || - (priv->icount.rng != prev.rng) || + wait_event_interruptible(priv->delta_msr_wait, + ((priv->icount.rng != prev.rng) || (priv->icount.dsr != prev.dsr) || (priv->icount.dcd != prev.dcd) || (priv->icount.cts != prev.cts))); @@ -364,9 +364,6 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) if (signal_pending(current)) return -ERESTARTSYS; - if (port->serial->disconnected) - return -EIO; - spin_lock_irqsave(&priv->status_lock, flags); cur = priv->icount; spin_unlock_irqrestore(&priv->status_lock, flags); @@ -448,6 +445,7 @@ static int ssu100_port_probe(struct usb_serial_port *port) return -ENOMEM; spin_lock_init(&priv->status_lock); + init_waitqueue_head(&priv->delta_msr_wait); usb_set_serial_port_data(port, priv); @@ -508,16 +506,19 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on) { struct usb_device *dev = port->serial->dev; - /* Disable flow control */ - if (!on) { - if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0) + mutex_lock(&port->serial->disc_mutex); + if (!port->serial->disconnected) { + /* Disable flow control */ + if (!on && + ssu100_setregister(dev, 0, UART_MCR, 0) < 0) dev_err(&port->dev, "error from flowcontrol urb\n"); + /* drop RTS and DTR */ + if (on) + set_mctrl(dev, TIOCM_DTR | TIOCM_RTS); + else + clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS); } - /* drop RTS and DTR */ - if (on) - set_mctrl(dev, TIOCM_DTR | TIOCM_RTS); - else - clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS); + mutex_unlock(&port->serial->disc_mutex); } static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) @@ -539,7 +540,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) priv->icount.dcd++; if (msr & UART_MSR_TERI) priv->icount.rng++; - wake_up_interruptible(&port->delta_msr_wait); + wake_up_interruptible(&priv->delta_msr_wait); } } diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 4a8b685..f2530d2 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -74,6 +74,7 @@ struct ti_port { int tp_flags; int tp_closing_wait;/* in .01 secs */ struct async_icount tp_icount; + wait_queue_head_t tp_msr_wait; /* wait for msr change */ wait_queue_head_t tp_write_wait; struct ti_device *tp_tdev; struct usb_serial_port *tp_port; @@ -431,6 +432,7 @@ static int ti_port_probe(struct usb_serial_port *port) else tport->tp_uart_base_addr = TI_UART2_BASE_ADDR; tport->tp_closing_wait = closing_wait; + init_waitqueue_head(&tport->tp_msr_wait); init_waitqueue_head(&tport->tp_write_wait); if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) { kfree(tport); @@ -782,13 +784,9 @@ static int ti_ioctl(struct tty_struct *tty, dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); cprev = tport->tp_icount; while (1) { - interruptible_sleep_on(&port->delta_msr_wait); + interruptible_sleep_on(&tport->tp_msr_wait); if (signal_pending(current)) return -ERESTARTSYS; - - if (port->serial->disconnected) - return -EIO; - cnow = tport->tp_icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) @@ -1402,7 +1400,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr) icount->dcd++; if (msr & TI_MSR_DELTA_RI) icount->rng++; - wake_up_interruptible(&tport->tp_port->delta_msr_wait); + wake_up_interruptible(&tport->tp_msr_wait); spin_unlock_irqrestore(&tport->tp_lock, flags); 
} diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index dec95e8..64bda13 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -151,7 +151,6 @@ static void destroy_serial(struct kref *kref) } } - usb_put_intf(serial->interface); usb_put_dev(serial->dev); kfree(serial); } @@ -615,7 +614,7 @@ static struct usb_serial *create_serial(struct usb_device *dev, } serial->dev = usb_get_dev(dev); serial->type = driver; - serial->interface = usb_get_intf(interface); + serial->interface = interface; kref_init(&serial->kref); mutex_init(&serial->disc_mutex); serial->minor = SERIAL_TTY_NO_MINOR; @@ -689,20 +688,10 @@ static int serial_carrier_raised(struct tty_port *port) static void serial_dtr_rts(struct tty_port *port, int on) { struct usb_serial_port *p = container_of(port, struct usb_serial_port, port); - struct usb_serial *serial = p->serial; - struct usb_serial_driver *drv = serial->type; + struct usb_serial_driver *drv = p->serial->type; - if (!drv->dtr_rts) - return; - /* - * Work-around bug in the tty-layer which can result in dtr_rts - * being called after a disconnect (and tty_unregister_device - * has returned). Remove once bug has been squashed. - */ - mutex_lock(&serial->disc_mutex); - if (!serial->disconnected) + if (drv->dtr_rts) drv->dtr_rts(p, on); - mutex_unlock(&serial->disc_mutex); } static const struct tty_port_operations serial_port_ops = { @@ -897,7 +886,6 @@ static int usb_serial_probe(struct usb_interface *interface, port->port.ops = &serial_port_ops; port->serial = serial; spin_lock_init(&port->lock); - init_waitqueue_head(&port->delta_msr_wait); /* Keep this for private driver use for the moment but should probably go away */ INIT_WORK(&port->work, usb_serial_port_work); diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 1355a6c..01c94aa 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -38,6 +38,7 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) { + struct usb_serial *serial = port->serial; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; @@ -47,11 +48,12 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) return; portdata = usb_get_serial_port_data(port); - /* FIXME: locking */ + mutex_lock(&serial->disc_mutex); portdata->rts_state = on; portdata->dtr_state = on; - - intfdata->send_setup(port); + if (serial->dev) + intfdata->send_setup(port); + mutex_unlock(&serial->disc_mutex); } EXPORT_SYMBOL(usb_wwan_dtr_rts); diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c index d944088..070b5c0 100644 --- a/drivers/usb/storage/cypress_atacb.c +++ b/drivers/usb/storage/cypress_atacb.c @@ -248,26 +248,14 @@ static int cypress_probe(struct usb_interface *intf, { struct us_data *us; int result; - struct usb_device *device; result = usb_stor_probe1(&us, intf, id, (id - cypress_usb_ids) + cypress_unusual_dev_list); if (result) return result; - /* Among CY7C68300 chips, the A revision does not support Cypress ATACB - * Filter out this revision from EEPROM default descriptor values - */ - device = interface_to_usbdev(intf); - if (device->descriptor.iManufacturer != 0x38 || - device->descriptor.iProduct != 0x4e || - device->descriptor.iSerialNumber != 0x64) { - us->protocol_name = "Transparent SCSI with Cypress ATACB"; - us->proto_handler = cypress_atacb_passthrough; - } else { - us->protocol_name = "Transparent SCSI"; - us->proto_handler = usb_stor_transparent_scsi_command; - } 
+        us->protocol_name = "Transparent SCSI with Cypress ATACB";
+        us->proto_handler = cypress_atacb_passthrough;

         result = usb_stor_probe2(us);
         return result;
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 105d900..16b0bf0 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
         return 0;
 }

-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us)
+/* This places the HUAWEI usb dongles in multi-port mode */
+static int usb_stor_huawei_feature_init(struct us_data *us)
 {
         int result;
@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
         US_DEBUGP("Huawei mode set result is %d\n", result);
         return 0;
 }
+
+/*
+ * Send a SCSI switch command called 'rewind' to the Huawei dongle.
+ * The first time the dongle receives this command it reboots
+ * immediately; after rebooting it ignores the command, so there is
+ * no need to read its response.
+ */
+static int usb_stor_huawei_scsi_init(struct us_data *us)
+{
+        int result = 0;
+        int act_len = 0;
+        struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
+        char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
+                        0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+        bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+        bcbw->Tag = 0;
+        bcbw->DataTransferLength = 0;
+        bcbw->Flags = bcbw->Lun = 0;
+        bcbw->Length = sizeof(rewind_cmd);
+        memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
+        memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
+
+        result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
+                        US_BULK_CB_WRAP_LEN, &act_len);
+        US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
+        return result;
+}
+
+/*
+ * Check whether this is a supported Huawei USB dongle. Huawei
+ * assigns the product IDs below to all of its mobile broadband
+ * dongles, including future ones, so a product ID outside this
+ * list does not belong to a Huawei mobile broadband dongle.
+ */
+static int usb_stor_huawei_dongles_pid(struct us_data *us)
+{
+        struct usb_interface_descriptor *idesc;
+        int idProduct;
+
+        idesc = &us->pusb_intf->cur_altsetting->desc;
+        idProduct = us->pusb_dev->descriptor.idProduct;
+        /* If the first port is a CDROM, the dongle is in single-port
+         * mode and a switch command must be sent.
*/ + if (idesc && idesc->bInterfaceNumber == 0) { + if ((idProduct == 0x1001) + || (idProduct == 0x1003) + || (idProduct == 0x1004) + || (idProduct >= 0x1401 && idProduct <= 0x1500) + || (idProduct >= 0x1505 && idProduct <= 0x1600) + || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { + return 1; + } + } + return 0; +} + +int usb_stor_huawei_init(struct us_data *us) +{ + int result = 0; + + if (usb_stor_huawei_dongles_pid(us)) { + if (us->pusb_dev->descriptor.idProduct >= 0x1446) + result = usb_stor_huawei_scsi_init(us); + else + result = usb_stor_huawei_feature_init(us); + } + return result; +} diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h index 529327f..5376d4f 100644 --- a/drivers/usb/storage/initializers.h +++ b/drivers/usb/storage/initializers.h @@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us); * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us); -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us); +/* This places the HUAWEI usb dongles in multi-port mode */ +int usb_stor_huawei_init(struct us_data *us); diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h index 65a6a75..2c85530 100644 --- a/drivers/usb/storage/unusual_cypress.h +++ b/drivers/usb/storage/unusual_cypress.h @@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, "Cypress ISD-300LP", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), -UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999, "Super Top", "USB 2.0 SATA BRIDGE", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index b75e90b..72923b5 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -488,13 +488,6 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), -/* Added by Dmitry Artamonow */ -UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999, - "Samsung", - "YP-Z3", - USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_MAX_SECTORS_64), - /* Entry and supporting patch by Theodore Kilgore . * Device uses standards-violating 32-byte Bulk Command Block Wrappers and * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. 
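For reference, the "rewind" bytes that usb_stor_huawei_scsi_init() copies into the CDB travel inside an ordinary 31-byte Bulk-Only Transport command block wrapper. The sketch below is illustrative only and separate from the patch: the struct and function names are invented for the example, while the field layout follows the USB mass-storage BOT spec (signature 0x43425355, "USBC"):

    #include <stdint.h>
    #include <string.h>

    struct bot_cbw {                    /* 31 bytes on the wire */
            uint32_t signature;         /* 0x43425355 ("USBC"), little-endian */
            uint32_t tag;               /* echoed back in the status wrapper */
            uint32_t data_transfer_len; /* 0: "rewind" has no data stage */
            uint8_t  flags;             /* direction bit; 0 = host-to-device */
            uint8_t  lun;
            uint8_t  cb_length;         /* number of valid bytes in cb[] */
            uint8_t  cb[16];            /* the vendor-specific CDB */
    } __attribute__((packed));

    static void build_rewind_cbw(struct bot_cbw *cbw)
    {
            static const uint8_t rewind_cmd[16] = {
                    0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
                    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            };

            memset(cbw, 0, sizeof(*cbw));
            cbw->signature = 0x43425355;  /* assumes a little-endian host */
            cbw->cb_length = sizeof(rewind_cmd);
            memcpy(cbw->cb, rewind_cmd, sizeof(rewind_cmd));
    }

Because the dongle reboots as soon as it accepts the command, the driver never waits for the command status wrapper.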
@@ -1534,335 +1527,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100, /* Reported by fangxiaozhi * This brings the HUAWEI data card devices into multi-port mode */ -UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, 
usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, 
usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, +UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, "HUAWEI MOBILE", "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, 0), /* Reported by Vilius Bilinkevicius = max || hdr.start + hdr.count > max) + hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) return -EINVAL; data = memdup_user((void __user *)(arg + minsz), diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ec6fb3f..959b1cd 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -339,8 +339,7 @@ static void handle_tx(struct vhost_net *net) msg.msg_controllen = 0; ubufs = NULL; } else { - struct ubuf_info *ubuf; - ubuf = vq->ubuf_info + vq->upend_idx; + struct ubuf_info *ubuf = &vq->ubuf_info[head]; vq->heads[vq->upend_idx].len = VHOST_DMA_IN_PROGRESS; 
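The vfio_pci hunk above reverts the irq-set validation to a single comparison against the interrupt count. The check being removed was two-sided because hdr.start + hdr.count can wrap around an unsigned integer and slip past a naive "sum > max" test. A minimal sketch of the overflow-safe shape, with hypothetical names:

    /* Illustrative only: validate the window [start, start + count)
     * against max without risking unsigned wrap-around. */
    static int range_ok(unsigned int start, unsigned int count,
                        unsigned int max)
    {
            /* "start + count > max" alone can overflow and pass;
             * compare count against the room that is left instead. */
            if (start >= max || count > max - start)
                    return 0;
            return 1;
    }

Since start < max is established first, max - start cannot underflow, so both operands of the second comparison are well defined.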
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 025428e..12cf5f3 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -422,22 +422,17 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var, = var->bits_per_pixel; break; case 16: - /* Older SOCs use IBGR:555 rather than BGR:565. */ - if (sinfo->have_intensity_bit) - var->green.length = 5; - else - var->green.length = 6; - if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { - /* RGB:5X5 mode */ - var->red.offset = var->green.length + 5; + /* RGB:565 mode */ + var->red.offset = 11; var->blue.offset = 0; } else { - /* BGR:5X5 mode */ + /* BGR:565 mode */ var->red.offset = 0; - var->blue.offset = var->green.length + 5; + var->blue.offset = 11; } var->green.offset = 5; + var->green.length = 6; var->red.length = var->blue.length = 5; break; case 32: @@ -684,7 +679,8 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red, case FB_VISUAL_PSEUDOCOLOR: if (regno < 256) { - if (sinfo->have_intensity_bit) { + if (cpu_is_at91sam9261() || cpu_is_at91sam9263() + || cpu_is_at91sam9rl()) { /* old style I+BGR:555 */ val = ((red >> 11) & 0x001f); val |= ((green >> 6) & 0x03e0); @@ -874,10 +870,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) } sinfo->info = info; sinfo->pdev = pdev; - if (cpu_is_at91sam9261() || cpu_is_at91sam9263() || - cpu_is_at91sam9rl()) { - sinfo->have_intensity_bit = true; - } strcpy(info->fix.id, sinfo->pdev->name); info->flags = ATMEL_LCDFB_FBINFO_DEFAULT; diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index a77c9ca..6bb72c0 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c @@ -783,7 +783,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message) static int adp8860_i2c_resume(struct i2c_client *client) { - adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN); + adp8860_set_bits(client, ADP8860_MDCR, NSTBY); return 0; } diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 712c25a..63c882b 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c @@ -957,7 +957,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message) static int adp8870_i2c_resume(struct i2c_client *client) { - adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN); + adp8870_set_bits(client, ADP8870_MDCR, NSTBY); return 0; } diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 6e696e6..fdefa8f 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -529,33 +529,6 @@ static int search_for_mapped_con(void) return retval; } -static int do_fbcon_takeover(int show_logo) -{ - int err, i; - - if (!num_registered_fb) - return -ENODEV; - - if (!show_logo) - logo_shown = FBCON_LOGO_DONTSHOW; - - for (i = first_fb_vc; i <= last_fb_vc; i++) - con2fb_map[i] = info_idx; - - err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc, - fbcon_is_default); - - if (err) { - for (i = first_fb_vc; i <= last_fb_vc; i++) - con2fb_map[i] = -1; - info_idx = -1; - } else { - fbcon_has_console_bind = 1; - } - - return err; -} - static int fbcon_takeover(int show_logo) { int err, i; @@ -842,8 +815,6 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, * * Maps a virtual console @unit to a frame buffer device * @newidx. - * - * This should be called with the console lock held. 
*/ static int set_con2fb_map(int unit, int newidx, int user) { @@ -861,7 +832,7 @@ static int set_con2fb_map(int unit, int newidx, int user) if (!search_for_mapped_con() || !con_is_bound(&fb_con)) { info_idx = newidx; - return do_fbcon_takeover(0); + return fbcon_takeover(0); } if (oldidx != -1) @@ -869,6 +840,7 @@ static int set_con2fb_map(int unit, int newidx, int user) found = search_fb_in_map(newidx); + console_lock(); con2fb_map[unit] = newidx; if (!err && !found) err = con2fb_acquire_newinfo(vc, info, unit, oldidx); @@ -895,6 +867,7 @@ static int set_con2fb_map(int unit, int newidx, int user) if (!search_fb_in_map(info_idx)) info_idx = newidx; + console_unlock(); return err; } @@ -1017,7 +990,7 @@ static const char *fbcon_startup(void) } /* Setup default font */ - if (!p->fontdata && !vc->vc_font.data) { + if (!p->fontdata) { if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, info->var.yres, @@ -1027,8 +1000,6 @@ static const char *fbcon_startup(void) vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */ - } else { - p->fontdata = vc->vc_font.data; } cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); @@ -1188,9 +1159,9 @@ static void fbcon_init(struct vc_data *vc, int init) ops->p = &fb_display[fg_console]; } -static void fbcon_free_font(struct display *p, bool freefont) +static void fbcon_free_font(struct display *p) { - if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) + if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int)); p->fontdata = NULL; p->userfont = 0; @@ -1202,8 +1173,8 @@ static void fbcon_deinit(struct vc_data *vc) struct fb_info *info; struct fbcon_ops *ops; int idx; - bool free_font = true; + fbcon_free_font(p); idx = con2fb_map[vc->vc_num]; if (idx == -1) @@ -1214,8 +1185,6 @@ static void fbcon_deinit(struct vc_data *vc) if (!info) goto finished; - if (info->flags & FBINFO_MISC_FIRMWARE) - free_font = false; ops = info->fbcon_par; if (!ops) @@ -1227,10 +1196,6 @@ static void fbcon_deinit(struct vc_data *vc) ops->flags &= ~FBCON_FLAGS_INIT; finished: - fbcon_free_font(p, free_font); - if (free_font) - vc->vc_font.data = NULL; - if (!con_is_bound(&fb_con)) fbcon_exit(); @@ -3012,7 +2977,7 @@ static int fbcon_unbind(void) { int ret; - ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, + ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); if (!ret) @@ -3027,7 +2992,6 @@ static inline int fbcon_unbind(void) } #endif /* CONFIG_VT_HW_CONSOLE_BINDING */ -/* called with console_lock held */ static int fbcon_fb_unbind(int idx) { int i, new_idx = -1, ret = 0; @@ -3054,7 +3018,6 @@ static int fbcon_fb_unbind(int idx) return ret; } -/* called with console_lock held */ static int fbcon_fb_unregistered(struct fb_info *info) { int i, idx; @@ -3087,12 +3050,11 @@ static int fbcon_fb_unregistered(struct fb_info *info) primary_device = -1; if (!num_registered_fb) - do_unregister_con_driver(&fb_con); + unregister_con_driver(&fb_con); return 0; } -/* called with console_lock held */ static void fbcon_remap_all(int idx) { int i; @@ -3137,7 +3099,6 @@ static inline void fbcon_select_primary(struct fb_info *info) } #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */ -/* called with console_lock held */ static int fbcon_fb_registered(struct fb_info *info) { int ret = 0, i, idx; @@ -3154,7 +3115,7 @@ static int 
fbcon_fb_registered(struct fb_info *info) } if (info_idx != -1) - ret = do_fbcon_takeover(1); + ret = fbcon_takeover(1); } else { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map_boot[i] == idx) @@ -3290,7 +3251,6 @@ static int fbcon_event_notify(struct notifier_block *self, ret = fbcon_fb_unregistered(info); break; case FB_EVENT_SET_CONSOLE_MAP: - /* called with console lock held */ con2fb = event->data; ret = set_con2fb_map(con2fb->console - 1, con2fb->framebuffer, 1); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 5855d17..d449a74 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) unsigned short video_port_status = vga_video_port_reg + 6; int font_select = 0x00, beg, i; char *charmap; - bool clear_attribs = false; + if (vga_video_type != VIDEO_TYPE_EGAM) { charmap = (char *) VGA_MAP_MEM(colourmap, 0); beg = 0x0e; @@ -1169,6 +1169,12 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) /* if 512 char mode is already enabled don't re-enable it. */ if ((set) && (ch512 != vga_512_chars)) { + /* attribute controller */ + for (i = 0; i < MAX_NR_CONSOLES; i++) { + struct vc_data *c = vc_cons[i].d; + if (c && c->vc_sw == &vga_con) + c->vc_hi_font_mask = ch512 ? 0x0800 : 0; + } vga_512_chars = ch512; /* 256-char: enable intensity bit 512-char: disable intensity bit */ @@ -1179,22 +1185,8 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) it means, but it works, and it appears necessary */ inb_p(video_port_status); vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); - clear_attribs = true; } raw_spin_unlock_irq(&vga_lock); - - if (clear_attribs) { - for (i = 0; i < MAX_NR_CONSOLES; i++) { - struct vc_data *c = vc_cons[i].d; - if (c && c->vc_sw == &vga_con) { - /* force hi font mask to 0, so we always clear - the bit on either transition */ - c->vc_hi_font_mask = 0x00; - clear_buffer_attributes(c); - c->vc_hi_font_mask = ch512 ? 0x0800 : 0; - } - } - } return 0; } diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c index e06cd5d..3f2519d 100644 --- a/drivers/video/ep93xx-fb.c +++ b/drivers/video/ep93xx-fb.c @@ -23,7 +23,6 @@ #include #include #include -#include #include diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 0a49456..3ff0105 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1177,10 +1177,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, event.data = &con2fb; if (!lock_fb_info(info)) return -ENODEV; - console_lock(); event.info = info; ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event); - console_unlock(); unlock_fb_info(info); break; case FBIOBLANK: @@ -1373,12 +1371,15 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) { struct fb_info *info = file_fb_info(file); struct fb_ops *fb; - unsigned long mmio_pgoff; + unsigned long off; unsigned long start; u32 len; if (!info) return -ENODEV; + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + off = vma->vm_pgoff << PAGE_SHIFT; fb = info->fbops; if (!fb) return -ENODEV; @@ -1390,24 +1391,32 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) return res; } - /* - * Ugh. This can be either the frame buffer mapping, or - * if pgoff points past it, the mmio mapping. 
- */ + /* frame buffer memory */ start = info->fix.smem_start; - len = info->fix.smem_len; - mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; - if (vma->vm_pgoff >= mmio_pgoff) { - vma->vm_pgoff -= mmio_pgoff; + len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); + if (off >= len) { + /* memory mapped io */ + off -= len; + if (info->var.accel_flags) { + mutex_unlock(&info->mm_lock); + return -EINVAL; + } start = info->fix.mmio_start; - len = info->fix.mmio_len; + len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); } mutex_unlock(&info->mm_lock); - + start &= PAGE_MASK; + if ((vma->vm_end - vma->vm_start + off) > len) + return -EINVAL; + off += start; + vma->vm_pgoff = off >> PAGE_SHIFT; + /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - fb_pgprotect(file, vma, start); - - return vm_iomap_memory(vma, start, len); + fb_pgprotect(file, vma, off); + if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + return -EAGAIN; + return 0; } static int @@ -1641,9 +1650,7 @@ static int do_register_framebuffer(struct fb_info *fb_info) event.info = fb_info; if (!lock_fb_info(fb_info)) return -ENODEV; - console_lock(); fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); - console_unlock(); unlock_fb_info(fb_info); return 0; } @@ -1659,10 +1666,8 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) if (!lock_fb_info(fb_info)) return -ENODEV; - console_lock(); event.info = fb_info; ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); - console_unlock(); unlock_fb_info(fb_info); if (ret) @@ -1677,9 +1682,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) num_registered_fb--; fb_cleanup_device(fb_info); event.info = fb_info; - console_lock(); fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); - console_unlock(); /* this may free fb info */ put_fb_info(fb_info); @@ -1850,8 +1853,11 @@ int fb_new_modelist(struct fb_info *info) err = 1; if (!list_empty(&info->modelist)) { + if (!lock_fb_info(info)) + return -ENODEV; event.info = info; err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); + unlock_fb_info(info); } return err; diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c index ef476b0..a55e366 100644 --- a/drivers/video/fbsysfs.c +++ b/drivers/video/fbsysfs.c @@ -177,8 +177,6 @@ static ssize_t store_modes(struct device *device, if (i * sizeof(struct fb_videomode) != count) return -EINVAL; - if (!lock_fb_info(fb_info)) - return -ENODEV; console_lock(); list_splice(&fb_info->modelist, &old_list); fb_videomode_to_modelist((const struct fb_videomode *)buf, i, @@ -190,7 +188,6 @@ static ssize_t store_modes(struct device *device, fb_destroy_modelist(&old_list); console_unlock(); - unlock_fb_info(fb_info); return 0; } diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index 41fbd94..19cfd7a 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c @@ -944,7 +944,7 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel) #define PF_COMP_0_MASK 0x0000000F #define PF_COMP_0_SHIFT 0 -#define MAKE_PF(alpha, red, green, blue, size, c0, c1, c2, c3) \ +#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \ cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \ (blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \ (red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \ @@ -954,10 +954,10 @@ static u32 
fsl_diu_get_pixel_format(unsigned int bits_per_pixel) switch (bits_per_pixel) { case 32: /* 0x88883316 */ - return MAKE_PF(3, 2, 1, 0, 3, 8, 8, 8, 8); + return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8); case 24: /* 0x88082219 */ - return MAKE_PF(4, 0, 1, 2, 2, 8, 8, 8, 0); + return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8); case 16: /* 0x65053118 */ return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0); @@ -1232,16 +1232,6 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd, return 0; } -static inline void fsl_diu_enable_interrupts(struct fsl_diu_data *data) -{ - u32 int_mask = INT_UNDRUN; /* enable underrun detection */ - - if (IS_ENABLED(CONFIG_NOT_COHERENT_CACHE)) - int_mask |= INT_VSYNC; /* enable vertical sync */ - - clrbits32(&data->diu_reg->int_mask, int_mask); -} - /* turn on fb if count == 1 */ static int fsl_diu_open(struct fb_info *info, int user) @@ -1261,7 +1251,19 @@ static int fsl_diu_open(struct fb_info *info, int user) if (res < 0) mfbi->count--; else { - fsl_diu_enable_interrupts(mfbi->parent); + struct fsl_diu_data *data = mfbi->parent; + +#ifdef CONFIG_NOT_COHERENT_CACHE + /* + * Enable underrun detection and vertical sync + * interrupts. + */ + clrbits32(&data->diu_reg->int_mask, + INT_UNDRUN | INT_VSYNC); +#else + /* Enable underrun detection */ + clrbits32(&data->diu_reg->int_mask, INT_UNDRUN); +#endif fsl_diu_enable_panel(info); } } @@ -1281,18 +1283,9 @@ static int fsl_diu_release(struct fb_info *info, int user) mfbi->count--; if (mfbi->count == 0) { struct fsl_diu_data *data = mfbi->parent; - bool disable = true; - int i; - /* Disable interrupts only if all AOIs are closed */ - for (i = 0; i < NUM_AOIS; i++) { - struct mfb_info *mi = data->fsl_diu_info[i].par; - - if (mi->count) - disable = false; - } - if (disable) - out_be32(&data->diu_reg->int_mask, 0xffffffff); + /* Disable interrupts */ + out_be32(&data->diu_reg->int_mask, 0xffffffff); fsl_diu_disable_panel(info); } @@ -1621,6 +1614,14 @@ static int fsl_diu_probe(struct platform_device *pdev) out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr); out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr); + for (i = 0; i < NUM_AOIS; i++) { + ret = install_fb(&data->fsl_diu_info[i]); + if (ret) { + dev_err(&pdev->dev, "could not register fb %d\n", i); + goto error; + } + } + /* * Older versions of U-Boot leave interrupts enabled, so disable * all of them and clear the status register. 
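The fsl_diu_release() hunk above reverts to masking all DIU interrupts as soon as one frame's open count drops to zero. The logic being removed scanned every AOI first, which is the usual pattern for a resource shared by several users: tear it down only when the last user is gone. A small sketch of that pattern, with hypothetical types:

    #include <stdbool.h>

    struct frame { int open_count; };  /* hypothetical per-AOI state */

    /* Illustrative only: the shared interrupt source may be masked
     * only when no frame is open any more. */
    static bool any_frame_open(const struct frame *frames, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (frames[i].open_count > 0)
                            return true;  /* keep interrupts enabled */
            return false;                 /* last user gone: safe to mask */
    }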
@@ -1629,21 +1630,12 @@ static int fsl_diu_probe(struct platform_device *pdev) in_be32(&data->diu_reg->int_status); ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb", - data->diu_reg); + &data->diu_reg); if (ret) { dev_err(&pdev->dev, "could not claim irq\n"); goto error; } - for (i = 0; i < NUM_AOIS; i++) { - ret = install_fb(&data->fsl_diu_info[i]); - if (ret) { - dev_err(&pdev->dev, "could not register fb %d\n", i); - free_irq(data->irq, data->diu_reg); - goto error; - } - } - sysfs_attr_init(&data->dev_attr.attr); data->dev_attr.attr.name = "monitor"; data->dev_attr.attr.mode = S_IRUGO|S_IWUSR; @@ -1675,7 +1667,7 @@ static int fsl_diu_remove(struct platform_device *pdev) data = dev_get_drvdata(&pdev->dev); disable_lcdc(&data->fsl_diu_info[0]); - free_irq(data->irq, data->diu_reg); + free_irq(data->irq, &data->diu_reg); for (i = 0; i < NUM_AOIS; i++) uninstall_fb(&data->fsl_diu_info[i]); diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c index 012817a..85b363a 100644 --- a/drivers/w1/masters/w1-gpio.c +++ b/drivers/w1/masters/w1-gpio.c @@ -72,7 +72,7 @@ static int w1_gpio_probe_dt(struct platform_device *pdev) return 0; } -static int w1_gpio_probe(struct platform_device *pdev) +static int __init w1_gpio_probe(struct platform_device *pdev) { struct w1_bus_master *master; struct w1_gpio_platform_data *pdata; @@ -158,7 +158,7 @@ static int w1_gpio_probe(struct platform_device *pdev) return err; } -static int w1_gpio_remove(struct platform_device *pdev) +static int __exit w1_gpio_remove(struct platform_device *pdev) { struct w1_bus_master *master = platform_get_drvdata(pdev); struct w1_gpio_platform_data *pdata = pdev->dev.platform_data; @@ -210,7 +210,7 @@ static struct platform_driver w1_gpio_driver = { .of_match_table = of_match_ptr(w1_gpio_dt_ids), }, .probe = w1_gpio_probe, - .remove = w1_gpio_remove, + .remove = __exit_p(w1_gpio_remove), .suspend = w1_gpio_suspend, .resume = w1_gpio_resume, }; diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 7ce277d..7994d933 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -924,8 +924,7 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb tmp64 = (triplet_ret >> 2); rn |= (tmp64 << i); - /* ensure we're called from kthread and not by netlink callback */ - if (!dev->priv && kthread_should_stop()) { + if (kthread_should_stop()) { mutex_unlock(&dev->bus_mutex); dev_dbg(&dev->dev, "Abort w1_search\n"); return; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 19fa73a..7f809fd 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -79,7 +79,6 @@ config DA9052_WATCHDOG config DA9055_WATCHDOG tristate "Dialog Semiconductor DA9055 Watchdog" depends on MFD_DA9055 - select WATCHDOG_CORE help If you say yes here you get support for watchdog on the Dialog Semiconductor DA9055 PMIC. 
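The w1-gpio hunk above restores the __init annotation on the probe routine and wraps remove in __exit_p(). That is only safe when the device is guaranteed to bind during boot: __init code is discarded once the kernel finishes initialising, so a later bind (deferred probe, or an explicit bind through sysfs) would jump into freed memory. A sketch of the resident form that the reverted code had moved to, with the bodies elided:

    #include <linux/platform_device.h>

    /* Illustrative only: probe/remove stay resident so that late
     * binding after boot remains safe. */
    static int w1_gpio_probe(struct platform_device *pdev)
    {
            /* ... claim the GPIO and register the bus master ... */
            return 0;
    }

    static int w1_gpio_remove(struct platform_device *pdev)
    {
            /* ... unregister and release the GPIO ... */
            return 0;
    }

    static struct platform_driver w1_gpio_driver = {
            .driver = { .name = "w1-gpio" },
            .probe  = w1_gpio_probe,   /* not __init: may run after boot */
            .remove = w1_gpio_remove,  /* not __exit_p(): always linked */
    };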
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 0e9d8c4..2b0e000 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -40,12 +40,13 @@
 #include "sp5100_tco.h"

 /* Module and version information */
-#define TCO_VERSION "0.05"
+#define TCO_VERSION "0.03"
 #define TCO_MODULE_NAME "SP5100 TCO timer"
 #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION

 /* internal variables */
 static u32 tcobase_phys;
+static u32 resbase_phys;
 static u32 tco_wdt_fired;
 static void __iomem *tcobase;
 static unsigned int pm_iobase;
@@ -53,6 +54,10 @@ static DEFINE_SPINLOCK(tco_lock);        /* Guards the hardware */
 static unsigned long timer_alive;
 static char tco_expect_close;
 static struct pci_dev *sp5100_tco_pci;
+static struct resource wdt_res = {
+        .name = "Watchdog Timer",
+        .flags = IORESOURCE_MEM,
+};

 /* the watchdog platform device */
 static struct platform_device *sp5100_tco_platform_device;
@@ -70,6 +75,12 @@ module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
                 " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

+static unsigned int force_addr;
+module_param(force_addr, uint, 0);
+MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address."
+                " ONLY USE THIS PARAMETER IF YOU REALLY KNOW"
+                " WHAT YOU ARE DOING (default=none)");
+
 /*
  * Some TCO specific functions
  */
@@ -165,6 +176,39 @@ static void tco_timer_enable(void)
         }
 }

+static void tco_timer_disable(void)
+{
+        int val;
+
+        if (sp5100_tco_pci->revision >= 0x40) {
+                /* For SB800 or later */
+                /* Enable the watchdog decode bit and disable the timer */
+                outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
+                val = inb(SB800_IO_PM_DATA_REG);
+                val |= SB800_PCI_WATCHDOG_DECODE_EN;
+                val |= SB800_PM_WATCHDOG_DISABLE;
+                outb(val, SB800_IO_PM_DATA_REG);
+        } else {
+                /* For SP5100 or SB7x0 */
+                /* Enable the watchdog decode bit */
+                pci_read_config_dword(sp5100_tco_pci,
+                                      SP5100_PCI_WATCHDOG_MISC_REG,
+                                      &val);
+
+                val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+                pci_write_config_dword(sp5100_tco_pci,
+                                       SP5100_PCI_WATCHDOG_MISC_REG,
+                                       val);
+
+                /* Disable the watchdog timer */
+                outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+                val = inb(SP5100_IO_PM_DATA_REG);
+                val |= SP5100_PM_WATCHDOG_DISABLE;
+                outb(val, SP5100_IO_PM_DATA_REG);
+        }
+}
+
 /*
  * /dev/watchdog handling
  */
@@ -415,8 +459,74 @@ static unsigned char sp5100_tco_setupdevice(void)
         } else
                 pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val);

-        pr_notice("failed to find MMIO address, giving up.\n");
-        goto unreg_region;
+        /*
+         * As a last resort, re-program the watchdog timer MMIO address.
+         * Make sure the watchdog timer is disabled before re-programming.
+         */
+        tco_timer_disable();
+
+        if (force_addr) {
+                /*
+                 * Force the use of the given watchdog timer MMIO
+                 * address, aligned to an 8-byte boundary.
+                 */
+                force_addr &= ~0x7;
+                val = force_addr;
+
+                pr_info("Force the use of 0x%04x as MMIO address\n", val);
+        } else {
+                /*
+                 * Get an empty slot in the resource tree for the
+                 * watchdog timer.
+                 */
+                if (allocate_resource(&iomem_resource,
+                                      &wdt_res,
+                                      SP5100_WDT_MEM_MAP_SIZE,
+                                      0xf0000000,
+                                      0xfffffff8,
+                                      0x8,
+                                      NULL,
+                                      NULL)) {
+                        pr_err("MMIO allocation failed\n");
+                        goto unreg_region;
+                }
+
+                val = resbase_phys = wdt_res.start;
+                pr_debug("Got 0x%04x from resource tree\n", val);
+        }
+
+        /* Preserve the low three bits if the chipset is SB8x0 (or later) */
+        if (sp5100_tco_pci->revision >= 0x40) {
+                u8 reserved_bit;
+                reserved_bit = inb(base_addr) & 0x7;
+                val |= (u32)reserved_bit;
+        }
+
+        /* Re-program the watchdog timer base address */
+        outb(base_addr+0, index_reg);
+        /* Low three bits of BASE are reserved */
+        outb((val >> 0) & 0xf8, data_reg);
+        outb(base_addr+1, index_reg);
+        outb((val >> 8) & 0xff, data_reg);
+        outb(base_addr+2, index_reg);
+        outb((val >> 16) & 0xff, data_reg);
+        outb(base_addr+3, index_reg);
+        outb((val >> 24) & 0xff, data_reg);
+
+        /*
+         * Clear the unneeded low three bits if the chipset is SB8x0
+         * (or later)
+         */
+        if (sp5100_tco_pci->revision >= 0x40)
+                val &= ~0x7;
+
+        if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
+                                          dev_name)) {
+                pr_err("MMIO address 0x%04x already in use\n", val);
+                goto unreg_resource;
+        }

 setup_wdt:
         tcobase_phys = val;
@@ -456,6 +566,9 @@ setup_wdt:

 unreg_mem_region:
         release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+unreg_resource:
+        if (resbase_phys)
+                release_resource(&wdt_res);
 unreg_region:
         release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 exit:
@@ -465,6 +578,7 @@ exit:
 static int sp5100_tco_init(struct platform_device *dev)
 {
         int ret;
+        char addr_str[16];

         /*
          * Check whether or not the hardware watchdog is there. If found, then
@@ -496,14 +610,23 @@ static int sp5100_tco_init(struct platform_device *dev)
         clear_bit(0, &timer_alive);

         /* Show module parameters */
-        pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
-                tcobase, heartbeat, nowayout);
+        if (force_addr == tcobase_phys)
+                /* The force_addr is valid */
+                sprintf(addr_str, "0x%04x", force_addr);
+        else
+                strcpy(addr_str, "none");
+
+        pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, "
+                "force_addr=%s)\n",
+                tcobase, heartbeat, nowayout, addr_str);

         return 0;

 exit:
         iounmap(tcobase);
         release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+        if (resbase_phys)
+                release_resource(&wdt_res);
         release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
         return ret;
 }
@@ -518,6 +641,8 @@ static void sp5100_tco_cleanup(void)
         misc_deregister(&sp5100_tco_miscdev);
         iounmap(tcobase);
         release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
+        if (resbase_phys)
+                release_resource(&wdt_res);
         release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 }

diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 2b28c00..71594a0 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -57,7 +57,7 @@
 #define SB800_PM_WATCHDOG_DISABLE      (1 << 2)
 #define SB800_PM_WATCHDOG_SECOND_RES   (3 << 0)
 #define SB800_ACPI_MMIO_DECODE_EN      (1 << 0)
-#define SB800_ACPI_MMIO_SEL            (1 << 1)
+#define SB800_ACPI_MMIO_SEL            (1 << 2)

 #define SB800_PM_WDT_MMIO_OFFSET       0xB00

diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 8aa3867..74d77df 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -388,23 +388,11 @@ static void unmask_evtchn(int port)

         if (unlikely((cpu != cpu_from_evtchn(port))))
                 do_hypercall = 1;
-        else {
-                /*
-                 * Need to clear the mask before checking pending to
-                 * avoid a race with an event becoming pending.
- * - * EVTCHNOP_unmask will only trigger an upcall if the - * mask bit was set, so if a hypercall is needed - * remask the event. - */ - sync_clear_bit(port, &s->evtchn_mask[0]); + else evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]); - if (unlikely(evtchn_pending && xen_hvm_domain())) { - sync_set_bit(port, &s->evtchn_mask[0]); - do_hypercall = 1; - } - } + if (unlikely(evtchn_pending && xen_hvm_domain())) + do_hypercall = 1; /* Slow path (hypercall) if this is a non-local port or if this is * an hvm domain and an event is pending (hvm domains don't have @@ -415,6 +403,8 @@ static void unmask_evtchn(int port) } else { struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); + sync_clear_bit(port, &s->evtchn_mask[0]); + /* * The following is basically the equivalent of * 'hw_resend_irq'. Just like a real IO-APIC we 'lose diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index b2db77e..b1f60a0 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -269,14 +269,6 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) u->name, (void *)(unsigned long)port); if (rc >= 0) rc = evtchn_make_refcounted(port); - else { - /* bind failed, should close the port now */ - struct evtchn_close close; - close.port = port; - if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) - BUG(); - set_port_user(port, NULL); - } return rc; } @@ -285,8 +277,6 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) { int irq = irq_from_evtchn(port); - BUG_ON(irq < 0); - unbind_from_irqhandler(irq, (void *)(unsigned long)port); set_port_user(port, NULL); diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c index b04fb64..0ef7c4d 100644 --- a/drivers/xen/fallback.c +++ b/drivers/xen/fallback.c @@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg) } EXPORT_SYMBOL_GPL(xen_event_channel_op_compat); -int xen_physdev_op_compat(int cmd, void *arg) +int HYPERVISOR_physdev_op_compat(int cmd, void *arg) { struct physdev_op op; int rc; @@ -78,4 +78,3 @@ int xen_physdev_op_compat(int cmd, void *arg) return rc; } -EXPORT_SYMBOL_GPL(xen_physdev_op_compat); diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index a2278ba..9204126 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -17,7 +17,6 @@ #include #include #include -#include #include "pciback.h" #include "conf_space.h" #include "conf_space_quirks.h" @@ -86,52 +85,37 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) static void pcistub_device_release(struct kref *kref) { struct pcistub_device *psdev; - struct pci_dev *dev; struct xen_pcibk_dev_data *dev_data; psdev = container_of(kref, struct pcistub_device, kref); - dev = psdev->dev; - dev_data = pci_get_drvdata(dev); + dev_data = pci_get_drvdata(psdev->dev); - dev_dbg(&dev->dev, "pcistub_device_release\n"); + dev_dbg(&psdev->dev->dev, "pcistub_device_release\n"); - xen_unregister_device_domain_owner(dev); + xen_unregister_device_domain_owner(psdev->dev); /* Call the reset function which does not take lock as this * is called from "unbind" which takes a device_lock mutex. 
*/ - __pci_reset_function_locked(dev); - if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state)) - dev_dbg(&dev->dev, "Could not reload PCI state\n"); - else - pci_restore_state(dev); - - if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { - struct physdev_pci_device ppdev = { - .seg = pci_domain_nr(dev->bus), - .bus = dev->bus->number, - .devfn = dev->devfn - }; - int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix, - &ppdev); - - if (err) - dev_warn(&dev->dev, "MSI-X release failed (%d)\n", - err); - } + __pci_reset_function_locked(psdev->dev); + if (pci_load_and_free_saved_state(psdev->dev, + &dev_data->pci_saved_state)) { + dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n"); + } else + pci_restore_state(psdev->dev); /* Disable the device */ - xen_pcibk_reset_device(dev); + xen_pcibk_reset_device(psdev->dev); kfree(dev_data); - pci_set_drvdata(dev, NULL); + pci_set_drvdata(psdev->dev, NULL); /* Clean-up the device */ - xen_pcibk_config_free_dyn_fields(dev); - xen_pcibk_config_free_dev(dev); + xen_pcibk_config_free_dyn_fields(psdev->dev); + xen_pcibk_config_free_dev(psdev->dev); - dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; - pci_dev_put(dev); + psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; + pci_dev_put(psdev->dev); kfree(psdev); } @@ -371,19 +355,6 @@ static int pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; - if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { - struct physdev_pci_device ppdev = { - .seg = pci_domain_nr(dev->bus), - .bus = dev->bus->number, - .devfn = dev->devfn - }; - - err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev); - if (err) - dev_err(&dev->dev, "MSI-X preparation failed (%d)\n", - err); - } - /* We need the device active to save the state. */ dev_dbg(&dev->dev, "save state of device\n"); pci_save_state(dev); diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index b98cf0c..37c1f82 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -113,8 +113,7 @@ void xen_pcibk_reset_device(struct pci_dev *dev) if (dev->msi_enabled) pci_disable_msi(dev); #endif - if (pci_is_enabled(dev)) - pci_disable_device(dev); + pci_disable_device(dev); pci_write_config_word(dev, PCI_COMMAND, 0); diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 61786be..bcf3ba4 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -30,7 +30,6 @@ * IN THE SOFTWARE. 
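The xen_pcibk_reset_device() hunk above also drops the pci_is_enabled() guard around pci_disable_device(). Since pci_enable_device()/pci_disable_device() maintain an enable count, a teardown path that may run whether or not the device was ever enabled sometimes checks the count first; a sketch of the guarded form, illustrative only:

    #include <linux/pci.h>

    static void example_reset(struct pci_dev *dev)
    {
            /* only drop the enable count if it was ever raised */
            if (pci_is_enabled(dev))
                    pci_disable_device(dev);

            /* turn off I/O and memory decode as well as bus mastering */
            pci_write_config_word(dev, PCI_COMMAND, 0);
    }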
*/ -#include #include #include #include diff --git a/fs/aio.c b/fs/aio.c index ed762ae..71f613c 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1027,9 +1027,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent) spin_unlock(&info->ring_lock); out: + kunmap_atomic(ring); dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, (unsigned long)ring->head, (unsigned long)ring->tail); - kunmap_atomic(ring); return ret; } diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index b0f12d7..b785e77 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -34,7 +34,6 @@ #include #include #include -#include #include #include diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index b2f1903..01443ce 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -61,6 +61,15 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry) /* This is an autofs submount, we can't expire it */ if (autofs_type_indirect(sbi->type)) goto done; + + /* + * Otherwise it's an offset mount and we need to check + * if we can umount its mount, if there is one. + */ + if (!d_mountpoint(path.dentry)) { + status = 0; + goto done; + } } /* Update the expiry counter if fs is busy */ @@ -157,7 +166,7 @@ again: parent = p->d_parent; if (!spin_trylock(&parent->d_lock)) { spin_unlock(&p->d_lock); - cpu_chill(); + cpu_relax(); goto relock; } spin_unlock(&p->d_lock); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 5843a47..0c42cdb 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1132,7 +1132,6 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, goto whole; if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE)) goto whole; - return 0; } /* Do not dump I/O mapped devices or special mappings */ diff --git a/fs/block_dev.c b/fs/block_dev.c index 883dc49..172f849 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -551,7 +551,6 @@ struct block_device *bdgrab(struct block_device *bdev) ihold(bdev->bd_inode); return bdev; } -EXPORT_SYMBOL(bdgrab); long nr_blockdev_pages(void) { @@ -995,7 +994,6 @@ int revalidate_disk(struct gendisk *disk) mutex_lock(&bdev->bd_mutex); check_disk_size_change(disk, bdev); - bdev->bd_invalidated = 0; mutex_unlock(&bdev->bd_mutex); bdput(bdev); return ret; @@ -1034,9 +1032,7 @@ void bd_set_size(struct block_device *bdev, loff_t size) { unsigned bsize = bdev_logical_block_size(bdev); - mutex_lock(&bdev->bd_inode->i_mutex); - i_size_write(bdev->bd_inode, size); - mutex_unlock(&bdev->bd_inode->i_mutex); + bdev->bd_inode->i_size = size; while (bsize < PAGE_CACHE_SIZE) { if (size & bsize) break; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index ce1c169..eea5da7 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -651,8 +651,6 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, if (tree_mod_dont_log(fs_info, NULL)) return 0; - __tree_mod_log_free_eb(fs_info, old_root); - ret = tree_mod_alloc(fs_info, flags, &tm); if (ret < 0) goto out; @@ -738,7 +736,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) static noinline void tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, struct extent_buffer *src, unsigned long dst_offset, - unsigned long src_offset, int nr_items, int log_removal) + unsigned long src_offset, int nr_items) { int ret; int i; @@ -752,12 +750,10 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, } for (i = 0; i < nr_items; i++) { - if (log_removal) { - ret = tree_mod_log_insert_key_locked(fs_info, src, - i + src_offset, - 
MOD_LOG_KEY_REMOVE); - BUG_ON(ret < 0); - } + ret = tree_mod_log_insert_key_locked(fs_info, src, + i + src_offset, + MOD_LOG_KEY_REMOVE); + BUG_ON(ret < 0); ret = tree_mod_log_insert_key_locked(fs_info, dst, i + dst_offset, MOD_LOG_KEY_ADD); @@ -931,6 +927,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, ret = btrfs_dec_ref(trans, root, buf, 1, 1); BUG_ON(ret); /* -ENOMEM */ } + tree_mod_log_free_eb(root->fs_info, buf); clean_tree_block(trans, root, buf); *last_ref = 1; } @@ -1049,7 +1046,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_set_node_ptr_generation(parent, parent_slot, trans->transid); btrfs_mark_buffer_dirty(parent); - tree_mod_log_free_eb(root->fs_info, buf); btrfs_free_tree_block(trans, root, buf, parent_start, last_ref); } @@ -1759,6 +1755,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, goto enospc; } + tree_mod_log_free_eb(root->fs_info, root->node); tree_mod_log_set_root_pointer(root, child); rcu_assign_pointer(root->node, child); @@ -3003,7 +3000,7 @@ static int push_node_left(struct btrfs_trans_handle *trans, push_items = min(src_nritems - 8, push_items); tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0, - push_items, 1); + push_items); copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(dst_nritems), btrfs_node_key_ptr_offset(0), @@ -3074,7 +3071,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans, sizeof(struct btrfs_key_ptr)); tree_mod_log_eb_copy(root->fs_info, dst, src, 0, - src_nritems - push_items, push_items, 1); + src_nritems - push_items, push_items); copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(0), btrfs_node_key_ptr_offset(src_nritems - push_items), @@ -3226,18 +3223,12 @@ static noinline int split_node(struct btrfs_trans_handle *trans, int mid; int ret; u32 c_nritems; - int tree_mod_log_removal = 1; c = path->nodes[level]; WARN_ON(btrfs_header_generation(c) != trans->transid); if (c == root->node) { /* trying to split the root, lets make a new one */ ret = insert_new_root(trans, root, path, level + 1); - /* - * removal of root nodes has been logged by - * tree_mod_log_set_root_pointer due to locking - */ - tree_mod_log_removal = 0; if (ret) return ret; } else { @@ -3275,8 +3266,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans, (unsigned long)btrfs_header_chunk_tree_uuid(split), BTRFS_UUID_SIZE); - tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid, - tree_mod_log_removal); + tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid); copy_extent_buffer(split, c, btrfs_node_key_ptr_offset(0), btrfs_node_key_ptr_offset(mid), diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 105b265..ae94117 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -36,19 +36,16 @@ * compare two delayed tree backrefs with same bytenr and type */ static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, - struct btrfs_delayed_tree_ref *ref1, int type) + struct btrfs_delayed_tree_ref *ref1) { - if (type == BTRFS_TREE_BLOCK_REF_KEY) { - if (ref1->root < ref2->root) - return -1; - if (ref1->root > ref2->root) - return 1; - } else { - if (ref1->parent < ref2->parent) - return -1; - if (ref1->parent > ref2->parent) - return 1; - } + if (ref1->root < ref2->root) + return -1; + if (ref1->root > ref2->root) + return 1; + if (ref1->parent < ref2->parent) + return -1; + if (ref1->parent > ref2->parent) + return 1; return 0; } @@ -112,8 +109,7 @@ static int comp_entry(struct 
btrfs_delayed_ref_node *ref2, if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), - btrfs_delayed_node_to_tree_ref(ref1), - ref1->type); + btrfs_delayed_node_to_tree_ref(ref1)); } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY || ref1->type == BTRFS_SHARED_DATA_REF_KEY) { return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2), diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d170412..5a3327b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4308,7 +4308,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) spin_lock(&sinfo->lock); spin_lock(&block_rsv->lock); - block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024); + block_rsv->size = num_bytes; num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + sinfo->bytes_reserved + sinfo->bytes_readonly + @@ -4601,49 +4601,14 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) * If the inodes csum_bytes is the same as the original * csum_bytes then we know we haven't raced with any free()ers * so we can just reduce our inodes csum bytes and carry on. + * Otherwise we have to do the normal free thing to account for + * the case that the free side didn't free up its reserve + * because of this outstanding reservation. */ - if (BTRFS_I(inode)->csum_bytes == csum_bytes) { + if (BTRFS_I(inode)->csum_bytes == csum_bytes) calc_csum_metadata_size(inode, num_bytes, 0); - } else { - u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes; - u64 bytes; - - /* - * This is tricky, but first we need to figure out how much we - * free'd from any free-ers that occured during this - * reservation, so we reset ->csum_bytes to the csum_bytes - * before we dropped our lock, and then call the free for the - * number of bytes that were freed while we were trying our - * reservation. - */ - bytes = csum_bytes - BTRFS_I(inode)->csum_bytes; - BTRFS_I(inode)->csum_bytes = csum_bytes; - to_free = calc_csum_metadata_size(inode, bytes, 0); - - - /* - * Now we need to see how much we would have freed had we not - * been making this reservation and our ->csum_bytes were not - * artificially inflated. - */ - BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes; - bytes = csum_bytes - orig_csum_bytes; - bytes = calc_csum_metadata_size(inode, bytes, 0); - - /* - * Now reset ->csum_bytes to what it should be. If bytes is - * more than to_free then we would have free'd more space had we - * not had an artificially high ->csum_bytes, so we need to free - * the remainder. If bytes is the same or less then we don't - * need to do anything, the other free-ers did the correct - * thing. 
- */ - BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes; - if (bytes > to_free) - to_free = bytes - to_free; - else - to_free = 0; - } + else + to_free = calc_csum_metadata_size(inode, num_bytes, 0); spin_unlock(&BTRFS_I(inode)->lock); if (dropped) to_free += btrfs_calc_trans_metadata_size(root, dropped); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 125397e..1b319df 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1258,39 +1258,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) GFP_NOFS); } -int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) -{ - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; - struct page *page; - - while (index <= end_index) { - page = find_get_page(inode->i_mapping, index); - BUG_ON(!page); /* Pages should be in the extent_io_tree */ - clear_page_dirty_for_io(page); - page_cache_release(page); - index++; - } - return 0; -} - -int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) -{ - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; - struct page *page; - - while (index <= end_index) { - page = find_get_page(inode->i_mapping, index); - BUG_ON(!page); /* Pages should be in the extent_io_tree */ - account_page_redirty(page); - __set_page_dirty_nobuffers(page); - page_cache_release(page); - index++; - } - return 0; -} - /* * helper function to set both pages and extents in the tree writeback */ diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 715b474..2eacfab 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -329,8 +329,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, unsigned long *map_len); int extent_range_uptodate(struct extent_io_tree *tree, u64 start, u64 end); -int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); -int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); int extent_clear_unlock_delalloc(struct inode *inode, struct extent_io_tree *tree, u64 start, u64 end, struct page *locked_page, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4b5398c..cc93b23 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -265,7 +265,6 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, return 1; } - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); btrfs_delalloc_release_metadata(inode, end + 1 - start); btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); return 0; @@ -352,7 +351,6 @@ static noinline int compress_file_range(struct inode *inode, int i; int will_compress; int compress_type = root->fs_info->compress_type; - int redirty = 0; /* if this is a small write inside eof, kick off a defrag */ if ((end - start + 1) < 16 * 1024 && @@ -415,17 +413,6 @@ again: if (BTRFS_I(inode)->force_compress) compress_type = BTRFS_I(inode)->force_compress; - /* - * we need to call clear_page_dirty_for_io on each - * page in the range. Otherwise applications with the file - * mmap'd can wander in and change the page contents while - * we are compressing them. - * - * If the compression fails for any reason, we set the pages - * dirty again later on. 
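The btrfs helpers being removed here (extent_range_clear_dirty_for_io() and its redirty counterpart, deleted from extent_io.c above) follow the standard pagecache walk: look up each page in the range, flip its dirty state, drop the reference. A generic sketch of that loop, assuming the pages are already instantiated in the mapping and a kernel of this vintage where page_cache_release() is the put operation:

    #include <linux/pagemap.h>
    #include <linux/mm.h>

    static void range_clear_dirty(struct address_space *mapping,
                                  pgoff_t index, pgoff_t end_index)
    {
            struct page *page;

            while (index <= end_index) {
                    page = find_get_page(mapping, index);
                    if (page) {
                            /* keep mmap writers from racing with compression */
                            clear_page_dirty_for_io(page);
                            page_cache_release(page);
                    }
                    index++;
            }
    }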
- */ - extent_range_clear_dirty_for_io(inode, start, end); - redirty = 1; ret = btrfs_compress_pages(compress_type, inode->i_mapping, start, total_compressed, pages, @@ -567,8 +554,6 @@ cleanup_and_bail_uncompressed: __set_page_dirty_nobuffers(locked_page); /* unlocked later on in the async handlers */ } - if (redirty) - extent_range_redirty_for_io(inode, start, end); add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0, BTRFS_COMPRESS_NONE); *num_added += 1; @@ -2484,7 +2469,6 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) */ set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, &BTRFS_I(inode)->runtime_flags); - atomic_inc(&root->orphan_inodes); /* if we have links, this was a truncate, lets do that */ if (inode->i_nlink) { @@ -2507,8 +2491,6 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) goto out; ret = btrfs_truncate(inode); - if (ret) - btrfs_orphan_del(NULL, inode); } else { nr_unlink++; } @@ -5794,9 +5776,7 @@ out: * block must be cow'd */ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, - struct inode *inode, u64 offset, u64 *len, - u64 *orig_start, u64 *orig_block_len, - u64 *ram_bytes) + struct inode *inode, u64 offset, u64 len) { struct btrfs_path *path; int ret; @@ -5853,12 +5833,8 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); backref_offset = btrfs_file_extent_offset(leaf, fi); - *orig_start = key.offset - backref_offset; - *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); - *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); - extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); - if (extent_end < offset + *len) { + if (extent_end < offset + len) { /* extent doesn't include our full range, must cow */ goto out; } @@ -5882,14 +5858,13 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, */ disk_bytenr += backref_offset; disk_bytenr += offset - key.offset; - num_bytes = min(offset + *len, extent_end) - offset; + num_bytes = min(offset + len, extent_end) - offset; if (csum_exist_in_range(root, disk_bytenr, num_bytes)) goto out; /* * all of the above have passed, it is safe to overwrite this extent * without cow */ - *len = num_bytes; ret = 1; out: btrfs_free_path(path); @@ -6099,7 +6074,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, em->block_start != EXTENT_MAP_HOLE)) { int type; int ret; - u64 block_start, orig_start, orig_block_len, ram_bytes; + u64 block_start; if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) type = BTRFS_ORDERED_PREALLOC; @@ -6117,8 +6092,10 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, if (IS_ERR(trans)) goto must_cow; - if (can_nocow_odirect(trans, inode, start, &len, &orig_start, - &orig_block_len, &ram_bytes) == 1) { + if (can_nocow_odirect(trans, inode, start, len) == 1) { + u64 orig_start = em->orig_start; + u64 orig_block_len = em->orig_block_len; + if (type == BTRFS_ORDERED_PREALLOC) { free_extent_map(em); em = create_pinned_em(inode, start, len, diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 48761b6..67783e0 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -541,6 +541,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) eb = path->nodes[0]; ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); item_size = btrfs_item_size_nr(eb, path->slots[0]); + btrfs_release_path(path); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { do { @@ -556,9 +557,7 @@ static void 
scrub_print_warning(const char *errstr, struct scrub_block *sblock) ret < 0 ? -1 : ref_level, ret < 0 ? -1 : ref_root); } while (ret != 1); - btrfs_release_path(path); } else { - btrfs_release_path(path); swarn.path = path; swarn.dev = dev; iterate_extent_inodes(fs_info, found_key.objectid, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 8a00e2f..9027bb1 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -318,7 +318,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, unsigned long src_ptr; unsigned long dst_ptr; int overwrite_root = 0; - bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) overwrite_root = 1; @@ -328,9 +327,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, /* look for the key in the destination tree */ ret = btrfs_search_slot(NULL, root, key, path, 0, 0); - if (ret < 0) - return ret; - if (ret == 0) { char *src_copy; char *dst_copy; @@ -372,30 +368,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, return 0; } - /* - * We need to load the old nbytes into the inode so when we - * replay the extents we've logged we get the right nbytes. - */ - if (inode_item) { - struct btrfs_inode_item *item; - u64 nbytes; - - item = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_inode_item); - nbytes = btrfs_inode_nbytes(path->nodes[0], item); - item = btrfs_item_ptr(eb, slot, - struct btrfs_inode_item); - btrfs_set_inode_nbytes(eb, item, nbytes); - } - } else if (inode_item) { - struct btrfs_inode_item *item; - - /* - * New inode, set nbytes to 0 so that the nbytes comes out - * properly when we replay the extents. - */ - item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); - btrfs_set_inode_nbytes(eb, item, 0); } insert: btrfs_release_path(path); @@ -516,7 +488,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, u64 mask = root->sectorsize - 1; u64 extent_end; u64 start = key->offset; - u64 nbytes = 0; + u64 saved_nbytes; struct btrfs_file_extent_item *item; struct inode *inode = NULL; unsigned long size; @@ -526,19 +498,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, found_type = btrfs_file_extent_type(eb, item); if (found_type == BTRFS_FILE_EXTENT_REG || - found_type == BTRFS_FILE_EXTENT_PREALLOC) { - nbytes = btrfs_file_extent_num_bytes(eb, item); - extent_end = start + nbytes; - - /* - * We don't add to the inodes nbytes if we are prealloc or a - * hole. 
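The two sides of this tree-log.c hunk keep the inode byte count consistent in different ways: the v3.8 code snapshots nbytes around the replay, while the code being reverted accumulates exactly what each replayed extent contributes (zero for holes and prealloc). A hedged side-by-side sketch using the generic VFS helpers; the function names are illustrative only:

    #include <linux/fs.h>

    /* v3.8 style: snapshot, drop overlapping extents, restore */
    static void replay_snapshot_style(struct inode *inode)
    {
            loff_t saved = inode_get_bytes(inode);

            /* ... drop overlapping extents, insert the logged extent ... */

            inode_set_bytes(inode, saved);
    }

    /* later style: nbytes was computed per extent, 0 for holes/prealloc */
    static void replay_accumulate_style(struct inode *inode, loff_t nbytes)
    {
            inode_add_bytes(inode, nbytes);
    }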
- */ - if (btrfs_file_extent_disk_bytenr(eb, item) == 0) - nbytes = 0; - } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { + found_type == BTRFS_FILE_EXTENT_PREALLOC) + extent_end = start + btrfs_file_extent_num_bytes(eb, item); + else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size = btrfs_file_extent_inline_len(eb, item); - nbytes = btrfs_file_extent_ram_bytes(eb, item); extent_end = (start + size + mask) & ~mask; } else { ret = 0; @@ -587,6 +550,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, } btrfs_release_path(path); + saved_nbytes = inode_get_bytes(inode); /* drop any overlapping extents */ ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); BUG_ON(ret); @@ -673,7 +637,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, BUG_ON(ret); } - inode_add_bytes(inode, nbytes); + inode_set_bytes(inode, saved_nbytes); ret = btrfs_update_inode(trans, root, inode); out: if (inode) @@ -1420,10 +1384,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, btrfs_release_path(path); if (ret == 0) { - if (!inode->i_nlink) - set_nlink(inode, 1); - else - btrfs_inc_nlink(inode); + btrfs_inc_nlink(inode); ret = btrfs_update_inode(trans, root, inode); } else if (ret == -EEXIST) { ret = 0; @@ -3320,7 +3281,6 @@ static int log_one_extent(struct btrfs_trans_handle *trans, int ret; bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; -insert: INIT_LIST_HEAD(&ordered_sums); btrfs_init_map_token(&token); key.objectid = btrfs_ino(inode); @@ -3336,23 +3296,6 @@ insert: leaf = path->nodes[0]; fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); - - /* - * If we are overwriting an inline extent with a real one then we need - * to just delete the inline extent as it may not be large enough to - * have the entire file_extent_item. - */ - if (ret && btrfs_token_file_extent_type(leaf, fi, &token) == - BTRFS_FILE_EXTENT_INLINE) { - ret = btrfs_del_item(trans, log, path); - btrfs_release_path(path); - if (ret) { - path->really_keep_locks = 0; - return ret; - } - goto insert; - } - btrfs_set_token_file_extent_generation(leaf, fi, em->generation, &token); if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 1fd234a..5cbb7f4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -647,7 +647,6 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) new_device->writeable = 0; new_device->in_fs_metadata = 0; new_device->can_discard = 0; - spin_lock_init(&new_device->io_lock); list_replace_rcu(&device->dev_list, &new_device->dev_list); call_rcu(&device->rcu, free_device); @@ -681,12 +680,6 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) __btrfs_close_devices(fs_devices); free_fs_devices(fs_devices); } - /* - * Wait for rcu kworkers under __btrfs_close_devices - * to finish all blkdev_puts so device is really - * free when umount is done. - */ - rcu_barrier(); return ret; } diff --git a/fs/buffer.c b/fs/buffer.c index 8863f45..7a75c3e 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -280,7 +280,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) * decide that the page is now completely done. 
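The fs/buffer.c hunks that follow swap the RT wrapper bh_uptodate_lock_irqsave() back to the open-coded mainline form: disable interrupts, then take a bit spinlock embedded in the first buffer's b_state, since these completion handlers can run in interrupt context. The bare pattern, as a sketch:

    #include <linux/buffer_head.h>
    #include <linux/bit_spinlock.h>

    static void walk_buffers_example(struct buffer_head *first)
    {
            unsigned long flags;

            local_irq_save(flags);
            bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

            /* ... walk the buffer ring attached to the page ... */

            bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
            local_irq_restore(flags);
    }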
*/ first = page_buffers(page); - flags = bh_uptodate_lock_irqsave(first); + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &first->b_state); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -293,7 +294,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while (tmp != bh); - bh_uptodate_unlock_irqrestore(first, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); /* * If none of the buffers had errors and they are all @@ -305,7 +307,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) return; still_busy: - bh_uptodate_unlock_irqrestore(first, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); + return; } /* @@ -339,7 +343,8 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } first = page_buffers(page); - flags = bh_uptodate_lock_irqsave(first); + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &first->b_state); clear_buffer_async_write(bh); unlock_buffer(bh); @@ -351,12 +356,15 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } - bh_uptodate_unlock_irqrestore(first, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); end_page_writeback(page); return; still_busy: - bh_uptodate_unlock_irqrestore(first, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); + return; } EXPORT_SYMBOL(end_buffer_async_write); @@ -3248,7 +3256,6 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); - buffer_head_init_locks(ret); preempt_disable(); __this_cpu_inc(bh_accounting.nr); recalc_bh_state(); diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index 1d36db1..cfd1ce3 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c @@ -614,10 +614,53 @@ decode_negTokenInit(unsigned char *security_blob, int length, } } - /* - * We currently ignore anything at the end of the SPNEGO blob after - * the mechTypes have been parsed, since none of that info is - * used at the moment. - */ + /* mechlistMIC */ + if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { + /* Check if we have reached the end of the blob, but with + no mechListMic (e.g. 
NTLMSSP instead of KRB5) */ + if (ctx.error == ASN1_ERR_DEC_EMPTY) + goto decode_negtoken_exit; + cFYI(1, "Error decoding last part negTokenInit exit3"); + return 0; + } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { + /* tag = 3 indicating mechListMIC */ + cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)", + cls, con, tag, end, *end); + return 0; + } + + /* sequence */ + if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { + cFYI(1, "Error decoding last part negTokenInit exit5"); + return 0; + } else if ((cls != ASN1_UNI) || (con != ASN1_CON) + || (tag != ASN1_SEQ)) { + cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)", + cls, con, tag, end, *end); + } + + /* sequence of */ + if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { + cFYI(1, "Error decoding last part negTokenInit exit 7"); + return 0; + } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { + cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)", + cls, con, tag, end, *end); + return 0; + } + + /* general string */ + if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { + cFYI(1, "Error decoding last part negTokenInit exit9"); + return 0; + } else if ((cls != ASN1_UNI) || (con != ASN1_PRI) + || (tag != ASN1_GENSTR)) { + cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)", + cls, con, tag, end, *end); + return 0; + } + cFYI(1, "Need to call asn1_octets_decode() function for %s", + ctx.pointer); /* is this UTF-8 or ASCII? */ +decode_negtoken_exit: return 1; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index b9db388..de7f9168 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -91,30 +91,6 @@ struct workqueue_struct *cifsiod_wq; __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE]; #endif -/* - * Bumps refcount for cifs super block. - * Note that it should be only called if a referece to VFS super block is - * already held, e.g. in open-type syscalls context. Otherwise it can race with - * atomic_dec_and_test in deactivate_locked_super. 
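The removed cifs_sb_active()/cifs_sb_deactive() pair implements a pin count on top of the VFS s_active reference: the first pinner takes one extra s_active reference, the last unpinner drops it via deactivate_super(). The same idiom in isolation, with a hypothetical counter rather than the cifs_sb_info field:

    #include <linux/fs.h>
    #include <linux/atomic.h>

    static atomic_t pin_count = ATOMIC_INIT(0);

    static void sb_pin(struct super_block *sb)
    {
            /* the 0 -> 1 transition takes the extra VFS reference */
            if (atomic_inc_return(&pin_count) == 1)
                    atomic_inc(&sb->s_active);
    }

    static void sb_unpin(struct super_block *sb)
    {
            /* the 1 -> 0 transition drops it again */
            if (atomic_dec_and_test(&pin_count))
                    deactivate_super(sb);
    }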
- */ -void -cifs_sb_active(struct super_block *sb) -{ - struct cifs_sb_info *server = CIFS_SB(sb); - - if (atomic_inc_return(&server->active) == 1) - atomic_inc(&sb->s_active); -} - -void -cifs_sb_deactive(struct super_block *sb) -{ - struct cifs_sb_info *server = CIFS_SB(sb); - - if (atomic_dec_and_test(&server->active)) - deactivate_super(sb); -} - static int cifs_read_super(struct super_block *sb) { @@ -582,11 +558,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) dentry = ERR_PTR(-ENOENT); break; } - if (!S_ISDIR(dir->i_mode)) { - dput(dentry); - dentry = ERR_PTR(-ENOTDIR); - break; - } /* skip separators */ while (*s == sep) diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 0e32c34..7163419 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -41,10 +41,6 @@ extern struct file_system_type cifs_fs_type; extern const struct address_space_operations cifs_addr_ops; extern const struct address_space_operations cifs_addr_ops_smallbuf; -/* Functions related to super block operations */ -extern void cifs_sb_active(struct super_block *sb); -extern void cifs_sb_deactive(struct super_block *sb); - /* Functions related to inodes */ extern const struct inode_operations cifs_dir_inode_ops; extern struct inode *cifs_root_iget(struct super_block *); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f7199b9..12b3da3 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1546,24 +1546,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, } break; case Opt_blank_pass: + vol->password = NULL; + break; + case Opt_pass: /* passwords have to be handled differently * to allow the character used for deliminator * to be passed within them */ - /* - * Check if this is a case where the password - * starts with a delimiter - */ - tmp_end = strchr(data, '='); - tmp_end++; - if (!(tmp_end < end && tmp_end[1] == delim)) { - /* No it is not. Set the password to NULL */ - vol->password = NULL; - break; - } - /* Yes it is. Drop down to Opt_pass below.*/ - case Opt_pass: /* Obtain the value string */ value = strchr(data, '='); value++; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1d93ee8..8ea6ca5 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -294,8 +294,6 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, INIT_WORK(&cfile->oplock_break, cifs_oplock_break); mutex_init(&cfile->fh_mutex); - cifs_sb_active(inode->i_sb); - /* * If the server returned a read oplock and we have mandatory brlocks, * set oplock level to None. 
@@ -345,8 +343,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct cifsInodeInfo *cifsi = CIFS_I(inode); - struct super_block *sb = inode->i_sb; - struct cifs_sb_info *cifs_sb = CIFS_SB(sb); + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsLockInfo *li, *tmp; struct cifs_fid fid; struct cifs_pending_open open; @@ -411,7 +408,6 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) cifs_put_tlink(cifs_file->tlink); dput(cifs_file->dentry); - cifs_sb_deactive(sb); kfree(cifs_file); } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index bceffe7..c9c7aa7 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -744,5 +744,4 @@ struct smb_version_values smb30_values = { .cap_unix = 0, .cap_nt_find = SMB2_NT_FIND, .cap_large_files = SMB2_LARGE_FILES, - .oplock_read = SMB2_OPLOCK_LEVEL_II, }; diff --git a/fs/compat.c b/fs/compat.c index a06dcbc..015e1e1 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -558,10 +558,6 @@ ssize_t compat_rw_copy_check_uvector(int type, } *ret_pointer = iov; - ret = -EFAULT; - if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) - goto out; - /* * Single unix specification: * We should -EINVAL if an element length is not >= 0 and fitting an @@ -1084,12 +1080,17 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, if (!file->f_op) goto out; - ret = compat_rw_copy_check_uvector(type, uvector, nr_segs, + ret = -EFAULT; + if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) + goto out; + + tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs, UIO_FASTIOV, iovstack, &iov); - if (ret <= 0) + if (tot_len == 0) { + ret = 0; goto out; + } - tot_len = ret; ret = rw_verify_area(type, file, pos, tot_len); if (ret < 0) goto out; diff --git a/fs/dcache.c b/fs/dcache.c index 67f4681..19153a0 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -37,7 +37,6 @@ #include #include #include -#include #include "internal.h" #include "mount.h" @@ -471,7 +470,7 @@ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref) if (inode && !spin_trylock(&inode->i_lock)) { relock: spin_unlock(&dentry->d_lock); - cpu_chill(); + cpu_relax(); return dentry; /* try again with same dentry */ } if (IS_ROOT(dentry)) @@ -853,7 +852,7 @@ relock: if (!spin_trylock(&dentry->d_lock)) { spin_unlock(&dcache_lru_lock); - cpu_chill(); + cpu_relax(); goto relock; } @@ -1233,10 +1232,8 @@ void shrink_dcache_parent(struct dentry * parent) LIST_HEAD(dispose); int found; - while ((found = select_parent(parent, &dispose)) != 0) { + while ((found = select_parent(parent, &dispose)) != 0) shrink_dentry_list(&dispose); - cond_resched(); - } } EXPORT_SYMBOL(shrink_dcache_parent); @@ -2087,7 +2084,7 @@ again: if (dentry->d_count == 1) { if (!spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); - cpu_chill(); + cpu_relax(); goto again; } dentry->d_flags &= ~DCACHE_CANT_MOUNT; @@ -2555,6 +2552,7 @@ static int prepend_path(const struct path *path, bool slash = false; int error = 0; + br_read_lock(&vfsmount_lock); while (dentry != root->dentry || vfsmnt != root->mnt) { struct dentry * parent; @@ -2584,6 +2582,8 @@ static int prepend_path(const struct path *path, if (!error && !slash) error = prepend(buffer, buflen, "/", 1); +out: + br_read_unlock(&vfsmount_lock); return error; global_root: @@ -2600,7 +2600,7 @@ global_root: error = prepend(buffer, buflen, "/", 1); if (!error) error = is_mounted(vfsmnt) ? 
1 : 2; - return error; + goto out; } /** @@ -2627,11 +2627,9 @@ char *__d_path(const struct path *path, int error; prepend(&res, &buflen, "\0", 1); - br_read_lock(&vfsmount_lock); write_seqlock(&rename_lock); error = prepend_path(path, root, &res, &buflen); write_sequnlock(&rename_lock); - br_read_unlock(&vfsmount_lock); if (error < 0) return ERR_PTR(error); @@ -2648,11 +2646,9 @@ char *d_absolute_path(const struct path *path, int error; prepend(&res, &buflen, "\0", 1); - br_read_lock(&vfsmount_lock); write_seqlock(&rename_lock); error = prepend_path(path, &root, &res, &buflen); write_sequnlock(&rename_lock); - br_read_unlock(&vfsmount_lock); if (error > 1) error = -EINVAL; @@ -2716,13 +2712,11 @@ char *d_path(const struct path *path, char *buf, int buflen) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); get_fs_root(current->fs, &root); - br_read_lock(&vfsmount_lock); write_seqlock(&rename_lock); error = path_with_deleted(path, &root, &res, &buflen); - write_sequnlock(&rename_lock); - br_read_unlock(&vfsmount_lock); if (error < 0) res = ERR_PTR(error); + write_sequnlock(&rename_lock); path_put(&root); return res; } @@ -2877,7 +2871,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) get_fs_root_and_pwd(current->fs, &root, &pwd); error = -ENOENT; - br_read_lock(&vfsmount_lock); write_seqlock(&rename_lock); if (!d_unlinked(pwd.dentry)) { unsigned long len; @@ -2887,7 +2880,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) prepend(&cwd, &buflen, "\0", 1); error = prepend_path(&pwd, &root, &cwd, &buflen); write_sequnlock(&rename_lock); - br_read_unlock(&vfsmount_lock); if (error < 0) goto out; @@ -2908,7 +2900,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) } } else { write_sequnlock(&rename_lock); - br_read_unlock(&vfsmount_lock); } out: diff --git a/fs/direct-io.c b/fs/direct-io.c index f853263..cf5b44b 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -261,9 +261,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is dio->end_io(dio->iocb, offset, transferred, dio->private, ret, is_async); } else { - inode_dio_done(dio->inode); if (is_async) aio_complete(dio->iocb, ret, 0); + inode_dio_done(dio->inode); } return ret; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 343e14a..9fec183 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -497,12 +497,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests) */ static void ep_poll_safewake(wait_queue_head_t *wq) { - int this_cpu = get_cpu_light(); + int this_cpu = get_cpu(); ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); - put_cpu_light(); + put_cpu(); } static void ep_remove_wait_queue(struct eppoll_entry *pwq) diff --git a/fs/exec.c b/fs/exec.c index 09d1e6c..20df02c 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) * when the old and new regions overlap clear from new_end. */ free_pgd_range(&tlb, new_end, old_end, new_end, - vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); + vma->vm_next ? vma->vm_next->vm_start : 0); } else { /* * otherwise, clean from old_start; this is done to not touch @@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) * for the others its just a little faster. */ free_pgd_range(&tlb, old_start, old_end, new_end, - vma->vm_next ? 
vma->vm_next->vm_start : USER_PGTABLES_CEILING); + vma->vm_next ? vma->vm_next->vm_start : 0); } tlb_finish_mmu(&tlb, new_end, old_end); @@ -827,12 +827,10 @@ static int exec_mmap(struct mm_struct *mm) } } task_lock(tsk); - preempt_disable_rt(); active_mm = tsk->active_mm; tsk->mm = mm; tsk->active_mm = mm; activate_mm(active_mm, mm); - preempt_enable_rt(); task_unlock(tsk); arch_pick_mmap_layout(mm); if (old_mm) { @@ -900,13 +898,11 @@ static int de_thread(struct task_struct *tsk) sig->notify_count = -1; /* for exit_notify() */ for (;;) { - threadgroup_change_begin(tsk); write_lock_irq(&tasklist_lock); if (likely(leader->exit_state)) break; __set_current_state(TASK_KILLABLE); write_unlock_irq(&tasklist_lock); - threadgroup_change_end(tsk); schedule(); if (unlikely(__fatal_signal_pending(tsk))) goto killed; @@ -964,7 +960,6 @@ static int de_thread(struct task_struct *tsk) if (unlikely(leader->ptrace)) __wake_up_parent(leader, leader->parent); write_unlock_irq(&tasklist_lock); - threadgroup_change_end(tsk); release_task(leader); } diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 0a7f2d0..6e50223 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -353,7 +353,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) return bdev; fail: - ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld", + ext3_msg(sb, "error: failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; @@ -887,7 +887,7 @@ static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) /*todo: use simple_strtoll with >32bit ext3 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { - ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s", + ext3_msg(sb, "error: invalid sb specification: %s", (char *) *data); return 1; } diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index efea5d5..9873587 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -71,5 +71,4 @@ config EXT4_DEBUG Enables run-time debugging support for the ext4 filesystem. If you select Y here, then you will be able to turn on debugging - with a command such as: - echo 1 > /sys/module/ext4/parameters/mballoc_debug + with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug" diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 92e68b3..cf18217 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -358,7 +358,7 @@ void ext4_validate_block_bitmap(struct super_block *sb, } /** - * ext4_read_block_bitmap_nowait() + * ext4_read_block_bitmap() * @sb: super block * @block_group: given block group * @@ -457,8 +457,6 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) struct buffer_head *bh; bh = ext4_read_block_bitmap_nowait(sb, block_group); - if (!bh) - return NULL; if (ext4_wait_block_bitmap(sb, block_group, bh)) { put_bh(bh); return NULL; @@ -484,16 +482,11 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, free_clusters = percpu_counter_read_positive(fcc); dirty_clusters = percpu_counter_read_positive(dcc); - - /* - * r_blocks_count should always be multiple of the cluster ratio so - * we are safe to do a plane bit shift only. 
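For concreteness, a worked example of the shift on the removed line just below, with made-up numbers: with 16 filesystem blocks per cluster, s_cluster_bits is 4, and because r_blocks_count is always a multiple of the cluster ratio the shift divides exactly:

    unsigned int s_cluster_bits = 4;            /* 2^4 = 16 blocks per cluster */
    unsigned long long r_blocks = 32768;        /* reserved blocks */
    unsigned long long root_clusters;

    root_clusters = r_blocks >> s_cluster_bits; /* 32768 / 16 = 2048 */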
- */ - root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits; + root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es)); if (free_clusters - (nclusters + root_clusters + dirty_clusters) < EXT4_FREECLUSTERS_WATERMARK) { - free_clusters = percpu_counter_sum_positive(fcc); + free_clusters = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc)); dirty_clusters = percpu_counter_sum_positive(dcc); } /* Check whether we have space after accounting for current @@ -635,7 +628,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb) brelse(bitmap_bh); printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu" ", computed = %llu, %llu\n", - EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)), + EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)), desc_count, bitmap_count); return bitmap_count; #else diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index bbcd6a0..8462eb3 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -338,9 +338,9 @@ struct ext4_group_desc */ struct flex_groups { - atomic64_t free_clusters; - atomic_t free_inodes; - atomic_t used_dirs; + atomic_t free_inodes; + atomic_t free_clusters; + atomic_t used_dirs; }; #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index dbd9ae1..7177f9b 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -170,20 +170,16 @@ static inline void ext4_journal_callback_add(handle_t *handle, * ext4_journal_callback_del: delete a registered callback * @handle: active journal transaction handle on which callback was registered * @jce: registered journal callback entry to unregister - * Return true if object was sucessfully removed */ -static inline bool ext4_journal_callback_try_del(handle_t *handle, +static inline void ext4_journal_callback_del(handle_t *handle, struct ext4_journal_cb_entry *jce) { - bool deleted; struct ext4_sb_info *sbi = EXT4_SB(handle->h_transaction->t_journal->j_private); spin_lock(&sbi->s_md_lock); - deleted = !list_empty(&jce->jce_list); list_del_init(&jce->jce_list); spin_unlock(&sbi->s_md_lock); - return deleted; } int diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 4d315a0..5ae1674 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -725,7 +725,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, struct ext4_extent_header *eh; struct buffer_head *bh; short int depth, i, ppos = 0, alloc = 0; - int ret; eh = ext_inode_hdr(inode); depth = ext_depth(inode); @@ -753,15 +752,12 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, path[ppos].p_ext = NULL; bh = sb_getblk(inode->i_sb, path[ppos].p_block); - if (unlikely(!bh)) { - ret = -ENOMEM; + if (unlikely(!bh)) goto err; - } if (!bh_uptodate_or_lock(bh)) { trace_ext4_ext_load_extent(inode, block, path[ppos].p_block); - ret = bh_submit_read(bh); - if (ret < 0) { + if (bh_submit_read(bh) < 0) { put_bh(bh); goto err; } @@ -772,15 +768,13 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, put_bh(bh); EXT4_ERROR_INODE(inode, "ppos %d > depth %d", ppos, depth); - ret = -EIO; goto err; } path[ppos].p_bh = bh; path[ppos].p_hdr = eh; i--; - ret = ext4_ext_check_block(inode, eh, i, bh); - if (ret < 0) + if (ext4_ext_check_block(inode, eh, i, bh)) goto err; } @@ -802,7 +796,7 @@ err: ext4_ext_drop_refs(path); if (alloc) kfree(path); - return ERR_PTR(ret); + return ERR_PTR(-EIO); } /* @@ -957,7 +951,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, } bh = sb_getblk(inode->i_sb, newblock); if (!bh) { - err = -ENOMEM; + err 
= -EIO; goto cleanup; } lock_buffer(bh); @@ -1030,7 +1024,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, newblock = ablocks[--a]; bh = sb_getblk(inode->i_sb, newblock); if (!bh) { - err = -ENOMEM; + err = -EIO; goto cleanup; } lock_buffer(bh); @@ -1142,8 +1136,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, return err; bh = sb_getblk(inode->i_sb, newblock); - if (!bh) - return -ENOMEM; + if (!bh) { + err = -EIO; + ext4_std_error(inode->i_sb, err); + return err; + } lock_buffer(bh); err = ext4_journal_get_create_access(handle, bh); @@ -3089,7 +3086,6 @@ static int ext4_split_extent(handle_t *handle, int err = 0; int uninitialized; int split_flag1, flags1; - int allocated = map->m_len; depth = ext_depth(inode); ex = path[depth].p_ext; @@ -3109,8 +3105,6 @@ static int ext4_split_extent(handle_t *handle, map->m_lblk + map->m_len, split_flag1, flags1); if (err) goto out; - } else { - allocated = ee_len - (map->m_lblk - ee_block); } ext4_ext_drop_refs(path); @@ -3133,7 +3127,7 @@ static int ext4_split_extent(handle_t *handle, ext4_ext_show_leaf(inode, path); out: - return err ? err : allocated; + return err ? err : map->m_len; } /* @@ -3278,7 +3272,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, if (EXT4_EXT_MAY_ZEROOUT & split_flag) max_zeroout = sbi->s_extent_max_zeroout_kb >> - (inode->i_sb->s_blocksize_bits - 10); + inode->i_sb->s_blocksize_bits; /* If extent is less than s_max_zeroout_kb, zeroout directly */ if (max_zeroout && (ee_len <= max_zeroout)) { @@ -3723,7 +3717,6 @@ out: allocated - map->m_len); allocated = map->m_len; } - map->m_len = allocated; /* * If we have done fallocate with the offset that is already diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index e0ba8a4..3278e64 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -166,7 +166,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) if (journal->j_flags & JBD2_BARRIER && !jbd2_trans_will_send_data_barrier(journal, commit_tid)) needs_barrier = true; - ret = jbd2_complete_transaction(journal, commit_tid); + jbd2_log_start_commit(journal, commit_tid); + ret = jbd2_log_wait_commit(journal, commit_tid); if (needs_barrier) { err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); if (!ret) diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index ec2909e..3f32c80 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -324,8 +324,8 @@ error_return: } struct orlov_stats { - __u64 free_clusters; __u32 free_inodes; + __u32 free_clusters; __u32 used_dirs; }; @@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, if (flex_size > 1) { stats->free_inodes = atomic_read(&flex_group[g].free_inodes); - stats->free_clusters = atomic64_read(&flex_group[g].free_clusters); + stats->free_clusters = atomic_read(&flex_group[g].free_clusters); stats->used_dirs = atomic_read(&flex_group[g].used_dirs); return; } diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 8d83d1e..20862f9 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -146,7 +146,6 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; - int ret = -EIO; *err = 0; /* i_data is not going away, no lock needed */ @@ -155,10 +154,8 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, goto no_block; while (--depth) { bh = sb_getblk(sb, le32_to_cpu(p->key)); - if (unlikely(!bh)) { - ret = -ENOMEM; + if (unlikely(!bh)) goto 
failure; - } if (!bh_uptodate_or_lock(bh)) { if (bh_submit_read(bh) < 0) { @@ -180,7 +177,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, return NULL; failure: - *err = ret; + *err = -EIO; no_block: return p; } @@ -474,7 +471,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, */ bh = sb_getblk(inode->i_sb, new_blocks[n-1]); if (unlikely(!bh)) { - err = -ENOMEM; + err = -EIO; goto failed; } diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 93a3408..387c47c 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1188,7 +1188,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle, data_bh = sb_getblk(inode->i_sb, map.m_pblk); if (!data_bh) { - error = -ENOMEM; + error = -EIO; goto out_restore; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c0fbd96..cbfe13b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -55,21 +55,21 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, __u16 csum_hi = 0; __u32 csum; - csum_lo = le16_to_cpu(raw->i_checksum_lo); + csum_lo = raw->i_checksum_lo; raw->i_checksum_lo = 0; if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { - csum_hi = le16_to_cpu(raw->i_checksum_hi); + csum_hi = raw->i_checksum_hi; raw->i_checksum_hi = 0; } csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, EXT4_INODE_SIZE(inode->i_sb)); - raw->i_checksum_lo = cpu_to_le16(csum_lo); + raw->i_checksum_lo = csum_lo; if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) - raw->i_checksum_hi = cpu_to_le16(csum_hi); + raw->i_checksum_hi = csum_hi; return csum; } @@ -211,12 +211,12 @@ void ext4_evict_inode(struct inode *inode) * don't use page cache. */ if (ext4_should_journal_data(inode) && - (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && - inode->i_ino != EXT4_JOURNAL_INO) { + (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; - jbd2_complete_transaction(journal, commit_tid); + jbd2_log_start_commit(journal, commit_tid); + jbd2_log_wait_commit(journal, commit_tid); filemap_write_and_wait(&inode->i_data); } truncate_inode_pages(&inode->i_data, 0); @@ -714,7 +714,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, bh = sb_getblk(inode->i_sb, map.m_pblk); if (!bh) { - *errp = -ENOMEM; + *errp = -EIO; return NULL; } if (map.m_flags & EXT4_MAP_NEW) { @@ -2977,9 +2977,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { ext4_free_io_end(io_end); out: - inode_dio_done(inode); if (is_async) aio_complete(iocb, ret, 0); + inode_dio_done(inode); return; } @@ -3660,8 +3660,11 @@ static int __ext4_get_inode_loc(struct inode *inode, iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); bh = sb_getblk(sb, block); - if (!bh) - return -ENOMEM; + if (!bh) { + EXT4_ERROR_INODE_BLOCK(inode, block, + "unable to read itable block"); + return -EIO; + } if (!buffer_uptodate(bh)) { lock_buffer(bh); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index b443e62..1bf6fe7 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2829,8 +2829,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, ac->ac_b_ex.fe_group); - atomic64_sub(ac->ac_b_ex.fe_len, - &sbi->s_flex_groups[flex_group].free_clusters); + 
atomic_sub(ac->ac_b_ex.fe_len, + &sbi->s_flex_groups[flex_group].free_clusters); } err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); @@ -3444,7 +3444,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) win = offs; ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - - EXT4_NUM_B2C(sbi, win); + EXT4_B2C(sbi, win); BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); } @@ -4136,7 +4136,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) /* The max size of hash table is PREALLOC_TB_SIZE */ order = PREALLOC_TB_SIZE - 1; /* Add the prealloc space to lg */ - spin_lock(&lg->lg_prealloc_lock); + rcu_read_lock(); list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], pa_inode_list) { spin_lock(&tmp_pa->pa_lock); @@ -4160,12 +4160,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) if (!added) list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list[order]); - spin_unlock(&lg->lg_prealloc_lock); + rcu_read_unlock(); /* Now trim the list to be not more than 8 elements */ if (lg_prealloc_count > 8) { ext4_mb_discard_lg_preallocations(sb, lg, - order, lg_prealloc_count); + order, lg_prealloc_count); return; } return ; @@ -4449,11 +4449,11 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, node = rb_prev(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, efd_node); - if (can_merge(entry, new_entry) && - ext4_journal_callback_try_del(handle, &entry->efd_jce)) { + if (can_merge(entry, new_entry)) { new_entry->efd_start_cluster = entry->efd_start_cluster; new_entry->efd_count += entry->efd_count; rb_erase(node, &(db->bb_free_root)); + ext4_journal_callback_del(handle, &entry->efd_jce); kmem_cache_free(ext4_free_data_cachep, entry); } } @@ -4461,10 +4461,10 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, node = rb_next(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, efd_node); - if (can_merge(new_entry, entry) && - ext4_journal_callback_try_del(handle, &entry->efd_jce)) { + if (can_merge(new_entry, entry)) { new_entry->efd_count += entry->efd_count; rb_erase(node, &(db->bb_free_root)); + ext4_journal_callback_del(handle, &entry->efd_jce); kmem_cache_free(ext4_free_data_cachep, entry); } } @@ -4590,7 +4590,7 @@ do_more: EXT4_BLOCKS_PER_GROUP(sb); count -= overflow; } - count_clusters = EXT4_NUM_B2C(sbi, count); + count_clusters = EXT4_B2C(sbi, count); bitmap_bh = ext4_read_block_bitmap(sb, block_group); if (!bitmap_bh) { err = -EIO; @@ -4691,8 +4691,8 @@ do_more: if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); - atomic64_add(count_clusters, - &sbi->s_flex_groups[flex_group].free_clusters); + atomic_add(count_clusters, + &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); @@ -4832,12 +4832,12 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, ext4_group_desc_csum_set(sb, block_group, desc); ext4_unlock_group(sb, block_group); percpu_counter_add(&sbi->s_freeclusters_counter, - EXT4_NUM_B2C(sbi, blocks_freed)); + EXT4_B2C(sbi, blocks_freed)); if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); - atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic_add(EXT4_B2C(sbi, blocks_freed), + &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index fe201c6..fe7c63f 100644 --- 
a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -7,7 +7,7 @@ #include "ext4.h" /* Checksumming functions */ -static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp) +static __u32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp) { struct ext4_sb_info *sbi = EXT4_SB(sb); int offset = offsetof(struct mmp_struct, mmp_checksum); @@ -80,8 +80,6 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh, * is not blocked in the elevator. */ if (!*bh) *bh = sb_getblk(sb, mmp_block); - if (!*bh) - return -ENOMEM; if (*bh) { get_bh(*bh); lock_buffer(*bh); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index b42d04f..0016fbc 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -103,13 +103,14 @@ static int ext4_end_io(ext4_io_end_t *io) "(inode %lu, offset %llu, size %zd, error %d)", inode->i_ino, offset, size, ret); } + if (io->iocb) + aio_complete(io->iocb, io->result, 0); + + if (io->flag & EXT4_IO_END_DIRECT) + inode_dio_done(inode); /* Wake up anyone waiting on unwritten extent conversion */ if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) wake_up_all(ext4_ioend_wq(inode)); - if (io->flag & EXT4_IO_END_DIRECT) - inode_dio_done(inode); - if (io->iocb) - aio_complete(io->iocb, io->result, 0); return ret; } diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 0cfa2f4..d99387b 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -334,7 +334,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, bh = sb_getblk(sb, blk); if (!bh) - return ERR_PTR(-ENOMEM); + return ERR_PTR(-EIO); if ((err = ext4_journal_get_write_access(handle, bh))) { brelse(bh); bh = ERR_PTR(err); @@ -411,7 +411,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap); if (!bh) - return -ENOMEM; + return -EIO; err = ext4_journal_get_write_access(handle, bh); if (err) @@ -501,7 +501,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb, gdb = sb_getblk(sb, block); if (!gdb) { - err = -ENOMEM; + err = -EIO; goto out; } @@ -1065,7 +1065,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data, bh = sb_getblk(sb, backup_block); if (!bh) { - err = -ENOMEM; + err = -EIO; break; } ext4_debug("update metadata backup %llu(+%llu)\n", @@ -1247,7 +1247,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, ext4_inode_table_set(sb, gdp, group_data->inode_table); ext4_free_group_clusters_set(sb, gdp, - EXT4_NUM_B2C(sbi, group_data->free_blocks_count)); + EXT4_B2C(sbi, group_data->free_blocks_count)); ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); if (ext4_has_group_desc_csum(sb)) ext4_itable_unused_set(sb, gdp, @@ -1341,8 +1341,6 @@ static void ext4_update_super(struct super_block *sb, /* Update the global fs size fields */ sbi->s_groups_count += flex_gd->count; - sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, - (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); /* Update the reserved block counts only once the new group is * active. 
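The flex_groups hunks in ext4.h, ialloc.c, mballoc.c, resize.c and super.c all revert the free_clusters counter from atomic64_t back to the plain atomic_t of v3.8; the 64-bit variant presumably exists so the per-flex-group count cannot wrap past 2^32 on very large filesystems. The two APIs are otherwise used identically; a sketch of the 64-bit form:

    #include <linux/atomic.h>

    static atomic64_t free_clusters64 = ATOMIC64_INIT(0);

    static void add_clusters(long long clusters)
    {
            /* 64-bit counter: no wraparound at 2^32 */
            atomic64_add(clusters, &free_clusters64);
    }

    static long long read_clusters(void)
    {
            return atomic64_read(&free_clusters64);
    }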
*/ @@ -1351,7 +1349,7 @@ static void ext4_update_super(struct super_block *sb, /* Update the free space counts */ percpu_counter_add(&sbi->s_freeclusters_counter, - EXT4_NUM_B2C(sbi, free_blocks)); + EXT4_B2C(sbi, free_blocks)); percpu_counter_add(&sbi->s_freeinodes_counter, EXT4_INODES_PER_GROUP(sb) * flex_gd->count); @@ -1362,8 +1360,8 @@ static void ext4_update_super(struct super_block *sb, sbi->s_log_groups_per_flex) { ext4_group_t flex_group; flex_group = ext4_flex_group(sbi, group_data[0].group); - atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic_add(EXT4_B2C(sbi, free_blocks), + &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, &sbi->s_flex_groups[flex_group].free_inodes); } @@ -1880,10 +1878,6 @@ retry: return 0; ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset); - if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { - ext4_warning(sb, "resize would cause inodes_count overflow"); - return -EINVAL; - } ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset); n_desc_blocks = num_desc_blocks(sb, n_group + 1); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5575a45..3d4fb81 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -452,13 +452,10 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) struct super_block *sb = journal->j_private; struct ext4_sb_info *sbi = EXT4_SB(sb); int error = is_journal_aborted(journal); - struct ext4_journal_cb_entry *jce; + struct ext4_journal_cb_entry *jce, *tmp; - BUG_ON(txn->t_state == T_FINISHED); spin_lock(&sbi->s_md_lock); - while (!list_empty(&txn->t_private_list)) { - jce = list_entry(txn->t_private_list.next, - struct ext4_journal_cb_entry, jce_list); + list_for_each_entry_safe(jce, tmp, &txn->t_private_list, jce_list) { list_del_init(&jce->jce_list); spin_unlock(&sbi->s_md_lock); jce->jce_func(sb, jce, error); @@ -1982,8 +1979,8 @@ static int ext4_fill_flex_info(struct super_block *sb) flex_group = ext4_flex_group(sbi, i); atomic_add(ext4_free_inodes_count(sb, gdp), &sbi->s_flex_groups[flex_group].free_inodes); - atomic64_add(ext4_free_group_clusters(sb, gdp), - &sbi->s_flex_groups[flex_group].free_clusters); + atomic_add(ext4_free_group_clusters(sb, gdp), + &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(ext4_used_dirs_count(sb, gdp), &sbi->s_flex_groups[flex_group].used_dirs); } @@ -3238,7 +3235,7 @@ int ext4_calculate_overhead(struct super_block *sb) } /* Add the journal blocks as well */ if (sbi->s_journal) - overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); + overhead += EXT4_B2C(sbi, sbi->s_journal->j_maxlen); sbi->s_overhead = overhead; smp_wmb(); @@ -4011,7 +4008,7 @@ no_journal: !(sb->s_flags & MS_RDONLY)) { err = ext4_enable_quotas(sb); if (err) - goto failed_mount8; + goto failed_mount7; } #endif /* CONFIG_QUOTA */ @@ -4038,10 +4035,6 @@ cantfind_ext4: ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; -#ifdef CONFIG_QUOTA -failed_mount8: - kobject_del(&sbi->s_kobj); -#endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: @@ -5012,9 +5005,9 @@ static int ext4_enable_quotas(struct super_block *sb) DQUOT_USAGE_ENABLED); if (err) { ext4_warning(sb, - "Failed to enable quota tracking " - "(type=%d, err=%d). Please run " - "e2fsck to fix.", type, err); + "Failed to enable quota (type=%d) " + "tracking. 
Please run e2fsck to fix.", + type); return err; } } diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index b93846b..3a91ebc 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -549,7 +549,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, error = ext4_handle_dirty_xattr_block(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); - dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); + dquot_free_block(inode, 1); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); } @@ -832,8 +832,7 @@ inserted: else { /* The old block is released after updating the inode. */ - error = dquot_alloc_block(inode, - EXT4_C2B(EXT4_SB(sb), 1)); + error = dquot_alloc_block(inode, 1); if (error) goto cleanup; error = ext4_journal_get_write_access(handle, @@ -888,17 +887,16 @@ inserted: new_bh = sb_getblk(sb, block); if (!new_bh) { - error = -ENOMEM; getblk_failed: ext4_free_blocks(handle, inode, NULL, block, 1, EXT4_FREE_BLOCKS_METADATA); + error = -EIO; goto cleanup; } lock_buffer(new_bh); error = ext4_journal_get_create_access(handle, new_bh); if (error) { unlock_buffer(new_bh); - error = -EIO; goto getblk_failed; } memcpy(new_bh->b_data, s->base, new_bh->b_size); @@ -930,7 +928,7 @@ cleanup: return error; cleanup_dquot: - dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); + dquot_free_block(inode, 1); goto cleanup; bad_block: diff --git a/fs/file.c b/fs/file.c index 328087b..2b3570b 100644 --- a/fs/file.c +++ b/fs/file.c @@ -98,14 +98,14 @@ static void free_fdtable_rcu(struct rcu_head *rcu) kfree(fdt->open_fds); kfree(fdt); } else { - fddef = &per_cpu(fdtable_defer_list, get_cpu_light()); + fddef = &get_cpu_var(fdtable_defer_list); spin_lock(&fddef->lock); fdt->next = fddef->next; fddef->next = fdt; /* vmallocs are handled from the workqueue context */ schedule_work(&fddef->wq); spin_unlock(&fddef->lock); - put_cpu_light(); + put_cpu_var(fdtable_defer_list); } } @@ -516,7 +516,7 @@ struct files_struct init_files = { .close_on_exec = init_files.close_on_exec_init, .open_fds = init_files.open_fds_init, }, - .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), + .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), }; /* diff --git a/fs/fscache/page.c b/fs/fscache/page.c index c84696c..ff000e5 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -796,13 +796,11 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) _enter(""); - spin_lock(&cookie->stores_lock); - while (1) { - n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, - ARRAY_SIZE(results), - FSCACHE_COOKIE_PENDING_TAG); - if (n == 0) - break; + while (spin_lock(&cookie->stores_lock), + n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, + ARRAY_SIZE(results), + FSCACHE_COOKIE_PENDING_TAG), + n > 0) { for (i = n - 1; i >= 0; i--) { page = results[i]; radix_tree_delete(&cookie->stores, page->index); @@ -812,7 +810,6 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) for (i = n - 1; i >= 0; i--) page_cache_release(results[i]); - spin_lock(&cookie->stores_lock); } spin_unlock(&cookie->stores_lock); diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 40d13c7..8179e8b 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -287,5 +287,5 @@ const struct file_operations fscache_stats_fops = { .open = fscache_stats_open, .read = seq_read, .llseek = seq_lseek, - .release = single_release, + .release = seq_release, }; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 315e1f8..b7c09f9 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -682,14 
+682,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) spin_lock(&fc->lock); fi->attr_version = ++fc->attr_version; - /* - * If i_nlink == 0 then unlink doesn't make sense, yet this can - * happen if userspace filesystem is careless. It would be - * difficult to enforce correct nlink usage so just ignore this - * condition here - */ - if (inode->i_nlink > 0) - drop_nlink(inode); + drop_nlink(inode); spin_unlock(&fc->lock); fuse_invalidate_attr(inode); fuse_invalidate_attr(dir); diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 9a3945a..a68e91b 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1286,10 +1286,6 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize) inode_dio_wait(inode); - ret = gfs2_rs_alloc(GFS2_I(inode)); - if (ret) - return ret; - oldsize = inode->i_size; if (newsize >= oldsize) return do_grow(inode, newsize); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 7af426b..991ab2d 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -924,11 +924,8 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) cmd = F_SETLK; fl->fl_type = F_UNLCK; } - if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { - if (fl->fl_type == F_UNLCK) - posix_lock_file_wait(file, fl); + if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) return -EIO; - } if (IS_GETLK(cmd)) return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); else if (fl->fl_type == F_UNLCK) diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 9afba3d6..b7eff07 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip) RB_CLEAR_NODE(&ip->i_res->rs_node); out: up_write(&ip->i_rw_mutex); - return error; + return 0; } static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index fc8ddc1..eba76ea 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode) struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; - loff_t size = inode->i_size; + u32 size = inode->i_size; res = pagecache_write_begin(NULL, mapping, size, 0, AOP_FLAG_UNINTERRUPTIBLE, diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index d0de769..78bde32 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) * way when do_mmap_pgoff unwinds (may be important on powerpc * and ia64). */ - vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; + vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &hugetlb_vm_ops; if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) @@ -908,19 +908,19 @@ static int can_do_hugetlb_shm(void) static int get_hstate_idx(int page_size_log) { - struct hstate *h = hstate_sizelog(page_size_log); + struct hstate *h; + if (!page_size_log) + return default_hstate_idx; + h = size_to_hstate(1 << page_size_log); if (!h) return -1; return h - hstates; } -/* - * Note that size should be aligned to proper hugepage size in caller side, - * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. 
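Two of the hunks above share a 32-bit failure mode: the removed resize.c guard rejected resizes whose group-count times inodes-per-group product overflows 32 bits, and the hfsplus change narrows a 64-bit loff_t size into a u32, which wraps for files of 4 GiB and up. A standalone demonstration with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hfsplus-style truncation: sizes >= 4 GiB wrap in a u32 */
    int64_t i_size = 5368709120LL;                /* 5 GiB */
    printf("u32 view of 5 GiB: %u\n", (uint32_t)i_size);

    /* resize-style guard: refuse products that overflow 32 bits */
    uint64_t n_group = 70000000, inodes_per_group = 8192;
    if (n_group > 0xFFFFFFFFULL / inodes_per_group)
        printf("resize would overflow the 32-bit inode count\n");
    return 0;
}
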
- */ -struct file *hugetlb_file_setup(const char *name, size_t size, - vm_flags_t acctflag, struct user_struct **user, +struct file *hugetlb_file_setup(const char *name, unsigned long addr, + size_t size, vm_flags_t acctflag, + struct user_struct **user, int creat_flags, int page_size_log) { int error = -ENOMEM; @@ -929,6 +929,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size, struct path path; struct dentry *root; struct qstr quick_string; + struct hstate *hstate; + unsigned long num_pages; int hstate_idx; hstate_idx = get_hstate_idx(page_size_log); @@ -967,10 +969,12 @@ struct file *hugetlb_file_setup(const char *name, size_t size, if (!inode) goto out_dentry; + hstate = hstate_inode(inode); + size += addr & ~huge_page_mask(hstate); + num_pages = ALIGN(size, huge_page_size(hstate)) >> + huge_page_shift(hstate); error = -ENOMEM; - if (hugetlb_reserve_pages(inode, 0, - size >> huge_page_shift(hstate_inode(inode)), NULL, - acctflag)) + if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag)) goto out_inode; d_instantiate(path.dentry, inode); diff --git a/fs/inode.c b/fs/inode.c index b98540e..14084b7 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan) * inode to the back of the list so we don't spin on it. */ if (!spin_trylock(&inode->i_lock)) { - list_move(&inode->i_lru, &sb->s_inode_lru); + list_move_tail(&inode->i_lru, &sb->s_inode_lru); continue; } diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 95debd7..08c0304 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -129,8 +129,6 @@ void __log_wait_for_space(journal_t *journal) if (journal->j_flags & JFS_ABORT) return; spin_unlock(&journal->j_state_lock); - if (current->plug) - io_schedule(); mutex_lock(&journal->j_checkpoint_mutex); /* diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 069bf58..3091d42 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -382,7 +382,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) int space_left = 0; int first_tag = 0; int tag_flag; - int i; + int i, to_free = 0; int tag_bytes = journal_tag_bytes(journal); struct buffer_head *cbh = NULL; /* For transactional checksums */ __u32 crc32_sum = ~0; @@ -1126,7 +1126,7 @@ restart_loop: journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged; spin_unlock(&journal->j_history_lock); - commit_transaction->t_state = T_COMMIT_CALLBACK; + commit_transaction->t_state = T_FINISHED; J_ASSERT(commit_transaction == journal->j_committing_transaction); journal->j_commit_sequence = commit_transaction->t_tid; journal->j_committing_transaction = NULL; @@ -1141,44 +1141,38 @@ restart_loop: journal->j_average_commit_time*3) / 4; else journal->j_average_commit_time = commit_time; - write_unlock(&journal->j_state_lock); - if (journal->j_checkpoint_transactions == NULL) { - journal->j_checkpoint_transactions = commit_transaction; - commit_transaction->t_cpnext = commit_transaction; - commit_transaction->t_cpprev = commit_transaction; + if (commit_transaction->t_checkpoint_list == NULL && + commit_transaction->t_checkpoint_io_list == NULL) { + __jbd2_journal_drop_transaction(journal, commit_transaction); + to_free = 1; } else { - commit_transaction->t_cpnext = - journal->j_checkpoint_transactions; - commit_transaction->t_cpprev = - commit_transaction->t_cpnext->t_cpprev; - commit_transaction->t_cpnext->t_cpprev = - commit_transaction; - commit_transaction->t_cpprev->t_cpnext = + if (journal->j_checkpoint_transactions == NULL) { + 
journal->j_checkpoint_transactions = commit_transaction; + commit_transaction->t_cpnext = commit_transaction; + commit_transaction->t_cpprev = commit_transaction; + } else { + commit_transaction->t_cpnext = + journal->j_checkpoint_transactions; + commit_transaction->t_cpprev = + commit_transaction->t_cpnext->t_cpprev; + commit_transaction->t_cpnext->t_cpprev = + commit_transaction; + commit_transaction->t_cpprev->t_cpnext = commit_transaction; + } } spin_unlock(&journal->j_list_lock); - /* Drop all spin_locks because commit_callback may be block. - * __journal_remove_checkpoint() can not destroy transaction - * under us because it is not marked as T_FINISHED yet */ + if (journal->j_commit_callback) journal->j_commit_callback(journal, commit_transaction); trace_jbd2_end_commit(journal, commit_transaction); jbd_debug(1, "JBD2: commit %d complete, head %d\n", journal->j_commit_sequence, journal->j_tail_sequence); - - write_lock(&journal->j_state_lock); - spin_lock(&journal->j_list_lock); - commit_transaction->t_state = T_FINISHED; - /* Recheck checkpoint lists after j_list_lock was dropped */ - if (commit_transaction->t_checkpoint_list == NULL && - commit_transaction->t_checkpoint_io_list == NULL) { - __jbd2_journal_drop_transaction(journal, commit_transaction); + if (to_free) jbd2_journal_free_transaction(commit_transaction); - } - spin_unlock(&journal->j_list_lock); - write_unlock(&journal->j_state_lock); + wake_up(&journal->j_wait_done_commit); } diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 42f8cf6c..dbf41f9 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -698,37 +698,6 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) } /* - * When this function returns the transaction corresponding to tid - * will be completed. If the transaction has currently running, start - * committing that transaction before waiting for it to complete. If - * the transaction id is stale, it is by definition already completed, - * so just return SUCCESS. 
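The restored jbd2 commit path open-codes insertion into the circular checkpoint list through the t_cpnext/t_cpprev pointers. A minimal standalone model of that linking (struct and field names invented):

#include <stdio.h>

struct txn {
    int id;
    struct txn *cpnext, *cpprev;
};

/* Link t into the circle just behind *head (i.e. at the tail). */
static void checkpoint_link(struct txn **head, struct txn *t)
{
    if (*head == NULL) {
        *head = t;
        t->cpnext = t->cpprev = t;   /* a circle of one */
    } else {
        t->cpnext = *head;
        t->cpprev = (*head)->cpprev;
        t->cpnext->cpprev = t;
        t->cpprev->cpnext = t;
    }
}

int main(void)
{
    struct txn a = { .id = 1 }, b = { .id = 2 };
    struct txn *head = NULL;

    checkpoint_link(&head, &a);
    checkpoint_link(&head, &b);
    printf("head %d, next %d, prev %d\n",
           head->id, head->cpnext->id, head->cpprev->id);
    return 0;
}
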
- */ -int jbd2_complete_transaction(journal_t *journal, tid_t tid) -{ - int need_to_wait = 1; - - read_lock(&journal->j_state_lock); - if (journal->j_running_transaction && - journal->j_running_transaction->t_tid == tid) { - if (journal->j_commit_request != tid) { - /* transaction not yet started, so request it */ - read_unlock(&journal->j_state_lock); - jbd2_log_start_commit(journal, tid); - goto wait_commit; - } - } else if (!(journal->j_committing_transaction && - journal->j_committing_transaction->t_tid == tid)) - need_to_wait = 0; - read_unlock(&journal->j_state_lock); - if (!need_to_wait) - return 0; -wait_commit: - return jbd2_log_wait_commit(journal, tid); -} -EXPORT_SYMBOL(jbd2_complete_transaction); - -/* * Log buffer allocation routines: */ diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 73b9253..df9f297 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1047,12 +1047,9 @@ out: void jbd2_journal_set_triggers(struct buffer_head *bh, struct jbd2_buffer_trigger_type *type) { - struct journal_head *jh = jbd2_journal_grab_journal_head(bh); + struct journal_head *jh = bh2jh(bh); - if (WARN_ON(!jh)) - return; jh->b_triggers = type; - jbd2_journal_put_journal_head(jh); } void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, @@ -1104,18 +1101,17 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; - struct journal_head *jh; + struct journal_head *jh = bh2jh(bh); int ret = 0; + jbd_debug(5, "journal_head %p\n", jh); + JBUFFER_TRACE(jh, "entry"); if (is_handle_aborted(handle)) goto out; - jh = jbd2_journal_grab_journal_head(bh); - if (!jh) { + if (!buffer_jbd(bh)) { ret = -EUCLEAN; goto out; } - jbd_debug(5, "journal_head %p\n", jh); - JBUFFER_TRACE(jh, "entry"); jbd_lock_bh_state(bh); @@ -1206,7 +1202,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) spin_unlock(&journal->j_list_lock); out_unlock_bh: jbd_unlock_bh_state(bh); - jbd2_journal_put_journal_head(jh); out: JBUFFER_TRACE(jh, "exit"); WARN_ON(ret); /* All errors are bugs, so dump the stack */ diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 193f04c..ca0a080 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -144,9 +144,6 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout) timeout); if (ret < 0) return -ERESTARTSYS; - /* Reset the lock status after a server reboot so we resend */ - if (block->b_status == nlm_lck_denied_grace_period) - block->b_status = nlm_lck_blocked; req->a_res.status = block->b_status; return 0; } diff --git a/fs/namei.c b/fs/namei.c index ec97aef..43a97ee 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -693,6 +693,8 @@ void nd_jump_link(struct nameidata *nd, struct path *path) nd->path = *path; nd->inode = nd->path.dentry->d_inode; nd->flags |= LOOKUP_JUMPED; + + BUG_ON(nd->inode->i_op->follow_link); } static inline void put_link(struct nameidata *nd, struct path *link, void *cookie) diff --git a/fs/namespace.c b/fs/namespace.c index 859a026..55605c5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -22,7 +22,6 @@ #include /* fsnotify_vfsmount_delete */ #include #include -#include #include "pnode.h" #include "internal.h" @@ -314,11 +313,8 @@ int __mnt_want_write(struct vfsmount *m) * incremented count after it has set MNT_WRITE_HOLD. 
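The jbd2_complete_transaction() helper deleted above reduces to a three-way decision on the transaction id: kick off the commit if it has not yet been requested, wait if the transaction is running or committing, and treat a stale tid as already complete. A standalone model of just that classification (names invented, no locking):

#include <stdio.h>

enum action { ALREADY_DONE, START_AND_WAIT, JUST_WAIT };

static enum action classify(unsigned running, unsigned committing,
                            unsigned requested, unsigned tid)
{
    if (running == tid && requested != tid)
        return START_AND_WAIT;   /* not yet asked to commit: kick it */
    if (running == tid || committing == tid)
        return JUST_WAIT;        /* commit already in flight */
    return ALREADY_DONE;         /* stale tid: by definition complete */
}

int main(void)
{
    printf("%d %d %d\n",
           classify(7, 6, 6, 7),   /* 1: START_AND_WAIT */
           classify(8, 7, 7, 7),   /* 2: JUST_WAIT      */
           classify(9, 8, 8, 5));  /* 0: ALREADY_DONE   */
    return 0;
}
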
*/ smp_mb(); - while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { - preempt_enable(); - cpu_chill(); - preempt_disable(); - } + while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) + cpu_relax(); /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until @@ -802,10 +798,6 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, } mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; - /* Don't allow unprivileged users to change mount flags */ - if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY)) - mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; - atomic_inc(&sb->s_active); mnt->mnt.mnt_sb = sb; mnt->mnt.mnt_root = dget(root); @@ -1246,14 +1238,6 @@ static int do_umount(struct mount *mnt, int flags) } /* - * Is the caller allowed to modify his namespace? - */ -static inline bool may_mount(void) -{ - return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); -} - -/* * Now umount can handle mount points as well as block devices. * This is important for filesystems which use unnamed block devices. * @@ -1271,9 +1255,6 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) return -EINVAL; - if (!may_mount()) - return -EPERM; - if (!(flags & UMOUNT_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; @@ -1287,6 +1268,10 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) if (!check_mnt(mnt)) goto dput_and_out; + retval = -EPERM; + if (!ns_capable(mnt->mnt_ns->user_ns, CAP_SYS_ADMIN)) + goto dput_and_out; + retval = do_umount(mnt, flags); dput_and_out: /* we mustn't call path_put() as that would clear mnt_expiry_mark */ @@ -1310,7 +1295,7 @@ SYSCALL_DEFINE1(oldumount, char __user *, name) static int mount_is_safe(struct path *path) { - if (may_mount()) + if (ns_capable(real_mount(path->mnt)->mnt_ns->user_ns, CAP_SYS_ADMIN)) return 0; return -EPERM; #ifdef notyet @@ -1648,7 +1633,7 @@ static int do_change_type(struct path *path, int flag) int type; int err = 0; - if (!may_mount()) + if (!ns_capable(mnt->mnt_ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; if (path->dentry != path->mnt->mnt_root) @@ -1744,9 +1729,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags) if (readonly_request == __mnt_is_readonly(mnt)) return 0; - if (mnt->mnt_flags & MNT_LOCK_READONLY) - return -EPERM; - if (readonly_request) error = mnt_make_readonly(real_mount(mnt)); else @@ -1815,7 +1797,7 @@ static int do_move_mount(struct path *path, const char *old_name) struct mount *p; struct mount *old; int err = 0; - if (!may_mount()) + if (!ns_capable(real_mount(path->mnt)->mnt_ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; if (!old_name || !*old_name) return -EINVAL; @@ -1951,14 +1933,16 @@ static int do_new_mount(struct path *path, const char *fstype, int flags, int mnt_flags, const char *name, void *data) { struct file_system_type *type; - struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; + struct user_namespace *user_ns; struct vfsmount *mnt; int err; if (!fstype) return -EINVAL; - if (!may_mount()) + /* we need capabilities... 
*/ + user_ns = real_mount(path->mnt)->mnt_ns->user_ns; + if (!ns_capable(user_ns, CAP_SYS_ADMIN)) return -EPERM; type = get_fs_type(fstype); @@ -2376,7 +2360,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, /* First pass: copy the tree topology */ copy_flags = CL_COPY_ALL | CL_EXPIRE; if (user_ns != mnt_ns->user_ns) - copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; + copy_flags |= CL_SHARED_TO_SLAVE; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { up_write(&namespace_sem); @@ -2583,7 +2567,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, struct mount *new_mnt, *root_mnt; int error; - if (!may_mount()) + if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; error = user_path_dir(new_root, &new); @@ -2769,51 +2753,6 @@ bool our_mnt(struct vfsmount *mnt) return check_mnt(real_mount(mnt)); } -bool current_chrooted(void) -{ - /* Does the current process have a non-standard root */ - struct path ns_root; - struct path fs_root; - bool chrooted; - - /* Find the namespace root */ - ns_root.mnt = ¤t->nsproxy->mnt_ns->root->mnt; - ns_root.dentry = ns_root.mnt->mnt_root; - path_get(&ns_root); - while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) - ; - - get_fs_root(current->fs, &fs_root); - - chrooted = !path_equal(&fs_root, &ns_root); - - path_put(&fs_root); - path_put(&ns_root); - - return chrooted; -} - -void update_mnt_policy(struct user_namespace *userns) -{ - struct mnt_namespace *ns = current->nsproxy->mnt_ns; - struct mount *mnt; - - down_read(&namespace_sem); - list_for_each_entry(mnt, &ns->list, mnt_list) { - switch (mnt->mnt.mnt_sb->s_magic) { - case SYSFS_MAGIC: - userns->may_mount_sysfs = true; - break; - case PROC_SUPER_MAGIC: - userns->may_mount_proc = true; - break; - } - if (userns->may_mount_sysfs && userns->may_mount_proc) - break; - } - up_read(&namespace_sem); -} - static void *mntns_get(struct task_struct *task) { struct mnt_namespace *ns = NULL; diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 434b93e..4fa788c 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -1273,7 +1273,6 @@ static const struct nfs_pageio_ops bl_pg_write_ops = { static struct pnfs_layoutdriver_type blocklayout_type = { .id = LAYOUT_BLOCK_VOLUME, .name = "LAYOUT_BLOCK_VOLUME", - .owner = THIS_MODULE, .read_pagelist = bl_read_pagelist, .write_pagelist = bl_write_pagelist, .alloc_layout_hdr = bl_alloc_layout_hdr, diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index 6fc7b5c..737d839 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c @@ -55,8 +55,7 @@ static void dev_remove(struct net *net, dev_t dev) bl_pipe_msg.bl_wq = &nn->bl_wq; memset(msg, 0, sizeof(*msg)); - msg->len = sizeof(bl_msg) + bl_msg.totallen; - msg->data = kzalloc(msg->len, GFP_NOFS); + msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); if (!msg->data) goto out; @@ -67,6 +66,7 @@ static void dev_remove(struct net *net, dev_t dev) memcpy(msg->data, &bl_msg, sizeof(bl_msg)); dataptr = (uint8_t *) msg->data; memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); + msg->len = sizeof(bl_msg) + bl_msg.totallen; add_wait_queue(&nn->bl_wq, &wq); if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 2960512..264d1aa 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -183,15 +183,60 
@@ static u32 initiate_file_draining(struct nfs_client *clp, static u32 initiate_bulk_draining(struct nfs_client *clp, struct cb_layoutrecallargs *args) { - int stat; + struct nfs_server *server; + struct pnfs_layout_hdr *lo; + struct inode *ino; + u32 rv = NFS4ERR_NOMATCHING_LAYOUT; + struct pnfs_layout_hdr *tmp; + LIST_HEAD(recall_list); + LIST_HEAD(free_me_list); + struct pnfs_layout_range range = { + .iomode = IOMODE_ANY, + .offset = 0, + .length = NFS4_MAX_UINT64, + }; - if (args->cbl_recall_type == RETURN_FSID) - stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true); - else - stat = pnfs_destroy_layouts_byclid(clp, true); - if (stat != 0) - return NFS4ERR_DELAY; - return NFS4ERR_NOMATCHING_LAYOUT; + spin_lock(&clp->cl_lock); + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + if ((args->cbl_recall_type == RETURN_FSID) && + memcmp(&server->fsid, &args->cbl_fsid, + sizeof(struct nfs_fsid))) + continue; + + list_for_each_entry(lo, &server->layouts, plh_layouts) { + ino = igrab(lo->plh_inode); + if (!ino) + continue; + spin_lock(&ino->i_lock); + /* Is this layout in the process of being freed? */ + if (NFS_I(ino)->layout != lo) { + spin_unlock(&ino->i_lock); + iput(ino); + continue; + } + pnfs_get_layout_hdr(lo); + spin_unlock(&ino->i_lock); + list_add(&lo->plh_bulk_recall, &recall_list); + } + } + rcu_read_unlock(); + spin_unlock(&clp->cl_lock); + + list_for_each_entry_safe(lo, tmp, + &recall_list, plh_bulk_recall) { + ino = lo->plh_inode; + spin_lock(&ino->i_lock); + set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); + if (pnfs_mark_matching_lsegs_invalid(lo, &free_me_list, &range)) + rv = NFS4ERR_DELAY; + list_del_init(&lo->plh_bulk_recall); + spin_unlock(&ino->i_lock); + pnfs_free_lseg_list(&free_me_list); + pnfs_put_layout_hdr(lo); + iput(ino); + } + return rv; } static u32 do_callback_layoutrecall(struct nfs_client *clp, diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index cf4ed87..bc3968fa 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -725,9 +725,9 @@ out1: return ret; } -static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen) +static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data) { - return key_instantiate_and_link(key, data, datalen, + return key_instantiate_and_link(key, data, strlen(data) + 1, id_resolver_cache->thread_keyring, authkey); } @@ -737,7 +737,6 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, struct key *key, struct key *authkey) { char id_str[NFS_UINT_MAXLEN]; - size_t len; int ret = -ENOKEY; /* ret = -ENOKEY */ @@ -747,15 +746,13 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, case IDMAP_CONV_NAMETOID: if (strcmp(upcall->im_name, im->im_name) != 0) break; - /* Note: here we store the NUL terminator too */ - len = sprintf(id_str, "%d", im->im_id) + 1; - ret = nfs_idmap_instantiate(key, authkey, id_str, len); + sprintf(id_str, "%d", im->im_id); + ret = nfs_idmap_instantiate(key, authkey, id_str); break; case IDMAP_CONV_IDTONAME: if (upcall->im_id != im->im_id) break; - len = strlen(im->im_name); - ret = nfs_idmap_instantiate(key, authkey, im->im_name, len); + ret = nfs_idmap_instantiate(key, authkey, im->im_name); break; default: ret = -EINVAL; diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index c53189d..2e9779b 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new, struct rpc_cred *cred) { struct nfs_net *nn = 
net_generic(new->cl_net, nfs_net_id); - struct nfs_client *pos, *prev = NULL; + struct nfs_client *pos, *n, *prev = NULL; struct nfs4_setclientid_res clid = { .clientid = new->cl_clientid, .confirm = new->cl_confirm, @@ -308,23 +308,10 @@ int nfs40_walk_client_list(struct nfs_client *new, int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); - list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { + list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { /* If "pos" isn't marked ready, we can't trust the * remaining fields in "pos" */ - if (pos->cl_cons_state > NFS_CS_READY) { - atomic_inc(&pos->cl_count); - spin_unlock(&nn->nfs_client_lock); - - if (prev) - nfs_put_client(prev); - prev = pos; - - status = nfs_wait_client_init_complete(pos); - spin_lock(&nn->nfs_client_lock); - if (status < 0) - continue; - } - if (pos->cl_cons_state != NFS_CS_READY) + if (pos->cl_cons_state < NFS_CS_READY) continue; if (pos->rpc_ops != new->rpc_ops) @@ -436,16 +423,16 @@ int nfs41_walk_client_list(struct nfs_client *new, struct rpc_cred *cred) { struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); - struct nfs_client *pos, *prev = NULL; + struct nfs_client *pos, *n, *prev = NULL; int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); - list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { + list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { /* If "pos" isn't marked ready, we can't trust the * remaining fields in "pos", especially the client * ID and serverowner fields. Wait for CREATE_SESSION * to finish. */ - if (pos->cl_cons_state > NFS_CS_READY) { + if (pos->cl_cons_state < NFS_CS_READY) { atomic_inc(&pos->cl_count); spin_unlock(&nn->nfs_client_lock); @@ -453,17 +440,18 @@ int nfs41_walk_client_list(struct nfs_client *new, nfs_put_client(prev); prev = pos; + nfs4_schedule_lease_recovery(pos); status = nfs_wait_client_init_complete(pos); - if (status == 0) { - nfs4_schedule_lease_recovery(pos); - status = nfs4_wait_clnt_recover(pos); + if (status < 0) { + nfs_put_client(pos); + spin_lock(&nn->nfs_client_lock); + continue; } + status = pos->cl_cons_state; spin_lock(&nn->nfs_client_lock); if (status < 0) continue; } - if (pos->cl_cons_state != NFS_CS_READY) - continue; if (pos->rpc_ops != new->rpc_ops) continue; @@ -481,18 +469,17 @@ int nfs41_walk_client_list(struct nfs_client *new, continue; atomic_inc(&pos->cl_count); - *result = pos; - status = 0; + spin_unlock(&nn->nfs_client_lock); dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", __func__, pos, atomic_read(&pos->cl_count)); - break; + + *result = pos; + return 0; } /* No matching nfs_client found. 
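The walk_client_list hunks revert a pattern worth spelling out: to sleep on an entry while walking a locked list, the removed code pinned the entry with a reference, dropped the lock, waited, then retook the lock before continuing the walk. A much-simplified pthread model (the nfs_client details are elided, and the reference is dropped as soon as the walk resumes); compile with -pthread:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

struct client { atomic_int count; atomic_int ready; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_for_ready(struct client *pos)
{
    atomic_fetch_add(&pos->count, 1);      /* pin the entry */
    pthread_mutex_unlock(&list_lock);      /* now safe to sleep */

    while (!atomic_load(&pos->ready))      /* stand-in for the real wait */
        sched_yield();

    pthread_mutex_lock(&list_lock);        /* resume the walk */
    atomic_fetch_sub(&pos->count, 1);
}

int main(void)
{
    struct client c = { .count = 0, .ready = 1 };
    pthread_mutex_lock(&list_lock);
    wait_for_ready(&c);
    pthread_mutex_unlock(&list_lock);
    printf("refcount back to %d\n", atomic_load(&c.count));
    return 0;
}
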
*/ spin_unlock(&nn->nfs_client_lock); dprintk("NFS: <-- %s status = %d\n", __func__, status); - if (prev) - nfs_put_client(prev); return status; } #endif /* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 4fb234d..194c484 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -99,8 +99,7 @@ static void filelayout_reset_write(struct nfs_write_data *data) task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode, &hdr->pages, - hdr->completion_ops, - hdr->dreq); + hdr->completion_ops); } } @@ -120,8 +119,7 @@ static void filelayout_reset_read(struct nfs_read_data *data) task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode, &hdr->pages, - hdr->completion_ops, - hdr->dreq); + hdr->completion_ops); } } @@ -129,6 +127,7 @@ static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo) { if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) return; + clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags); pnfs_return_layout(inode); } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e3c6121..cf747ef 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1374,12 +1374,6 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state case -ENOMEM: err = 0; goto out; - case -NFS4ERR_DELAY: - case -NFS4ERR_GRACE: - set_bit(NFS_DELEGATED_STATE, &state->flags); - ssleep(1); - err = -EAGAIN; - goto out; } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); @@ -1469,7 +1463,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) struct nfs4_state_owner *sp = data->owner; if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) - goto out_wait; + return; /* * Check if we still need to send an OPEN call, or if we can use * a delegation instead. 
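filelayout_fenceme() above guards on test_and_clear_bit(), an atomic read-modify-write that reports the old bit value and clears it in one step, so only one caller can win the race. A C11 model of its semantics:

#include <stdatomic.h>
#include <stdio.h>

static int test_and_clear_bit(unsigned bit, atomic_ulong *flags)
{
    unsigned long mask = 1UL << bit;
    /* fetch_and returns the old word; the caller sees the old bit */
    return (atomic_fetch_and(flags, ~mask) & mask) != 0;
}

int main(void)
{
    atomic_ulong flags = 1UL << 3;
    printf("first:  %d\n", test_and_clear_bit(3, &flags)); /* 1: we won */
    printf("second: %d\n", test_and_clear_bit(3, &flags)); /* 0: already cleared */
    return 0;
}
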
@@ -1504,7 +1498,6 @@ unlock_no_action: rcu_read_unlock(); out_no_action: task->tk_action = NULL; -out_wait: nfs4_sequence_done(task, &data->o_res.seq_res); } @@ -2157,7 +2150,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) dprintk("%s: begin!\n", __func__); if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) - goto out_wait; + return; task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; calldata->arg.fmode = FMODE_READ|FMODE_WRITE; @@ -2179,14 +2172,16 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) if (!call_close) { /* Note: exit _without_ calling nfs4_close_done */ - goto out_no_action; + task->tk_action = NULL; + nfs4_sequence_done(task, &calldata->res.seq_res); + goto out; } if (calldata->arg.fmode == 0) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; if (calldata->roc && pnfs_roc_drain(inode, &calldata->roc_barrier, task)) - goto out_wait; + goto out; } nfs_fattr_init(calldata->res.fattr); @@ -2196,12 +2191,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) &calldata->res.seq_res, task) != 0) nfs_release_seqid(calldata->arg.seqid); +out: dprintk("%s: done!\n", __func__); - return; -out_no_action: - task->tk_action = NULL; -out_wait: - nfs4_sequence_done(task, &calldata->res.seq_res); } static const struct rpc_call_ops nfs4_close_ops = { @@ -4432,10 +4423,12 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) struct nfs4_unlockdata *calldata = data; if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) - goto out_wait; + return; if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { /* Note: exit _without_ running nfs4_locku_done */ - goto out_no_action; + task->tk_action = NULL; + nfs4_sequence_done(task, &calldata->res.seq_res); + return; } calldata->timestamp = jiffies; if (nfs4_setup_sequence(calldata->server, @@ -4443,11 +4436,6 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) &calldata->res.seq_res, task) != 0) nfs_release_seqid(calldata->arg.seqid); - return; -out_no_action: - task->tk_action = NULL; -out_wait: - nfs4_sequence_done(task, &calldata->res.seq_res); } static const struct rpc_call_ops nfs4_locku_ops = { @@ -4513,9 +4501,9 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * if (status != 0) goto out; /* Is this a delegated lock? */ - lsp = request->fl_u.nfs4_fl.owner; - if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) + if (test_bit(NFS_DELEGATED_STATE, &state->flags)) goto out; + lsp = request->fl_u.nfs4_fl.owner; seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); status = -ENOMEM; if (seqid == NULL) @@ -4588,7 +4576,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) dprintk("%s: begin!\n", __func__); if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) - goto out_wait; + return; /* Do we need to do an open_to_lock_owner? 
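Several *_prepare() hunks in this region (open, close, locku, lock) replace scattered early returns with out_wait/out_no_action labels so that nfs4_sequence_done() runs on every abandoned path. A skeleton of that single-exit shape with stand-in names:

#include <stdio.h>

static int sequence_done;

static void nfs4_sequence_done_stub(void) { sequence_done = 1; }

static void prepare(int must_wait, int nothing_to_do)
{
    if (must_wait)
        goto out_wait;
    if (nothing_to_do)
        goto out_no_action;

    printf("issuing RPC\n");
    return;                       /* normal path: completion runs later */

out_no_action:
    printf("skipping RPC\n");     /* falls through on purpose */
out_wait:
    nfs4_sequence_done_stub();    /* every abandoned path lands here */
}

int main(void)
{
    prepare(0, 1);
    printf("sequence_done=%d\n", sequence_done);
    return 0;
}
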
*/ if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { @@ -4608,8 +4596,6 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) nfs_release_seqid(data->arg.open_seqid); out_release_lock_seqid: nfs_release_seqid(data->arg.lock_seqid); -out_wait: - nfs4_sequence_done(task, &data->res.seq_res); dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); } @@ -6093,13 +6079,11 @@ static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) static void nfs4_layoutget_release(void *calldata) { struct nfs4_layoutget *lgp = calldata; - struct inode *inode = lgp->args.inode; - struct nfs_server *server = NFS_SERVER(inode); + struct nfs_server *server = NFS_SERVER(lgp->args.inode); size_t max_pages = max_response_pages(server); dprintk("--> %s\n", __func__); nfs4_free_pages(lgp->args.layout.pages, max_pages); - pnfs_put_layout_hdr(NFS_I(inode)->layout); put_nfs_open_context(lgp->args.ctx); kfree(calldata); dprintk("<-- %s\n", __func__); @@ -6114,8 +6098,7 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = { struct pnfs_layout_segment * nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) { - struct inode *inode = lgp->args.inode; - struct nfs_server *server = NFS_SERVER(inode); + struct nfs_server *server = NFS_SERVER(lgp->args.inode); size_t max_pages = max_response_pages(server); struct rpc_task *task; struct rpc_message msg = { @@ -6145,18 +6128,13 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) lgp->res.layoutp = &lgp->args.layout; lgp->res.seq_res.sr_slot = NULL; nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); - - /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ - pnfs_get_layout_hdr(NFS_I(inode)->layout); - task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return ERR_CAST(task); status = nfs4_wait_for_completion_rpc_task(task); if (status == 0) status = task->tk_status; - /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ - if (status == 0 && lgp->res.layoutp->len) + if (status == 0) lseg = pnfs_layout_process(lgp); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); @@ -6372,8 +6350,22 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) static void nfs4_layoutcommit_release(void *calldata) { struct nfs4_layoutcommit_data *data = calldata; + struct pnfs_layout_segment *lseg, *tmp; + unsigned long *bitlock = &NFS_I(data->args.inode)->flags; pnfs_cleanup_layoutcommit(data); + /* Matched by references in pnfs_set_layoutcommit */ + list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) { + list_del_init(&lseg->pls_lc_list); + if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, + &lseg->pls_flags)) + pnfs_put_lseg(lseg); + } + + clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); + smp_mb__after_clear_bit(); + wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); + put_rpccred(data->cred); kfree(data); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 994fbe2..e61f68d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1877,13 +1877,7 @@ again: status = PTR_ERR(clnt); break; } - /* Note: this is safe because we haven't yet marked the - * client as ready, so we are the only user of - * clp->cl_rpcclient - */ - clnt = xchg(&clp->cl_rpcclient, clnt); - rpc_shutdown_client(clnt); - clnt = clp->cl_rpcclient; + clp->cl_rpcclient = clnt; goto again; case -NFS4ERR_MINOR_VERS_MISMATCH: diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 
88f9611..c6f9906 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -647,7 +647,6 @@ static struct pnfs_layoutdriver_type objlayout_type = { .flags = PNFS_LAYOUTRET_ON_SETATTR | PNFS_LAYOUTRET_ON_ERROR, - .owner = THIS_MODULE, .alloc_layout_hdr = objlayout_alloc_layout_hdr, .free_layout_hdr = objlayout_free_layout_hdr, diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 3b71623..d00260b 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -417,16 +417,6 @@ should_free_lseg(struct pnfs_layout_range *lseg_range, lo_seg_intersecting(lseg_range, recall_range); } -static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg, - struct list_head *tmp_list) -{ - if (!atomic_dec_and_test(&lseg->pls_refcount)) - return false; - pnfs_layout_remove_lseg(lseg->pls_layout, lseg); - list_add(&lseg->pls_list, tmp_list); - return true; -} - /* Returns 1 if lseg is removed from list, 0 otherwise */ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, struct list_head *tmp_list) @@ -440,8 +430,11 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, */ dprintk("%s: lseg %p ref %d\n", __func__, lseg, atomic_read(&lseg->pls_refcount)); - if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list)) + if (atomic_dec_and_test(&lseg->pls_refcount)) { + pnfs_layout_remove_lseg(lseg->pls_layout, lseg); + list_add(&lseg->pls_list, tmp_list); rv = 1; + } } return rv; } @@ -512,147 +505,37 @@ pnfs_destroy_layout(struct nfs_inode *nfsi) } EXPORT_SYMBOL_GPL(pnfs_destroy_layout); -static bool -pnfs_layout_add_bulk_destroy_list(struct inode *inode, - struct list_head *layout_list) -{ - struct pnfs_layout_hdr *lo; - bool ret = false; - - spin_lock(&inode->i_lock); - lo = NFS_I(inode)->layout; - if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) { - pnfs_get_layout_hdr(lo); - list_add(&lo->plh_bulk_destroy, layout_list); - ret = true; - } - spin_unlock(&inode->i_lock); - return ret; -} - -/* Caller must hold rcu_read_lock and clp->cl_lock */ -static int -pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp, - struct nfs_server *server, - struct list_head *layout_list) -{ - struct pnfs_layout_hdr *lo, *next; - struct inode *inode; - - list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) { - inode = igrab(lo->plh_inode); - if (inode == NULL) - continue; - list_del_init(&lo->plh_layouts); - if (pnfs_layout_add_bulk_destroy_list(inode, layout_list)) - continue; - rcu_read_unlock(); - spin_unlock(&clp->cl_lock); - iput(inode); - spin_lock(&clp->cl_lock); - rcu_read_lock(); - return -EAGAIN; - } - return 0; -} - -static int -pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list, - bool is_bulk_recall) +/* + * Called by the state manger to remove all layouts established under an + * expired lease. 
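mark_lseg_invalid() above folds the dec-and-remove idiom back inline: drop a reference and, only when it was the last one, unlink the segment and queue it for freeing. A standalone C11 model (the list is reduced to a flag):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct lseg { atomic_int refcount; bool on_list; };

static bool dec_and_remove_zero(struct lseg *l)
{
    if (atomic_fetch_sub(&l->refcount, 1) != 1)
        return false;          /* others still hold references */
    l->on_list = false;        /* stand-in for list_del + list_add */
    return true;
}

int main(void)
{
    struct lseg l = { .refcount = 2, .on_list = true };
    bool first  = dec_and_remove_zero(&l);
    bool second = dec_and_remove_zero(&l);
    printf("first=%d second=%d on_list=%d\n", first, second, l.on_list);
    return 0;
}
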
+ */ +void +pnfs_destroy_all_layouts(struct nfs_client *clp) { + struct nfs_server *server; struct pnfs_layout_hdr *lo; - struct inode *inode; - struct pnfs_layout_range range = { - .iomode = IOMODE_ANY, - .offset = 0, - .length = NFS4_MAX_UINT64, - }; - LIST_HEAD(lseg_list); - int ret = 0; - - while (!list_empty(layout_list)) { - lo = list_entry(layout_list->next, struct pnfs_layout_hdr, - plh_bulk_destroy); - dprintk("%s freeing layout for inode %lu\n", __func__, - lo->plh_inode->i_ino); - inode = lo->plh_inode; - spin_lock(&inode->i_lock); - list_del_init(&lo->plh_bulk_destroy); - lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */ - if (is_bulk_recall) - set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); - if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range)) - ret = -EAGAIN; - spin_unlock(&inode->i_lock); - pnfs_free_lseg_list(&lseg_list); - pnfs_put_layout_hdr(lo); - iput(inode); - } - return ret; -} + LIST_HEAD(tmp_list); -int -pnfs_destroy_layouts_byfsid(struct nfs_client *clp, - struct nfs_fsid *fsid, - bool is_recall) -{ - struct nfs_server *server; - LIST_HEAD(layout_list); + nfs4_deviceid_mark_client_invalid(clp); + nfs4_deviceid_purge_client(clp); spin_lock(&clp->cl_lock); rcu_read_lock(); -restart: list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { - if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0) - continue; - if (pnfs_layout_bulk_destroy_byserver_locked(clp, - server, - &layout_list) != 0) - goto restart; + if (!list_empty(&server->layouts)) + list_splice_init(&server->layouts, &tmp_list); } rcu_read_unlock(); spin_unlock(&clp->cl_lock); - if (list_empty(&layout_list)) - return 0; - return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall); -} - -int -pnfs_destroy_layouts_byclid(struct nfs_client *clp, - bool is_recall) -{ - struct nfs_server *server; - LIST_HEAD(layout_list); - - spin_lock(&clp->cl_lock); - rcu_read_lock(); -restart: - list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { - if (pnfs_layout_bulk_destroy_byserver_locked(clp, - server, - &layout_list) != 0) - goto restart; + while (!list_empty(&tmp_list)) { + lo = list_entry(tmp_list.next, struct pnfs_layout_hdr, + plh_layouts); + dprintk("%s freeing layout for inode %lu\n", __func__, + lo->plh_inode->i_ino); + list_del_init(&lo->plh_layouts); + pnfs_destroy_layout(NFS_I(lo->plh_inode)); } - rcu_read_unlock(); - spin_unlock(&clp->cl_lock); - - if (list_empty(&layout_list)) - return 0; - return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall); -} - -/* - * Called by the state manger to remove all layouts established under an - * expired lease. - */ -void -pnfs_destroy_all_layouts(struct nfs_client *clp) -{ - nfs4_deviceid_mark_client_invalid(clp); - nfs4_deviceid_purge_client(clp); - - pnfs_destroy_layouts_byclid(clp, false); } /* @@ -784,21 +667,6 @@ send_layoutget(struct pnfs_layout_hdr *lo, return lseg; } -static void pnfs_clear_layoutcommit(struct inode *inode, - struct list_head *head) -{ - struct nfs_inode *nfsi = NFS_I(inode); - struct pnfs_layout_segment *lseg, *tmp; - - if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) - return; - list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) { - if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) - continue; - pnfs_lseg_dec_and_remove_zero(lseg, head); - } -} - /* * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr * when the layout segment list is empty. 
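The restored pnfs_destroy_all_layouts() splices each server's layout list onto a private tmp_list under clp->cl_lock, then tears the entries down with the lock dropped. A minimal pthread model of that splice-then-process shape (invented node type); compile with -pthread:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *layouts;          /* shared list, protected by lock */

static void destroy_all(void)
{
    pthread_mutex_lock(&lock);
    struct node *tmp_list = layouts;  /* splice: steal the whole chain */
    layouts = NULL;
    pthread_mutex_unlock(&lock);

    while (tmp_list) {                /* free without holding the lock */
        struct node *n = tmp_list;
        tmp_list = n->next;
        printf("freeing layout %d\n", n->id);
        free(n);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->id = i; n->next = layouts; layouts = n;
    }
    destroy_all();
    return 0;
}
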
@@ -830,7 +698,6 @@ _pnfs_return_layout(struct inode *ino) /* Reference matched in nfs4_layoutreturn_release */ pnfs_get_layout_hdr(lo); empty = list_empty(&lo->plh_segs); - pnfs_clear_layoutcommit(ino, &tmp_list); pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); /* Don't send a LAYOUTRETURN if list was initially empty */ if (empty) { @@ -843,6 +710,8 @@ _pnfs_return_layout(struct inode *ino) spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); + WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)); + lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); if (unlikely(lrp == NULL)) { status = -ENOMEM; @@ -1019,7 +888,7 @@ alloc_init_layout_hdr(struct inode *ino, atomic_set(&lo->plh_refcount, 1); INIT_LIST_HEAD(&lo->plh_layouts); INIT_LIST_HEAD(&lo->plh_segs); - INIT_LIST_HEAD(&lo->plh_bulk_destroy); + INIT_LIST_HEAD(&lo->plh_bulk_recall); lo->plh_inode = ino; lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred); return lo; @@ -1443,15 +1312,13 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head, - const struct nfs_pgio_completion_ops *compl_ops, - struct nfs_direct_req *dreq) + const struct nfs_pgio_completion_ops *compl_ops) { struct nfs_pageio_descriptor pgio; LIST_HEAD(failed); /* Resend all requests through the MDS */ nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops); - pgio.pg_dreq = dreq; while (!list_empty(head)) { struct nfs_page *req = nfs_list_entry(head->next); @@ -1480,13 +1347,13 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data) dprintk("pnfs write error = %d\n", hdr->pnfs_error); if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & PNFS_LAYOUTRET_ON_ERROR) { + clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags); pnfs_return_layout(hdr->inode); } if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode, &hdr->pages, - hdr->completion_ops, - hdr->dreq); + hdr->completion_ops); } /* @@ -1601,15 +1468,13 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head, - const struct nfs_pgio_completion_ops *compl_ops, - struct nfs_direct_req *dreq) + const struct nfs_pgio_completion_ops *compl_ops) { struct nfs_pageio_descriptor pgio; LIST_HEAD(failed); /* Resend all requests through the MDS */ nfs_pageio_init_read(&pgio, inode, compl_ops); - pgio.pg_dreq = dreq; while (!list_empty(head)) { struct nfs_page *req = nfs_list_entry(head->next); @@ -1634,13 +1499,13 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data) dprintk("pnfs read error = %d\n", hdr->pnfs_error); if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & PNFS_LAYOUTRET_ON_ERROR) { + clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags); pnfs_return_layout(hdr->inode); } if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode, &hdr->pages, - hdr->completion_ops, - hdr->dreq); + hdr->completion_ops); } /* @@ -1766,27 +1631,11 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp) list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { if (lseg->pls_range.iomode == IOMODE_RW && - test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) + test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) list_add(&lseg->pls_lc_list, listp); } } -static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp) -{ - struct pnfs_layout_segment *lseg, *tmp; - unsigned long 
*bitlock = &NFS_I(inode)->flags; - - /* Matched by references in pnfs_set_layoutcommit */ - list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) { - list_del_init(&lseg->pls_lc_list); - pnfs_put_lseg(lseg); - } - - clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); - smp_mb__after_clear_bit(); - wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); -} - void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) { pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode); @@ -1831,7 +1680,6 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data) if (nfss->pnfs_curr_ld->cleanup_layoutcommit) nfss->pnfs_curr_ld->cleanup_layoutcommit(data); - pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list); } /* diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 94ba804..dbf7bba 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -132,7 +132,7 @@ struct pnfs_layoutdriver_type { struct pnfs_layout_hdr { atomic_t plh_refcount; struct list_head plh_layouts; /* other client layouts */ - struct list_head plh_bulk_destroy; + struct list_head plh_bulk_recall; /* clnt list of bulk recalls */ struct list_head plh_segs; /* layout segments list */ nfs4_stateid plh_stateid; atomic_t plh_outstanding; /* number of RPCs out */ @@ -196,11 +196,6 @@ struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp); void pnfs_free_lseg_list(struct list_head *tmp_list); void pnfs_destroy_layout(struct nfs_inode *); void pnfs_destroy_all_layouts(struct nfs_client *); -int pnfs_destroy_layouts_byfsid(struct nfs_client *clp, - struct nfs_fsid *fsid, - bool is_recall); -int pnfs_destroy_layouts_byclid(struct nfs_client *clp, - bool is_recall); void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo); void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, @@ -230,11 +225,9 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp); int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head, - const struct nfs_pgio_completion_ops *compl_ops, - struct nfs_direct_req *dreq); + const struct nfs_pgio_completion_ops *compl_ops); int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head, - const struct nfs_pgio_completion_ops *compl_ops, - struct nfs_direct_req *dreq); + const struct nfs_pgio_completion_ops *compl_ops); struct nfs4_threshold *pnfs_mdsthreshold_alloc(void); /* nfs4_deviceid_flags */ diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index 6edc807..3f79c77 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -336,14 +336,20 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata) struct inode *old_dir = data->old_dir; struct inode *new_dir = data->new_dir; struct dentry *old_dentry = data->old_dentry; + struct dentry *new_dentry = data->new_dentry; if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) { rpc_restart_call_prepare(task); return; } - if (task->tk_status != 0) + if (task->tk_status != 0) { nfs_cancel_async_unlink(old_dentry); + return; + } + + d_drop(old_dentry); + d_drop(new_dentry); } /** @@ -544,18 +550,6 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry) error = rpc_wait_for_completion_task(task); if (error == 0) error = task->tk_status; - switch (error) { - case 0: - /* The rename succeeded */ - nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); - d_move(dentry, sdentry); - break; - case -ERESTARTSYS: - /* The result of the rename is unknown. 
Play it safe by - * forcing a new lookup */ - d_drop(dentry); - d_drop(sdentry); - } rpc_put_task(task); out_dput: dput(sdentry); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index ec668e1..9d1c5db 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -931,14 +931,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, nfs4_lock_state(); status = nfs4_preprocess_stateid_op(SVC_NET(rqstp), cstate, stateid, WR_STATE, &filp); + if (filp) + get_file(filp); + nfs4_unlock_state(); + if (status) { - nfs4_unlock_state(); dprintk("NFSD: nfsd4_write: couldn't process stateid!\n"); return status; } - if (filp) - get_file(filp); - nfs4_unlock_state(); cnt = write->wr_buflen; write->wr_how_written = write->wr_stable_how; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 53a7c64..ac8ed96 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -210,7 +210,13 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) { if (atomic_dec_and_test(&fp->fi_access[oflag])) { nfs4_file_put_fd(fp, oflag); - if (atomic_read(&fp->fi_access[1 - oflag]) == 0) + /* + * It's also safe to get rid of the RDWR open *if* + * we no longer have need of the other kind of access + * or if we already have the other kind of open: + */ + if (fp->fi_fds[1-oflag] + || atomic_read(&fp->fi_access[1 - oflag]) == 0) nfs4_file_put_fd(fp, O_RDWR); } } @@ -1054,8 +1060,6 @@ free_client(struct nfs4_client *clp) } free_svc_cred(&clp->cl_cred); kfree(clp->cl_name.data); - idr_remove_all(&clp->cl_stateids); - idr_destroy(&clp->cl_stateids); kfree(clp); } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index cd5e6c1..0dc1158 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -264,7 +264,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, iattr->ia_valid |= ATTR_SIZE; } if (bmval[0] & FATTR4_WORD0_ACL) { - u32 nace; + int nace; struct nfs4_ace *ace; READ_BUF(4); len += 4; @@ -344,7 +344,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, all 32 bits of 'nseconds'. */ READ_BUF(12); len += 12; - READ64(iattr->ia_atime.tv_sec); + READ32(dummy32); + if (dummy32) + return nfserr_inval; + READ32(iattr->ia_atime.tv_sec); READ32(iattr->ia_atime.tv_nsec); if (iattr->ia_atime.tv_nsec >= (u32)1000000000) return nfserr_inval; @@ -367,7 +370,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, all 32 bits of 'nseconds'. 
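The xdr timestamp hunks trade the single 64-bit seconds field back for a zero upper word plus 32 bits of seconds, which cannot represent times past 2038; the READ64/WRITE64 forms carried the full value as two 32-bit XDR words. A standalone sketch of the split and reassembly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t tv_sec = 4102444800LL;     /* a post-2038 time, hypothetical */

    /* WRITE64-style: emit the value as two 32-bit XDR words */
    uint32_t hi = (uint32_t)((uint64_t)tv_sec >> 32);
    uint32_t lo = (uint32_t)tv_sec;

    /* READ64-style: reassemble on the far side */
    int64_t back = (int64_t)(((uint64_t)hi << 32) | lo);

    printf("hi=0x%08x lo=0x%08x roundtrip %s\n",
           hi, lo, back == tv_sec ? "ok" : "broken");
    printf("32-bit-only view: %d (wrapped)\n", (int32_t)tv_sec);
    return 0;
}
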
*/ READ_BUF(12); len += 12; - READ64(iattr->ia_mtime.tv_sec); + READ32(dummy32); + if (dummy32) + return nfserr_inval; + READ32(iattr->ia_mtime.tv_sec); READ32(iattr->ia_mtime.tv_nsec); if (iattr->ia_mtime.tv_nsec >= (u32)1000000000) return nfserr_inval; @@ -2380,7 +2386,8 @@ out_acl: if (bmval1 & FATTR4_WORD1_TIME_ACCESS) { if ((buflen -= 12) < 0) goto out_resource; - WRITE64((s64)stat.atime.tv_sec); + WRITE32(0); + WRITE32(stat.atime.tv_sec); WRITE32(stat.atime.tv_nsec); } if (bmval1 & FATTR4_WORD1_TIME_DELTA) { @@ -2393,13 +2400,15 @@ out_acl: if (bmval1 & FATTR4_WORD1_TIME_METADATA) { if ((buflen -= 12) < 0) goto out_resource; - WRITE64((s64)stat.ctime.tv_sec); + WRITE32(0); + WRITE32(stat.ctime.tv_sec); WRITE32(stat.ctime.tv_nsec); } if (bmval1 & FATTR4_WORD1_TIME_MODIFY) { if ((buflen -= 12) < 0) goto out_resource; - WRITE64((s64)stat.mtime.tv_sec); + WRITE32(0); + WRITE32(stat.mtime.tv_sec); WRITE32(stat.mtime.tv_nsec); } if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) { diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 69c6413..d586117 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1013,7 +1013,6 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, int host_err; int stable = *stablep; int use_wgather; - loff_t pos = offset; dentry = file->f_path.dentry; inode = dentry->d_inode; @@ -1026,7 +1025,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, /* Write the data. */ oldfs = get_fs(); set_fs(KERNEL_DS); - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos); + host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); set_fs(oldfs); if (host_err < 0) goto out_nfserr; diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 595343e..228a2c2 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -574,7 +574,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, int add = (arg & IN_MASK_ADD); int ret; + /* don't allow invalid bits: we don't want flags set */ mask = inotify_arg_to_mask(arg); + if (unlikely(!(mask & IN_ALL_EVENTS))) + return -EINVAL; fsn_mark = fsnotify_find_inode_mark(group, inode); if (!fsn_mark) @@ -624,7 +627,10 @@ static int inotify_new_watch(struct fsnotify_group *group, struct idr *idr = &group->inotify_data.idr; spinlock_t *idr_lock = &group->inotify_data.idr_lock; + /* don't allow invalid bits: we don't want flags set */ mask = inotify_arg_to_mask(arg); + if (unlikely(!(mask & IN_ALL_EVENTS))) + return -EINVAL; tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); if (unlikely(!tmp_i_mark)) @@ -751,10 +757,6 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, int ret; unsigned flags = 0; - /* don't allow invalid bits: we don't want flags set */ - if (unlikely(!(mask & ALL_INOTIFY_BITS))) - return -EINVAL; - f = fdget(fd); if (unlikely(!f.file)) return -EBADF; diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index f5d4565..fa9c05f 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -108,7 +108,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) "0x%llx.", (unsigned long long)bh->b_blocknr); } first = page_buffers(page); - flags = bh_uptodate_lock_irqsave(first); + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &first->b_state); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -123,7 +124,8 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while 
(tmp != bh); - bh_uptodate_unlock_irqrestore(first, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); /* * If none of the buffers had errors then we can set the page uptodate, * but we first have to perform the post read mst fixups, if the @@ -144,13 +146,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) recs = PAGE_CACHE_SIZE / rec_size; /* Should have been verified before we got here... */ BUG_ON(!recs); - local_irq_save_nort(flags); + local_irq_save(flags); kaddr = kmap_atomic(page); for (i = 0; i < recs; i++) post_read_mst_fixup((NTFS_RECORD*)(kaddr + i * rec_size), rec_size); kunmap_atomic(kaddr); - local_irq_restore_nort(flags); + local_irq_restore(flags); flush_dcache_page(page); if (likely(page_uptodate && !PageError(page))) SetPageUptodate(page); @@ -158,7 +160,9 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) unlock_page(page); return; still_busy: - bh_uptodate_unlock_irqrestore(first, flags); + bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); + local_irq_restore(flags); + return; } /** diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 340bd02..6577432 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -593,9 +593,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, level = ocfs2_iocb_rw_locked_level(iocb); ocfs2_rw_unlock(inode, level); - inode_dio_done(inode); if (is_async) aio_complete(iocb, ret, 0); + inode_dio_done(inode); } /* diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 88577eb..4f7795f 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2545,7 +2545,6 @@ int ocfs2_super_lock(struct ocfs2_super *osb, * everything is up to the caller :) */ status = ocfs2_should_refresh_lock_res(lockres); if (status < 0) { - ocfs2_cluster_unlock(osb, lockres, level); mlog_errno(status); goto bail; } @@ -2554,10 +2553,8 @@ int ocfs2_super_lock(struct ocfs2_super *osb, ocfs2_complete_lock_res_refresh(lockres, status); - if (status < 0) { - ocfs2_cluster_unlock(osb, lockres, level); + if (status < 0) mlog_errno(status); - } ocfs2_track_lock_refresh(lockres); } bail: diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index b7e74b5..f169da4 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle, * cluster groups will be staying in cache for the duration of * this operation. */ - ac->ac_disable_chain_relink = 1; + ac->ac_allow_chain_relink = 0; /* Claim the first region */ status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits, @@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, * Do this *after* figuring out how many bits we're taking out * of our target group. */ - if (!ac->ac_disable_chain_relink && + if (ac->ac_allow_chain_relink && (prev_group_bh) && (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) { status = ocfs2_relink_block_group(handle, alloc_inode, @@ -1928,6 +1928,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, victim = ocfs2_find_victim_chain(cl); ac->ac_chain = victim; + ac->ac_allow_chain_relink = 1; status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, res, &bits_left); @@ -1946,7 +1947,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, * searching each chain in order. Don't allow chain relinking * because we only calculate enough journal credits for one * relink per alloc. 
*/ - ac->ac_disable_chain_relink = 1; + ac->ac_allow_chain_relink = 0; for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) { if (i == victim) continue; diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index a36d0aa..b8afabf 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -49,7 +49,7 @@ struct ocfs2_alloc_context { /* these are used by the chain search */ u16 ac_chain; - int ac_disable_chain_relink; + int ac_allow_chain_relink; group_search_t *ac_group_search; u64 ac_last_group; diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 2e3ea30..0ba9ea1 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir, struct buffer_head *dir_bh = NULL; ret = ocfs2_init_security_get(inode, dir, qstr, NULL); - if (ret) { + if (!ret) { mlog_errno(ret); goto leave; } diff --git a/fs/pipe.c b/fs/pipe.c index 8e2e73f..bd3479d 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -863,9 +863,6 @@ pipe_rdwr_open(struct inode *inode, struct file *filp) { int ret = -ENOENT; - if (!(filp->f_mode & (FMODE_READ|FMODE_WRITE))) - return -EINVAL; - mutex_lock(&inode->i_mutex); if (inode->i_pipe) { diff --git a/fs/pnode.c b/fs/pnode.c index 8b29d21..3e000a5 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -9,7 +9,6 @@ #include <linux/mnt_namespace.h> #include <linux/mount.h> #include <linux/fs.h> -#include <linux/nsproxy.h> #include "internal.h" #include "pnode.h" @@ -221,7 +220,6 @@ static struct mount *get_source(struct mount *dest, int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, struct mount *source_mnt, struct list_head *tree_list) { - struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; struct mount *m, *child; int ret = 0; struct mount *prev_dest_mnt = dest_mnt; @@ -239,10 +237,6 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); - /* Notice when we are propagating across user namespaces */ - if (m->mnt_ns->user_ns != user_ns) - type |= CL_UNPRIVILEGED; - child = copy_tree(source, source->mnt.mnt_root, type); if (IS_ERR(child)) { ret = PTR_ERR(child); diff --git a/fs/pnode.h b/fs/pnode.h index a0493d5..19b853a3 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -23,7 +23,6 @@ #define CL_MAKE_SHARED 0x08 #define CL_PRIVATE 0x10 #define CL_SHARED_TO_SLAVE 0x20 -#define CL_UNPRIVILEGED 0x40 static inline void set_mnt_shared(struct mount *mnt) { diff --git a/fs/proc/array.c b/fs/proc/array.c index be3c22f..6a91e6f 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -143,7 +143,6 @@ static const char * const task_state_array[] = { "x (dead)", /* 64 */ "K (wakekill)", /* 128 */ "W (waking)", /* 256 */ - "P (parked)", /* 512 */ }; static inline const char *get_task_state(struct task_struct *tsk) diff --git a/fs/proc/generic.c b/fs/proc/generic.c index b796da2..76ddae8 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -412,7 +412,8 @@ static const struct dentry_operations proc_dentry_operations = struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, struct dentry *dentry) { - struct inode *inode; + struct inode *inode = NULL; + int error = -ENOENT; spin_lock(&proc_subdir_lock); for (de = de->subdir; de ; de = de->next) { @@ -421,16 +422,22 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { pde_get(de); spin_unlock(&proc_subdir_lock); + error = -ENOMEM; inode = proc_get_inode(dir->i_sb, de); - if (!inode) - return ERR_PTR(-ENOMEM); - d_set_d_op(dentry, &proc_dentry_operations); -
d_add(dentry, inode); - return NULL; + goto out_unlock; } } spin_unlock(&proc_subdir_lock); - return ERR_PTR(-ENOENT); +out_unlock: + + if (inode) { + d_set_d_op(dentry, &proc_dentry_operations); + d_add(dentry, inode); + return NULL; + } + if (de) + pde_put(de); + return ERR_PTR(error); } struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 0ac1e1b..439ae688 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -445,10 +445,12 @@ static const struct file_operations proc_reg_file_ops_no_compat = { struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { - struct inode *inode = new_inode_pseudo(sb); + struct inode * inode; - if (inode) { - inode->i_ino = de->low_ino; + inode = iget_locked(sb, de->low_ino); + if (!inode) + return NULL; + if (inode->i_state & I_NEW) { inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; PROC_I(inode)->pde = de; @@ -476,10 +478,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) inode->i_fop = de->proc_fops; } } + unlock_new_inode(inode); } else pde_put(de); return inode; -} +} int proc_fill_super(struct super_block *s) { @@ -496,5 +499,6 @@ int proc_fill_super(struct super_block *s) return 0; printk("proc_read_super: get root inode failed\n"); + pde_put(&proc_root); return -ENOMEM; } diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 66b51c0..b7a4719 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -118,7 +118,7 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd) struct super_block *sb = inode->i_sb; struct proc_inode *ei = PROC_I(inode); struct task_struct *task; - struct path ns_path; + struct dentry *ns_dentry; void *error = ERR_PTR(-EACCES); task = get_proc_task(inode); @@ -128,14 +128,14 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd) if (!ptrace_may_access(task, PTRACE_MODE_READ)) goto out_put_task; - ns_path.dentry = proc_ns_get_dentry(sb, task, ei->ns_ops); - if (IS_ERR(ns_path.dentry)) { - error = ERR_CAST(ns_path.dentry); + ns_dentry = proc_ns_get_dentry(sb, task, ei->ns_ops); + if (IS_ERR(ns_dentry)) { + error = ERR_CAST(ns_dentry); goto out_put_task; } - ns_path.mnt = mntget(nd->path.mnt); - nd_jump_link(nd, &ns_path); + dput(nd->path.dentry); + nd->path.dentry = ns_dentry; error = NULL; out_put_task: diff --git a/fs/proc/root.c b/fs/proc/root.c index 9c7fab1..c6e9fac 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -16,7 +16,6 @@ #include <linux/sched.h> #include <linux/module.h> #include <linux/bitops.h> -#include <linux/user_namespace.h> #include <linux/mount.h> #include <linux/pid_namespace.h> #include <linux/parser.h> @@ -109,9 +108,6 @@ static struct dentry *proc_mount(struct file_system_type *fs_type, } else { ns = task_active_pid_ns(current); options = data; - - if (!current_user_ns()->may_mount_proc) - return ERR_PTR(-EPERM); } sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns); diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 86d1038..5ea2e77 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -96,27 +96,6 @@ static const char *get_reason_str(enum kmsg_dump_reason reason) } } -bool pstore_cannot_block_path(enum kmsg_dump_reason reason) -{ - /* - * In case of NMI path, pstore shouldn't be blocked - * regardless of reason. - */ - if (in_nmi()) - return true; - - switch (reason) { - /* In panic case, other cpus are stopped by smp_send_stop(). */ - case KMSG_DUMP_PANIC: - /* Emergency restart shouldn't be blocked by spin lock. 
*/ - case KMSG_DUMP_EMERG: - return true; - default: - return false; - } -} -EXPORT_SYMBOL_GPL(pstore_cannot_block_path); - /* * callback from kmsg_dump. (s2,l2) has the most recently * written bytes, older bytes are in (s1,l1). Save as much @@ -135,12 +114,10 @@ static void pstore_dump(struct kmsg_dumper *dumper, why = get_reason_str(reason); - if (pstore_cannot_block_path(reason)) { - is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags); - if (!is_locked) { - pr_err("pstore dump routine blocked in %s path, may corrupt error record\n" - , in_nmi() ? "NMI" : why); - } + if (in_nmi()) { + is_locked = spin_trylock(&psinfo->buf_lock); + if (!is_locked) + pr_err("pstore dump routine blocked in NMI, may corrupt error record\n"); } else spin_lock_irqsave(&psinfo->buf_lock, flags); oopscount++; @@ -166,9 +143,9 @@ static void pstore_dump(struct kmsg_dumper *dumper, total += hsize + len; part++; } - if (pstore_cannot_block_path(reason)) { + if (in_nmi()) { if (is_locked) - spin_unlock_irqrestore(&psinfo->buf_lock, flags); + spin_unlock(&psinfo->buf_lock); } else spin_unlock_irqrestore(&psinfo->buf_lock, flags); } diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 4cce1d9..c196369 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset, if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) return -ENOSPC; - if (name[0] == '.' && (namelen < 2 || - (namelen == 2 && name[1] == '.'))) + if (name[0] == '.' && (name[1] == '\0' || + (name[1] == '.' && name[2] == '\0'))) return 0; dentry = lookup_one_len(name, dbuf->xadir, namelen); diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index d924812..2fbdff6 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -1012,7 +1012,6 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) enum kobj_ns_type type; const void *ns; ino_t ino; - loff_t off; type = sysfs_ns_type(parent_sd); ns = sysfs_info(dentry->d_sb)->ns[type]; @@ -1021,8 +1020,6 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) ino = parent_sd->s_ino; if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0) filp->f_pos++; - else - return 0; } if (filp->f_pos == 1) { if (parent_sd->s_parent) @@ -1031,11 +1028,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) ino = parent_sd->s_ino; if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0) filp->f_pos++; - else - return 0; } mutex_lock(&sysfs_mutex); - off = filp->f_pos; for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos); pos; pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) { @@ -1047,43 +1041,27 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) len = strlen(name); ino = pos->s_ino; type = dt_type(pos); - off = filp->f_pos = pos->s_hash; + filp->f_pos = pos->s_hash; filp->private_data = sysfs_get(pos); mutex_unlock(&sysfs_mutex); - ret = filldir(dirent, name, len, off, ino, type); + ret = filldir(dirent, name, len, filp->f_pos, ino, type); mutex_lock(&sysfs_mutex); if (ret < 0) break; } mutex_unlock(&sysfs_mutex); - - /* don't reference last entry if its refcount is dropped */ - if (!pos) { + if ((filp->f_pos > 1) && !pos) { /* EOF */ + filp->f_pos = INT_MAX; filp->private_data = NULL; - - /* EOF and not changed as 0 or 1 in read/write path */ - if (off == filp->f_pos && off > 1) - filp->f_pos = INT_MAX; } return 0; } -static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence) -{ - struct 
inode *inode = file->f_path.dentry->d_inode; - loff_t ret; - - mutex_lock(&inode->i_mutex); - ret = generic_file_llseek(file, offset, whence); - mutex_unlock(&inode->i_mutex); - - return ret; -} const struct file_operations sysfs_dir_operations = { .read = generic_read_dir, .readdir = sysfs_readdir, .release = sysfs_dir_release, - .llseek = sysfs_dir_llseek, + .llseek = generic_file_llseek, }; diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index fb328d1..db940a9 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -19,7 +19,6 @@ #include <linux/module.h> #include <linux/magic.h> #include <linux/slab.h> -#include <linux/user_namespace.h> #include "sysfs.h" @@ -112,9 +111,6 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, struct super_block *sb; int error; - if (!(flags & MS_KERNMOUNT) && !current_user_ns()->may_mount_sysfs) - return ERR_PTR(-EPERM); - info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); diff --git a/fs/timerfd.c b/fs/timerfd.c index 522aeb8..d03822b 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -311,7 +311,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags, if (hrtimer_try_to_cancel(&ctx->tmr) >= 0) break; spin_unlock_irq(&ctx->wqh.lock); - hrtimer_wait_for_timer(&ctx->tmr); + cpu_relax(); } /* diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index ba32da3..769701c 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -126,14 +126,13 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum) else if (inum > o->inum) p = p->rb_right; else { - if (o->del) { + if (o->dnext) { spin_unlock(&c->orphan_lock); dbg_gen("deleted twice ino %lu", (unsigned long)inum); return; } - if (o->cmt) { - o->del = 1; + if (o->cnext) { o->dnext = c->orph_dnext; c->orph_dnext = o; spin_unlock(&c->orphan_lock); @@ -173,9 +172,7 @@ int ubifs_orphan_start_commit(struct ubifs_info *c) last = &c->orph_cnext; list_for_each_entry(orphan, &c->orph_new, new_list) { ubifs_assert(orphan->new); - ubifs_assert(!orphan->cmt); orphan->new = 0; - orphan->cmt = 1; *last = orphan; last = &orphan->cnext; } @@ -302,9 +299,7 @@ static int write_orph_node(struct ubifs_info *c, int atomic) cnext = c->orph_cnext; for (i = 0; i < cnt; i++) { orphan = cnext; - ubifs_assert(orphan->cmt); orph->inos[i] = cpu_to_le64(orphan->inum); - orphan->cmt = 0; cnext = orphan->cnext; orphan->cnext = NULL; } @@ -383,7 +378,6 @@ static int consolidate(struct ubifs_info *c) list_for_each_entry(orphan, &c->orph_list, list) { if (orphan->new) continue; - orphan->cmt = 1; *last = orphan; last = &orphan->cnext; cnt += 1; @@ -448,7 +442,6 @@ static void erase_deleted(struct ubifs_info *c) orphan = dnext; dnext = orphan->dnext; ubifs_assert(!orphan->new); - ubifs_assert(orphan->del); rb_erase(&orphan->rb, &c->orph_tree); list_del(&orphan->list); c->tot_orphans -= 1; @@ -538,7 +531,6 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum) rb_link_node(&orphan->rb, parent, p); rb_insert_color(&orphan->rb, &c->orph_tree); list_add_tail(&orphan->list, &c->orph_list); - orphan->del = 1; orphan->dnext = c->orph_dnext; c->orph_dnext = orphan; dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum, diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 97f6875..ddc0f6a 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1568,12 +1568,6 @@ static int ubifs_remount_rw(struct ubifs_info *c) c->remounting_rw = 1; c->ro_mount = 0; - if (c->space_fixup) { - err = ubifs_fixup_free_space(c); - if (err) - return err; - } - err = check_free_space(c); if (err) goto out; @@ -1690,6 +1684,12 @@ static int ubifs_remount_rw(struct ubifs_info *c) err = 
dbg_check_space_info(c); } + if (c->space_fixup) { + err = ubifs_fixup_free_space(c); + if (err) + goto out; + } + mutex_unlock(&c->umount_mutex); return err; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index b2babce..d133c27 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -904,8 +904,6 @@ struct ubifs_budget_req { * @dnext: next orphan to delete * @inum: inode number * @new: %1 => added since the last commit, otherwise %0 - * @cmt: %1 => commit pending, otherwise %0 - * @del: %1 => delete pending, otherwise %0 */ struct ubifs_orphan { struct rb_node rb; @@ -914,9 +912,7 @@ struct ubifs_orphan { struct ubifs_orphan *cnext; struct ubifs_orphan *dnext; ino_t inum; - unsigned new:1; - unsigned cmt:1; - unsigned del:1; + int new; }; /** diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index 8d1c9d4..5f02722 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h @@ -82,7 +82,7 @@ struct udf_virtual_data { struct udf_bitmap { __u32 s_extLength; __u32 s_extPosition; - int s_nr_groups; + __u16 s_nr_groups; struct buffer_head **s_block_bitmap; }; diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 572a858..cdb2d33 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -147,10 +147,7 @@ xfs_bmap_local_to_extents( xfs_fsblock_t *firstblock, /* first block allocated in xaction */ xfs_extlen_t total, /* total blocks needed by transaction */ int *logflagsp, /* inode logging flags */ - int whichfork, /* data or attr fork */ - void (*init_fn)(struct xfs_buf *bp, - struct xfs_inode *ip, - struct xfs_ifork *ifp)); + int whichfork); /* data or attr fork */ /* * Search the extents list for the inode, for the extent containing bno. @@ -360,42 +357,7 @@ xfs_bmap_add_attrfork_extents( } /* - * Block initialisation functions for local to extent format conversion. - * As these get more complex, they will be moved to the relevant files, - * but for now they are too simple to worry about. - */ -STATIC void -xfs_bmap_local_to_extents_init_fn( - struct xfs_buf *bp, - struct xfs_inode *ip, - struct xfs_ifork *ifp) -{ - bp->b_ops = &xfs_bmbt_buf_ops; - memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); -} - -STATIC void -xfs_symlink_local_to_remote( - struct xfs_buf *bp, - struct xfs_inode *ip, - struct xfs_ifork *ifp) -{ - /* remote symlink blocks are not verifiable until CRCs come along */ - bp->b_ops = NULL; - memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); -} - -/* - * Called from xfs_bmap_add_attrfork to handle local format files. Each - * different data fork content type needs a different callout to do the - * conversion. Some are basic and only require special block initialisation - * callouts for the data formating, others (directories) are so specialised they - * handle everything themselves. - * - * XXX (dgc): investigate whether directory conversion can use the generic - * formatting callout. It should be possible - it's just a very complex - * formatter. it would also require passing the transaction through to the init - * function. + * Called from xfs_bmap_add_attrfork to handle local format files. 
*/ STATIC int /* error */ xfs_bmap_add_attrfork_local( @@ -406,29 +368,25 @@ xfs_bmap_add_attrfork_local( int *flags) /* inode logging flags */ { xfs_da_args_t dargs; /* args for dir/attr code */ + int error; /* error return value */ + xfs_mount_t *mp; /* mount structure pointer */ if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) return 0; - if (S_ISDIR(ip->i_d.di_mode)) { + mp = ip->i_mount; memset(&dargs, 0, sizeof(dargs)); dargs.dp = ip; dargs.firstblock = firstblock; dargs.flist = flist; - dargs.total = ip->i_mount->m_dirblkfsbs; + dargs.total = mp->m_dirblkfsbs; dargs.whichfork = XFS_DATA_FORK; dargs.trans = tp; - return xfs_dir2_sf_to_block(&dargs); - } - - if (S_ISLNK(ip->i_d.di_mode)) - return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, - flags, XFS_DATA_FORK, - xfs_symlink_local_to_remote); - - return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags, - XFS_DATA_FORK, - xfs_bmap_local_to_extents_init_fn); + error = xfs_dir2_sf_to_block(&dargs); + } else + error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags, + XFS_DATA_FORK); + return error; } /* @@ -3263,10 +3221,7 @@ xfs_bmap_local_to_extents( xfs_fsblock_t *firstblock, /* first block allocated in xaction */ xfs_extlen_t total, /* total blocks needed by transaction */ int *logflagsp, /* inode logging flags */ - int whichfork, - void (*init_fn)(struct xfs_buf *bp, - struct xfs_inode *ip, - struct xfs_ifork *ifp)) + int whichfork) /* data or attr fork */ { int error; /* error return value */ int flags; /* logging flags returned */ @@ -3286,12 +3241,12 @@ xfs_bmap_local_to_extents( xfs_buf_t *bp; /* buffer for extent block */ xfs_bmbt_rec_host_t *ep;/* extent record pointer */ - ASSERT((ifp->if_flags & - (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE); memset(&args, 0, sizeof(args)); args.tp = tp; args.mp = ip->i_mount; args.firstblock = *firstblock; + ASSERT((ifp->if_flags & + (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE); /* * Allocate a block. We know we need only one, since the * file currently fits in an inode. @@ -3307,20 +3262,17 @@ xfs_bmap_local_to_extents( args.mod = args.minleft = args.alignment = args.wasdel = args.isfl = args.minalignslop = 0; args.minlen = args.maxlen = args.prod = 1; - error = xfs_alloc_vextent(&args); - if (error) + if ((error = xfs_alloc_vextent(&args))) goto done; - - /* Can't fail, the space was reserved. */ + /* + * Can't fail, the space was reserved. + */ ASSERT(args.fsbno != NULLFSBLOCK); ASSERT(args.len == 1); *firstblock = args.fsbno; bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); - - /* initialise the block and copy the data */ - init_fn(bp, ip, ifp); - - /* account for the change in fork size and log everything */ + bp->b_ops = &xfs_bmbt_buf_ops; + memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); xfs_bmap_forkoff_reset(args.mp, ip, whichfork); xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); @@ -4967,32 +4919,8 @@ xfs_bmapi_write( XFS_STATS_INC(xs_blk_mapw); if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { - /* - * XXX (dgc): This assumes we are only called for inodes that - * contain content neutral data in local format. Anything that - * contains caller-specific data in local format that needs - * transformation to move to a block format needs to do the - * conversion to extent format itself. 
- * - * Directory data forks and attribute forks handle this - * themselves, but with the addition of metadata verifiers every - * data fork in local format now contains caller specific data - * and as such conversion through this function is likely to be - * broken. - * - * The only likely user of this branch is for remote symlinks, - * but we cannot overwrite the data fork contents of the symlink - * (EEXIST occurs higher up the stack) and so it will never go - * from local format to extent format here. Hence I don't think - * this branch is ever executed intentionally and we should - * consider removing it and asserting that xfs_bmapi_write() - * cannot be called directly on local format forks. i.e. callers - * are completely responsible for local to extent format - * conversion, not xfs_bmapi_write(). - */ error = xfs_bmap_local_to_extents(tp, ip, firstblock, total, - &bma.logflags, whichfork, - xfs_bmap_local_to_extents_init_fn); + &bma.logflags, whichfork); if (error) goto error0; } diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index aee7fd2..7d10f96 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -202,20 +202,6 @@ extern void warn_slowpath_null(const char *file, const int line); # define WARN_ON_SMP(x) ({0;}) #endif -#ifdef CONFIG_PREEMPT_RT_BASE -# define BUG_ON_RT(c) BUG_ON(c) -# define BUG_ON_NONRT(c) do { } while (0) -# define WARN_ON_RT(condition) WARN_ON(condition) -# define WARN_ON_NONRT(condition) do { } while (0) -# define WARN_ON_ONCE_NONRT(condition) do { } while (0) -#else -# define BUG_ON_RT(c) do { } while (0) -# define BUG_ON_NONRT(c) BUG_ON(c) -# define WARN_ON_RT(condition) do { } while (0) -# define WARN_ON_NONRT(condition) WARN_ON(condition) -# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) -#endif - #endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h index d8d4c89..2533fdd 100644 --- a/include/asm-generic/cmpxchg-local.h +++ b/include/asm-generic/cmpxchg-local.h @@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, if (size == 8 && sizeof(unsigned long) != 8) wrong_size_cmpxchg(ptr); - raw_local_irq_save(flags); + local_irq_save(flags); switch (size) { case 1: prev = *(u8 *)ptr; if (prev == old) @@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, default: wrong_size_cmpxchg(ptr); } - raw_local_irq_restore(flags); + local_irq_restore(flags); return prev; } @@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr, u64 prev; unsigned long flags; - raw_local_irq_save(flags); + local_irq_save(flags); prev = *(u64 *)ptr; if (prev == old) *(u64 *)ptr = new; - raw_local_irq_restore(flags); + local_irq_restore(flags); return prev; } diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f50a87d..5cf680a 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -7,16 +7,6 @@ #include <linux/mm_types.h> #include <linux/bug.h> -/*
- */ -#ifndef USER_PGTABLES_CEILING -#define USER_PGTABLES_CEILING 0UL -#endif - #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index b1b1fa6..25f01d0 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -99,12 +99,7 @@ struct mmu_gather { unsigned int need_flush : 1, /* Did free PTEs */ fast_mode : 1; /* No batching */ - /* we are in the middle of an operation to clear - * a full mm and can make some optimizations */ - unsigned int fullmm : 1, - /* we have performed an operation which - * requires a complete flush of the tlb */ - need_flush_all : 1; + unsigned int fullmm; struct mmu_gather_batch *active; struct mmu_gather_batch local; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 881fb15..fad21c9 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1559,8 +1559,9 @@ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *s void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); -int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); -void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); +int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle); +int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); +void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj); int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf, diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index d7da55c..c5c35e6 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -227,7 +227,6 @@ {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -235,13 +234,11 @@ {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ @@ -571,11 +568,7 @@ {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ @@ -586,15 +579,6 @@ {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ - {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ diff --git a/include/linux/ata.h b/include/linux/ata.h index ee0bd95..8f7a3d6 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -954,7 
+954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id) } } -static inline int atapi_command_packet_set(const u16 *dev_id) +static inline bool atapi_command_packet_set(const u16 *dev_id) { return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1dbdb1a..f94bc83 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -836,7 +836,7 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, unsigned int cmd_flags) { if (unlikely(cmd_flags & REQ_DISCARD)) - return min(q->limits.max_discard_sectors, UINT_MAX >> 9); + return q->limits.max_discard_sectors; if (unlikely(cmd_flags & REQ_WRITE_SAME)) return q->limits.max_write_same_sectors; diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 3f8e27b..458f497 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -72,52 +72,8 @@ struct buffer_head { struct address_space *b_assoc_map; /* mapping this buffer is associated with */ atomic_t b_count; /* users using this buffer_head */ -#ifdef CONFIG_PREEMPT_RT_BASE - spinlock_t b_uptodate_lock; -#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ - defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) - spinlock_t b_state_lock; - spinlock_t b_journal_head_lock; -#endif -#endif }; -static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) -{ - unsigned long flags; - -#ifndef CONFIG_PREEMPT_RT_BASE - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); -#else - spin_lock_irqsave(&bh->b_uptodate_lock, flags); -#endif - return flags; -} - -static inline void -bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) -{ -#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); - local_irq_restore(flags); -#else - spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); -#endif -} - -static inline void buffer_head_init_locks(struct buffer_head *bh) -{ -#ifdef CONFIG_PREEMPT_RT_BASE - spin_lock_init(&bh->b_uptodate_lock); -#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ - defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) - spin_lock_init(&bh->b_state_lock); - spin_lock_init(&bh->b_journal_head_lock); -#endif -#endif -} - /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. 
diff --git a/include/linux/capability.h b/include/linux/capability.h index d9a4f7f..98503b7 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -35,7 +35,6 @@ struct cpu_vfs_cap_data { #define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) -struct file; struct inode; struct dentry; struct user_namespace; @@ -212,7 +211,6 @@ extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool nsown_capable(int cap); extern bool inode_capable(const struct inode *inode, int cap); -extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 2322df7..7d73905 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -303,6 +303,9 @@ struct cftype { /* CFTYPE_* flags */ unsigned int flags; + /* file xattrs */ + struct simple_xattrs xattrs; + int (*open)(struct inode *inode, struct file *file); ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft, struct file *file, diff --git a/include/linux/completion.h b/include/linux/completion.h index ebb6565..51494e6 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -8,7 +8,7 @@ * See kernel/sched.c for details. */ -#include <linux/wait-simple.h> +#include <linux/wait.h> /* * struct completion - structure used to maintain state for a "completion" @@ -24,11 +24,11 @@ */ struct completion { unsigned int done; - struct swait_head wait; + wait_queue_head_t wait; }; #define COMPLETION_INITIALIZER(work) \ - { 0, SWAIT_HEAD_INITIALIZER((work).wait) } + { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } #define COMPLETION_INITIALIZER_ONSTACK(work) \ ({ init_completion(&work); work; }) @@ -73,7 +73,7 @@ struct completion { static inline void init_completion(struct completion *x) { x->done = 0; - init_swait_head(&x->wait); + init_waitqueue_head(&x->wait); } extern void wait_for_completion(struct completion *); diff --git a/include/linux/console.h b/include/linux/console.h index 4a6948a..dedb082 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -77,9 +77,7 @@ extern const struct consw prom_con; /* SPARC PROM console */ int con_is_bound(const struct consw *csw); int register_con_driver(const struct consw *csw, int first, int last); int unregister_con_driver(const struct consw *csw); -int do_unregister_con_driver(const struct consw *csw); int take_over_console(const struct consw *sw, int first, int last, int deflt); -int do_take_over_console(const struct consw *sw, int first, int last, int deflt); void give_up_console(const struct consw *sw); #ifdef CONFIG_HW_CONSOLE int con_debug_enter(struct vc_data *vc); @@ -141,7 +139,6 @@ struct console { for (con = console_drivers; con != NULL; con = con->next) extern int console_set_on_cmdline; -extern struct console *early_console; extern int add_preferred_console(char *name, int idx, char *options); extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options); diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 7781c9e..ce7a074 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -175,8 +175,6 @@ extern struct bus_type cpu_subsys; extern void get_online_cpus(void); extern void put_online_cpus(void); -extern void pin_current_cpu(void); -extern void unpin_current_cpu(void); #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) #define 
register_hotcpu_notifier(nb) register_cpu_notifier(nb) #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) @@ -200,8 +198,6 @@ static inline void cpu_hotplug_driver_unlock(void) #define get_online_cpus() do { } while (0) #define put_online_cpus() do { } while (0) -static inline void pin_current_cpu(void) { } -static inline void unpin_current_cpu(void) { } #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) /* These aren't inline functions due to a GCC bug. */ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) diff --git a/include/linux/delay.h b/include/linux/delay.h index e23a7c0..a6ecb34 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -52,10 +52,4 @@ static inline void ssleep(unsigned int seconds) msleep(seconds * 1000); } -#ifdef CONFIG_PREEMPT_RT_FULL -# define cpu_chill() msleep(1) -#else -# define cpu_chill() cpu_relax() -#endif - #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index a5cda3e..bf6afa2 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -68,8 +68,8 @@ typedef void (*dm_postsuspend_fn) (struct dm_target *ti); typedef int (*dm_preresume_fn) (struct dm_target *ti); typedef void (*dm_resume_fn) (struct dm_target *ti); -typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, - unsigned status_flags, char *result, unsigned maxlen); +typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, + unsigned status_flags, char *result, unsigned maxlen); typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 324f931..d0ae3a8 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -50,6 +50,4 @@ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, spin_unlock(&fs->lock); } -extern bool current_chrooted(void); - #endif /* _LINUX_FS_STRUCT_H */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 616603d..92691d8 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -394,6 +394,7 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos); ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos); +loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence); int ftrace_regex_release(struct inode *inode, struct file *file); void __init @@ -566,8 +567,6 @@ static inline int ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } #endif /* CONFIG_DYNAMIC_FTRACE */ -loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence); - /* totally disable ftrace - can not re-enable after this */ void ftrace_kill(void); diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 16ad63d..a3d4895 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -49,9 +49,7 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; - unsigned short migrate_disable; - unsigned short padding; - unsigned char preempt_lazy_count; + int padding; }; #define FTRACE_MAX_EVENT \ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index dfa97de..624ef3f 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -61,11 +61,7 @@ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) -#ifndef CONFIG_PREEMPT_RT_FULL -# define 
SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -#else -# define SOFTIRQ_DISABLE_OFFSET (0) -#endif +#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) #ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 @@ -78,17 +74,10 @@ #endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) +#define softirq_count() (preempt_count() & SOFTIRQ_MASK) #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ | NMI_MASK)) -#ifndef CONFIG_PREEMPT_RT_FULL -# define softirq_count() (preempt_count() & SOFTIRQ_MASK) -# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) -#else -# define softirq_count() (0UL) -extern int in_serving_softirq(void); -#endif - /* * Are we doing bottom half or hardware interrupt processing? * Are we in a softirq context? Interrupt context? @@ -98,6 +87,7 @@ extern int in_serving_softirq(void); #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) /* * Are we in NMI context? diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 84223de..ef788b5 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -7,7 +7,6 @@ #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/hardirq.h> -#include <linux/sched.h> #include <asm/cacheflush.h> @@ -86,51 +85,32 @@ static inline void __kunmap_atomic(void *addr) #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) -#ifndef CONFIG_PREEMPT_RT_FULL DECLARE_PER_CPU(int, __kmap_atomic_idx); -#endif static inline int kmap_atomic_idx_push(void) { -#ifndef CONFIG_PREEMPT_RT_FULL int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; -# ifdef CONFIG_DEBUG_HIGHMEM +#ifdef CONFIG_DEBUG_HIGHMEM WARN_ON_ONCE(in_irq() && !irqs_disabled()); BUG_ON(idx > KM_TYPE_NR); -# endif - return idx; -#else - current->kmap_idx++; - BUG_ON(current->kmap_idx > KM_TYPE_NR); - return current->kmap_idx - 1; #endif + return idx; } static inline int kmap_atomic_idx(void) { -#ifndef CONFIG_PREEMPT_RT_FULL return __this_cpu_read(__kmap_atomic_idx) - 1; -#else - return current->kmap_idx - 1; -#endif } static inline void kmap_atomic_idx_pop(void) { -#ifndef CONFIG_PREEMPT_RT_FULL -# ifdef CONFIG_DEBUG_HIGHMEM +#ifdef CONFIG_DEBUG_HIGHMEM int idx = __this_cpu_dec_return(__kmap_atomic_idx); BUG_ON(idx < 0); -# else - __this_cpu_dec(__kmap_atomic_idx); -# endif #else - current->kmap_idx--; -# ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(current->kmap_idx < 0); -# endif + __this_cpu_dec(__kmap_atomic_idx); #endif } diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 113bcf1..cc07d27 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -111,11 +111,6 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; - struct list_head cb_entry; - int irqsafe; -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - ktime_t praecox; -#endif #ifdef CONFIG_TIMER_STATS int start_pid; void *start_site; @@ -152,7 +147,6 @@ struct hrtimer_clock_base { int index; clockid_t clockid; struct timerqueue_head active; - struct list_head expired; ktime_t resolution; ktime_t (*get_time)(void); ktime_t softirq_time; @@ -195,9 +189,6 @@ struct hrtimer_cpu_base { unsigned long nr_hangs; ktime_t max_hang_time; #endif -#ifdef CONFIG_PREEMPT_RT_BASE - wait_queue_head_t wait; -#endif struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; }; @@ -391,13 +382,6 @@ static inline int hrtimer_restart(struct hrtimer *timer) return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } -/* Softirq preemption could deadlock timer removal */ 
-#ifdef CONFIG_PREEMPT_RT_BASE - extern void hrtimer_wait_for_timer(const struct hrtimer *timer); -#else -# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) -#endif - /* Query timers: */ extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index db695d5..0c80d3f 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -185,7 +185,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; -struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, +struct file *hugetlb_file_setup(const char *name, unsigned long addr, + size_t size, vm_flags_t acct, struct user_struct **user, int creat_flags, int page_size_log); @@ -204,8 +205,8 @@ static inline int is_file_hugepages(struct file *file) #define is_file_hugepages(file) 0 static inline struct file * -hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, - struct user_struct **user, int creat_flags, +hugetlb_file_setup(const char *name, unsigned long addr, size_t size, + vm_flags_t acctflag, struct user_struct **user, int creat_flags, int page_size_log) { return ERR_PTR(-ENOSYS); @@ -283,13 +284,6 @@ static inline struct hstate *hstate_file(struct file *f) return hstate_inode(f->f_dentry->d_inode); } -static inline struct hstate *hstate_sizelog(int page_size_log) -{ - if (!page_size_log) - return &default_hstate; - return size_to_hstate(1 << page_size_log); -} - static inline struct hstate *hstate_vma(struct vm_area_struct *vma) { return hstate_file(vma->vm_file); @@ -354,12 +348,11 @@ static inline int hstate_index(struct hstate *h) return h - hstates; } -#else /* CONFIG_HUGETLB_PAGE */ +#else struct hstate {}; #define alloc_huge_page_node(h, nid) NULL #define alloc_bootmem_huge_page(h) NULL #define hstate_file(f) NULL -#define hstate_sizelog(s) NULL #define hstate_vma(v) NULL #define hstate_inode(i) NULL #define huge_page_size(h) PAGE_SIZE @@ -374,6 +367,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h) } #define hstate_index_to_shift(index) 0 #define hstate_index(h) 0 -#endif /* CONFIG_HUGETLB_PAGE */ +#endif #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h index e5eb125..de7e190 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -136,7 +136,7 @@ struct ida { struct ida_bitmap *free_bitmap; }; -#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } +#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, } #define DEFINE_IDA(name) struct ida name = IDA_INIT(name) int ida_pre_get(struct ida *ida, gfp_t gfp_mask); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 218a3b6..d06cc5c 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -331,7 +331,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { __be16 proto; - unsigned short *rawp; + unsigned char *rawp; /* * Was a VLAN packet, grab the encapsulated protocol, which the layer @@ -344,8 +344,8 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, return; } - rawp = (unsigned short *)(vhdr + 1); - if (*rawp == 0xFFFF) + rawp = skb->data; + if (*(unsigned short *) rawp == 0xFFFF) /* * This is a magic hack to spot IPX packets. 
Older Novell * breaks the protocol design and runs IPX over 802.3 without diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 10f32ab..6d087c5 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -141,12 +141,6 @@ extern struct task_group root_task_group; # define INIT_PERF_EVENTS(tsk) #endif -#ifdef CONFIG_PREEMPT_RT_BASE -# define INIT_TIMER_LIST .posix_timer_list = NULL, -#else -# define INIT_TIMER_LIST -#endif - #define INIT_TASK_COMM "swapper" /* @@ -202,7 +196,6 @@ extern struct task_group root_task_group; .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .timer_slack_ns = 50000, /* 50 usec default slack */ \ - INIT_TIMER_LIST \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 11bdb1e..5fa5afe 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -58,7 +58,6 @@ * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device * resume time. - * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) */ #define IRQF_DISABLED 0x00000020 #define IRQF_SHARED 0x00000080 @@ -72,7 +71,6 @@ #define IRQF_FORCE_RESUME 0x00008000 #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 -#define IRQF_NO_SOFTIRQ_CALL 0x00040000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -213,7 +211,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); #ifdef CONFIG_LOCKDEP # define local_irq_enable_in_hardirq() do { } while (0) #else -# define local_irq_enable_in_hardirq() local_irq_enable_nort() +# define local_irq_enable_in_hardirq() local_irq_enable() #endif extern void disable_irq_nosync(unsigned int irq); @@ -385,13 +383,9 @@ static inline int disable_irq_wake(unsigned int irq) #ifdef CONFIG_IRQ_FORCED_THREADING -# ifndef CONFIG_PREEMPT_RT_BASE - extern bool force_irqthreads; -# else -# define force_irqthreads (true) -# endif +extern bool force_irqthreads; #else -#define force_irqthreads (false) +#define force_irqthreads (0) #endif #ifndef __ARCH_SET_SOFTIRQ_PENDING @@ -447,14 +441,8 @@ struct softirq_action void (*action)(struct softirq_action *); }; -#ifndef CONFIG_PREEMPT_RT_FULL asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); -static inline void thread_do_softirq(void) { do_softirq(); } -#else -extern void thread_do_softirq(void); -#endif - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); @@ -462,8 +450,6 @@ extern void __raise_softirq_irqoff(unsigned int nr); extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); -extern void softirq_check_pending_idle(void); - /* This is the worklist that queues up per-cpu softirq work. * * send_remote_sendirq() adds work to these lists, and @@ -504,9 +490,8 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu, to be executed on some cpu at least once after this. * If the tasklet is already scheduled, but its execution is still not started, it will be executed only once. - * If this tasklet is already running on another CPU, it is rescheduled - for later. 
- * Schedule must not be called from the tasklet itself (a lockup occurs) + * If this tasklet is already running on another CPU (or schedule is called + from tasklet itself), it is rescheduled for later. * Tasklet is strictly serialized wrt itself, but not wrt another tasklets. If client needs some intertask synchronization, he makes it with spinlocks. @@ -531,36 +516,27 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ - TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ - TASKLET_STATE_PENDING /* Tasklet is pending */ + TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ }; -#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) -#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) -#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) - -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) +#ifdef CONFIG_SMP static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } -static inline int tasklet_tryunlock(struct tasklet_struct *t) -{ - return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; -} - static inline void tasklet_unlock(struct tasklet_struct *t) { smp_mb__before_clear_bit(); clear_bit(TASKLET_STATE_RUN, &(t)->state); } -extern void tasklet_unlock_wait(struct tasklet_struct *t); - +static inline void tasklet_unlock_wait(struct tasklet_struct *t) +{ + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +} #else #define tasklet_trylock(t) 1 -#define tasklet_tryunlock(t) 1 #define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock(t) do { } while (0) #endif @@ -609,8 +585,17 @@ static inline void tasklet_disable(struct tasklet_struct *t) smp_mb(); } -extern void tasklet_enable(struct tasklet_struct *t); -extern void tasklet_hi_enable(struct tasklet_struct *t); +static inline void tasklet_enable(struct tasklet_struct *t) +{ + smp_mb__before_atomic_dec(); + atomic_dec(&t->count); +} + +static inline void tasklet_hi_enable(struct tasklet_struct *t) +{ + smp_mb__before_atomic_dec(); + atomic_dec(&t->count); +} extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); @@ -642,12 +627,6 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) tasklet_kill(&ttimer->tasklet); } -#ifdef CONFIG_PREEMPT_RT_FULL -extern void softirq_early_init(void); -#else -static inline void softirq_early_init(void) { } -#endif - /* * Autoprobing for irqs: * diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index c4d870b..ae221a7 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -43,8 +43,8 @@ struct ipc_namespace { size_t shm_ctlmax; size_t shm_ctlall; - unsigned long shm_tot; int shm_ctlmni; + int shm_tot; /* * Defines whether IPC_RMID is forced for _all_ shm segments regardless * of shmctl() diff --git a/include/linux/irq.h b/include/linux/irq.h index 3929bbe..fdf2c4a 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -70,7 +70,6 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context * IRQ_NESTED_TRHEAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable - * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -95,14 +94,12 @@ enum { IRQ_NESTED_THREAD = (1 << 15), IRQ_NOTHREAD = (1 
<< 16), IRQ_PER_CPU_DEVID = (1 << 17), - IRQ_NO_SOFTIRQ_CALL = (1 << 18), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ - IRQ_NO_SOFTIRQ_CALL) + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index a7edc47..623325e 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -52,7 +52,6 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; - u64 random_ip; raw_spinlock_t lock; struct cpumask *percpu_enabled; #ifdef CONFIG_SMP diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index a52b35d..d176d65 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -25,6 +25,8 @@ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, #else # define trace_hardirqs_on() do { } while (0) @@ -37,15 +39,9 @@ # define trace_softirqs_enabled(p) 0 # define trace_hardirq_enter() do { } while (0) # define trace_hardirq_exit() do { } while (0) -# define INIT_TRACE_IRQFLAGS -#endif - -#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) -# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) -# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) -#else # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS #endif #if defined(CONFIG_IRQSOFF_TRACER) || \ @@ -151,23 +147,4 @@ #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ -/* - * local_irq* variants depending on RT/!RT - */ -#ifdef CONFIG_PREEMPT_RT_FULL -# define local_irq_disable_nort() do { } while (0) -# define local_irq_enable_nort() do { } while (0) -# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0) -# define local_irq_restore_nort(flags) do { (void)(flags); } while (0) -# define local_irq_disable_rt() local_irq_disable() -# define local_irq_enable_rt() local_irq_enable() -#else -# define local_irq_disable_nort() local_irq_disable() -# define local_irq_enable_nort() local_irq_enable() -# define local_irq_save_nort(flags) local_irq_save(flags) -# define local_irq_restore_nort(flags) local_irq_restore(flags) -# define local_irq_disable_rt() do { } while (0) -# define local_irq_enable_rt() do { } while (0) -#endif - #endif diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 383bef0..e30b663 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -498,7 +498,6 @@ struct transaction_s T_COMMIT, T_COMMIT_DFLUSH, T_COMMIT_JFLUSH, - T_COMMIT_CALLBACK, T_FINISHED } t_state; @@ -1211,7 +1210,6 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t tid); int jbd2_journal_start_commit(journal_t *journal, tid_t *tid); int jbd2_journal_force_commit_nested(journal_t *journal); int jbd2_log_wait_commit(journal_t *journal, tid_t tid); -int 
jbd2_complete_transaction(journal_t *journal, tid_t tid); int jbd2_log_do_checkpoint(journal_t *journal); int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid); diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h index 0dbc151..6133679 100644 --- a/include/linux/jbd_common.h +++ b/include/linux/jbd_common.h @@ -39,56 +39,32 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) static inline void jbd_lock_bh_state(struct buffer_head *bh) { -#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(BH_State, &bh->b_state); -#else - spin_lock(&bh->b_state_lock); -#endif } static inline int jbd_trylock_bh_state(struct buffer_head *bh) { -#ifndef CONFIG_PREEMPT_RT_BASE return bit_spin_trylock(BH_State, &bh->b_state); -#else - return spin_trylock(&bh->b_state_lock); -#endif } static inline int jbd_is_locked_bh_state(struct buffer_head *bh) { -#ifndef CONFIG_PREEMPT_RT_BASE return bit_spin_is_locked(BH_State, &bh->b_state); -#else - return spin_is_locked(&bh->b_state_lock); -#endif } static inline void jbd_unlock_bh_state(struct buffer_head *bh) { -#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(BH_State, &bh->b_state); -#else - spin_unlock(&bh->b_state_lock); -#endif } static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) { -#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(BH_JournalHead, &bh->b_state); -#else - spin_lock(&bh->b_journal_head_lock); -#endif } static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) { -#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(BH_JournalHead, &bh->b_state); -#else - spin_unlock(&bh->b_journal_head_lock); -#endif } #endif diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 8fb8edf..82ed068 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -75,6 +75,7 @@ extern int register_refined_jiffies(long clock_tick_rate); */ extern u64 __jiffy_data jiffies_64; extern unsigned long volatile __jiffy_data jiffies; +extern seqlock_t jiffies_lock; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 40c876b..0976fc4 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -50,8 +50,7 @@ #include #include -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \ - !defined(CONFIG_PREEMPT_BASE) +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) struct static_key { atomic_t enabled; diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 680ad23..7f6fe6e 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -115,7 +115,7 @@ extern int kdb_trap_printk; extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); extern __printf(1, 2) int kdb_printf(const char *, ...); typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); -#define in_kdb_printk() (kdb_trap_printk) + extern void kdb_init(int level); /* Access to kdb specific polling devices */ @@ -150,7 +150,6 @@ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } -#define in_kdb_printk() (0) static inline void kdb_init(int level) {} static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, char *help, short minlen) { return 0; } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 8b3086d..c566927 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -412,7 +412,6 @@ extern enum system_states { SYSTEM_HALT, SYSTEM_POWER_OFF, SYSTEM_RESTART, - SYSTEM_SUSPEND, } system_state; #define TAINT_PROPRIETARY_MODULE 0 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ffdf8b7..2c497ab 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -511,7 +511,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa, unsigned long len); + gpa_t gpa); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index b0bcce0..fa7cc72 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -71,7 +71,6 @@ struct gfn_to_hva_cache { u64 generation; gpa_t gpa; unsigned long hva; - unsigned long len; struct kvm_memory_slot *memslot; }; diff --git a/include/linux/lglock.h b/include/linux/lglock.h index d2c0d6d..0d24e93 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h @@ -42,37 +42,22 @@ #endif struct lglock { -#ifndef CONFIG_PREEMPT_RT_FULL arch_spinlock_t __percpu *lock; -#else - struct rt_mutex __percpu *lock; -#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lock_key; struct lockdep_map lock_dep_map; #endif }; -#ifndef CONFIG_PREEMPT_RT_FULL -# define DEFINE_LGLOCK(name) \ +#define DEFINE_LGLOCK(name) \ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ = __ARCH_SPIN_LOCK_UNLOCKED; \ struct lglock name = { .lock = &name ## _lock } -# define DEFINE_STATIC_LGLOCK(name) \ +#define DEFINE_STATIC_LGLOCK(name) \ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ = __ARCH_SPIN_LOCK_UNLOCKED; \ static struct lglock name = { .lock = &name ## _lock } -#else - -# define DEFINE_LGLOCK(name) \ - static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ - struct lglock name = { .lock = &name ## _lock } - -# define DEFINE_STATIC_LGLOCK(name) \ - static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ - static struct lglock name = { .lock = &name ## _lock } -#endif void lg_lock_init(struct lglock *lg, char *name); void lg_local_lock(struct lglock *lg); diff --git a/include/linux/libata.h b/include/linux/libata.h index 0621bca..649e5f8 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -398,7 +398,6 @@ enum { ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ - ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/list.h b/include/linux/list.h index 7a9851b..cc6d2aa 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -362,17 +362,6 @@ static inline void list_splice_tail_init(struct list_head *list, list_entry((ptr)->next, type, member) /** - * list_last_entry - 
get the last element from a list - * @ptr: the list head to take the element from. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. - * - * Note, that list is expected to be not empty. - */ -#define list_last_entry(ptr, type, member) \ - list_entry((ptr)->prev, type, member) - -/** * list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. diff --git a/include/linux/llist.h b/include/linux/llist.h index a5199f6..d0ab98f 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list) (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) /** + * llist_for_each_entry_safe - iterate safely against remove over some entries + * of lock-less list of given type. + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as a temporary storage. + * @node: the fist entry of deleted list entries. + * @member: the name of the llist_node with the struct. + * + * In general, some entries of the lock-less list can be traversed + * safely only after being removed from list, so start with an entry + * instead of list head. This variant allows removal of entries + * as we iterate. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each_entry_safe(pos, n, node, member) \ + for ((pos) = llist_entry((node), typeof(*(pos)), member), \ + (n) = (pos)->member.next; \ + &(pos)->member != NULL; \ + (pos) = llist_entry(n, typeof(*(pos)), member), \ + (n) = (&(pos)->member != NULL) ? 
(pos)->member.next : NULL) + +/** * llist_empty - tests whether a lock-less list is empty * @head: the list to test * diff --git a/include/linux/locallock.h b/include/linux/locallock.h deleted file mode 100644 index a5eea5d..0000000 --- a/include/linux/locallock.h +++ /dev/null @@ -1,253 +0,0 @@ -#ifndef _LINUX_LOCALLOCK_H -#define _LINUX_LOCALLOCK_H - -#include - -#ifdef CONFIG_PREEMPT_RT_BASE - -#ifdef CONFIG_DEBUG_SPINLOCK -# define LL_WARN(cond) WARN_ON(cond) -#else -# define LL_WARN(cond) do { } while (0) -#endif - -/* - * per cpu lock based substitute for local_irq_*() - */ -struct local_irq_lock { - spinlock_t lock; - struct task_struct *owner; - int nestcnt; - unsigned long flags; -}; - -#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ - DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ - .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } - -#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ - DECLARE_PER_CPU(struct local_irq_lock, lvar) - -#define local_irq_lock_init(lvar) \ - do { \ - int __cpu; \ - for_each_possible_cpu(__cpu) \ - spin_lock_init(&per_cpu(lvar, __cpu).lock); \ - } while (0) - -static inline void __local_lock(struct local_irq_lock *lv) -{ - if (lv->owner != current) { - spin_lock(&lv->lock); - LL_WARN(lv->owner); - LL_WARN(lv->nestcnt); - lv->owner = current; - } - lv->nestcnt++; -} - -#define local_lock(lvar) \ - do { __local_lock(&get_local_var(lvar)); } while (0) - -static inline int __local_trylock(struct local_irq_lock *lv) -{ - if (lv->owner != current && spin_trylock(&lv->lock)) { - LL_WARN(lv->owner); - LL_WARN(lv->nestcnt); - lv->owner = current; - lv->nestcnt = 1; - return 1; - } - return 0; -} - -#define local_trylock(lvar) \ - ({ \ - int __locked; \ - __locked = __local_trylock(&get_local_var(lvar)); \ - if (!__locked) \ - put_local_var(lvar); \ - __locked; \ - }) - -static inline void __local_unlock(struct local_irq_lock *lv) -{ - LL_WARN(lv->nestcnt == 0); - LL_WARN(lv->owner != current); - if (--lv->nestcnt) - return; - - lv->owner = NULL; - spin_unlock(&lv->lock); -} - -#define local_unlock(lvar) \ - do { \ - __local_unlock(&__get_cpu_var(lvar)); \ - put_local_var(lvar); \ - } while (0) - -static inline void __local_lock_irq(struct local_irq_lock *lv) -{ - spin_lock_irqsave(&lv->lock, lv->flags); - LL_WARN(lv->owner); - LL_WARN(lv->nestcnt); - lv->owner = current; - lv->nestcnt = 1; -} - -#define local_lock_irq(lvar) \ - do { __local_lock_irq(&get_local_var(lvar)); } while (0) - -#define local_lock_irq_on(lvar, cpu) \ - do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) - -static inline void __local_unlock_irq(struct local_irq_lock *lv) -{ - LL_WARN(!lv->nestcnt); - LL_WARN(lv->owner != current); - lv->owner = NULL; - lv->nestcnt = 0; - spin_unlock_irq(&lv->lock); -} - -#define local_unlock_irq(lvar) \ - do { \ - __local_unlock_irq(&__get_cpu_var(lvar)); \ - put_local_var(lvar); \ - } while (0) - -#define local_unlock_irq_on(lvar, cpu) \ - do { \ - __local_unlock_irq(&per_cpu(lvar, cpu)); \ - } while (0) - -static inline int __local_lock_irqsave(struct local_irq_lock *lv) -{ - if (lv->owner != current) { - __local_lock_irq(lv); - return 0; - } else { - lv->nestcnt++; - return 1; - } -} - -#define local_lock_irqsave(lvar, _flags) \ - do { \ - if (__local_lock_irqsave(&get_local_var(lvar))) \ - put_local_var(lvar); \ - _flags = __get_cpu_var(lvar).flags; \ - } while (0) - -#define local_lock_irqsave_on(lvar, _flags, cpu) \ - do { \ - __local_lock_irqsave(&per_cpu(lvar, cpu)); \ - _flags = per_cpu(lvar, cpu).flags; \ - } while (0) - -static inline int 
__local_unlock_irqrestore(struct local_irq_lock *lv, - unsigned long flags) -{ - LL_WARN(!lv->nestcnt); - LL_WARN(lv->owner != current); - if (--lv->nestcnt) - return 0; - - lv->owner = NULL; - spin_unlock_irqrestore(&lv->lock, lv->flags); - return 1; -} - -#define local_unlock_irqrestore(lvar, flags) \ - do { \ - if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \ - put_local_var(lvar); \ - } while (0) - -#define local_unlock_irqrestore_on(lvar, flags, cpu) \ - do { \ - __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ - } while (0) - -#define local_spin_trylock_irq(lvar, lock) \ - ({ \ - int __locked; \ - local_lock_irq(lvar); \ - __locked = spin_trylock(lock); \ - if (!__locked) \ - local_unlock_irq(lvar); \ - __locked; \ - }) - -#define local_spin_lock_irq(lvar, lock) \ - do { \ - local_lock_irq(lvar); \ - spin_lock(lock); \ - } while (0) - -#define local_spin_unlock_irq(lvar, lock) \ - do { \ - spin_unlock(lock); \ - local_unlock_irq(lvar); \ - } while (0) - -#define local_spin_lock_irqsave(lvar, lock, flags) \ - do { \ - local_lock_irqsave(lvar, flags); \ - spin_lock(lock); \ - } while (0) - -#define local_spin_unlock_irqrestore(lvar, lock, flags) \ - do { \ - spin_unlock(lock); \ - local_unlock_irqrestore(lvar, flags); \ - } while (0) - -#define get_locked_var(lvar, var) \ - (*({ \ - local_lock(lvar); \ - &__get_cpu_var(var); \ - })) - -#define put_locked_var(lvar, var) local_unlock(lvar) - -#define local_lock_cpu(lvar) \ - ({ \ - local_lock(lvar); \ - smp_processor_id(); \ - }) - -#define local_unlock_cpu(lvar) local_unlock(lvar) - -#else /* PREEMPT_RT_BASE */ - -#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar -#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar - -static inline void local_irq_lock_init(int lvar) { } - -#define local_lock(lvar) preempt_disable() -#define local_unlock(lvar) preempt_enable() -#define local_lock_irq(lvar) local_irq_disable() -#define local_unlock_irq(lvar) local_irq_enable() -#define local_lock_irqsave(lvar, flags) local_irq_save(flags) -#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) - -#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) -#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) -#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) -#define local_spin_lock_irqsave(lvar, lock, flags) \ - spin_lock_irqsave(lock, flags) -#define local_spin_unlock_irqrestore(lvar, lock, flags) \ - spin_unlock_irqrestore(lock, flags) - -#define get_locked_var(lvar, var) get_cpu_var(var) -#define put_locked_var(lvar, var) put_cpu_var(var) - -#define local_lock_cpu(lvar) get_cpu() -#define local_unlock_cpu(lvar) put_cpu() - -#endif - -#endif diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index acf4d31..4b117a3 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -735,7 +735,6 @@ struct rtsx_pcr { unsigned int card_inserted; unsigned int card_removed; - unsigned int card_exist; struct delayed_work carddet_work; struct delayed_work idle_work; @@ -800,7 +799,6 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card); int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card); -int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card); int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage); unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr); void 
rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr); diff --git a/include/linux/mm.h b/include/linux/mm.h index e3b3a15..66e2f7c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1259,59 +1259,27 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a * overflow into the next struct page (as it might with DEBUG_SPINLOCK). * When freeing, reset page->mapping so free_pages_check won't complain. */ -#ifndef CONFIG_PREEMPT_RT_FULL - #define __pte_lockptr(page) &((page)->ptl) - -static inline struct page *pte_lock_init(struct page *page) -{ - spin_lock_init(__pte_lockptr(page)); - return page; -} - +#define pte_lock_init(_page) do { \ + spin_lock_init(__pte_lockptr(_page)); \ +} while (0) #define pte_lock_deinit(page) ((page)->mapping = NULL) - -#else /* !PREEMPT_RT_FULL */ - -/* - * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the - * page frame, hence it only has a pointer and we need to dynamically - * allocate the lock when we allocate PTE-pages. - * - * This is an overall win, since only a small fraction of the pages - * will be PTE pages under normal circumstances. - */ - -#define __pte_lockptr(page) ((page)->ptl) - -extern struct page *pte_lock_init(struct page *page); -extern void pte_lock_deinit(struct page *page); - -#endif /* PREEMPT_RT_FULL */ - #define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) #else /* !USE_SPLIT_PTLOCKS */ /* * We use mm->page_table_lock to guard all pagetable pages of the mm. */ -static inline struct page *pte_lock_init(struct page *page) { return page; } +#define pte_lock_init(page) do {} while (0) #define pte_lock_deinit(page) do {} while (0) #define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) #endif /* USE_SPLIT_PTLOCKS */ -static inline struct page *__pgtable_page_ctor(struct page *page) +static inline void pgtable_page_ctor(struct page *page) { - page = pte_lock_init(page); - if (page) - inc_zone_page_state(page, NR_PAGETABLE); - return page; + pte_lock_init(page); + inc_zone_page_state(page, NR_PAGETABLE); } -#define pgtable_page_ctor(page) \ -do { \ - page = __pgtable_page_ctor(page); \ -} while (0) - static inline void pgtable_page_dtor(struct page *page) { pte_lock_deinit(page); @@ -1655,8 +1623,6 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); -int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); - struct page *follow_page(struct vm_area_struct *, unsigned long address, unsigned int foll_flags); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6270199..f8f5162 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -142,11 +141,7 @@ struct page { * system if PG_buddy is set. 
*/ #if USE_SPLIT_PTLOCKS -# ifndef CONFIG_PREEMPT_RT_FULL spinlock_t ptl; -# else - spinlock_t *ptl; -# endif #endif struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ struct page *first_page; /* Compound tail pages */ @@ -441,9 +436,6 @@ struct mm_struct { int first_nid; #endif struct uprobes_state uprobes_state; -#ifdef CONFIG_PREEMPT_RT_BASE - struct rcu_head delayed_drop; -#endif }; /* first nid will either be a valid NID or one of these values */ diff --git a/include/linux/mount.h b/include/linux/mount.h index 73005f9..d7029f4 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -47,8 +47,6 @@ struct mnt_namespace; #define MNT_INTERNAL 0x4000 -#define MNT_LOCK_READONLY 0x400000 - struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ struct super_block *mnt_sb; /* pointer to superblock */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index ef52d9c..7ccb3c5 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -187,13 +187,6 @@ typedef enum { * This happens with the Renesas AG-AND chips, possibly others. */ #define BBT_AUTO_REFRESH 0x00000080 -/* - * Chip requires ready check on read (for auto-incremented sequential read). - * True only for small page devices; large page devices do not support - * autoincrement. - */ -#define NAND_NEED_READRDY 0x00000100 - /* Chip does not allow subpage writes */ #define NAND_NO_SUBPAGE_WRITE 0x00000200 diff --git a/include/linux/mutex.h b/include/linux/mutex.h index bdf1da2..9121595 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -17,17 +17,6 @@ #include -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { .name = #lockname } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - -#ifdef CONFIG_PREEMPT_RT_FULL -# include -#else - /* * Simple, straightforward mutexes with strict semantics: * @@ -106,6 +95,13 @@ do { \ static inline void mutex_destroy(struct mutex *lock) {} #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + #define __MUTEX_INITIALIZER(lockname) \ { .count = ATOMIC_INIT(1) \ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ @@ -171,9 +167,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); */ extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); - -#endif /* !PREEMPT_RT_FULL */ - extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h deleted file mode 100644 index c38a44b..0000000 --- a/include/linux/mutex_rt.h +++ /dev/null @@ -1,84 +0,0 @@ -#ifndef __LINUX_MUTEX_RT_H -#define __LINUX_MUTEX_RT_H - -#ifndef __LINUX_MUTEX_H -#error "Please include mutex.h" -#endif - -#include - -/* FIXME: Just for __lockfunc */ -#include - -struct mutex { - struct rt_mutex lock; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#define __MUTEX_INITIALIZER(mutexname) \ - { \ - .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ - __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ - } - -#define DEFINE_MUTEX(mutexname) \ - struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) - -extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); -extern void __lockfunc _mutex_lock(struct mutex *lock); -extern int __lockfunc 
_mutex_lock_interruptible(struct mutex *lock); -extern int __lockfunc _mutex_lock_killable(struct mutex *lock); -extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); -extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); -extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); -extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); -extern int __lockfunc _mutex_trylock(struct mutex *lock); -extern void __lockfunc _mutex_unlock(struct mutex *lock); - -#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) -#define mutex_lock(l) _mutex_lock(l) -#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) -#define mutex_lock_killable(l) _mutex_lock_killable(l) -#define mutex_trylock(l) _mutex_trylock(l) -#define mutex_unlock(l) _mutex_unlock(l) -#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) -# define mutex_lock_interruptible_nested(l, s) \ - _mutex_lock_interruptible_nested(l, s) -# define mutex_lock_killable_nested(l, s) \ - _mutex_lock_killable_nested(l, s) - -# define mutex_lock_nest_lock(lock, nest_lock) \ -do { \ - typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ - _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ -} while (0) - -#else -# define mutex_lock_nested(l, s) _mutex_lock(l) -# define mutex_lock_interruptible_nested(l, s) \ - _mutex_lock_interruptible(l) -# define mutex_lock_killable_nested(l, s) \ - _mutex_lock_killable(l) -# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) -#endif - -# define mutex_init(mutex) \ -do { \ - static struct lock_class_key __key; \ - \ - rt_mutex_init(&(mutex)->lock); \ - __mutex_do_init((mutex), #mutex, &__key); \ -} while (0) - -# define __mutex_init(mutex, name, key) \ -do { \ - rt_mutex_init(&(mutex)->lock); \ - __mutex_do_init((mutex), name, key); \ -} while (0) - -#endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0b58fd6..9ef07d0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -208,9 +208,9 @@ struct netdev_hw_addr { #define NETDEV_HW_ADDR_T_SLAVE 3 #define NETDEV_HW_ADDR_T_UNICAST 4 #define NETDEV_HW_ADDR_T_MULTICAST 5 + bool synced; bool global_use; int refcount; - int synced; struct rcu_head rcu_head; }; @@ -1579,7 +1579,7 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); extern rwlock_t dev_base_lock; /* Device list lock */ -extern struct mutex devnet_rename_mutex; +extern seqcount_t devnet_rename_seq; /* Device rename seq */ #define for_each_netdev(net, d) \ @@ -1783,7 +1783,6 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; - struct sk_buff_head tofree_queue; }; static inline void input_queue_head_incr(struct softnet_data *sd) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 7d083af..dd49566 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -3,7 +3,6 @@ #include -#include #include /** @@ -285,8 +284,6 @@ extern void xt_free_table_info(struct xt_table_info *info); */ DECLARE_PER_CPU(seqcount_t, xt_recseq); -DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); - /** * xt_write_recseq_begin - start of a write section * @@ -301,9 +298,6 @@ static inline unsigned int xt_write_recseq_begin(void) { unsigned int addend; - /* RT protection */ - 
local_lock(xt_write_lock); - /* * Low order bit of sequence is set if we already * called xt_write_recseq_begin(). @@ -334,7 +328,6 @@ static inline void xt_write_recseq_end(unsigned int addend) /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ smp_wmb(); __this_cpu_add(xt_recseq.sequence, addend); - local_unlock(xt_write_lock); } /* diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 6bfd703..d65746e 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -42,7 +42,9 @@ * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. * As compensation, srcu_notifier_chain_unregister() is rather expensive. * SRCU notifier chains should be used when the chain will be called very - * often but notifier_blocks will seldom be removed. + * often but notifier_blocks will seldom be removed. Also, SRCU notifier + * chains are slightly more difficult to use because they require special + * runtime initialization. */ struct notifier_block { @@ -83,7 +85,7 @@ struct srcu_notifier_head { (name)->head = NULL; \ } while (0) -/* srcu_notifier_heads must be cleaned up dynamically */ +/* srcu_notifier_heads must be initialized and cleaned up dynamically */ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #define srcu_cleanup_notifier_head(name) \ cleanup_srcu_struct(&(name)->srcu); @@ -96,13 +98,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); .head = NULL } #define RAW_NOTIFIER_INIT(name) { \ .head = NULL } - -#define SRCU_NOTIFIER_INIT(name, pcpu) \ - { \ - .mutex = __MUTEX_INITIALIZER(name.mutex), \ - .head = NULL, \ - .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ - } +/* srcu_notifier_heads cannot be initialized statically */ #define ATOMIC_NOTIFIER_HEAD(name) \ struct atomic_notifier_head name = \ @@ -114,18 +110,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); struct raw_notifier_head name = \ RAW_NOTIFIER_INIT(name) -#define _SRCU_NOTIFIER_HEAD(name, mod) \ - static DEFINE_PER_CPU(struct srcu_struct_array, \ - name##_head_srcu_array); \ - mod struct srcu_notifier_head name = \ - SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) - -#define SRCU_NOTIFIER_HEAD(name) \ - _SRCU_NOTIFIER_HEAD(name, ) - -#define SRCU_NOTIFIER_HEAD_STATIC(name) \ - _SRCU_NOTIFIER_HEAD(name, static) - #ifdef __KERNEL__ extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, diff --git a/include/linux/of.h b/include/linux/of.h index bb35c42..5ebcc5c 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -92,7 +92,7 @@ static inline void of_node_put(struct device_node *node) { } extern struct device_node *of_allnodes; extern struct device_node *of_chosen; extern struct device_node *of_aliases; -extern raw_spinlock_t devtree_lock; +extern rwlock_t devtree_lock; static inline bool of_have_populated_dt(void) { diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index ca67e80..777a524 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -24,9 +24,6 @@ enum { */ struct page_cgroup { unsigned long flags; -#ifdef CONFIG_PREEMPT_RT_BASE - spinlock_t pcg_lock; -#endif struct mem_cgroup *mem_cgroup; }; @@ -77,20 +74,12 @@ static inline void lock_page_cgroup(struct page_cgroup *pc) * Don't take this lock in IRQ context. 
* This lock is for pc->mem_cgroup, USED, MIGRATION */ -#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(PCG_LOCK, &pc->flags); -#else - spin_lock(&pc->pcg_lock); -#endif } static inline void unlock_page_cgroup(struct page_cgroup *pc) { -#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(PCG_LOCK, &pc->flags); -#else - spin_unlock(&pc->pcg_lock); -#endif } #else /* CONFIG_MEMCG */ @@ -113,10 +102,6 @@ static inline void __init page_cgroup_init_flatmem(void) { } -static inline void page_cgroup_lock_init(struct page_cgroup *pc) -{ -} - #endif /* CONFIG_MEMCG */ #include diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 12b394f..cc88172 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -48,31 +48,6 @@ preempt_enable(); \ } while (0) -#ifndef CONFIG_PREEMPT_RT_FULL -# define get_local_var(var) get_cpu_var(var) -# define put_local_var(var) put_cpu_var(var) -# define get_local_ptr(var) get_cpu_ptr(var) -# define put_local_ptr(var) put_cpu_ptr(var) -#else -# define get_local_var(var) (*({ \ - migrate_disable(); \ - &__get_cpu_var(var); })) - -# define put_local_var(var) do { \ - (void)&(var); \ - migrate_enable(); \ -} while (0) - -# define get_local_ptr(var) ({ \ - migrate_disable(); \ - this_cpu_ptr(var); }) - -# define put_local_ptr(var) do { \ - (void)(var); \ - migrate_enable(); \ -} while (0) -#endif - /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a280650..6bfb2faa 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -794,12 +794,6 @@ static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } #endif -#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) -extern void perf_restore_debug_store(void); -#else -static inline void perf_restore_debug_store(void) { } -#endif - #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) /* diff --git a/include/linux/pid.h b/include/linux/pid.h index 3b67343..2381c97 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -2,7 +2,6 @@ #define _LINUX_PID_H #include -#include enum pid_type { diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h index bb3cd58..24368a2 100644 --- a/include/linux/platform_data/cpsw.h +++ b/include/linux/platform_data/cpsw.h @@ -21,8 +21,6 @@ struct cpsw_slave_data { char phy_id[MII_BUS_ID_SIZE]; int phy_if; u8 mac_addr[ETH_ALEN]; - u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ - }; struct cpsw_platform_data { @@ -30,15 +28,13 @@ struct cpsw_platform_data { u32 channels; /* number of cpdma channels (symmetric) */ u32 slaves; /* number of slave cpgmac ports */ struct cpsw_slave_data *slave_data; - u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ + u32 cpts_active_slave; /* time stamping slave */ u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */ u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ u32 ale_entries; /* ale table size */ u32 bd_ram_size; /*buffer descriptor ram size */ u32 rx_descs; /* Number of Rx Descriptios */ u32 mac_control; /* Mac control register */ - u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ - bool dual_emac; /* Enable Dual EMAC mode */ }; #endif /* __CPSW_H__ */ diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 7db3eb9..0cc45ae 100644 --- a/include/linux/pps_kernel.h +++ 
b/include/linux/pps_kernel.h @@ -43,7 +43,7 @@ struct pps_source_info { int event, void *data); /* PPS echo function */ struct module *owner; - struct device *dev; /* Parent device for device_create */ + struct device *dev; }; struct pps_event_time { @@ -69,7 +69,6 @@ struct pps_device { wait_queue_head_t queue; /* PPS event queue */ unsigned int id; /* PPS source unique ID */ - void const *lookup_cookie; /* pps_lookup_dev only */ struct cdev cdev; struct device *dev; struct fasync_struct *async_queue; /* fasync method */ @@ -83,26 +82,16 @@ struct pps_device { extern struct device_attribute pps_attrs[]; /* - * Internal functions. - * - * These are not actually part of the exported API, but this is a - * convenient header file to put them in. - */ - -extern int pps_register_cdev(struct pps_device *pps); -extern void pps_unregister_cdev(struct pps_device *pps); - -/* * Exported functions */ extern struct pps_device *pps_register_source( struct pps_source_info *info, int default_params); extern void pps_unregister_source(struct pps_device *pps); +extern int pps_register_cdev(struct pps_device *pps); +extern void pps_unregister_cdev(struct pps_device *pps); extern void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, void *data); -/* Look up a pps device by magic cookie */ -struct pps_device *pps_lookup_dev(void const *cookie); static inline void timespec_to_pps_ktime(struct pps_ktime *kt, struct timespec ts) diff --git a/include/linux/preempt.h b/include/linux/preempt.h index a7f4212..5a710b9 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -23,38 +23,15 @@ #define preempt_count() (current_thread_info()->preempt_count) -#ifdef CONFIG_PREEMPT_LAZY -#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) -#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) -#define inc_preempt_lazy_count() add_preempt_lazy_count(1) -#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) -#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) -#else -#define add_preempt_lazy_count(val) do { } while (0) -#define sub_preempt_lazy_count(val) do { } while (0) -#define inc_preempt_lazy_count() do { } while (0) -#define dec_preempt_lazy_count() do { } while (0) -#define preempt_lazy_count() (0) -#endif - #ifdef CONFIG_PREEMPT asmlinkage void preempt_schedule(void); -# ifdef CONFIG_PREEMPT_LAZY -#define preempt_check_resched() \ -do { \ - if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \ - test_thread_flag(TIF_NEED_RESCHED_LAZY))) \ - preempt_schedule(); \ -} while (0) -# else #define preempt_check_resched() \ do { \ - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ preempt_schedule(); \ } while (0) -# endif #else /* !CONFIG_PREEMPT */ @@ -71,36 +48,17 @@ do { \ barrier(); \ } while (0) -#define preempt_lazy_disable() \ -do { \ - inc_preempt_lazy_count(); \ - barrier(); \ -} while (0) - #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ dec_preempt_count(); \ } while (0) -#ifndef CONFIG_PREEMPT_RT_BASE -# define preempt_enable_no_resched() sched_preempt_enable_no_resched() -# define preempt_check_resched_rt() barrier() -#else -# define preempt_enable_no_resched() preempt_enable() -# define preempt_check_resched_rt() preempt_check_resched() -#endif +#define preempt_enable_no_resched() sched_preempt_enable_no_resched() #define preempt_enable() \ do { \ - sched_preempt_enable_no_resched(); \ - barrier(); \ - 
preempt_check_resched(); \ -} while (0) - -#define preempt_lazy_enable() \ -do { \ - dec_preempt_lazy_count(); \ + preempt_enable_no_resched(); \ barrier(); \ preempt_check_resched(); \ } while (0) @@ -135,45 +93,17 @@ do { \ #else /* !CONFIG_PREEMPT_COUNT */ -/* - * Even if we don't have any preemption, we need preempt disable/enable - * to be barriers, so that we don't have things like get_user/put_user - * that can cause faults and scheduling migrate into our preempt-protected - * region. - */ -#define preempt_disable() barrier() -#define sched_preempt_enable_no_resched() barrier() -#define preempt_enable_no_resched() barrier() -#define preempt_enable() barrier() +#define preempt_disable() do { } while (0) +#define sched_preempt_enable_no_resched() do { } while (0) +#define preempt_enable_no_resched() do { } while (0) +#define preempt_enable() do { } while (0) -#define preempt_disable_notrace() barrier() -#define preempt_enable_no_resched_notrace() barrier() -#define preempt_enable_notrace() barrier() -#define preempt_check_resched_rt() barrier() +#define preempt_disable_notrace() do { } while (0) +#define preempt_enable_no_resched_notrace() do { } while (0) +#define preempt_enable_notrace() do { } while (0) #endif /* CONFIG_PREEMPT_COUNT */ -#ifdef CONFIG_PREEMPT_RT_FULL -# define preempt_disable_rt() preempt_disable() -# define preempt_enable_rt() preempt_enable() -# define preempt_disable_nort() barrier() -# define preempt_enable_nort() barrier() -# ifdef CONFIG_SMP - extern void migrate_disable(void); - extern void migrate_enable(void); -# else /* CONFIG_SMP */ -# define migrate_disable() barrier() -# define migrate_enable() barrier() -# endif /* CONFIG_SMP */ -#else -# define preempt_disable_rt() barrier() -# define preempt_enable_rt() barrier() -# define preempt_disable_nort() preempt_disable() -# define preempt_enable_nort() preempt_enable() -# define migrate_disable() preempt_disable() -# define migrate_enable() preempt_enable() -#endif - #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier; diff --git a/include/linux/printk.h b/include/linux/printk.h index 812d102..9afc01e 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -95,16 +95,8 @@ int no_printk(const char *fmt, ...) return 0; } -#ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); -void early_vprintk(const char *fmt, va_list ap); -extern void printk_kill(void); -#else -static inline __printf(1, 2) __cold -void early_printk(const char *s, ...) 
{ } -static inline void printk_kill(void) { } -#endif extern int printk_needs_cpu(int cpu); extern void printk_tick(void); @@ -140,6 +132,7 @@ extern int __printk_ratelimit(const char *func); #define printk_ratelimit() __printk_ratelimit(__func__) extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); + extern int printk_delay_msec; extern int dmesg_restrict; extern int kptr_restrict; diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 75d0176..1788909 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -68,18 +68,12 @@ struct pstore_info { #ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); -extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); #else static inline int pstore_register(struct pstore_info *psi) { return -ENODEV; } -static inline bool -pstore_cannot_block_path(enum kmsg_dump_reason reason) -{ - return false; -} #endif #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/quota.h b/include/linux/quota.h index d133711..58fdef12 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -405,7 +405,6 @@ struct quota_module_name { #define INIT_QUOTA_MODULE_NAMES {\ {QFMT_VFS_OLD, "quota_v1"},\ {QFMT_VFS_V0, "quota_v2"},\ - {QFMT_VFS_V1, "quota_v2"},\ {0, NULL}} #endif /* _QUOTA_ */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 7ddfbf9..ffc444c 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -230,13 +230,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); unsigned long radix_tree_prev_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); - -#ifndef CONFIG_PREEMPT_RT_FULL int radix_tree_preload(gfp_t gfp_mask); -#else -static inline int radix_tree_preload(gfp_t gm) { return 0; } -#endif - void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); @@ -261,7 +255,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); static inline void radix_tree_preload_end(void) { - preempt_enable_nort(); + preempt_enable(); } /** diff --git a/include/linux/random.h b/include/linux/random.h index f975382..d984608 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -12,7 +12,7 @@ extern void add_device_randomness(const void *, unsigned int); extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); -extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); +extern void add_interrupt_randomness(int irq, int irq_flags); extern void get_random_bytes(void *buf, int nbytes); extern void get_random_bytes_arch(void *buf, int nbytes); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 33e1d2e..275aa3f1 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -120,9 +120,6 @@ extern void call_rcu(struct rcu_head *head, #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_PREEMPT_RT_FULL -#define call_rcu_bh call_rcu -#else /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. @@ -146,7 +143,6 @@ extern void call_rcu(struct rcu_head *head, */ extern void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head)); -#endif /** * call_rcu_sched() - Queue an RCU for invocation after sched grace period. 
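
[Context for the rcupdate.h hunk above: the revert restores the separate
call_rcu_bh() declaration that the RT tree had aliased to call_rcu(). A
minimal sketch of the callback pattern both variants serve — the names
my_data, my_release, and my_update are hypothetical, chosen only for
illustration, and assume the usual <linux/rcupdate.h>/<linux/slab.h>
environment:

	struct my_data {
		int value;
		struct rcu_head rcu;
	};

	/* Invoked after a grace period: no reader still sees the old object. */
	static void my_release(struct rcu_head *head)
	{
		kfree(container_of(head, struct my_data, rcu));
	}

	/* Publish a new version and defer freeing the old one. */
	static void my_update(struct my_data __rcu **slot, struct my_data *new)
	{
		struct my_data *old = rcu_dereference_protected(*slot, 1);

		rcu_assign_pointer(*slot, new);
		if (old)
			call_rcu(&old->rcu, my_release);
	}

Readers pair this with rcu_read_lock()/rcu_read_unlock(), or the _bh
variants for the softirq-protected flavor whose declaration is restored
here.]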
@@ -186,11 +182,6 @@ void synchronize_rcu(void); * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() (current->rcu_read_lock_nesting) -#ifndef CONFIG_PREEMPT_RT_FULL -#define sched_rcu_preempt_depth() rcu_preempt_depth() -#else -static inline int sched_rcu_preempt_depth(void) { return 0; } -#endif #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -214,8 +205,6 @@ static inline int rcu_preempt_depth(void) return 0; } -#define sched_rcu_preempt_depth() rcu_preempt_depth() - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* Internal to kernel */ @@ -370,14 +359,7 @@ static inline int rcu_read_lock_held(void) * rcu_read_lock_bh_held() is defined out of line to avoid #include-file * hell. */ -#ifdef CONFIG_PREEMPT_RT_FULL -static inline int rcu_read_lock_bh_held(void) -{ - return rcu_read_lock_held(); -} -#else extern int rcu_read_lock_bh_held(void); -#endif /** * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? @@ -825,14 +807,10 @@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); -#ifdef CONFIG_PREEMPT_RT_FULL - rcu_read_lock(); -#else __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); rcu_lockdep_assert(!rcu_is_cpu_idle(), "rcu_read_lock_bh() used illegally while idle"); -#endif } /* @@ -842,14 +820,10 @@ static inline void rcu_read_lock_bh(void) */ static inline void rcu_read_unlock_bh(void) { -#ifdef CONFIG_PREEMPT_RT_FULL - rcu_read_unlock(); -#else rcu_lockdep_assert(!rcu_is_cpu_idle(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); -#endif local_bh_enable(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index f1472a2..952b793 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -45,11 +45,7 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(cpu); } -#ifdef CONFIG_PREEMPT_RT_FULL -# define synchronize_rcu_bh synchronize_rcu -#else extern void synchronize_rcu_bh(void); -#endif extern void synchronize_sched_expedited(void); extern void synchronize_rcu_expedited(void); @@ -77,30 +73,20 @@ static inline void synchronize_rcu_bh_expedited(void) } extern void rcu_barrier(void); -#ifdef CONFIG_PREEMPT_RT_FULL -# define rcu_barrier_bh rcu_barrier -#else extern void rcu_barrier_bh(void); -#endif extern void rcu_barrier_sched(void); extern unsigned long rcutorture_testseq; extern unsigned long rcutorture_vernum; extern long rcu_batches_completed(void); +extern long rcu_batches_completed_bh(void); extern long rcu_batches_completed_sched(void); extern void rcu_force_quiescent_state(void); +extern void rcu_bh_force_quiescent_state(void); extern void rcu_sched_force_quiescent_state(void); extern void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; -#ifndef CONFIG_PREEMPT_RT_FULL -extern void rcu_bh_force_quiescent_state(void); -extern long rcu_batches_completed_bh(void); -#else -# define rcu_bh_force_quiescent_state rcu_force_quiescent_state -# define rcu_batches_completed_bh rcu_batches_completed -#endif - #endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 5ebd0bb..de17134 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -14,7 +14,7 @@ #include #include -#include +#include extern int max_lock_depth; /* for sysctl */ @@ -29,10 +29,9 @@ struct rt_mutex { raw_spinlock_t wait_lock; struct plist_head wait_list; struct task_struct *owner; - int 
save_state; #ifdef CONFIG_DEBUG_RT_MUTEXES - const char *file; - const char *name; + int save_state; + const char *name, *file; int line; void *magic; #endif @@ -57,39 +56,19 @@ struct hrtimer_sleeper; #ifdef CONFIG_DEBUG_RT_MUTEXES # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ , .name = #mutexname, .file = __FILE__, .line = __LINE__ - -# define rt_mutex_init(mutex) \ - do { \ - raw_spin_lock_init(&(mutex)->wait_lock); \ - __rt_mutex_init(mutex, #mutex); \ - } while (0) - +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) - -# define rt_mutex_init(mutex) \ - do { \ - raw_spin_lock_init(&(mutex)->wait_lock); \ - __rt_mutex_init(mutex, #mutex); \ - } while (0) - +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) # define rt_mutex_debug_task_free(t) do { } while (0) #endif -#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ - .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \ , .owner = NULL \ - __DEBUG_RT_MUTEX_INITIALIZER(mutexname) - - -#define __RT_MUTEX_INITIALIZER(mutexname) \ - { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } - -#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ - { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ - , .save_state = 1 } + __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) @@ -111,7 +90,6 @@ extern void rt_mutex_destroy(struct rt_mutex *lock); extern void rt_mutex_lock(struct rt_mutex *lock); extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, int detect_deadlock); -extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, int detect_deadlock); diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h deleted file mode 100644 index 853ee36..0000000 --- a/include/linux/rwlock_rt.h +++ /dev/null @@ -1,123 +0,0 @@ -#ifndef __LINUX_RWLOCK_RT_H -#define __LINUX_RWLOCK_RT_H - -#ifndef __LINUX_SPINLOCK_H -#error Do not include directly. 
Use spinlock.h -#endif - -#define rwlock_init(rwl) \ -do { \ - static struct lock_class_key __key; \ - \ - rt_mutex_init(&(rwl)->lock); \ - __rt_rwlock_init(rwl, #rwl, &__key); \ -} while (0) - -extern void __lockfunc rt_write_lock(rwlock_t *rwlock); -extern void __lockfunc rt_read_lock(rwlock_t *rwlock); -extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); -extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); -extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); -extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); -extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); -extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); -extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); -extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); - -#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) -#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) - -#define write_trylock_irqsave(lock, flags) \ - __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) - -#define read_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - migrate_disable(); \ - flags = rt_read_lock_irqsave(lock); \ - } while (0) - -#define write_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - migrate_disable(); \ - flags = rt_write_lock_irqsave(lock); \ - } while (0) - -#define read_lock(lock) \ - do { \ - migrate_disable(); \ - rt_read_lock(lock); \ - } while (0) - -#define read_lock_bh(lock) \ - do { \ - local_bh_disable(); \ - migrate_disable(); \ - rt_read_lock(lock); \ - } while (0) - -#define read_lock_irq(lock) read_lock(lock) - -#define write_lock(lock) \ - do { \ - migrate_disable(); \ - rt_write_lock(lock); \ - } while (0) - -#define write_lock_bh(lock) \ - do { \ - local_bh_disable(); \ - migrate_disable(); \ - rt_write_lock(lock); \ - } while (0) - -#define write_lock_irq(lock) write_lock(lock) - -#define read_unlock(lock) \ - do { \ - rt_read_unlock(lock); \ - migrate_enable(); \ - } while (0) - -#define read_unlock_bh(lock) \ - do { \ - rt_read_unlock(lock); \ - migrate_enable(); \ - local_bh_enable(); \ - } while (0) - -#define read_unlock_irq(lock) read_unlock(lock) - -#define write_unlock(lock) \ - do { \ - rt_write_unlock(lock); \ - migrate_enable(); \ - } while (0) - -#define write_unlock_bh(lock) \ - do { \ - rt_write_unlock(lock); \ - migrate_enable(); \ - local_bh_enable(); \ - } while (0) - -#define write_unlock_irq(lock) write_unlock(lock) - -#define read_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - (void) flags; \ - rt_read_unlock(lock); \ - migrate_enable(); \ - } while (0) - -#define write_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - (void) flags; \ - rt_write_unlock(lock); \ - migrate_enable(); \ - } while (0) - -#endif diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index d0da966..cc0072e 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,10 +1,6 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H -#if !defined(__LINUX_SPINLOCK_TYPES_H) -# error "Do not include directly, include spinlock_types.h" -#endif - /* * include/linux/rwlock_types.h - generic rwlock type definitions * and initializers @@ -47,7 +43,6 @@ typedef struct { RW_DEP_MAP_INIT(lockname) } #endif -#define DEFINE_RWLOCK(name) \ - rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) 
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) #endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h deleted file mode 100644 index b138321..0000000 --- a/include/linux/rwlock_types_rt.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __LINUX_RWLOCK_TYPES_RT_H -#define __LINUX_RWLOCK_TYPES_RT_H - -#ifndef __LINUX_SPINLOCK_TYPES_H -#error "Do not include directly. Include spinlock_types.h instead" -#endif - -/* - * rwlocks - rtmutex which allows single reader recursion - */ -typedef struct { - struct rt_mutex lock; - int read_depth; - unsigned int break_lock; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} rwlock_t; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -#else -# define RW_DEP_MAP_INIT(lockname) -#endif - -#define __RW_LOCK_UNLOCKED(name) \ - { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ - RW_DEP_MAP_INIT(name) } - -#define DEFINE_RWLOCK(name) \ - rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) - -#endif diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index f994bd3..8da67d6 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -16,10 +16,6 @@ #include -#ifdef CONFIG_PREEMPT_RT_FULL -#include -#else /* PREEMPT_RT_FULL */ - struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK @@ -143,6 +139,4 @@ do { \ # define down_write_nested(sem, subclass) down_write(sem) #endif -#endif /* !PREEMPT_RT_FULL */ - #endif /* _LINUX_RWSEM_H */ diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h deleted file mode 100644 index e94d945..0000000 --- a/include/linux/rwsem_rt.h +++ /dev/null @@ -1,128 +0,0 @@ -#ifndef _LINUX_RWSEM_RT_H -#define _LINUX_RWSEM_RT_H - -#ifndef _LINUX_RWSEM_H -#error "Include rwsem.h" -#endif - -/* - * RW-semaphores are a spinlock plus a reader-depth count. - * - * Note that the semantics are different from the usual - * Linux rw-sems, in PREEMPT_RT mode we do not allow - * multiple readers to hold the lock at once, we only allow - * a read-lock owner to read-lock recursively. This is - * better for latency, makes the implementation inherently - * fair and makes it simpler as well. 
- */ - -#include - -struct rw_semaphore { - struct rt_mutex lock; - int read_depth; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#define __RWSEM_INITIALIZER(name) \ - { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ - RW_DEP_MAP_INIT(name) } - -#define DECLARE_RWSEM(lockname) \ - struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) - -extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, - struct lock_class_key *key); - -#define __rt_init_rwsem(sem, name, key) \ - do { \ - rt_mutex_init(&(sem)->lock); \ - __rt_rwsem_init((sem), (name), (key));\ - } while (0) - -#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) - -# define rt_init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __rt_init_rwsem((sem), #sem, &__key); \ -} while (0) - -extern void rt_down_write(struct rw_semaphore *rwsem); -extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); -extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); -extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, - struct lockdep_map *nest); -extern void rt_down_read(struct rw_semaphore *rwsem); -extern int rt_down_write_trylock(struct rw_semaphore *rwsem); -extern int rt_down_read_trylock(struct rw_semaphore *rwsem); -extern void rt_up_read(struct rw_semaphore *rwsem); -extern void rt_up_write(struct rw_semaphore *rwsem); -extern void rt_downgrade_write(struct rw_semaphore *rwsem); - -#define init_rwsem(sem) rt_init_rwsem(sem) -#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) - -static inline void down_read(struct rw_semaphore *sem) -{ - rt_down_read(sem); -} - -static inline int down_read_trylock(struct rw_semaphore *sem) -{ - return rt_down_read_trylock(sem); -} - -static inline void down_write(struct rw_semaphore *sem) -{ - rt_down_write(sem); -} - -static inline int down_write_trylock(struct rw_semaphore *sem) -{ - return rt_down_write_trylock(sem); -} - -static inline void up_read(struct rw_semaphore *sem) -{ - rt_up_read(sem); -} - -static inline void up_write(struct rw_semaphore *sem) -{ - rt_up_write(sem); -} - -static inline void downgrade_write(struct rw_semaphore *sem) -{ - rt_downgrade_write(sem); -} - -static inline void down_read_nested(struct rw_semaphore *sem, int subclass) -{ - return rt_down_read_nested(sem, subclass); -} - -static inline void down_write_nested(struct rw_semaphore *sem, int subclass) -{ - rt_down_write_nested(sem, subclass); -} -#ifdef CONFIG_DEBUG_LOCK_ALLOC -static inline void down_write_nest_lock(struct rw_semaphore *sem, - struct rw_semaphore *nest_lock) -{ - rt_down_write_nested_lock(sem, &nest_lock->dep_map); -} - -#else - -static inline void down_write_nest_lock(struct rw_semaphore *sem, - struct rw_semaphore *nest_lock) -{ - rt_down_write_nested_lock(sem, NULL); -} -#endif -#endif diff --git a/include/linux/sched.h b/include/linux/sched.h index bcaa53f..d2112477 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -23,7 +23,6 @@ struct sched_param { #include #include -#include #include #include #include @@ -52,7 +51,6 @@ struct sched_param { #include #include #include -#include #include @@ -165,10 +163,9 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) #define TASK_DEAD 64 #define TASK_WAKEKILL 128 #define TASK_WAKING 256 -#define TASK_PARKED 512 -#define TASK_STATE_MAX 1024 +#define TASK_STATE_MAX 512 -#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" +#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" extern char 
___assert_task_state[1 - 2*!!( sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; @@ -1064,7 +1061,6 @@ struct sched_domain; #define WF_SYNC 0x01 /* waker goes to sleep after wakup */ #define WF_FORK 0x02 /* child wakeup after fork */ #define WF_MIGRATED 0x04 /* internal use, task got migrated */ -#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ #define ENQUEUE_WAKEUP 1 #define ENQUEUE_HEAD 2 @@ -1241,7 +1237,6 @@ enum perf_event_task_context { struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ - volatile long saved_state; /* saved state for "spinlock sleepers" */ void *stack; atomic_t usage; unsigned int flags; /* per process flags, defined below */ @@ -1281,12 +1276,6 @@ struct task_struct { #endif unsigned int policy; -#ifdef CONFIG_PREEMPT_RT_FULL - int migrate_disable; -# ifdef CONFIG_SCHED_DEBUG - int migrate_disable_atomic; -# endif -#endif int nr_cpus_allowed; cpumask_t cpus_allowed; @@ -1387,9 +1376,6 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; -#ifdef CONFIG_PREEMPT_RT_BASE - struct task_struct *posix_timer_list; -#endif /* process credentials */ const struct cred __rcu *real_cred; /* objective and real subjective task @@ -1421,15 +1407,10 @@ struct task_struct { /* signal handlers */ struct signal_struct *signal; struct sighand_struct *sighand; - struct sigqueue *sigqueue_cache; sigset_t blocked, real_blocked; sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ struct sigpending pending; -#ifdef CONFIG_PREEMPT_RT_FULL - /* TODO: move me into ->restart_block ? */ - struct siginfo forced_info; -#endif unsigned long sas_ss_sp; size_t sas_ss_size; @@ -1466,9 +1447,6 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif -#ifdef CONFIG_PREEMPT_RT_FULL - int pagefault_disabled; -#endif #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; @@ -1611,12 +1589,6 @@ struct task_struct { unsigned long trace; /* bitmask and counter of trace recursion */ unsigned long trace_recursion; -#ifdef CONFIG_WAKEUP_LATENCY_HIST - u64 preempt_timestamp_hist; -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - long timer_offset; -#endif -#endif #endif /* CONFIG_TRACING */ #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ struct memcg_batch_info { @@ -1633,22 +1605,11 @@ struct task_struct { #ifdef CONFIG_UPROBES struct uprobe_task *utask; #endif -#ifdef CONFIG_PREEMPT_RT_BASE - struct rcu_head put_rcu; - int softirq_nestcnt; - unsigned int softirqs_raised; -#endif -#ifdef CONFIG_PREEMPT_RT_FULL -# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 - int kmap_idx; - pte_t kmap_pte[KM_TYPE_NR]; -# endif -#endif -#ifdef CONFIG_DEBUG_PREEMPT - unsigned long preempt_disable_ip; -#endif }; +/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ +#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) + #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int node, int pages, bool migrated); extern void set_numabalancing_state(bool enabled); @@ -1661,17 +1622,6 @@ static inline void set_numabalancing_state(bool enabled) } #endif -#ifdef CONFIG_PREEMPT_RT_FULL -static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } -#else -static inline bool cur_pf_disabled(void) { return false; } -#endif - -static inline bool pagefault_disabled(void) -{ - return in_atomic() || cur_pf_disabled(); -} - /* * Priority of a process goes from 0..MAX_PRIO-1, valid RT * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH @@ -1834,15 +1784,6 @@ extern struct pid *cad_pid; extern void free_task(struct task_struct *tsk); #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) -#ifdef CONFIG_PREEMPT_RT_BASE -extern void __put_task_struct_cb(struct rcu_head *rhp); - -static inline void put_task_struct(struct task_struct *t) -{ - if (atomic_dec_and_test(&t->usage)) - call_rcu(&t->put_rcu, __put_task_struct_cb); -} -#else extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) @@ -1850,7 +1791,6 @@ static inline void put_task_struct(struct task_struct *t) if (atomic_dec_and_test(&t->usage)) __put_task_struct(t); } -#endif extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); @@ -1858,7 +1798,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, /* * Per process flags */ -#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ @@ -1876,7 +1815,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ #define PF_KSWAPD 0x00040000 /* I am kswapd */ -#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ @@ -1983,10 +1921,6 @@ extern void do_set_cpus_allowed(struct task_struct *p, extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); -int migrate_me(void); -void tell_sched_cpu_down_begin(int cpu); -void tell_sched_cpu_down_done(int cpu); - #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) @@ -1999,9 +1933,6 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, return -EINVAL; return 0; } -static inline int migrate_me(void) { return 0; } -static inline void tell_sched_cpu_down_begin(int cpu) { } -static inline void tell_sched_cpu_down_done(int cpu) { } #endif #ifdef CONFIG_NO_HZ @@ -2176,7 +2107,6 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice; #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); -extern int rt_mutex_check_prio(struct task_struct *task, int newprio); extern void rt_mutex_adjust_pi(struct task_struct *p); static inline bool tsk_is_pi_blocked(struct task_struct *tsk) 
{ @@ -2187,10 +2117,6 @@ static inline int rt_mutex_getprio(struct task_struct *p) { return p->normal_prio; } -static inline int rt_mutex_check_prio(struct task_struct *task, int newprio) -{ - return 0; -} # define rt_mutex_adjust_pi(p) do { } while (0) static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { @@ -2282,7 +2208,6 @@ extern void xtime_update(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); -extern int wake_up_lock_sleeper(struct task_struct * tsk); extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); @@ -2387,24 +2312,12 @@ extern struct mm_struct * mm_alloc(void); /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); - static inline void mmdrop(struct mm_struct * mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } -#ifdef CONFIG_PREEMPT_RT_BASE -extern void __mmdrop_delayed(struct rcu_head *rhp); -static inline void mmdrop_delayed(struct mm_struct *mm) -{ - if (atomic_dec_and_test(&mm->mm_count)) - call_rcu(&mm->delayed_drop, __mmdrop_delayed); -} -#else -# define mmdrop_delayed(mm) mmdrop(mm) -#endif - /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ @@ -2572,18 +2485,27 @@ static inline void threadgroup_change_end(struct task_struct *tsk) * * Lock the threadgroup @tsk belongs to. No new task is allowed to enter * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or - * change ->group_leader/pid. This is useful for cases where the threadgroup - * needs to stay stable across blockable operations. + * perform exec. This is useful for cases where the threadgroup needs to + * stay stable across blockable operations. * * fork and exit paths explicitly call threadgroup_change_{begin|end}() for * synchronization. While held, no new task will be added to threadgroup * and no existing live task will have its PF_EXITING set. * - * de_thread() does threadgroup_change_{begin|end}() when a non-leader - * sub-thread becomes a new leader. + * During exec, a task goes and puts its thread group through unusual + * changes. After de-threading, exclusive access is assumed to resources + * which are usually shared by tasks in the same group - e.g. sighand may + * be replaced with a new one. Also, the exec'ing task takes over group + * leader role including its pid. Exclude these changes while locked by + * grabbing cred_guard_mutex which is used to synchronize exec path. */ static inline void threadgroup_lock(struct task_struct *tsk) { + /* + * exec uses exit for de-threading nesting group_rwsem inside + * cred_guard_mutex. Grab cred_guard_mutex first. 
+ */ + mutex_lock(&tsk->signal->cred_guard_mutex); down_write(&tsk->signal->group_rwsem); } @@ -2596,6 +2518,7 @@ static inline void threadgroup_lock(struct task_struct *tsk) static inline void threadgroup_unlock(struct task_struct *tsk) { up_write(&tsk->signal->group_rwsem); + mutex_unlock(&tsk->signal->cred_guard_mutex); } #else static inline void threadgroup_change_begin(struct task_struct *tsk) {} @@ -2687,52 +2610,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } -#ifdef CONFIG_PREEMPT_LAZY -static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) -{ - set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -} - -static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) -{ - clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -} - -static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) -{ - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); -} - -static inline int need_resched_lazy(void) -{ - return test_thread_flag(TIF_NEED_RESCHED_LAZY); -} - -static inline int need_resched_now(void) -{ - return test_thread_flag(TIF_NEED_RESCHED); -} - -static inline int need_resched(void) -{ - return test_thread_flag(TIF_NEED_RESCHED) || - test_thread_flag(TIF_NEED_RESCHED_LAZY); -} -#else -static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } -static inline int need_resched_lazy(void) { return 0; } - -static inline int need_resched_now(void) -{ - return test_thread_flag(TIF_NEED_RESCHED); -} - -static inline int need_resched(void) -{ - return test_thread_flag(TIF_NEED_RESCHED); -} -#endif - static inline int restart_syscall(void) { set_tsk_thread_flag(current, TIF_SIGPENDING); @@ -2764,6 +2641,11 @@ static inline int signal_pending_state(long state, struct task_struct *p) return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } +static inline int need_resched(void) +{ + return unlikely(test_thread_flag(TIF_NEED_RESCHED)); +} + /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return @@ -2780,7 +2662,7 @@ extern int _cond_resched(void); extern int __cond_resched_lock(spinlock_t *lock); -#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL) +#ifdef CONFIG_PREEMPT_COUNT #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET #else #define PREEMPT_LOCK_OFFSET 0 @@ -2791,16 +2673,12 @@ extern int __cond_resched_lock(spinlock_t *lock); __cond_resched_lock(lock); \ }) -#ifndef CONFIG_PREEMPT_RT_FULL extern int __cond_resched_softirq(void); #define cond_resched_softirq() ({ \ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ __cond_resched_softirq(); \ }) -#else -# define cond_resched_softirq() cond_resched() -#endif /* * Does a critical section need to be broken due to another @@ -2872,26 +2750,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ -static inline int __migrate_disabled(struct task_struct *p) -{ -#ifdef CONFIG_PREEMPT_RT_FULL - return p->migrate_disable; -#else - return 0; -#endif -} - -/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ -static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) -{ -#ifdef CONFIG_PREEMPT_RT_FULL - if (p->migrate_disable) - return cpumask_of(task_cpu(p)); -#endif - - return &p->cpus_allowed; -} - extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 939ea1a..600060e2 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -30,12 +30,92 @@ #include #include +typedef struct { + unsigned sequence; + spinlock_t lock; +} seqlock_t; + +/* + * These macros triggered gcc-3.x compile-time problems. We think these are + * OK now. Be cautious. + */ +#define __SEQLOCK_UNLOCKED(lockname) \ + { 0, __SPIN_LOCK_UNLOCKED(lockname) } + +#define seqlock_init(x) \ + do { \ + (x)->sequence = 0; \ + spin_lock_init(&(x)->lock); \ + } while (0) + +#define DEFINE_SEQLOCK(x) \ + seqlock_t x = __SEQLOCK_UNLOCKED(x) + +/* Lock out other writers and update the count. + * Acts like a normal spin_lock/unlock. + * Don't need preempt_disable() because that is in the spin_lock already. + */ +static inline void write_seqlock(seqlock_t *sl) +{ + spin_lock(&sl->lock); + ++sl->sequence; + smp_wmb(); +} + +static inline void write_sequnlock(seqlock_t *sl) +{ + smp_wmb(); + sl->sequence++; + spin_unlock(&sl->lock); +} + +static inline int write_tryseqlock(seqlock_t *sl) +{ + int ret = spin_trylock(&sl->lock); + + if (ret) { + ++sl->sequence; + smp_wmb(); + } + return ret; +} + +/* Start of read calculation -- fetch last complete writer token */ +static __always_inline unsigned read_seqbegin(const seqlock_t *sl) +{ + unsigned ret; + +repeat: + ret = ACCESS_ONCE(sl->sequence); + if (unlikely(ret & 1)) { + cpu_relax(); + goto repeat; + } + smp_rmb(); + + return ret; +} + +/* + * Test if reader processed invalid data. + * + * If sequence value changed then writer changed data while in section. + */ +static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start) +{ + smp_rmb(); + + return unlikely(sl->sequence != start); +} + + /* * Version using sequence counter only. * This can be used when code has its own mutex protecting the * updating starting before the write_seqcountbeqin() and ending * after the write_seqcount_end(). */ + typedef struct seqcount { unsigned sequence; } seqcount_t; @@ -138,6 +218,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) { smp_rmb(); + return __read_seqcount_retry(s, start); } @@ -146,30 +227,18 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) * Sequence counter only version assumes that callers are using their * own mutexing. 
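
As the comment above says, a bare seqcount_t leaves writer serialization to the caller. A common shape for that contract (a sketch with hypothetical names, not code from this patch) is a spinlock on the write side and a retry loop on the read side:

	static seqcount_t stats_seq = SEQCNT_ZERO;	/* hypothetical */
	static DEFINE_SPINLOCK(stats_lock);		/* caller-provided write serialization */
	static u64 stats_a, stats_b;

	static void stats_update(u64 a, u64 b)
	{
		spin_lock(&stats_lock);
		write_seqcount_begin(&stats_seq);
		stats_a = a;
		stats_b = b;
		write_seqcount_end(&stats_seq);
		spin_unlock(&stats_lock);
	}

	static void stats_read(u64 *a, u64 *b)
	{
		unsigned seq;

		do {
			seq = read_seqcount_begin(&stats_seq);
			*a = stats_a;
			*b = stats_b;
		} while (read_seqcount_retry(&stats_seq, seq));
	}
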
*/ -static inline void __write_seqcount_begin(seqcount_t *s) +static inline void write_seqcount_begin(seqcount_t *s) { s->sequence++; smp_wmb(); } -static inline void write_seqcount_begin(seqcount_t *s) -{ - preempt_disable_rt(); - __write_seqcount_begin(s); -} - -static inline void __write_seqcount_end(seqcount_t *s) +static inline void write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; } -static inline void write_seqcount_end(seqcount_t *s) -{ - __write_seqcount_end(s); - preempt_enable_rt(); -} - /** * write_seqcount_barrier - invalidate in-progress read-side seq operations * @s: pointer to seqcount_t @@ -183,124 +252,31 @@ static inline void write_seqcount_barrier(seqcount_t *s) s->sequence+=2; } -typedef struct { - struct seqcount seqcount; - spinlock_t lock; -} seqlock_t; - -/* - * These macros triggered gcc-3.x compile-time problems. We think these are - * OK now. Be cautious. - */ -#define __SEQLOCK_UNLOCKED(lockname) \ - { \ - .seqcount = SEQCNT_ZERO, \ - .lock = __SPIN_LOCK_UNLOCKED(lockname) \ - } - -#define seqlock_init(x) \ - do { \ - seqcount_init(&(x)->seqcount); \ - spin_lock_init(&(x)->lock); \ - } while (0) - -#define DEFINE_SEQLOCK(x) \ - seqlock_t x = __SEQLOCK_UNLOCKED(x) - -/* - * Read side functions for starting and finalizing a read side section. - */ -#ifndef CONFIG_PREEMPT_RT_FULL -static inline unsigned read_seqbegin(const seqlock_t *sl) -{ - return read_seqcount_begin(&sl->seqcount); -} -#else -/* - * Starvation safe read side for RT - */ -static inline unsigned read_seqbegin(seqlock_t *sl) -{ - unsigned ret; - -repeat: - ret = ACCESS_ONCE(sl->seqcount.sequence); - if (unlikely(ret & 1)) { - /* - * Take the lock and let the writer proceed (i.e. evtl - * boost it), otherwise we could loop here forever. - */ - spin_lock(&sl->lock); - spin_unlock(&sl->lock); - goto repeat; - } - return ret; -} -#endif - -static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) -{ - return read_seqcount_retry(&sl->seqcount, start); -} - /* - * Lock out other writers and update the count. - * Acts like a normal spin_lock/unlock. - * Don't need preempt_disable() because that is in the spin_lock already. + * Possible sw/hw IRQ protected versions of the interfaces. 
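
The spinlock-backed seqlock_t restored earlier in this hunk bundles exactly that pairing, and the irqsave/irq/bh wrappers defined just below extend it to writers that must also exclude interrupt or bottom-half context. A write-side sketch under the same caveats as above (the read side is the identical read_seqbegin()/read_seqretry() retry loop):

	static DEFINE_SEQLOCK(tick_lock);	/* hypothetical */
	static u64 last_tick;

	static void tick_update(u64 now)
	{
		unsigned long flags;

		write_seqlock_irqsave(&tick_lock, flags);	/* lock + sequence bump */
		last_tick = now;
		write_sequnlock_irqrestore(&tick_lock, flags);
	}
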
*/ -static inline void write_seqlock(seqlock_t *sl) -{ - spin_lock(&sl->lock); - __write_seqcount_begin(&sl->seqcount); -} - -static inline void write_sequnlock(seqlock_t *sl) -{ - __write_seqcount_end(&sl->seqcount); - spin_unlock(&sl->lock); -} - -static inline void write_seqlock_bh(seqlock_t *sl) -{ - spin_lock_bh(&sl->lock); - __write_seqcount_begin(&sl->seqcount); -} - -static inline void write_sequnlock_bh(seqlock_t *sl) -{ - __write_seqcount_end(&sl->seqcount); - spin_unlock_bh(&sl->lock); -} - -static inline void write_seqlock_irq(seqlock_t *sl) -{ - spin_lock_irq(&sl->lock); - __write_seqcount_begin(&sl->seqcount); -} - -static inline void write_sequnlock_irq(seqlock_t *sl) -{ - __write_seqcount_end(&sl->seqcount); - spin_unlock_irq(&sl->lock); -} - -static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) -{ - unsigned long flags; - - spin_lock_irqsave(&sl->lock, flags); - __write_seqcount_begin(&sl->seqcount); - return flags; -} - #define write_seqlock_irqsave(lock, flags) \ - do { flags = __write_seqlock_irqsave(lock); } while (0) - -static inline void -write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) -{ - __write_seqcount_end(&sl->seqcount); - spin_unlock_irqrestore(&sl->lock, flags); -} + do { local_irq_save(flags); write_seqlock(lock); } while (0) +#define write_seqlock_irq(lock) \ + do { local_irq_disable(); write_seqlock(lock); } while (0) +#define write_seqlock_bh(lock) \ + do { local_bh_disable(); write_seqlock(lock); } while (0) + +#define write_sequnlock_irqrestore(lock, flags) \ + do { write_sequnlock(lock); local_irq_restore(flags); } while(0) +#define write_sequnlock_irq(lock) \ + do { write_sequnlock(lock); local_irq_enable(); } while(0) +#define write_sequnlock_bh(lock) \ + do { write_sequnlock(lock); local_bh_enable(); } while(0) + +#define read_seqbegin_irqsave(lock, flags) \ + ({ local_irq_save(flags); read_seqbegin(lock); }) + +#define read_seqretry_irqrestore(lock, iv, flags) \ + ({ \ + int ret = read_seqretry(lock, iv); \ + local_irq_restore(flags); \ + ret; \ + }) #endif /* __LINUX_SEQLOCK_H */ diff --git a/include/linux/signal.h b/include/linux/signal.h index 8942895..0a89ffc 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -226,7 +226,6 @@ static inline void init_sigpending(struct sigpending *sig) } extern void flush_sigqueue(struct sigpending *queue); -extern void flush_task_sigqueue(struct task_struct *tsk); /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3da99c9b..320e976 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -132,7 +132,6 @@ struct sk_buff_head { __u32 qlen; spinlock_t lock; - raw_spinlock_t raw_lock; }; struct sk_buff; @@ -492,7 +491,7 @@ struct sk_buff { union { __u32 mark; __u32 dropcount; - __u32 reserved_tailroom; + __u32 avail_size; }; sk_buff_data_t inner_transport_header; @@ -1009,12 +1008,6 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) __skb_queue_head_init(list); } -static inline void skb_queue_head_init_raw(struct sk_buff_head *list) -{ - raw_spin_lock_init(&list->raw_lock); - __skb_queue_head_init(list); -} - static inline void skb_queue_head_init_class(struct sk_buff_head *list, struct lock_class_key *class) { @@ -1276,13 +1269,11 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, * do not lose pfmemalloc information as the pages would not be * allocated using __GFP_MEMALLOC. 
*/ + if (page->pfmemalloc && !page->mapping) + skb->pfmemalloc = true; frag->page.p = page; frag->page_offset = off; skb_frag_size_set(frag, size); - - page = compound_head(page); - if (page->pfmemalloc && !page->mapping) - skb->pfmemalloc = true; } /** @@ -1437,10 +1428,7 @@ static inline int skb_tailroom(const struct sk_buff *skb) */ static inline int skb_availroom(const struct sk_buff *skb) { - if (skb_is_nonlinear(skb)) - return 0; - - return skb->end - skb->tail - skb->reserved_tailroom; + return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len; } /** @@ -2604,13 +2592,6 @@ static inline void nf_reset(struct sk_buff *skb) #endif } -static inline void nf_reset_trace(struct sk_buff *skb) -{ -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) - skb->nf_trace = 0; -#endif -} - /* Note: This doesn't put any conntrack and bridge info in dst. */ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) { diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index a58ad34..9db4825 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -54,7 +54,7 @@ struct kmem_cache_cpu { }; struct kmem_cache_node { - raw_spinlock_t list_lock; /* Protect partial list and nr_partial */ + spinlock_t list_lock; /* Protect partial list and nr_partial */ unsigned long nr_partial; struct list_head partial; #ifdef CONFIG_SLUB_DEBUG diff --git a/include/linux/smp.h b/include/linux/smp.h index 9e3255b..dd6f06b 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -218,9 +218,6 @@ static inline void kick_all_cpus_sync(void) { } #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() -#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) -#define put_cpu_light() migrate_enable() - /* * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 0c11a7c..7d537ce 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -254,11 +254,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) /* Include rwlock functions */ -#ifdef CONFIG_PREEMPT_RT_FULL -# include -#else -# include -#endif +#include /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -269,10 +265,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include #endif -#ifdef CONFIG_PREEMPT_RT_FULL -# include -#else /* PREEMPT_RT_FULL */ - /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -402,6 +394,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) -#endif /* !PREEMPT_RT_FULL */ - #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 3f68f50..51df117 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -191,8 +191,6 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } -#ifndef CONFIG_PREEMPT_RT_FULL -# include -#endif +#include #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h deleted file mode 100644 index 0618387..0000000 --- a/include/linux/spinlock_rt.h +++ /dev/null @@ -1,168 +0,0 @@ -#ifndef __LINUX_SPINLOCK_RT_H -#define __LINUX_SPINLOCK_RT_H - -#ifndef __LINUX_SPINLOCK_H 
-#error Do not include directly. Use spinlock.h -#endif - -#include - -extern void -__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); - -#define spin_lock_init(slock) \ -do { \ - static struct lock_class_key __key; \ - \ - rt_mutex_init(&(slock)->lock); \ - __rt_spin_lock_init(slock, #slock, &__key); \ -} while (0) - -extern void __lockfunc rt_spin_lock(spinlock_t *lock); -extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); -extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); -extern void __lockfunc rt_spin_unlock(spinlock_t *lock); -extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); -extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); -extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); -extern int __lockfunc rt_spin_trylock(spinlock_t *lock); -extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); - -/* - * lockdep-less calls, for derived types like rwlock: - * (for trylock they can use rt_mutex_trylock() directly. - */ -extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); -extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); - -#define spin_lock_local(lock) rt_spin_lock(lock) -#define spin_unlock_local(lock) rt_spin_unlock(lock) - -#define spin_lock(lock) \ - do { \ - migrate_disable(); \ - rt_spin_lock(lock); \ - } while (0) - -#define spin_lock_bh(lock) \ - do { \ - local_bh_disable(); \ - migrate_disable(); \ - rt_spin_lock(lock); \ - } while (0) - -#define spin_lock_irq(lock) spin_lock(lock) - -#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) - -#define spin_trylock(lock) \ -({ \ - int __locked; \ - migrate_disable(); \ - __locked = spin_do_trylock(lock); \ - if (!__locked) \ - migrate_enable(); \ - __locked; \ -}) - -#ifdef CONFIG_LOCKDEP -# define spin_lock_nested(lock, subclass) \ - do { \ - migrate_disable(); \ - rt_spin_lock_nested(lock, subclass); \ - } while (0) - -# define spin_lock_irqsave_nested(lock, flags, subclass) \ - do { \ - typecheck(unsigned long, flags); \ - flags = 0; \ - migrate_disable(); \ - rt_spin_lock_nested(lock, subclass); \ - } while (0) -#else -# define spin_lock_nested(lock, subclass) spin_lock(lock) - -# define spin_lock_irqsave_nested(lock, flags, subclass) \ - do { \ - typecheck(unsigned long, flags); \ - flags = 0; \ - spin_lock(lock); \ - } while (0) -#endif - -#define spin_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = 0; \ - spin_lock(lock); \ - } while (0) - -static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) -{ - unsigned long flags = 0; -#ifdef CONFIG_TRACE_IRQFLAGS - flags = rt_spin_lock_trace_flags(lock); -#else - spin_lock(lock); /* lock_local */ -#endif - return flags; -} - -/* FIXME: we need rt_spin_lock_nest_lock */ -#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) - -#define spin_unlock(lock) \ - do { \ - rt_spin_unlock(lock); \ - migrate_enable(); \ - } while (0) - -#define spin_unlock_bh(lock) \ - do { \ - rt_spin_unlock(lock); \ - migrate_enable(); \ - local_bh_enable(); \ - } while (0) - -#define spin_unlock_irq(lock) spin_unlock(lock) - -#define spin_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - (void) flags; \ - spin_unlock(lock); \ - } while (0) - -#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) -#define spin_trylock_irq(lock) spin_trylock(lock) - -#define spin_trylock_irqsave(lock, flags) \ - 
rt_spin_trylock_irqsave(lock, &(flags)) - -#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) - -#ifdef CONFIG_GENERIC_LOCKBREAK -# define spin_is_contended(lock) ((lock)->break_lock) -#else -# define spin_is_contended(lock) (((void)(lock), 0)) -#endif - -static inline int spin_can_lock(spinlock_t *lock) -{ - return !rt_mutex_is_locked(&lock->lock); -} - -static inline int spin_is_locked(spinlock_t *lock) -{ - return rt_mutex_is_locked(&lock->lock); -} - -static inline void assert_spin_locked(spinlock_t *lock) -{ - BUG_ON(!spin_is_locked(lock)); -} - -#define atomic_dec_and_lock(atomic, lock) \ - atomic_dec_and_spin_lock(atomic, lock) - -#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 10bac71..73548eb 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,15 +9,80 @@ * Released under the General Public License (GPL). */ -#include +#if defined(CONFIG_SMP) +# include +#else +# include +#endif + +#include + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif -#ifndef CONFIG_PREEMPT_RT_FULL -# include -# include +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, #else -# include -# include -# include +# define SPIN_DEBUG_INIT(lockname) #endif +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; +#endif + }; +} spinlock_t; + +#define __SPIN_LOCK_INITIALIZER(lockname) \ + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + +#define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) + +#include + #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h deleted file mode 100644 index f1dac1f..0000000 --- a/include/linux/spinlock_types_nort.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __LINUX_SPINLOCK_TYPES_NORT_H -#define __LINUX_SPINLOCK_TYPES_NORT_H - -#ifndef __LINUX_SPINLOCK_TYPES_H -#error "Do not include directly. 
Include spinlock_types.h instead" -#endif - -/* - * The non RT version maps spinlocks to raw_spinlocks - */ -typedef struct spinlock { - union { - struct raw_spinlock rlock; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) - struct { - u8 __padding[LOCK_PADSIZE]; - struct lockdep_map dep_map; - }; -#endif - }; -} spinlock_t; - -#define __SPIN_LOCK_INITIALIZER(lockname) \ - { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } - -#define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) - -#endif diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h deleted file mode 100644 index edffc4d..0000000 --- a/include/linux/spinlock_types_raw.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef __LINUX_SPINLOCK_TYPES_RAW_H -#define __LINUX_SPINLOCK_TYPES_RAW_H - -#if defined(CONFIG_SMP) -# include -#else -# include -#endif - -#include - -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK - unsigned int break_lock; -#endif -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPINLOCK_OWNER_INIT ((void *)-1L) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -#else -# define SPIN_DEP_MAP_INIT(lockname) -#endif - -#ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ - .owner = SPINLOCK_OWNER_INIT, -#else -# define SPIN_DEBUG_INIT(lockname) -#endif - -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - SPIN_DEP_MAP_INIT(lockname) } - -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) - -#endif diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h deleted file mode 100644 index 9fd4319..0000000 --- a/include/linux/spinlock_types_rt.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef __LINUX_SPINLOCK_TYPES_RT_H -#define __LINUX_SPINLOCK_TYPES_RT_H - -#ifndef __LINUX_SPINLOCK_TYPES_H -#error "Do not include directly. 
Include spinlock_types.h instead" -#endif - -#include - -/* - * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: - */ -typedef struct spinlock { - struct rt_mutex lock; - unsigned int break_lock; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} spinlock_t; - -#ifdef CONFIG_DEBUG_RT_MUTEXES -# define __RT_SPIN_INITIALIZER(name) \ - { \ - .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ - .save_state = 1, \ - .file = __FILE__, \ - .line = __LINE__ , \ - } -#else -# define __RT_SPIN_INITIALIZER(name) \ - { \ - .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ - .save_state = 1, \ - } -#endif - -/* -.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) -*/ - -#define __SPIN_LOCK_UNLOCKED(name) \ - { .lock = __RT_SPIN_INITIALIZER(name.lock), \ - SPIN_DEP_MAP_INIT(name) } - -#define __DEFINE_SPINLOCK(name) \ - spinlock_t name = __SPIN_LOCK_UNLOCKED(name) - -#define DEFINE_SPINLOCK(name) \ - spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) - -#endif diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index e2369c1..a26e2fb 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -16,10 +16,7 @@ * In the debug case, 1 means unlocked, 0 means locked. (the values * are inverted, to catch initialization bugs) * - * No atomicity anywhere, we are on UP. However, we still need - * the compiler barriers, because we do not want the compiler to - * move potentially faulting instructions (notably user accesses) - * into the locked sequence, resulting in non-atomic execution. + * No atomicity anywhere, we are on UP. */ #ifdef CONFIG_DEBUG_SPINLOCK @@ -28,7 +25,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; - barrier(); } static inline void @@ -36,7 +32,6 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; - barrier(); } static inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -44,34 +39,32 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) char oldval = lock->slock; lock->slock = 0; - barrier(); return oldval > 0; } static inline void arch_spin_unlock(arch_spinlock_t *lock) { - barrier(); lock->slock = 1; } /* * Read-write spinlocks. No debug version. 
*/ -#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) -#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) -#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) -#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) -#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) -#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_read_lock(lock) do { (void)(lock); } while (0) +#define arch_write_lock(lock) do { (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ (void)(lock); 1; }) +#define arch_read_unlock(lock) do { (void)(lock); } while (0) +#define arch_write_unlock(lock) do { (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) /* for sched.c and kernel_lock.c: */ -# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) -# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) -# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) -# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) +# define arch_spin_lock(lock) do { (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ #define arch_spin_is_contended(lock) (((void)(lock), 0)) diff --git a/include/linux/srcu.h b/include/linux/srcu.h index fe9efd4..6eb691b 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp); void process_srcu(struct work_struct *work); -#define __SRCU_STRUCT_INIT(name, pcpu_name) \ +#define __SRCU_STRUCT_INIT(name) \ { \ .completed = -300, \ - .per_cpu_ref = &pcpu_name, \ + .per_cpu_ref = &name##_srcu_array, \ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ .running = false, \ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ @@ -102,13 +102,13 @@ void process_srcu(struct work_struct *work); * define and init a srcu struct at build time. * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it. 
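
The DEFINE_SRCU()/DEFINE_STATIC_SRCU() forms this hunk reverts to, defined just below, still honor the rule in the comment above: the domain is fully initialized at build time, so init_srcu_struct() and cleanup_srcu_struct() must not be called on it. A usage sketch with a hypothetical domain name:

	DEFINE_STATIC_SRCU(cfg_srcu);		/* hypothetical, build-time initialized */

	static void cfg_reader(void)
	{
		int idx;

		idx = srcu_read_lock(&cfg_srcu);
		/* ... dereference the SRCU-protected data ... */
		srcu_read_unlock(&cfg_srcu, idx);
	}

	static void cfg_retire_old(void)
	{
		synchronize_srcu(&cfg_srcu);	/* wait out pre-existing readers */
		/* ... now safe to free the old data ... */
	}
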
*/ -#define _DEFINE_SRCU(name, mod) \ +#define DEFINE_SRCU(name) \ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ - mod struct srcu_struct name = \ - __SRCU_STRUCT_INIT(name, name##_srcu_array); + struct srcu_struct name = __SRCU_STRUCT_INIT(name); -#define DEFINE_SRCU(name) _DEFINE_SRCU(name, ) -#define DEFINE_STATIC_SRCU(name) _DEFINE_SRCU(name, static) +#define DEFINE_STATIC_SRCU(name) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ + static struct srcu_struct name = __SRCU_STRUCT_INIT(name); /** * call_srcu() - Queue a callback for invocation after an SRCU grace period diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h index 6fcfe99..9e492be 100644 --- a/include/linux/ssb/ssb_driver_chipcommon.h +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -219,7 +219,6 @@ #define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ #define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ #define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 -#define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400 #define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ #define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ #define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ @@ -668,6 +667,5 @@ enum ssb_pmu_ldo_volt_id { void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, enum ssb_pmu_ldo_volt_id id, u32 voltage); void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); -void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid); #endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index b15655f..14a8ff2 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -25,7 +25,6 @@ #include #include #include -#include #include /* For the /proc/sys support */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 90a8dfa..fe82022 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -44,7 +44,7 @@ /* Adding event notification support elements */ #define THERMAL_GENL_FAMILY_NAME "thermal_event" #define THERMAL_GENL_VERSION 0x01 -#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" +#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group" /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 2adf9c3..e1d558e 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -20,8 +20,6 @@ struct timekeeper { u32 shift; /* Number of clock cycles in one NTP interval. */ cycle_t cycle_interval; - /* Last cycle value (also stored in clock->cycle_last) */ - cycle_t cycle_last; /* Number of clock shifted nano seconds in one NTP interval. */ u64 xtime_interval; /* shifted nano seconds left over when rounding cycle_interval */ @@ -64,6 +62,8 @@ struct timekeeper { ktime_t offs_boot; /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. 
*/ struct timespec raw_time; + /* Seqlock for all timekeeper values */ + seqlock_t lock; }; static inline struct timespec tk_xtime(struct timekeeper *tk) diff --git a/include/linux/timer.h b/include/linux/timer.h index 5fcd72c..8c5a197 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) +#ifdef CONFIG_SMP extern int del_timer_sync(struct timer_list *timer); #else # define del_timer_sync(t) del_timer(t) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 44b3751..5ca0951 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -6,37 +6,38 @@ /* * These routines enable/disable the pagefault handler in that - * it will not take any MM locks and go straight to the fixup table. + * it will not take any locks and go straight to the fixup table. + * + * They have great resemblance to the preempt_disable/enable calls + * and in fact they are identical; this is because currently there is + * no other way to make the pagefault handlers do this. So we do + * disable preemption but we don't necessarily care about that. */ -static inline void raw_pagefault_disable(void) +static inline void pagefault_disable(void) { inc_preempt_count(); + /* + * make sure to have issued the store before a pagefault + * can hit. + */ barrier(); } -static inline void raw_pagefault_enable(void) +static inline void pagefault_enable(void) { + /* + * make sure to issue those last loads/stores before enabling + * the pagefault handler again. + */ barrier(); dec_preempt_count(); + /* + * make sure we do.. + */ barrier(); preempt_check_resched(); } -#ifndef CONFIG_PREEMPT_RT_FULL -static inline void pagefault_disable(void) -{ - raw_pagefault_disable(); -} - -static inline void pagefault_enable(void) -{ - raw_pagefault_enable(); -} -#else -extern void pagefault_disable(void); -extern void pagefault_enable(void); -#endif - #ifndef ARCH_HAS_NOCACHE_UACCESS static inline unsigned long __copy_from_user_inatomic_nocache(void *to, @@ -76,9 +77,9 @@ static inline unsigned long __copy_from_user_nocache(void *to, mm_segment_t old_fs = get_fs(); \ \ set_fs(KERNEL_DS); \ - raw_pagefault_disable(); \ + pagefault_disable(); \ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ - raw_pagefault_enable(); \ + pagefault_enable(); \ set_fs(old_fs); \ ret; \ }) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index ef03e33..4f628a6 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -26,7 +26,6 @@ #include #include -#include struct vm_area_struct; struct mm_struct; diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index cc25b70..3b8f9d4 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -127,7 +127,6 @@ struct cdc_ncm_ctx { u16 connected; }; -extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign); diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 1819b59..ef9be7e 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -66,7 +66,6 @@ * port. 
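
Stepping back to the uaccess.h hunk above: the pagefault_disable()/pagefault_enable() pair is what makes the *_inatomic user-copy helpers usable where sleeping is forbidden, since a fault then goes straight to the exception fixup table instead of into the mm paths. A sketch of the usual pattern (hypothetical function; the caller is assumed to have validated uaddr with access_ok()):

	static int peek_user_word(unsigned long __user *uaddr, unsigned long *val)
	{
		unsigned long ret;

		pagefault_disable();	/* faults take the fixup path, never sleep */
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();

		return ret ? -EFAULT : 0;	/* nonzero ret = bytes not copied */
	}
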
* @flags: usb serial port flags * @write_wait: a wait_queue_head_t used by the port. - * @delta_msr_wait: modem-status-change wait queue * @work: work queue entry for the line discipline waking up. * @throttled: nonzero if the read urb is inactive to throttle the device * @throttle_req: nonzero if the tty wants to throttle us @@ -113,7 +112,6 @@ struct usb_serial_port { unsigned long flags; wait_queue_head_t write_wait; - wait_queue_head_t delta_msr_wait; struct work_struct work; char throttled; char throttle_req; diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 5209cfe..b9bd2e6 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -26,8 +26,6 @@ struct user_namespace { kuid_t owner; kgid_t group; unsigned int proc_inum; - bool may_mount_sysfs; - bool may_mount_proc; }; extern struct user_namespace init_user_ns; @@ -84,6 +82,4 @@ static inline void put_user_ns(struct user_namespace *ns) #endif -void update_mnt_policy(struct user_namespace *userns); - #endif /* _LINUX_USER_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 839806b..a13291f 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -29,9 +29,7 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); static inline void __count_vm_event(enum vm_event_item item) { - preempt_disable_rt(); __this_cpu_inc(vm_event_states.event[item]); - preempt_enable_rt(); } static inline void count_vm_event(enum vm_event_item item) @@ -41,9 +39,7 @@ static inline void count_vm_event(enum vm_event_item item) static inline void __count_vm_events(enum vm_event_item item, long delta) { - preempt_disable_rt(); __this_cpu_add(vm_event_states.event[item], delta); - preempt_enable_rt(); } static inline void count_vm_events(enum vm_event_item item, long delta) diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index e8d6571..50ae7d0 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -47,7 +47,6 @@ int con_set_cmap(unsigned char __user *cmap); int con_get_cmap(unsigned char __user *cmap); void scrollback(struct vc_data *vc, int lines); void scrollfront(struct vc_data *vc, int lines); -void clear_buffer_attributes(struct vc_data *vc); void update_region(struct vc_data *vc, unsigned long start, int count); void redraw_screen(struct vc_data *vc, int is_switch); #define update_screen(x) redraw_screen(x, 0) @@ -131,8 +130,6 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new); int vt_waitactive(int n); void change_console(struct vc_data *new_vc); void reset_vc(struct vc_data *vc); -extern int do_unbind_con_driver(const struct consw *csw, int first, int last, - int deflt); extern int unbind_con_driver(const struct consw *csw, int first, int last, int deflt); int vty_init(const struct file_operations *console_fops); diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h deleted file mode 100644 index 4efba4d..0000000 --- a/include/linux/wait-simple.h +++ /dev/null @@ -1,199 +0,0 @@ -#ifndef _LINUX_WAIT_SIMPLE_H -#define _LINUX_WAIT_SIMPLE_H - -#include -#include - -#include - -struct swaiter { - struct task_struct *task; - struct list_head node; -}; - -#define DEFINE_SWAITER(name) \ - struct swaiter name = { \ - .task = current, \ - .node = LIST_HEAD_INIT((name).node), \ - } - -struct swait_head { - raw_spinlock_t lock; - struct list_head list; -}; - -#define SWAIT_HEAD_INITIALIZER(name) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ - .list = LIST_HEAD_INIT((name).list), \ - } - -#define 
DEFINE_SWAIT_HEAD(name) \ - struct swait_head name = SWAIT_HEAD_INITIALIZER(name) - -extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); - -#define init_swait_head(swh) \ - do { \ - static struct lock_class_key __key; \ - \ - __init_swait_head((swh), &__key); \ - } while (0) - -/* - * Waiter functions - */ -extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w); -extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); -extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); -extern void swait_finish(struct swait_head *head, struct swaiter *w); - -/* - * Wakeup functions - */ -extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num); -extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num); - -#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1) -#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1) -#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0) -#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0) - -/* - * Event API - */ -#define __swait_event(wq, condition) \ -do { \ - DEFINE_SWAITER(__wait); \ - \ - for (;;) { \ - swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ - if (condition) \ - break; \ - schedule(); \ - } \ - swait_finish(&wq, &__wait); \ -} while (0) - -/** - * swait_event - sleep until a condition gets true - * @wq: the waitqueue to wait on - * @condition: a C expression for the event to wait for - * - * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the - * @condition evaluates to true. The @condition is checked each time - * the waitqueue @wq is woken up. - * - * wake_up() has to be called after changing any variable that could - * change the result of the wait condition. - */ -#define swait_event(wq, condition) \ -do { \ - if (condition) \ - break; \ - __swait_event(wq, condition); \ -} while (0) - -#define __swait_event_interruptible(wq, condition, ret) \ -do { \ - DEFINE_SWAITER(__wait); \ - \ - for (;;) { \ - swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ - if (condition) \ - break; \ - if (signal_pending(current)) { \ - ret = -ERESTARTSYS; \ - break; \ - } \ - schedule(); \ - } \ - swait_finish(&wq, &__wait); \ -} while (0) - -#define __swait_event_interruptible_timeout(wq, condition, ret) \ -do { \ - DEFINE_SWAITER(__wait); \ - \ - for (;;) { \ - swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ - if (condition) \ - break; \ - if (signal_pending(current)) { \ - ret = -ERESTARTSYS; \ - break; \ - } \ - ret = schedule_timeout(ret); \ - if (!ret) \ - break; \ - } \ - swait_finish(&wq, &__wait); \ -} while (0) - -/** - * swait_event_interruptible - sleep until a condition gets true - * @wq: the waitqueue to wait on - * @condition: a C expression for the event to wait for - * - * The process is put to sleep (TASK_INTERRUPTIBLE) until the - * @condition evaluates to true. The @condition is checked each time - * the waitqueue @wq is woken up. - * - * wake_up() has to be called after changing any variable that could - * change the result of the wait condition. 
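
The swait_event() family in this deleted wait-simple.h follows the classic waitqueue contract restated above: the condition is re-checked on every wakeup, and the waker must update it before calling a wake function. A minimal producer/consumer sketch against this (now removed) API, with hypothetical names; a real user would also need proper locking or memory ordering around the condition:

	static DEFINE_SWAIT_HEAD(req_wait);	/* hypothetical */
	static int req_ready;

	static void consumer(void)
	{
		swait_event(req_wait, req_ready);	/* sleeps TASK_UNINTERRUPTIBLE */
		/* ... handle the request ... */
	}

	static void producer(void)
	{
		req_ready = 1;			/* update the condition first ... */
		swait_wake(&req_wait);		/* ... then wake a single waiter */
	}
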
- */ -#define swait_event_interruptible(wq, condition) \ -({ \ - int __ret = 0; \ - if (!(condition)) \ - __swait_event_interruptible(wq, condition, __ret); \ - __ret; \ -}) - -#define swait_event_interruptible_timeout(wq, condition, timeout) \ -({ \ - int __ret = timeout; \ - if (!(condition)) \ - __swait_event_interruptible_timeout(wq, condition, __ret); \ - __ret; \ -}) - -#define __swait_event_timeout(wq, condition, ret) \ -do { \ - DEFINE_SWAITER(__wait); \ - \ - for (;;) { \ - swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ - if (condition) \ - break; \ - ret = schedule_timeout(ret); \ - if (!ret) \ - break; \ - } \ - swait_finish(&wq, &__wait); \ -} while (0) - -/** - * swait_event_timeout - sleep until a condition gets true or a timeout elapses - * @wq: the waitqueue to wait on - * @condition: a C expression for the event to wait for - * @timeout: timeout, in jiffies - * - * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the - * @condition evaluates to true. The @condition is checked each time - * the waitqueue @wq is woken up. - * - * wake_up() has to be called after changing any variable that could - * change the result of the wait condition. - * - * The function returns 0 if the @timeout elapsed, and the remaining - * jiffies if the condition evaluated to true before the timeout elapsed. - */ -#define swait_event_timeout(wq, condition, timeout) \ -({ \ - long __ret = timeout; \ - if (!(condition)) \ - __swait_event_timeout(wq, condition, __ret); \ - __ret; \ -}) - -#endif diff --git a/include/net/dst.h b/include/net/dst.h index 446d7b1..9a78810 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -36,9 +36,13 @@ struct dst_entry { struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; - unsigned long expires; + union { + unsigned long expires; + /* point to where the dst_entry copied from */ + struct dst_entry *from; + }; struct dst_entry *path; - struct dst_entry *from; + void *__pad0; #ifdef CONFIG_XFRM struct xfrm_state *xfrm; #else @@ -392,7 +396,7 @@ static inline void dst_confirm(struct dst_entry *dst) static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, struct sk_buff *skb) { - struct hh_cache *hh; + const struct hh_cache *hh; if (dst->pending_confirm) { unsigned long now = jiffies; diff --git a/include/net/icmp.h b/include/net/icmp.h index 081439f..9ac2524 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -41,7 +41,6 @@ struct net; extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); extern int icmp_rcv(struct sk_buff *skb); -extern void icmp_err(struct sk_buff *, u32 info); extern int icmp_init(void); extern void icmp_out_count(struct net *net, unsigned char type); diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index f071f50..9e34c87 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -28,16 +28,16 @@ struct inet_hashinfo; +/* I have no idea if this is a good hash for v6 or not. 
-DaveM */ static inline unsigned int inet6_ehashfn(struct net *net, const struct in6_addr *laddr, const u16 lport, const struct in6_addr *faddr, const __be16 fport) { - u32 ports = (((u32)lport) << 16) | (__force u32)fport; + u32 ports = (lport ^ (__force u16)fport); return jhash_3words((__force u32)laddr->s6_addr32[3], - ipv6_addr_jhash(faddr), - ports, - inet_ehash_secret + net_hash_mix(net)); + (__force u32)faddr->s6_addr32[3], + ports, inet_ehash_secret + net_hash_mix(net)); } static inline int inet6_sk_ehashfn(const struct sock *sk) diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 2ab2e43..32786a0 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -35,13 +35,6 @@ struct inet_frag_queue { #define INETFRAGS_HASHSZ 64 -/* averaged: - * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / - * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or - * struct frag_queue)) - */ -#define INETFRAGS_MAXDEPTH 128 - struct inet_frags { struct hlist_head hash[INETFRAGS_HASHSZ]; rwlock_t lock; @@ -72,8 +65,6 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force); struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, struct inet_frags *f, void *key, unsigned int hash) __releases(&f->lock); -void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, - const char *prefix); static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) { diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 7235ae7..a4196cb 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -203,7 +203,6 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to, extern int inet_sk_rebuild_header(struct sock *sk); extern u32 inet_ehash_secret; -extern u32 ipv6_hash_secret; extern void build_ehash_secret(void); static inline unsigned int inet_ehashfn(struct net *net, diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 28d27a6..fdc48a9 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -166,35 +166,50 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) static inline void rt6_clean_expires(struct rt6_info *rt) { + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) + dst_release(rt->dst.from); + rt->rt6i_flags &= ~RTF_EXPIRES; + rt->dst.from = NULL; } static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires) { - rt->dst.expires = expires; + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) + dst_release(rt->dst.from); + rt->rt6i_flags |= RTF_EXPIRES; + rt->dst.expires = expires; } -static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) +static inline void rt6_update_expires(struct rt6_info *rt, int timeout) { - struct rt6_info *rt; - - for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); - rt = (struct rt6_info *)rt->dst.from); - if (rt && rt != rt0) - rt0->dst.expires = rt->dst.expires; - - dst_set_expires(&rt0->dst, timeout); - rt0->rt6i_flags |= RTF_EXPIRES; + if (!(rt->rt6i_flags & RTF_EXPIRES)) { + if (rt->dst.from) + dst_release(rt->dst.from); + /* dst_set_expires relies on expires == 0 + * if it has not been set previously. 
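The include/net/dst.h hunk earlier re-merges expires and from into a union, which is why the ip6_fib.h helpers above take and drop references so carefully: RTF_EXPIRES in rt6i_flags becomes the discriminant for which union member is live. A sketch of the invariant (demo helper, not from this patch):

    /* With the union restored:
     *   RTF_EXPIRES set   -> rt->dst.expires holds a jiffies deadline
     *   RTF_EXPIRES clear -> rt->dst.from may hold a referenced
     *                        parent entry (or NULL)
     */
    static bool rt6_demo_expired(const struct rt6_info *rt)
    {
            return (rt->rt6i_flags & RTF_EXPIRES) &&
                   time_after(jiffies, rt->dst.expires);
    }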
+ */ + rt->dst.expires = 0; + } + + dst_set_expires(&rt->dst, timeout); + rt->rt6i_flags |= RTF_EXPIRES; } static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) { struct dst_entry *new = (struct dst_entry *) from; + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) { + if (new == rt->dst.from) + return; + dst_release(rt->dst.from); + } + rt->rt6i_flags &= ~RTF_EXPIRES; - dst_hold(new); rt->dst.from = new; + dst_hold(new); } static inline void ip6_rt_put(struct rt6_info *rt) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index e49db91..9497be1 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -152,16 +152,18 @@ struct fib_result_nl { }; #ifdef CONFIG_IP_ROUTE_MULTIPATH + #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) + +#define FIB_TABLE_HASHSZ 2 + #else /* CONFIG_IP_ROUTE_MULTIPATH */ + #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) -#endif /* CONFIG_IP_ROUTE_MULTIPATH */ -#ifdef CONFIG_IP_MULTIPLE_TABLES #define FIB_TABLE_HASHSZ 256 -#else -#define FIB_TABLE_HASHSZ 2 -#endif + +#endif /* CONFIG_IP_ROUTE_MULTIPATH */ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 5a67919..5af66b2 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -15,7 +15,6 @@ #include #include -#include #include #include #include @@ -474,17 +473,6 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a) #endif } -/* more secured version of ipv6_addr_hash() */ -static inline u32 ipv6_addr_jhash(const struct in6_addr *a) -{ - u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1]; - - return jhash_3words(v, - (__force u32)a->s6_addr32[2], - (__force u32)a->s6_addr32[3], - ipv6_hash_secret); -} - static inline bool ipv6_addr_loopback(const struct in6_addr *a) { return (a->s6_addr32[0] | a->s6_addr32[1] | diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f28b70c..0dab173 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -334,7 +334,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) } #endif -static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) +static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) { unsigned int seq; int hh_len; @@ -389,7 +389,7 @@ struct neighbour_cb { #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) -static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, +static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, const struct net_device *dev) { unsigned int seq; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 17e815d..2ae2b83 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -56,7 +56,6 @@ struct netns_ipv4 { int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; - int sysctl_icmp_echo_sysrq; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; diff --git a/include/net/scm.h b/include/net/scm.h index b117081..975cca0 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm, scm->pid = get_pid(pid); scm->cred = cred ? get_cred(cred) : NULL; scm->creds.pid = pid_vnr(pid); - scm->creds.uid = cred ? cred->uid : INVALID_UID; - scm->creds.gid = cred ? cred->gid : INVALID_GID; + scm->creds.uid = cred ? cred->euid : INVALID_UID; + scm->creds.gid = cred ? 
cred->egid : INVALID_GID; } static __inline__ void scm_destroy_cred(struct scm_cookie *scm) diff --git a/include/net/sock.h b/include/net/sock.h index 25afaa0..182ca99 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1037,7 +1037,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk) sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); } -static inline void sk_refcnt_debug_release(const struct sock *sk) +inline void sk_refcnt_debug_release(const struct sock *sk) { if (atomic_read(&sk->sk_refcnt) != 1) printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", diff --git a/include/net/tcp.h b/include/net/tcp.h index 4da2167..aed42c7 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1045,7 +1045,6 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) if (sysctl_tcp_low_latency || !tp->ucopy.task) return false; - skb_dst_force(skb); __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; if (tp->ucopy.memory > sk->sk_rcvbuf) { diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index dfb42ca..f841ba4 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h @@ -1787,7 +1787,6 @@ struct snd_emu10k1 { unsigned int next_free_voice; const struct firmware *firmware; - const struct firmware *dock_fw; #ifdef CONFIG_PM_SLEEP unsigned int *saved_ptr; diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h deleted file mode 100644 index 28646db..0000000 --- a/include/trace/events/hist.h +++ /dev/null @@ -1,69 +0,0 @@ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM hist - -#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_HIST_H - -#include "latency_hist.h" -#include - -#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) -#define trace_preemptirqsoff_hist(a,b) -#else -TRACE_EVENT(preemptirqsoff_hist, - - TP_PROTO(int reason, int starthist), - - TP_ARGS(reason, starthist), - - TP_STRUCT__entry( - __field(int, reason ) - __field(int, starthist ) - ), - - TP_fast_assign( - __entry->reason = reason; - __entry->starthist = starthist; - ), - - TP_printk("reason=%s starthist=%s", getaction(__entry->reason), - __entry->starthist ? "start" : "stop") -); -#endif - -#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST -#define trace_hrtimer_interrupt(a,b,c,d) -#else -TRACE_EVENT(hrtimer_interrupt, - - TP_PROTO(int cpu, long long offset, struct task_struct *curr, struct task_struct *task), - - TP_ARGS(cpu, offset, curr, task), - - TP_STRUCT__entry( - __field(int, cpu ) - __field(long long, offset ) - __array(char, ccomm, TASK_COMM_LEN) - __field(int, cprio ) - __array(char, tcomm, TASK_COMM_LEN) - __field(int, tprio ) - ), - - TP_fast_assign( - __entry->cpu = cpu; - __entry->offset = offset; - memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); - __entry->cprio = curr->prio; - memcpy(__entry->tcomm, task != NULL ? task->comm : "", task != NULL ? TASK_COMM_LEN : 7); - __entry->tprio = task != NULL ? 
task->prio : -1; - ), - - TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", - __entry->cpu, __entry->offset, __entry->ccomm, __entry->cprio, __entry->tcomm, __entry->tprio) -); -#endif - -#endif /* _TRACE_HIST_H */ - -/* This part must be outside protection */ -#include diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h deleted file mode 100644 index 7f70794..0000000 --- a/include/trace/events/latency_hist.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _LATENCY_HIST_H -#define _LATENCY_HIST_H - -enum hist_action { - IRQS_ON, - PREEMPT_ON, - TRACE_STOP, - IRQS_OFF, - PREEMPT_OFF, - TRACE_START, -}; - -static char *actions[] = { - "IRQS_ON", - "PREEMPT_ON", - "TRACE_STOP", - "IRQS_OFF", - "PREEMPT_OFF", - "TRACE_START", -}; - -static inline char *getaction(int action) -{ - if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) - return(actions[action]); - return("unknown"); -} - -#endif /* _LATENCY_HIST_H */ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index e5586ca..5a8671e 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch, __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, { 16, "Z" }, { 32, "X" }, { 64, "x" }, - { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R", + { 128, "W" }) : "R", __entry->prev_state & TASK_STATE_MAX ? "+" : "", __entry->next_comm, __entry->next_pid, __entry->next_prio) ); diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h index 8cc2850..6fae30f 100644 --- a/include/uapi/asm-generic/signal.h +++ b/include/uapi/asm-generic/signal.h @@ -93,10 +93,6 @@ typedef unsigned long old_sigset_t; #include -#ifdef SA_RESTORER -#define __ARCH_HAS_SA_RESTORER -#endif - struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index afafd70..93f5fa9 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h @@ -33,11 +33,9 @@ enum { PACKET_DIAG_TX_RING, PACKET_DIAG_FANOUT, - __PACKET_DIAG_MAX, + PACKET_DIAG_MAX, }; -#define PACKET_DIAG_MAX (__PACKET_DIAG_MAX - 1) - struct packet_diag_info { __u32 pdi_index; __u32 pdi_version; diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index c019b24..2c6c85f 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -50,11 +50,8 @@ #define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */ #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ -#define PORT_BRCM_TRUMANAGE 25 -#define PORT_ALTR_16550_F32 26 /* Altera 16550 UART with 32 FIFOs */ -#define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */ -#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */ -#define PORT_MAX_8250 28 /* max port ID */ +#define PORT_BRCM_TRUMANAGE 24 +#define PORT_MAX_8250 25 /* max port ID */ /* * ARM specific type numbers. 
These are not currently guaranteed diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h index b9e2a6a..b8a2494 100644 --- a/include/uapi/linux/unix_diag.h +++ b/include/uapi/linux/unix_diag.h @@ -39,11 +39,9 @@ enum { UNIX_DIAG_MEMINFO, UNIX_DIAG_SHUTDOWN, - __UNIX_DIAG_MAX, + UNIX_DIAG_MAX, }; -#define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1) - struct unix_diag_vfs { __u32 udiag_vfs_ino; __u32 udiag_vfs_dev; diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h index d2314be..ac90037 100644 --- a/include/uapi/linux/usb/audio.h +++ b/include/uapi/linux/usb/audio.h @@ -384,16 +384,14 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de int protocol) { __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); - return *(uac_processing_unit_bmControls(desc, protocol) - + control_size); + return desc->baSourceID[desc->bNrInPins + control_size]; } static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, int protocol) { __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); - return uac_processing_unit_bmControls(desc, protocol) - + control_size + 1; + return &desc->baSourceID[desc->bNrInPins + control_size + 1]; } /* 4.5.2 Class-Specific AS Interface Descriptor */ diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h index 5f0e234..28447f1 100644 --- a/include/video/atmel_lcdc.h +++ b/include/video/atmel_lcdc.h @@ -62,7 +62,6 @@ struct atmel_lcdfb_info { void (*atmel_lcdfb_power_control)(int on); struct fb_monspecs *default_monspecs; u32 pseudo_palette[16]; - bool have_intensity_bit; }; #define ATMEL_LCDC_DMABADDR1 0x00 diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index ffd4652..01c3d62 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -138,21 +138,11 @@ struct blkif_request_discard { uint8_t _pad3; } __attribute__((__packed__)); -struct blkif_request_other { - uint8_t _pad1; - blkif_vdev_t _pad2; /* only for read/write requests */ -#ifdef CONFIG_X86_64 - uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ -#endif - uint64_t id; /* private guest value, echoed in resp */ -} __attribute__((__packed__)); - struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; - struct blkif_request_other other; } u; } __attribute__((__packed__)); diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h index 7000bb1..1844d31 100644 --- a/include/xen/interface/physdev.h +++ b/include/xen/interface/physdev.h @@ -251,12 +251,6 @@ struct physdev_pci_device_add { #define PHYSDEVOP_pci_device_remove 26 #define PHYSDEVOP_restore_msi_ext 27 -/* - * Dom0 should use these two to announce MMIO resources assigned to - * MSI-X capable devices won't (prepare) or may (release) change. 
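packet_diag.h and unix_diag.h above back out the usual netlink header convention in which the enum ends in a private sentinel and the public MAX is derived from it. An illustrative enum (names invented here) showing what the sentinel buys:

    enum {
            DEMO_UNSPEC,
            DEMO_ATTR_A,
            DEMO_ATTR_B,
            __DEMO_MAX                  /* private: one past the last attr */
    };
    #define DEMO_MAX (__DEMO_MAX - 1)   /* public: the last valid attr */

    /* Parse tables are declared as
     *         struct nlattr *tb[DEMO_MAX + 1];
     * and indexed by attribute type, so MAX must name the highest
     * valid attribute. With the sentinel, appending DEMO_ATTR_C
     * keeps that property automatically. */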
- */ -#define PHYSDEVOP_prepare_msix 30 -#define PHYSDEVOP_release_msix 31 struct physdev_pci_device { /* IN */ uint16_t seg; diff --git a/init/Kconfig b/init/Kconfig index d0590c7..be8b7f5 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -579,7 +579,7 @@ config RCU_FANOUT_EXACT config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" - depends on NO_HZ && SMP && !PREEMPT_RT_FULL + depends on NO_HZ && SMP default n help This option causes RCU to attempt to accelerate grace periods in @@ -946,7 +946,6 @@ config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on EXPERIMENTAL depends on CGROUP_SCHED - depends on !PREEMPT_RT_FULL default n help This feature lets you explicitly allocate real CPU bandwidth @@ -1505,7 +1504,6 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" - depends on !PREEMPT_RT_FULL help SLOB replaces the stock allocator with a drastically simpler allocator. SLOB is generally more space efficient but diff --git a/init/Makefile b/init/Makefile index 88cf473..7bc47ee 100644 --- a/init/Makefile +++ b/init/Makefile @@ -33,4 +33,4 @@ silent_chk_compile.h = : include/generated/compile.h: FORCE @$($(quiet)chk_compile.h) $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" diff --git a/init/main.c b/init/main.c index 2d80a11..cee4b5c 100644 --- a/init/main.c +++ b/init/main.c @@ -70,7 +70,6 @@ #include #include #include -#include #include #include @@ -503,7 +502,6 @@ asmlinkage void __init start_kernel(void) setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); - softirq_early_init(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ build_all_zonelists(NULL, NULL); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 4f7d959..71a3ca1 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -330,16 +330,8 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - if (!(flags & MS_KERNMOUNT)) { - struct ipc_namespace *ns = current->nsproxy->ipc_ns; - /* Don't allow mounting unless the caller has CAP_SYS_ADMIN - * over the ipc namespace. 
- */ - if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) - return ERR_PTR(-EPERM); - - data = ns; - } + if (!(flags & MS_KERNMOUNT)) + data = current->nsproxy->ipc_ns; return mount_ns(fs_type, flags, data, mqueue_fill_super); } @@ -848,8 +840,7 @@ out_putfd: fd = error; } mutex_unlock(&root->d_inode->i_mutex); - if (!ro) - mnt_drop_write(mnt); + mnt_drop_write(mnt); out_putname: putname(name); return fd; @@ -921,17 +912,12 @@ static inline void pipelined_send(struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { - /* - * Keep them in one critical section for PREEMPT_RT: - */ - preempt_disable_rt(); receiver->msg = message; list_del(&receiver->list); receiver->state = STATE_PENDING; wake_up_process(receiver->task); smp_wmb(); receiver->state = STATE_READY; - preempt_enable_rt(); } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() @@ -945,18 +931,13 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) wake_up_interruptible(&info->wait_q); return; } - /* - * Keep them in one critical section for PREEMPT_RT: - */ - preempt_disable_rt(); - if (!msg_insert(sender->msg, info)) { - list_del(&sender->list); - sender->state = STATE_PENDING; - wake_up_process(sender->task); - smp_wmb(); - sender->state = STATE_READY; - } - preempt_enable_rt(); + if (msg_insert(sender->msg, info)) + return; + list_del(&sender->list); + sender->state = STATE_PENDING; + wake_up_process(sender->task); + smp_wmb(); + sender->state = STATE_READY; } SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, diff --git a/ipc/msg.c b/ipc/msg.c index 0b60596..950572f 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -259,20 +259,12 @@ static void expunge_all(struct msg_queue *msq, int res) while (tmp != &msq->q_receivers) { struct msg_receiver *msr; - /* - * Make sure that the wakeup doesnt preempt - * this CPU prematurely. (on PREEMPT_RT) - */ - preempt_disable_rt(); - msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; msr->r_msg = NULL; wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = ERR_PTR(res); - - preempt_enable_rt(); } } @@ -622,12 +614,6 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { - /* - * Make sure that the wakeup doesnt preempt - * this CPU prematurely. 
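The pipelined_send()/pipelined_receive() changes above are easier to follow as the underlying handshake, modeled below with the STATE_* values mirroring ipc/mqueue.c. The RT patch wrapped this in preempt_disable_rt() because a higher-priority waiter woken at STATE_PENDING spins until the waker reaches STATE_READY; preempting the waker in between burns CPU and, on RT, can livelock.

    enum { STATE_NONE, STATE_PENDING, STATE_READY };

    struct demo_waiter {
            struct task_struct *task;
            struct msg_msg *msg;
            int state;
    };

    static void demo_hand_off(struct demo_waiter *w, struct msg_msg *m)
    {
            w->msg = m;                     /* publish the payload       */
            w->state = STATE_PENDING;       /* woken waiter must spin    */
            wake_up_process(w->task);
            smp_wmb();                      /* order msg vs. READY       */
            w->state = STATE_READY;         /* waiter may consume w->msg */
    }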
(on PREEMPT_RT) - */ - preempt_disable_rt(); - list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; @@ -641,11 +627,9 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = msg; - preempt_enable_rt(); return 1; } - preempt_enable_rt(); } } return 0; @@ -836,17 +820,15 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, struct msg_msg *copy = NULL; unsigned long copy_number = 0; - ns = current->nsproxy->ipc_ns; - if (msqid < 0 || (long) bufsz < 0) return -EINVAL; if (msgflg & MSG_COPY) { - copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax), - msgflg, &msgtyp, ©_number); + copy = prepare_copy(buf, bufsz, msgflg, &msgtyp, ©_number); if (IS_ERR(copy)) return PTR_ERR(copy); } mode = convert_mode(&msgtyp, msgflg); + ns = current->nsproxy->ipc_ns; msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) { @@ -888,7 +870,6 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, goto out_unlock; break; } - msg = ERR_PTR(-EAGAIN); } else break; msg_counter++; diff --git a/ipc/msgutil.c b/ipc/msgutil.c index 5df8e4b..ebfcbfa 100644 --- a/ipc/msgutil.c +++ b/ipc/msgutil.c @@ -117,6 +117,9 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst) if (alen > DATALEN_MSG) alen = DATALEN_MSG; + dst->next = NULL; + dst->security = NULL; + memcpy(dst + 1, src + 1, alen); len -= alen; diff --git a/ipc/sem.c b/ipc/sem.c index d7bdd84..58d31f1 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -461,13 +461,6 @@ undo: static void wake_up_sem_queue_prepare(struct list_head *pt, struct sem_queue *q, int error) { -#ifdef CONFIG_PREEMPT_RT_BASE - struct task_struct *p = q->sleeper; - get_task_struct(p); - q->status = error; - wake_up_process(p); - put_task_struct(p); -#else if (list_empty(pt)) { /* * Hold preempt off so that we don't get preempted and have the @@ -479,7 +472,6 @@ static void wake_up_sem_queue_prepare(struct list_head *pt, q->pid = error; list_add_tail(&q->simple_list, pt); -#endif } /** @@ -493,7 +485,6 @@ static void wake_up_sem_queue_prepare(struct list_head *pt, */ static void wake_up_sem_queue_do(struct list_head *pt) { -#ifndef CONFIG_PREEMPT_RT_BASE struct sem_queue *q, *t; int did_something; @@ -506,7 +497,6 @@ static void wake_up_sem_queue_do(struct list_head *pt) } if (did_something) preempt_enable(); -#endif } static void unlink_queue(struct sem_array *sma, struct sem_queue *q) diff --git a/ipc/shm.c b/ipc/shm.c index 9ec2316..4fa6d8f 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -462,7 +462,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) size_t size = params->u.size; int error; struct shmid_kernel *shp; - size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT; struct file * file; char name[13]; int id; @@ -491,14 +491,10 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) sprintf (name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { - struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) - & SHM_HUGE_MASK); - size_t hugesize = ALIGN(size, huge_page_size(hs)); - /* hugetlb_file_setup applies strict accounting */ if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; - file = hugetlb_file_setup(name, hugesize, acctflag, + file = hugetlb_file_setup(name, 0, size, acctflag, &shp->mlock_user, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index f60af90..44511d1 
100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -222,4 +222,4 @@ endif config MUTEX_SPIN_ON_OWNER def_bool y - depends on SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL + depends on SMP && !DEBUG_MUTEXES diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 38cecfe..3f9c974 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -1,16 +1,3 @@ -config PREEMPT - bool - select PREEMPT_COUNT - -config PREEMPT_RT_BASE - bool - select PREEMPT - -config HAVE_PREEMPT_LAZY - bool - -config PREEMPT_LAZY - def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL choice prompt "Preemption Model" @@ -46,9 +33,9 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. -config PREEMPT__LL +config PREEMPT bool "Preemptible Kernel (Low-Latency Desktop)" - select PREEMPT + select PREEMPT_COUNT select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK help This option reduces the latency of the kernel by making @@ -65,22 +52,6 @@ config PREEMPT__LL embedded system with latency requirements in the milliseconds range. -config PREEMPT_RTB - bool "Preemptible Kernel (Basic RT)" - select PREEMPT_RT_BASE - help - This option is basically the same as (Low-Latency Desktop) but - enables changes which are preliminary for the full preemptiple - RT kernel. - -config PREEMPT_RT_FULL - bool "Fully Preemptible Kernel (RT)" - depends on IRQ_FORCED_THREADING - select PREEMPT_RT_BASE - select PREEMPT_RCU - help - All and everything - endchoice config PREEMPT_COUNT diff --git a/kernel/Makefile b/kernel/Makefile index 2c7ab7f..6c072b6 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -7,10 +7,10 @@ obj-y = fork.o exec_domain.o panic.o printk.o \ sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ rcupdate.o extable.o params.o posix-timers.o \ - kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \ - hrtimer.o nsproxy.o srcu.o semaphore.o \ + kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ + hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o cred.o \ - async.o range.o groups.o lglock.o smpboot.o wait-simple.o + async.o range.o groups.o lglock.o smpboot.o ifdef CONFIG_FUNCTION_TRACER # Do not trace debug files and internal ftrace files @@ -32,11 +32,7 @@ obj-$(CONFIG_FREEZER) += freezer.o obj-$(CONFIG_PROFILING) += profile.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ -ifneq ($(CONFIG_PREEMPT_RT_FULL),y) -obj-y += mutex.o obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o -obj-y += rwsem.o -endif obj-$(CONFIG_LOCKDEP) += lockdep.o ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o @@ -48,7 +44,6 @@ endif obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o -obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += smp.o ifneq ($(CONFIG_SMP),y) diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index a291aa2..642a89c 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -617,9 +617,9 @@ void audit_trim_trees(void) } spin_unlock(&hash_lock); trim_marked(tree); + put_tree(tree); drop_collected_mounts(root_mnt); skip_it: - put_tree(tree); mutex_lock(&audit_filter_mutex); } list_del(&cursor); diff --git a/kernel/capability.c b/kernel/capability.c index f6c2ce5..493d972 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -393,30 +393,6 @@ bool ns_capable(struct user_namespace *ns, int cap) 
EXPORT_SYMBOL(ns_capable); /** - * file_ns_capable - Determine if the file's opener had a capability in effect - * @file: The file we want to check - * @ns: The usernamespace we want the capability in - * @cap: The capability to be tested for - * - * Return true if task that opened the file had a capability in effect - * when the file was opened. - * - * This does not set PF_SUPERPRIV because the caller may not - * actually be privileged. - */ -bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap) -{ - if (WARN_ON_ONCE(!cap_valid(cap))) - return false; - - if (security_capable(file->f_cred, ns, cap) == 0) - return true; - - return false; -} -EXPORT_SYMBOL(file_ns_capable); - -/** * capable - Determine if the current task has a superior capability in effect * @cap: The capability to be tested for * diff --git a/kernel/cgroup.c b/kernel/cgroup.c index cddf1d9..4855892 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -162,9 +162,6 @@ struct cfent { struct list_head node; struct dentry *dentry; struct cftype *type; - - /* file xattrs */ - struct simple_xattrs xattrs; }; /* @@ -429,20 +426,12 @@ static void __put_css_set(struct css_set *cg, int taskexit) struct cgroup *cgrp = link->cgrp; list_del(&link->cg_link_list); list_del(&link->cgrp_link_list); - - /* - * We may not be holding cgroup_mutex, and if cgrp->count is - * dropped to 0 the cgroup can be destroyed at any time, hence - * rcu_read_lock is used to keep it alive. - */ - rcu_read_lock(); if (atomic_dec_and_test(&cgrp->count) && notify_on_release(cgrp)) { if (taskexit) set_bit(CGRP_RELEASABLE, &cgrp->flags); check_for_release(cgrp); } - rcu_read_unlock(); kfree(link); } @@ -911,12 +900,13 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) } else { struct cfent *cfe = __d_cfe(dentry); struct cgroup *cgrp = dentry->d_parent->d_fsdata; + struct cftype *cft = cfe->type; WARN_ONCE(!list_empty(&cfe->node) && cgrp != &cgrp->root->top_cgroup, "cfe still linked for %s\n", cfe->type->name); - simple_xattrs_free(&cfe->xattrs); kfree(cfe); + simple_xattrs_free(&cft->xattrs); } iput(inode); } @@ -2068,7 +2058,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) if (!group) return -ENOMEM; /* pre-allocate to guarantee space while iterating in rcu read-side. 
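file_ns_capable(), deleted above, answers a different question than capable(): it tests the credentials the file was opened with, not current's. A hedged sketch of the intended call-site shape (hypothetical driver hook, not from this patch):

    static ssize_t demo_write(struct file *file, const char __user *buf,
                              size_t len, loff_t *ppos)
    {
            /* honour the opener's privilege, so a privileged fd
             * handed to an unprivileged task confers only what the
             * opener had at open() time */
            if (!file_ns_capable(file, &init_user_ns, CAP_NET_ADMIN))
                    return -EPERM;
            /* ... privileged update ... */
            return len;
    }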
*/ - retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL); + retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL); if (retval) goto out_free_group_list; @@ -2555,7 +2545,7 @@ static struct simple_xattrs *__d_xattrs(struct dentry *dentry) if (S_ISDIR(dentry->d_inode->i_mode)) return &__d_cgrp(dentry)->xattrs; else - return &__d_cfe(dentry)->xattrs; + return &__d_cft(dentry)->xattrs; } static inline int xattr_enabled(struct dentry *dentry) @@ -2731,6 +2721,8 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, umode_t mode; char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; + simple_xattrs_init(&cft->xattrs); + if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) { strcpy(name, subsys->name); strcat(name, "."); @@ -2755,7 +2747,6 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, cfe->type = (void *)cft; cfe->dentry = dentry; dentry->d_fsdata = cfe; - simple_xattrs_init(&cfe->xattrs); list_add_tail(&cfe->node, &parent->files); cfe = NULL; } diff --git a/kernel/cpu.c b/kernel/cpu.c index d44dea3..3046a50 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -63,274 +63,6 @@ static struct { .refcount = 0, }; -/** - * hotplug_pcp - per cpu hotplug descriptor - * @unplug: set when pin_current_cpu() needs to sync tasks - * @sync_tsk: the task that waits for tasks to finish pinned sections - * @refcount: counter of tasks in pinned sections - * @grab_lock: set when the tasks entering pinned sections should wait - * @synced: notifier for @sync_tsk to tell cpu_down it's finished - * @mutex: the mutex to make tasks wait (used when @grab_lock is true) - * @mutex_init: zero if the mutex hasn't been initialized yet. - * - * Although @unplug and @sync_tsk may point to the same task, the @unplug - * is used as a flag and still exists after @sync_tsk has exited and - * @sync_tsk set to NULL. - */ -struct hotplug_pcp { - struct task_struct *unplug; - struct task_struct *sync_tsk; - int refcount; - int grab_lock; - struct completion synced; -#ifdef CONFIG_PREEMPT_RT_FULL - spinlock_t lock; -#else - struct mutex mutex; -#endif - int mutex_init; -}; - -#ifdef CONFIG_PREEMPT_RT_FULL -# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock) -# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock) -#else -# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) -# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) -#endif - -static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); - -/** - * pin_current_cpu - Prevent the current cpu from being unplugged - * - * Lightweight version of get_online_cpus() to prevent cpu from being - * unplugged when code runs in a migration disabled region. - * - * Must be called with preemption disabled (preempt_count = 1)! - */ -void pin_current_cpu(void) -{ - struct hotplug_pcp *hp; - int force = 0; - -retry: - hp = &__get_cpu_var(hotplug_pcp); - - if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || - hp->unplug == current || (current->flags & PF_STOMPER)) { - hp->refcount++; - return; - } - - if (hp->grab_lock) { - preempt_enable(); - hotplug_lock(hp); - hotplug_unlock(hp); - } else { - preempt_enable(); - /* - * Try to push this task off of this CPU. - */ - if (!migrate_me()) { - preempt_disable(); - hp = &__get_cpu_var(hotplug_pcp); - if (!hp->grab_lock) { - /* - * Just let it continue it's already pinned - * or about to sleep. 
- */ - force = 1; - goto retry; - } - preempt_enable(); - } - } - preempt_disable(); - goto retry; -} - -/** - * unpin_current_cpu - Allow unplug of current cpu - * - * Must be called with preemption or interrupts disabled! - */ -void unpin_current_cpu(void) -{ - struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); - - WARN_ON(hp->refcount <= 0); - - /* This is safe. sync_unplug_thread is pinned to this cpu */ - if (!--hp->refcount && hp->unplug && hp->unplug != current && - !(current->flags & PF_STOMPER)) - wake_up_process(hp->unplug); -} - -static void wait_for_pinned_cpus(struct hotplug_pcp *hp) -{ - set_current_state(TASK_UNINTERRUPTIBLE); - while (hp->refcount) { - schedule_preempt_disabled(); - set_current_state(TASK_UNINTERRUPTIBLE); - } -} - -static int sync_unplug_thread(void *data) -{ - struct hotplug_pcp *hp = data; - - preempt_disable(); - hp->unplug = current; - wait_for_pinned_cpus(hp); - - /* - * This thread will synchronize the cpu_down() with threads - * that have pinned the CPU. When the pinned CPU count reaches - * zero, we inform the cpu_down code to continue to the next step. - */ - set_current_state(TASK_UNINTERRUPTIBLE); - preempt_enable(); - complete(&hp->synced); - - /* - * If all succeeds, the next step will need tasks to wait till - * the CPU is offline before continuing. To do this, the grab_lock - * is set and tasks going into pin_current_cpu() will block on the - * mutex. But we still need to wait for those that are already in - * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() - * will kick this thread out. - */ - while (!hp->grab_lock && !kthread_should_stop()) { - schedule(); - set_current_state(TASK_UNINTERRUPTIBLE); - } - - /* Make sure grab_lock is seen before we see a stale completion */ - smp_mb(); - - /* - * Now just before cpu_down() enters stop machine, we need to make - * sure all tasks that are in pinned CPU sections are out, and new - * tasks will now grab the lock, keeping them from entering pinned - * CPU sections. - */ - if (!kthread_should_stop()) { - preempt_disable(); - wait_for_pinned_cpus(hp); - preempt_enable(); - complete(&hp->synced); - } - - set_current_state(TASK_UNINTERRUPTIBLE); - while (!kthread_should_stop()) { - schedule(); - set_current_state(TASK_UNINTERRUPTIBLE); - } - set_current_state(TASK_RUNNING); - - /* - * Force this thread off this CPU as it's going down and - * we don't want any more work on this CPU. - */ - current->flags &= ~PF_THREAD_BOUND; - do_set_cpus_allowed(current, cpu_present_mask); - migrate_me(); - return 0; -} - -static void __cpu_unplug_sync(struct hotplug_pcp *hp) -{ - wake_up_process(hp->sync_tsk); - wait_for_completion(&hp->synced); -} - -/* - * Start the sync_unplug_thread on the target cpu and wait for it to - * complete. - */ -static int cpu_unplug_begin(unsigned int cpu) -{ - struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); - int err; - - /* Protected by cpu_hotplug.lock */ - if (!hp->mutex_init) { -#ifdef CONFIG_PREEMPT_RT_FULL - spin_lock_init(&hp->lock); -#else - mutex_init(&hp->mutex); -#endif - hp->mutex_init = 1; - } - - /* Inform the scheduler to migrate tasks off this CPU */ - tell_sched_cpu_down_begin(cpu); - - init_completion(&hp->synced); - - hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); - if (IS_ERR(hp->sync_tsk)) { - err = PTR_ERR(hp->sync_tsk); - hp->sync_tsk = NULL; - return err; - } - kthread_bind(hp->sync_tsk, cpu); - - /* - * Wait for tasks to get out of the pinned sections, - * it's still OK if new tasks enter. 
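pin_current_cpu()/unpin_current_cpu(), removed above, bracket regions that must not race with this CPU going offline. A minimal pairing sketch, assuming the RT tree's declarations; this is essentially what migrate_disable()/migrate_enable() do under the hood:

    static void demo_pinned_region(void)
    {
            preempt_disable();       /* required: preempt_count == 1 */
            pin_current_cpu();       /* takes a hotplug refcount     */
            preempt_enable();

            /* ... this CPU cannot be unplugged here ... */

            preempt_disable();
            unpin_current_cpu();     /* may wake a waiting unplugger */
            preempt_enable();
    }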
Some CPU notifiers will - * wait for tasks that are going to enter these sections and - * we must not have them block. - */ - __cpu_unplug_sync(hp); - - return 0; -} - -static void cpu_unplug_sync(unsigned int cpu) -{ - struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); - - init_completion(&hp->synced); - /* The completion needs to be initialzied before setting grab_lock */ - smp_wmb(); - - /* Grab the mutex before setting grab_lock */ - hotplug_lock(hp); - hp->grab_lock = 1; - - /* - * The CPU notifiers have been completed. - * Wait for tasks to get out of pinned CPU sections and have new - * tasks block until the CPU is completely down. - */ - __cpu_unplug_sync(hp); - - /* All done with the sync thread */ - kthread_stop(hp->sync_tsk); - hp->sync_tsk = NULL; -} - -static void cpu_unplug_done(unsigned int cpu) -{ - struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); - - hp->unplug = NULL; - /* Let all tasks know cpu unplug is finished before cleaning up */ - smp_wmb(); - - if (hp->sync_tsk) - kthread_stop(hp->sync_tsk); - - if (hp->grab_lock) { - hotplug_unlock(hp); - /* protected by cpu_hotplug.lock */ - hp->grab_lock = 0; - } - tell_sched_cpu_down_done(cpu); -} - void get_online_cpus(void) { might_sleep(); @@ -347,14 +79,15 @@ void put_online_cpus(void) { if (cpu_hotplug.active_writer == current) return; - mutex_lock(&cpu_hotplug.lock); + if (WARN_ON(!cpu_hotplug.refcount)) cpu_hotplug.refcount++; /* try to fix things up */ if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) wake_up_process(cpu_hotplug.active_writer); mutex_unlock(&cpu_hotplug.lock); + } EXPORT_SYMBOL_GPL(put_online_cpus); @@ -527,14 +260,13 @@ static int __ref take_cpu_down(void *_param) /* Requires cpu_add_remove_lock to be held */ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) { - int mycpu, err, nr_calls = 0; + int err, nr_calls = 0; void *hcpu = (void *)(long)cpu; unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; struct take_cpu_down_param tcd_param = { .mod = mod, .hcpu = hcpu, }; - cpumask_var_t cpumask; if (num_online_cpus() == 1) return -EBUSY; @@ -542,26 +274,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) if (!cpu_online(cpu)) return -EINVAL; - /* Move the downtaker off the unplug cpu */ - if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) - return -ENOMEM; - cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); - set_cpus_allowed_ptr(current, cpumask); - free_cpumask_var(cpumask); - migrate_disable(); - mycpu = smp_processor_id(); - if (mycpu == cpu) { - printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); - migrate_enable(); - return -EBUSY; - } - cpu_hotplug_begin(); - err = cpu_unplug_begin(cpu); - if (err) { - printk("cpu_unplug_begin(%d) failed\n", cpu); - goto out_cancel; - } err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err) { @@ -573,9 +286,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) } smpboot_park_threads(cpu); - /* Notifiers are done. Don't let any more tasks pin this CPU. */ - cpu_unplug_sync(cpu); - err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); if (err) { /* CPU didn't die: tell everyone. Can't complain. 
*/ @@ -604,9 +314,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) check_for_tasks(cpu); out_release: - cpu_unplug_done(cpu); -out_cancel: - migrate_enable(); cpu_hotplug_done(); if (!err) cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 5bb9bf1..7bb63ee 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2511,16 +2511,8 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk) dentry = task_cs(tsk)->css.cgroup->dentry; spin_lock(&cpuset_buffer_lock); - - if (!dentry) { - strcpy(cpuset_name, "/"); - } else { - spin_lock(&dentry->d_lock); - strlcpy(cpuset_name, (const char *)dentry->d_name.name, - CPUSET_NAME_LEN); - spin_unlock(&dentry->d_lock); - } - + snprintf(cpuset_name, CPUSET_NAME_LEN, + dentry ? (const char *)dentry->d_name.name : "/"); nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, tsk->mems_allowed); printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 399dba6..14ff484 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -554,6 +554,7 @@ int vkdb_printf(const char *fmt, va_list ap) int linecount; int colcount; int logging, saved_loglevel = 0; + int saved_trap_printk; int got_printf_lock = 0; int retlen = 0; int fnd, len; @@ -564,6 +565,8 @@ int vkdb_printf(const char *fmt, va_list ap) unsigned long uninitialized_var(flags); preempt_disable(); + saved_trap_printk = kdb_trap_printk; + kdb_trap_printk = 0; /* Serialize kdb_printf if multiple cpus try to write at once. * But if any cpu goes recursive in kdb, just print the output, @@ -830,6 +833,7 @@ kdb_print_out: } else { __release(kdb_printf_lock); } + kdb_trap_printk = saved_trap_printk; preempt_enable(); return retlen; } @@ -839,11 +843,9 @@ int kdb_printf(const char *fmt, ...) va_list ap; int r; - kdb_trap_printk++; va_start(ap, fmt); r = vkdb_printf(fmt, ap); va_end(ap); - kdb_trap_printk--; return r; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 45f7b3e..7b6646a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5328,7 +5328,7 @@ static void sw_perf_event_destroy(struct perf_event *event) static int perf_swevent_init(struct perf_event *event) { - u64 event_id = event->attr.config; + int event_id = event->attr.config; if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; @@ -5638,7 +5638,6 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swevent_hrtimer; - hwc->hrtimer.irqsafe = 1; /* * Since hrtimers have a fixed rate, we can do a static freq->period diff --git a/kernel/exit.c b/kernel/exit.c index 8fb4a48..b4df219 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk) * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
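Note what the perf hunk above reintroduces: attr.config is a u64, so narrowing event_id back to int means the PERF_COUNT_SW_MAX range check no longer rejects large configs. A minimal illustration:

    static void demo_truncation(void)
    {
            u64 config = 0x100000000ULL;     /* not a valid sw event  */
            int event_id = config;           /* truncates to 0, alias
                                              * of PERF_COUNT_SW_CPU_CLOCK */

            WARN_ON(event_id >= PERF_COUNT_SW_MAX);  /* never fires   */
    }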
*/ - flush_task_sigqueue(tsk); + flush_sigqueue(&tsk->pending); tsk->sighand = NULL; spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c index b814a02..c535f33 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -93,7 +93,7 @@ int max_threads; /* tunable limit on nr_threads */ DEFINE_PER_CPU(unsigned long, process_counts) = 0; -DEFINE_RWLOCK(tasklist_lock); /* outer */ +__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ #ifdef CONFIG_PROVE_RCU int lockdep_tasklist_lock_is_held(void) @@ -229,9 +229,7 @@ static inline void put_signal_struct(struct signal_struct *sig) if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } -#ifdef CONFIG_PREEMPT_RT_BASE -static -#endif + void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); @@ -246,18 +244,7 @@ void __put_task_struct(struct task_struct *tsk) if (!profile_handoff_task(tsk)) free_task(tsk); } -#ifndef CONFIG_PREEMPT_RT_BASE EXPORT_SYMBOL_GPL(__put_task_struct); -#else -void __put_task_struct_cb(struct rcu_head *rhp) -{ - struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); - - __put_task_struct(tsk); - -} -EXPORT_SYMBOL_GPL(__put_task_struct_cb); -#endif void __init __weak arch_task_cache_init(void) { } @@ -616,19 +603,6 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); -#ifdef CONFIG_PREEMPT_RT_BASE -/* - * RCU callback for delayed mm drop. Not strictly rcu, but we don't - * want another facility to make this work. - */ -void __mmdrop_delayed(struct rcu_head *rhp) -{ - struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); - - __mmdrop(mm); -} -#endif - /* * Decrement the use count and release all resources for an mm. */ @@ -1138,9 +1112,6 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p) */ static void posix_cpu_timers_init(struct task_struct *tsk) { -#ifdef CONFIG_PREEMPT_RT_BASE - tsk->posix_timer_list = NULL; -#endif tsk->cputime_expires.prof_exp = 0; tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; @@ -1170,9 +1141,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); - if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) - return ERR_PTR(-EINVAL); - /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. @@ -1259,7 +1227,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); - p->sigqueue_cache = NULL; p->utime = p->stime = p->gtime = 0; p->utimescaled = p->stimescaled = 0; @@ -1314,9 +1281,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->hardirq_context = 0; p->softirq_context = 0; #endif -#ifdef CONFIG_PREEMPT_RT_FULL - p->pagefault_disabled = 0; -#endif #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; @@ -1837,7 +1801,7 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) * If unsharing a user namespace must also unshare the thread. */ if (unshare_flags & CLONE_NEWUSER) - unshare_flags |= CLONE_THREAD | CLONE_FS; + unshare_flags |= CLONE_THREAD; /* * If unsharing a pid namespace must also unshare the thread. 
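The two fork.c hunks above back out the restriction that CLONE_NEWUSER and CLONE_FS may not be combined (the upstream fix for the user-namespace chroot escape, CVE-2013-1858). The removed invariant, stated compactly:

    /* A task entering a fresh user namespace must not keep sharing
     * fs_struct with its creator; otherwise the ns-local root could
     * chroot() within a root directory it shares with more
     * privileged tasks. */
    static bool demo_newuser_flags_valid(unsigned long clone_flags)
    {
            return (clone_flags & (CLONE_NEWUSER | CLONE_FS)) !=
                   (CLONE_NEWUSER | CLONE_FS);
    }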
*/ diff --git a/kernel/futex.c b/kernel/futex.c index 473c3c4..19eb089 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -568,9 +568,7 @@ void exit_pi_state_list(struct task_struct *curr) * task still owns the PI-state: */ if (head->next != next) { - raw_spin_unlock_irq(&curr->pi_lock); spin_unlock(&hb->lock); - raw_spin_lock_irq(&curr->pi_lock); continue; } @@ -1444,16 +1442,6 @@ retry_private: requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; - } else if (ret == -EAGAIN) { - /* - * Waiter was woken by timeout or - * signal and has set pi_blocked_on to - * PI_WAKEUP_INPROGRESS before we - * tried to enqueue it on the rtmutex. - */ - this->pi_state = NULL; - free_pi_state(pi_state); - continue; } else if (ret) { /* -EDEADLK */ this->pi_state = NULL; @@ -2298,7 +2286,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; - struct futex_hash_bucket *hb, *hb2; + struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; @@ -2323,7 +2311,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ - rt_mutex_init_waiter(&rt_waiter, false); + debug_rt_mutex_init_waiter(&rt_waiter); + rt_waiter.task = NULL; ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) @@ -2344,55 +2333,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); - /* - * On RT we must avoid races with requeue and trying to block - * on two mutexes (hb->lock and uaddr2's rtmutex) by - * serializing access to pi_blocked_on with pi_lock. - */ - raw_spin_lock_irq(¤t->pi_lock); - if (current->pi_blocked_on) { - /* - * We have been requeued or are in the process of - * being requeued. - */ - raw_spin_unlock_irq(¤t->pi_lock); - } else { - /* - * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS - * prevents a concurrent requeue from moving us to the - * uaddr2 rtmutex. After that we can safely acquire - * (and possibly block on) hb->lock. - */ - current->pi_blocked_on = PI_WAKEUP_INPROGRESS; - raw_spin_unlock_irq(¤t->pi_lock); - - spin_lock(&hb->lock); - - /* - * Clean up pi_blocked_on. We might leak it otherwise - * when we succeeded with the hb->lock in the fast - * path. - */ - raw_spin_lock_irq(¤t->pi_lock); - current->pi_blocked_on = NULL; - raw_spin_unlock_irq(¤t->pi_lock); - - ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); - spin_unlock(&hb->lock); - if (ret) - goto out_put_keys; - } + spin_lock(&hb->lock); + ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); + spin_unlock(&hb->lock); + if (ret) + goto out_put_keys; /* - * In order to be here, we have either been requeued, are in - * the process of being requeued, or requeue successfully - * acquired uaddr2 on our behalf. If pi_blocked_on was - * non-null above, we may be racing with a requeue. Do not - * rely on q->lock_ptr to be hb2->lock until after blocking on - * hb->lock or hb2->lock. The futex_requeue dropped our key1 - * reference and incremented our key2 reference count. 
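The futex_wait_requeue_pi() rework above removes the RT-only fence against a concurrent requeue. Condensed from the removed lines, the waiter side was:

    raw_spin_lock_irq(&current->pi_lock);
    if (current->pi_blocked_on) {
            /* requeue already in flight; it owns the move to uaddr2 */
            raw_spin_unlock_irq(&current->pi_lock);
    } else {
            /* fence off a late requeue before taking hb->lock */
            current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
            raw_spin_unlock_irq(&current->pi_lock);
            /* ... take hb->lock, clear pi_blocked_on, then run
             * handle_early_requeue_pi_wakeup() ... */
    }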
+ * In order for us to be here, we know our q.key == key2, and since + * we took the hb->lock above, we also know that futex_requeue() has + * completed and we no longer have to concern ourselves with a wakeup + * race with the atomic proxy lock acquisition by the requeue code. The + * futex_requeue dropped our key1 reference and incremented our key2 + * reference count. */ - hb2 = hash_futex(&key2); /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { @@ -2401,10 +2355,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { - spin_lock(&hb2->lock); - BUG_ON(&hb2->lock != q.lock_ptr); + spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); - spin_unlock(&hb2->lock); + spin_unlock(q.lock_ptr); } } else { /* @@ -2417,8 +2370,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); - spin_lock(&hb2->lock); - BUG_ON(&hb2->lock != q.lock_ptr); + spin_lock(q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. @@ -2519,6 +2471,8 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, if (!futex_cmpxchg_enabled) return -ENOSYS; + WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n"); + rcu_read_lock(); ret = -ESRCH; diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index a9642d5..83e368b 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -142,6 +142,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, if (!futex_cmpxchg_enabled) return -ENOSYS; + WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n"); + rcu_read_lock(); ret = -ESRCH; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index aa5eb4f..6db7a5e 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -49,7 +49,6 @@ #include #include -#include /* * The timer bases: @@ -62,7 +61,6 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = { - .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock), .clock_base = { { @@ -299,10 +297,6 @@ ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec) } else { unsigned long rem = do_div(nsec, NSEC_PER_SEC); - /* Make sure nsec fits into long */ - if (unlikely(nsec > KTIME_SEC_MAX)) - return (ktime_t){ .tv64 = KTIME_MAX }; - tmp = ktime_set((long)nsec, rem); } @@ -594,7 +588,8 @@ static int hrtimer_reprogram(struct hrtimer *timer, * When the callback is running, we do not reprogram the clock event * device. The timer callback is either running on a different CPU or * the callback is executed in the hrtimer_interrupt context. The - * reprogramming is handled at the end of the hrtimer_interrupt. + * reprogramming is handled either by the softirq, which called the + * callback or at the end of the hrtimer_interrupt. 
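For orientation in the hrtimer.c hunks here: after the revert, expiry callbacks again run in hard-irq context from hrtimer_interrupt() (or from the jiffy tick via hrtimer_run_queues()), so they must not take sleeping locks. A minimal timer sketch against the mainline API:

    static struct hrtimer demo_timer;

    static enum hrtimer_restart demo_cb(struct hrtimer *t)
    {
            /* hard-irq context: no sleeping, keep it short */
            return HRTIMER_NORESTART;
    }

    static void demo_arm(void)
    {
            hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            demo_timer.function = demo_cb;
            hrtimer_start(&demo_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
                          HRTIMER_MODE_REL);
    }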
*/ if (hrtimer_callback_running(timer)) return 0; @@ -629,9 +624,6 @@ static int hrtimer_reprogram(struct hrtimer *timer, return res; } -static void __run_hrtimer(struct hrtimer *timer, ktime_t *now); -static int hrtimer_rt_defer(struct hrtimer *timer); - /* * Initialize the high resolution related parts of cpu_base */ @@ -651,15 +643,18 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, struct hrtimer_clock_base *base, int wakeup) { - if (!(base->cpu_base->hres_active && hrtimer_reprogram(timer, base))) - return 0; - if (!wakeup) - return -ETIME; -#ifdef CONFIG_PREEMPT_RT_BASE - if (!hrtimer_rt_defer(timer)) - return -ETIME; -#endif - return 1; + if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { + if (wakeup) { + raw_spin_unlock(&base->cpu_base->lock); + raise_softirq_irqoff(HRTIMER_SOFTIRQ); + raw_spin_lock(&base->cpu_base->lock); + } else + __raise_softirq_irqoff(HRTIMER_SOFTIRQ); + + return 1; + } + + return 0; } static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) @@ -747,11 +742,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, } static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } static inline void retrigger_next_event(void *arg) { } -static inline int hrtimer_reprogram(struct hrtimer *timer, - struct hrtimer_clock_base *base) -{ - return 0; -} #endif /* CONFIG_HIGH_RES_TIMERS */ @@ -866,32 +856,6 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) } EXPORT_SYMBOL_GPL(hrtimer_forward); -#ifdef CONFIG_PREEMPT_RT_BASE -# define wake_up_timer_waiters(b) wake_up(&(b)->wait) - -/** - * hrtimer_wait_for_timer - Wait for a running timer - * - * @timer: timer to wait for - * - * The function waits in case the timers callback function is - * currently executed on the waitqueue of the timer base. The - * waitqueue is woken up after the timer callback function has - * finished execution. - */ -void hrtimer_wait_for_timer(const struct hrtimer *timer) -{ - struct hrtimer_clock_base *base = timer->base; - - if (base && base->cpu_base && !timer->irqsafe) - wait_event(base->cpu_base->wait, - !(timer->state & HRTIMER_STATE_CALLBACK)); -} - -#else -# define wake_up_timer_waiters(b) do { } while (0) -#endif - /* * enqueue_hrtimer - internal function to (re)start a timer * @@ -935,11 +899,6 @@ static void __remove_hrtimer(struct hrtimer *timer, if (!(timer->state & HRTIMER_STATE_ENQUEUED)) goto out; - if (unlikely(!list_empty(&timer->cb_entry))) { - list_del_init(&timer->cb_entry); - goto out; - } - next_timer = timerqueue_getnext(&base->active); timerqueue_del(&base->active, &timer->node); if (&timer->node == next_timer) { @@ -1024,17 +983,6 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, #endif } -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - { - ktime_t now = new_base->get_time(); - - if (ktime_to_ns(tim) < ktime_to_ns(now)) - timer->praecox = now; - else - timer->praecox = ktime_set(0, 0); - } -#endif - hrtimer_set_expires_range_ns(timer, tim, delta_ns); timer_stats_hrtimer_set_start_info(timer); @@ -1047,29 +995,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, * * XXX send_remote_softirq() ? */ - if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) { - ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup); - if (ret < 0) { - /* - * In case we failed to reprogram the timer (mostly - * because out current timer is already elapsed), - * remove it again and report a failure. This avoids - * stale base->first entries. 
- */ - debug_deactivate(timer); - __remove_hrtimer(timer, new_base, - timer->state & HRTIMER_STATE_CALLBACK, 0); - } else if (ret > 0) { - /* - * We need to drop cpu_base->lock to avoid a - * lock ordering issue vs. rq->lock. - */ - raw_spin_unlock(&new_base->cpu_base->lock); - raise_softirq_irqoff(HRTIMER_SOFTIRQ); - local_irq_restore(flags); - return 0; - } - } + if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) + hrtimer_enqueue_reprogram(timer, new_base, wakeup); unlock_hrtimer_base(timer, &flags); @@ -1155,7 +1082,7 @@ int hrtimer_cancel(struct hrtimer *timer) if (ret >= 0) return ret; - hrtimer_wait_for_timer(timer); + cpu_relax(); } } EXPORT_SYMBOL_GPL(hrtimer_cancel); @@ -1234,7 +1161,6 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, base = hrtimer_clockid_to_base(clock_id); timer->base = &cpu_base->clock_base[base]; - INIT_LIST_HEAD(&timer->cb_entry); timerqueue_init(&timer->node); #ifdef CONFIG_TIMER_STATS @@ -1318,126 +1244,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) timer->state &= ~HRTIMER_STATE_CALLBACK; } -static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); - -#ifdef CONFIG_PREEMPT_RT_BASE -static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, - struct hrtimer_clock_base *base) -{ - /* - * Note, we clear the callback flag before we requeue the - * timer otherwise we trigger the callback_running() check - * in hrtimer_reprogram(). - */ - timer->state &= ~HRTIMER_STATE_CALLBACK; - - if (restart != HRTIMER_NORESTART) { - BUG_ON(hrtimer_active(timer)); - /* - * Enqueue the timer, if it's the leftmost timer then - * we need to reprogram it. - */ - if (!enqueue_hrtimer(timer, base)) - return; - -#ifndef CONFIG_HIGH_RES_TIMERS - } -#else - if (base->cpu_base->hres_active && - hrtimer_reprogram(timer, base)) - goto requeue; - - } else if (hrtimer_active(timer)) { - /* - * If the timer was rearmed on another CPU, reprogram - * the event device. - */ - if (&timer->node == base->active.next && - base->cpu_base->hres_active && - hrtimer_reprogram(timer, base)) - goto requeue; - } - return; - -requeue: - /* - * Timer is expired. Thus move it from tree to pending list - * again. - */ - __remove_hrtimer(timer, base, timer->state, 0); - list_add_tail(&timer->cb_entry, &base->expired); -#endif -} - -/* - * The changes in mainline which removed the callback modes from - * hrtimer are not yet working with -rt. The non wakeup_process() - * based callbacks which involve sleeping locks need to be treated - * seperately. - */ -static void hrtimer_rt_run_pending(void) -{ - enum hrtimer_restart (*fn)(struct hrtimer *); - struct hrtimer_cpu_base *cpu_base; - struct hrtimer_clock_base *base; - struct hrtimer *timer; - int index, restart; - - local_irq_disable(); - cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); - - raw_spin_lock(&cpu_base->lock); - - for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { - base = &cpu_base->clock_base[index]; - - while (!list_empty(&base->expired)) { - timer = list_first_entry(&base->expired, - struct hrtimer, cb_entry); - - /* - * Same as the above __run_hrtimer function - * just we run with interrupts enabled. 
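The hrtimer_cancel() hunk above swaps hrtimer_wait_for_timer() back for cpu_relax(). The loop shape is unchanged; the difference is what happens while the callback is mid-flight. Mainline simply spins:

    int demo_cancel(struct hrtimer *timer)
    {
            for (;;) {
                    int ret = hrtimer_try_to_cancel(timer);

                    if (ret >= 0)            /* inactive, or we won  */
                            return ret;
                    cpu_relax();             /* callback running; spin
                                              * until it finishes    */
            }
    }

That is fine when callbacks run in hard-irq context, but on RT, where callbacks can run in a preemptible softirq thread, a high-priority canceller spinning like this can starve the very thread it is waiting for; hence the waitqueue-based hrtimer_wait_for_timer() being removed here.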
-			 */
-			debug_hrtimer_deactivate(timer);
-			__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
-			timer_stats_account_hrtimer(timer);
-			fn = timer->function;
-
-			raw_spin_unlock_irq(&cpu_base->lock);
-			restart = fn(timer);
-			raw_spin_lock_irq(&cpu_base->lock);
-
-			hrtimer_rt_reprogram(restart, timer, base);
-		}
-	}
-
-	raw_spin_unlock_irq(&cpu_base->lock);
-
-	wake_up_timer_waiters(cpu_base);
-}
-
-static int hrtimer_rt_defer(struct hrtimer *timer)
-{
-	if (timer->irqsafe)
-		return 0;
-
-	__remove_hrtimer(timer, timer->base, timer->state, 0);
-	list_add_tail(&timer->cb_entry, &timer->base->expired);
-	return 1;
-}
-
-#else
-
-static inline void hrtimer_rt_run_pending(void)
-{
-	hrtimer_peek_ahead_timers();
-}
-
-static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
-
-#endif
-
 #ifdef CONFIG_HIGH_RES_TIMERS
 
 /*
@@ -1448,7 +1254,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	ktime_t expires_next, now, entry_time, delta;
-	int i, retries = 0, raise = 0;
+	int i, retries = 0;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
@@ -1483,15 +1289,6 @@ retry:
 
 			timer = container_of(node, struct hrtimer, node);
 
-			trace_hrtimer_interrupt(raw_smp_processor_id(),
-			    ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
-				timer->praecox : hrtimer_get_expires(timer),
-				basenow)),
-			    current,
-			    timer->function == hrtimer_wakeup ?
-			    container_of(timer, struct hrtimer_sleeper,
-				timer)->task : NULL);
-
 			/*
 			 * The immediate goal for using the softexpires is
 			 * minimizing wakeups, not running timers at the
@@ -1510,17 +1307,12 @@ retry:
 
 				expires = ktime_sub(hrtimer_get_expires(timer),
 						    base->offset);
-				if (expires.tv64 < 0)
-					expires.tv64 = KTIME_MAX;
 				if (expires.tv64 < expires_next.tv64)
 					expires_next = expires;
 				break;
 			}
 
-			if (!hrtimer_rt_defer(timer))
-				__run_hrtimer(timer, &basenow);
-			else
-				raise = 1;
+			__run_hrtimer(timer, &basenow);
 		}
 	}
 
@@ -1535,7 +1327,7 @@ retry:
 	if (expires_next.tv64 == KTIME_MAX ||
 	    !tick_program_event(expires_next, 0)) {
 		cpu_base->hang_detected = 0;
-		goto out;
+		return;
 	}
 
 	/*
@@ -1579,9 +1371,6 @@ retry:
 	tick_program_event(expires_next, 1);
 	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
 		    ktime_to_ns(delta));
-out:
-	if (raise)
-		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 }
 
 /*
@@ -1618,26 +1407,24 @@ void hrtimer_peek_ahead_timers(void)
 	local_irq_restore(flags);
 }
 
-#else /* CONFIG_HIGH_RES_TIMERS */
-
-static inline void __hrtimer_peek_ahead_timers(void) { }
-
-#endif	/* !CONFIG_HIGH_RES_TIMERS */
-
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
-#ifdef CONFIG_HIGH_RES_TIMERS
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 
 	if (cpu_base->clock_was_set) {
 		cpu_base->clock_was_set = 0;
 		clock_was_set();
 	}
-#endif
 
-	hrtimer_rt_run_pending();
+	hrtimer_peek_ahead_timers();
 }
 
+#else /* CONFIG_HIGH_RES_TIMERS */
+
+static inline void __hrtimer_peek_ahead_timers(void) { }
+
+#endif	/* !CONFIG_HIGH_RES_TIMERS */
+
 /*
  * Called from timer softirq every jiffy, expire hrtimers:
  *
@@ -1670,7 +1457,7 @@ void hrtimer_run_queues(void)
 	struct timerqueue_node *node;
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
-	int index, gettime = 1, raise = 0;
+	int index, gettime = 1;
 
 	if (hrtimer_hres_active())
 		return;
@@ -1695,16 +1482,10 @@ void hrtimer_run_queues(void)
 					hrtimer_get_expires_tv64(timer))
 				break;
 
-			if (!hrtimer_rt_defer(timer))
-				__run_hrtimer(timer, &base->softirq_time);
-			else
-				raise = 1;
+			__run_hrtimer(timer, &base->softirq_time);
 		}
 		raw_spin_unlock(&cpu_base->lock);
 	}
-
-	if (raise)
-		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 }
 
 /*
@@ -1726,7 +1507,6 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
 void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 	sl->timer.function = hrtimer_wakeup;
-	sl->timer.irqsafe = 1;
 	sl->task = task;
 }
 EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -1860,16 +1640,14 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
+	raw_spin_lock_init(&cpu_base->lock);
+
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 		timerqueue_init_head(&cpu_base->clock_base[i].active);
-		INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
 	}
 
 	hrtimer_init_hres(cpu_base);
-#ifdef CONFIG_PREEMPT_RT_BASE
-	init_waitqueue_head(&cpu_base->wait);
-#endif
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1982,7 +1760,9 @@ void __init hrtimers_init(void)
 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
 			  (void *)(long)smp_processor_id());
 	register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_HIGH_RES_TIMERS
 	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
 }
 
 /**
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 7f50c55..131ca17 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -132,8 +132,6 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
-	struct pt_regs *regs = get_irq_regs();
-	u64 ip = regs ? instruction_pointer(regs) : 0;
 	irqreturn_t retval = IRQ_NONE;
 	unsigned int flags = 0, irq = desc->irq_data.irq;
 
@@ -174,11 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 		action = action->next;
 	} while (action);
 
-#ifndef CONFIG_PREEMPT_RT_FULL
-	add_interrupt_randomness(irq, flags, ip);
-#else
-	desc->random_ip = ip;
-#endif
+	add_interrupt_randomness(irq, flags);
 
 	if (!noirqdebug)
 		note_interrupt(irq, desc, retval);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 473b2b6..192a302 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -23,27 +23,10 @@
 static struct lock_class_key irq_desc_lock_class;
 
 #if defined(CONFIG_SMP)
-static int __init irq_affinity_setup(char *str)
-{
-	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-	cpulist_parse(str, irq_default_affinity);
-	/*
-	 * Set at least the boot cpu. We don't want to end up with
-	 * bugreports caused by random commandline masks
-	 */
-	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-	return 1;
-}
-__setup("irqaffinity=", irq_affinity_setup);
-
 static void __init init_irq_default_affinity(void)
 {
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!irq_default_affinity)
-		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-#endif
-	if (cpumask_empty(irq_default_affinity))
-		cpumask_setall(irq_default_affinity);
+	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+	cpumask_setall(irq_default_affinity);
 }
 #else
 static void __init init_irq_default_affinity(void)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1fba5cb..e49a288 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -21,7 +21,6 @@
 #include "internals.h"
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
-# ifndef CONFIG_PREEMPT_RT_BASE
 __read_mostly bool force_irqthreads;
 
 static int __init setup_forced_irqthreads(char *arg)
@@ -30,7 +29,6 @@ static int __init setup_forced_irqthreads(char *arg)
 	return 0;
 }
 early_param("threadirqs", setup_forced_irqthreads);
-# endif
 #endif
 
 /**
@@ -782,15 +780,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 	local_bh_disable();
 	ret = action->thread_fn(action->irq, action->dev_id);
 	irq_finalize_oneshot(desc, action);
-	/*
-	 * Interrupts which have real time requirements can be set up
-	 * to avoid softirq processing in the thread handler. This is
-	 * safe as these interrupts do not raise soft interrupts.
-	 */
-	if (irq_settings_no_softirq_call(desc))
-		_local_bh_enable();
-	else
-		local_bh_enable();
+	local_bh_enable();
 	return ret;
 }
 
@@ -879,12 +869,6 @@ static int irq_thread(void *data)
 		if (!noirqdebug)
 			note_interrupt(action->irq, desc, action_ret);
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-		migrate_disable();
-		add_interrupt_randomness(action->irq, 0,
-				 desc->random_ip ^ (unsigned long) action);
-		migrate_enable();
-#endif
 		wake_threads_waitq(desc);
 	}
 
@@ -1141,9 +1125,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 	}
 
-	if (new->flags & IRQF_NO_SOFTIRQ_CALL)
-		irq_settings_set_no_softirq_call(desc);
-
 	/* Set default affinity mask once everything is setup */
 	setup_affinity(irq, desc, mask);
 
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 0d2c381..1162f10 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -14,7 +14,6 @@ enum {
 	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
 	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
 	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
-	_IRQ_NO_SOFTIRQ_CALL	= IRQ_NO_SOFTIRQ_CALL,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
@@ -27,7 +26,6 @@ enum {
 #define IRQ_NOAUTOEN		GOT_YOU_MORON
 #define IRQ_NESTED_THREAD	GOT_YOU_MORON
 #define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
-#define IRQ_NO_SOFTIRQ_CALL	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@@ -38,16 +36,6 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
 	desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
 }
 
-static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
-{
-	return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
-}
-
-static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
-{
-	desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
-}
-
 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
 {
 	return desc->status_use_accessors & _IRQ_PER_CPU;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 38a32b0..611cd60 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -80,11 +80,13 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 
 	/*
 	 * All handlers must agree on IRQF_SHARED, so we test just the
-	 * first.
+	 * first. Check for action->next as well.
 	 */
 	action = desc->action;
 	if (!action || !(action->flags & IRQF_SHARED) ||
-	    (action->flags & __IRQF_TIMER))
+	    (action->flags & __IRQF_TIMER) ||
+	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+	    !action->next)
 		goto out;
 
 	/* Already running on another processor */
@@ -102,7 +104,6 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 	do {
 		if (handle_irq_event(desc) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
-		/* Make sure that there is still a valid action */
 		action = desc->action;
 	} while ((desc->istate & IRQS_PENDING) && action);
 	desc->istate &= ~IRQS_POLL_INPROGRESS;
@@ -340,11 +341,6 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
 
 static int __init irqfixup_setup(char *str)
 {
-#ifdef CONFIG_PREEMPT_RT_BASE
-	printk(KERN_WARNING "irqfixup boot option not supported "
-		"w/ CONFIG_PREEMPT_RT_BASE\n");
-	return 1;
-#endif
 	irqfixup = 1;
 	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
 	printk(KERN_WARNING "This may impact system performance.\n");
@@ -357,11 +353,6 @@ module_param(irqfixup, int, 0644);
 
 static int __init irqpoll_setup(char *str)
 {
-#ifdef CONFIG_PREEMPT_RT_BASE
-	printk(KERN_WARNING "irqpoll boot option not supported "
-		"w/ CONFIG_PREEMPT_RT_BASE\n");
-	return 1;
-#endif
 	irqfixup = 2;
 	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
 		"enabled\n");
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 170c2ea..1588e3b 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -107,10 +107,8 @@ void irq_work_run(void)
 	if (llist_empty(this_list))
 		return;
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
-#endif
 
 	llnode = llist_del_all(this_list);
 	while (llnode != NULL) {
diff --git a/kernel/itimer.c b/kernel/itimer.c
index d051390..8d262b4 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -213,7 +213,6 @@ again:
 		/* We are sharing ->siglock with it_real_fn() */
 		if (hrtimer_try_to_cancel(timer) < 0) {
 			spin_unlock_irq(&tsk->sighand->siglock);
-			hrtimer_wait_for_timer(&tsk->signal->real_timer);
 			goto again;
 		}
 		expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 1c991e3..6ada93c 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -132,15 +132,6 @@ KERNEL_ATTR_RO(vmcoreinfo);
 
 #endif /* CONFIG_KEXEC */
 
-#if defined(CONFIG_PREEMPT_RT_FULL)
-static ssize_t realtime_show(struct kobject *kobj,
-			     struct kobj_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%d\n", 1);
-}
-KERNEL_ATTR_RO(realtime);
-#endif
-
 /* whether file capabilities are enabled */
 static ssize_t fscaps_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
@@ -206,9 +197,6 @@ static struct attribute * kernel_attrs[] = {
 	&vmcoreinfo_attr.attr,
 #endif
 	&rcu_expedited_attr.attr,
-#ifdef CONFIG_PREEMPT_RT_FULL
-	&realtime_attr.attr,
-#endif
 	NULL
 };
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9eb7fed..691dc2e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-	__set_current_state(TASK_PARKED);
+	__set_current_state(TASK_INTERRUPTIBLE);
 	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
 		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
 			complete(&self->parked);
 		schedule();
-		__set_current_state(TASK_PARKED);
+		__set_current_state(TASK_INTERRUPTIBLE);
 	}
 	clear_bit(KTHREAD_IS_PARKED, &self->flags);
 	__set_current_state(TASK_RUNNING);
@@ -256,13 +256,8 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(p, state)) {
-		WARN_ON(1);
-		return;
-	}
 	/* It's safe because the task is inactive. */
 	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
@@ -279,7 +274,12 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+	__kthread_bind(p, cpu);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -324,22 +324,6 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
 	return NULL;
 }
 
-static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
-{
-	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-	/*
-	 * We clear the IS_PARKED bit here as we don't wait
-	 * until the task has left the park code. So if we'd
-	 * park before that happens we'd see the IS_PARKED bit
-	 * which might be about to be cleared.
-	 */
-	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-			__kthread_bind(k, kthread->cpu, TASK_PARKED);
-		wake_up_state(k, TASK_PARKED);
-	}
-}
-
 /**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
@@ -352,8 +336,20 @@ void kthread_unpark(struct task_struct *k)
 {
 	struct kthread *kthread = task_get_live_kthread(k);
 
-	if (kthread)
-		__kthread_unpark(k, kthread);
+	if (kthread) {
+		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+		/*
+		 * We clear the IS_PARKED bit here as we don't wait
+		 * until the task has left the park code. So if we'd
+		 * park before that happens we'd see the IS_PARKED bit
+		 * which might be about to be cleared.
+		 */
+		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+				__kthread_bind(k, kthread->cpu);
+			wake_up_process(k);
+		}
+	}
 	put_task_struct(k);
 }
 
@@ -411,7 +407,7 @@ int kthread_stop(struct task_struct *k)
 	trace_sched_kthread_stop(k);
 	if (kthread) {
 		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-		__kthread_unpark(k, kthread);
+		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
 	}
diff --git a/kernel/lglock.c b/kernel/lglock.c
index 0bbf5d1..6535a66 100644
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -4,15 +4,6 @@
 #include 
 #include 
 
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define lg_lock_ptr		arch_spinlock_t
-# define lg_do_lock(l)		arch_spin_lock(l)
-# define lg_do_unlock(l)	arch_spin_unlock(l)
-#else
-# define lg_lock_ptr		struct rt_mutex
-# define lg_do_lock(l)		__rt_spin_lock(l)
-# define lg_do_unlock(l)	__rt_spin_unlock(l)
-#endif
 /*
  * Note there is no uninit, so lglocks cannot be defined in
  * modules (but it's fine to use them from there)
@@ -21,60 +12,51 @@
 
 void lg_lock_init(struct lglock *lg, char *name)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
-	int i;
-
-	for_each_possible_cpu(i) {
-		struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
-
-		rt_mutex_init(lock);
-	}
-#endif
 	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
 }
 EXPORT_SYMBOL(lg_lock_init);
 
 void lg_local_lock(struct lglock *lg)
 {
-	lg_lock_ptr *lock;
+	arch_spinlock_t *lock;
 
-	migrate_disable();
+	preempt_disable();
 	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
-	lg_do_lock(lock);
+	arch_spin_lock(lock);
 }
 EXPORT_SYMBOL(lg_local_lock);
 
 void lg_local_unlock(struct lglock *lg)
 {
-	lg_lock_ptr *lock;
+	arch_spinlock_t *lock;
 
 	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
-	lg_do_unlock(lock);
-	migrate_enable();
+	arch_spin_unlock(lock);
+	preempt_enable();
 }
 EXPORT_SYMBOL(lg_local_unlock);
 
 void lg_local_lock_cpu(struct lglock *lg, int cpu)
 {
-	lg_lock_ptr *lock;
+	arch_spinlock_t *lock;
 
-	preempt_disable_nort();
+	preempt_disable();
 	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
-	lg_do_lock(lock);
+	arch_spin_lock(lock);
 }
 EXPORT_SYMBOL(lg_local_lock_cpu);
 
 void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
-	lg_lock_ptr *lock;
+	arch_spinlock_t *lock;
 
 	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
-	lg_do_unlock(lock);
-	preempt_enable_nort();
+	arch_spin_unlock(lock);
+	preempt_enable();
 }
 EXPORT_SYMBOL(lg_local_unlock_cpu);
 
@@ -82,12 +64,12 @@ void lg_global_lock(struct lglock *lg)
 {
 	int i;
 
-	preempt_disable_nort();
+	preempt_disable();
 	rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
 	for_each_possible_cpu(i) {
-		lg_lock_ptr *lock;
+		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
-		lg_do_lock(lock);
+		arch_spin_lock(lock);
 	}
 }
 EXPORT_SYMBOL(lg_global_lock);
@@ -98,10 +80,10 @@ void lg_global_unlock(struct lglock *lg)
 
 	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	for_each_possible_cpu(i) {
-		lg_lock_ptr *lock;
+		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
-		lg_do_unlock(lock);
+		arch_spin_unlock(lock);
 	}
-	preempt_enable_nort();
+	preempt_enable();
 }
 EXPORT_SYMBOL(lg_global_unlock);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 7f03801..7981e5b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3534,7 +3534,6 @@ static void check_flags(unsigned long flags)
 		}
 	}
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 	/*
 	 * We don't accurately track softirq state in e.g.
 	 * hardirq contexts (such as on 4KSTACKS), so only
@@ -3549,7 +3548,6 @@ static void check_flags(unsigned long flags)
 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
 		}
 	}
-#endif
 
 	if (!debug_locks)
 		print_irqtrace_events(current);
diff --git a/kernel/panic.c b/kernel/panic.c
index 5dc4381..e1b2822 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -371,11 +371,9 @@ static u64 oops_id;
 
 static int init_oops_id(void)
 {
-#ifndef CONFIG_PREEMPT_RT_FULL
 	if (!oops_id)
 		get_random_bytes(&oops_id, sizeof(oops_id));
 	else
-#endif
 		oops_id++;
 
 	return 0;
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index bea15bd..c1c3dc1 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -181,7 +181,6 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 	int nr;
 	int rc;
 	struct task_struct *task, *me = current;
-	int init_pids = thread_group_leader(me) ? 1 : 2;
 
 	/* Don't allow any more processes into the pid namespace */
 	disable_pid_allocation(pid_ns);
@@ -231,7 +230,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 	 */
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (pid_ns->nr_hashed == init_pids)
+		if (pid_ns->nr_hashed == 1)
 			break;
 		schedule();
 	}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 06692e8..a278cad 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -661,7 +661,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	/*
 	 * Disarm any old timer after extracting its expiry time.
 	 */
-	BUG_ON_NONRT(!irqs_disabled());
+	BUG_ON(!irqs_disabled());
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
@@ -1177,7 +1177,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
-	BUG_ON_NONRT(!irqs_disabled());
+	BUG_ON(!irqs_disabled());
 	arm_timer(timer);
 	spin_unlock(&p->sighand->siglock);
 
@@ -1241,11 +1241,10 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	sig = tsk->signal;
 	if (sig->cputimer.running) {
 		struct task_cputime group_sample;
-		unsigned long flags;
 
-		raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+		raw_spin_lock(&sig->cputimer.lock);
 		group_sample = sig->cputimer.cputime;
-		raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
+		raw_spin_unlock(&sig->cputimer.lock);
 
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
@@ -1259,13 +1258,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
-static void __run_posix_cpu_timers(struct task_struct *tsk)
+void run_posix_cpu_timers(struct task_struct *tsk)
 {
 	LIST_HEAD(firing);
 	struct k_itimer *timer, *next;
 	unsigned long flags;
 
-	BUG_ON_NONRT(!irqs_disabled());
+	BUG_ON(!irqs_disabled());
 
 	/*
 	 * The fast path checks that there are no expired thread or thread
@@ -1323,190 +1322,6 @@ static void __run_posix_cpu_timers(struct task_struct *tsk)
 	}
 }
 
-#ifdef CONFIG_PREEMPT_RT_BASE
-#include 
-#include 
-DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
-DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
-
-static int posix_cpu_timers_thread(void *data)
-{
-	int cpu = (long)data;
-
-	BUG_ON(per_cpu(posix_timer_task,cpu) != current);
-
-	while (!kthread_should_stop()) {
-		struct task_struct *tsk = NULL;
-		struct task_struct *next = NULL;
-
-		if (cpu_is_offline(cpu))
-			goto wait_to_die;
-
-		/* grab task list */
-		raw_local_irq_disable();
-		tsk = per_cpu(posix_timer_tasklist, cpu);
-		per_cpu(posix_timer_tasklist, cpu) = NULL;
-		raw_local_irq_enable();
-
-		/* it's possible the list is empty, just return */
-		if (!tsk) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule();
-			__set_current_state(TASK_RUNNING);
-			continue;
-		}
-
-		/* Process task list */
-		while (1) {
-			/* save next */
-			next = tsk->posix_timer_list;
-
-			/* run the task timers, clear its ptr and
-			 * unreference it
-			 */
-			__run_posix_cpu_timers(tsk);
-			tsk->posix_timer_list = NULL;
-			put_task_struct(tsk);
-
-			/* check if this is the last on the list */
-			if (next == tsk)
-				break;
-			tsk = next;
-		}
-	}
-	return 0;
-
-wait_to_die:
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
-}
-
-static inline int __fastpath_timer_check(struct task_struct *tsk)
-{
-	/* tsk == current, ensure it is safe to use ->signal/sighand */
-	if (unlikely(tsk->exit_state))
-		return 0;
-
-	if (!task_cputime_zero(&tsk->cputime_expires))
-		return 1;
-
-	if (!task_cputime_zero(&tsk->signal->cputime_expires))
-		return 1;
-
-	return 0;
-}
-
-void run_posix_cpu_timers(struct task_struct *tsk)
-{
-	unsigned long cpu = smp_processor_id();
-	struct task_struct *tasklist;
-
-	BUG_ON(!irqs_disabled());
-	if(!per_cpu(posix_timer_task, cpu))
-		return;
-	/* get per-cpu references */
-	tasklist = per_cpu(posix_timer_tasklist, cpu);
-
-	/* check to see if we're already queued */
-	if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
-		get_task_struct(tsk);
-		if (tasklist) {
-			tsk->posix_timer_list = tasklist;
-		} else {
-			/*
-			 * The list is terminated by a self-pointing
-			 * task_struct
-			 */
-			tsk->posix_timer_list = tsk;
-		}
-		per_cpu(posix_timer_tasklist, cpu) = tsk;
-
-		wake_up_process(per_cpu(posix_timer_task, cpu));
-	}
-}
-
-/*
- * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
- * Here we can start up the necessary migration thread for the new CPU.
- */
-static int posix_cpu_thread_call(struct notifier_block *nfb,
-				 unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-	struct task_struct *p;
-	struct sched_param param;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-		p = kthread_create(posix_cpu_timers_thread, hcpu,
-					"posixcputmr/%d",cpu);
-		if (IS_ERR(p))
-			return NOTIFY_BAD;
-		p->flags |= PF_NOFREEZE;
-		kthread_bind(p, cpu);
-		/* Must be high prio to avoid getting starved */
-		param.sched_priority = MAX_RT_PRIO-1;
-		sched_setscheduler(p, SCHED_FIFO, &param);
-		per_cpu(posix_timer_task,cpu) = p;
-		break;
-	case CPU_ONLINE:
-		/* Strictly unnecessary, as first user will wake it. */
-		wake_up_process(per_cpu(posix_timer_task,cpu));
-		break;
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-		/* Unbind it from offline cpu so it can run. Fall thru. */
-		kthread_bind(per_cpu(posix_timer_task, cpu),
-			     cpumask_any(cpu_online_mask));
-		kthread_stop(per_cpu(posix_timer_task,cpu));
-		per_cpu(posix_timer_task,cpu) = NULL;
-		break;
-	case CPU_DEAD:
-		kthread_stop(per_cpu(posix_timer_task,cpu));
-		per_cpu(posix_timer_task,cpu) = NULL;
-		break;
-#endif
-	}
-	return NOTIFY_OK;
-}
-
-/* Register at highest priority so that task migration (migrate_all_tasks)
- * happens before everything else.
- */
-static struct notifier_block posix_cpu_thread_notifier = {
-	.notifier_call = posix_cpu_thread_call,
-	.priority = 10
-};
-
-static int __init posix_cpu_thread_init(void)
-{
-	void *hcpu = (void *)(long)smp_processor_id();
-	/* Start one for boot CPU. */
-	unsigned long cpu;
-
-	/* init the per-cpu posix_timer_tasklist */
-	for_each_possible_cpu(cpu)
-		per_cpu(posix_timer_tasklist, cpu) = NULL;
-
-	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
-	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
-	register_cpu_notifier(&posix_cpu_thread_notifier);
-	return 0;
-}
-early_initcall(posix_cpu_thread_init);
-#else /* CONFIG_PREEMPT_RT_BASE */
-void run_posix_cpu_timers(struct task_struct *tsk)
-{
-	__run_posix_cpu_timers(tsk);
-}
-#endif /* CONFIG_PREEMPT_RT_BASE */
-
 /*
  * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
@@ -1586,10 +1401,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 	while (!signal_pending(current)) {
 		if (timer.it.cpu.expires.sched == 0) {
 			/*
-			 * Our timer fired and was reset, below
-			 * deletion can not fail.
+			 * Our timer fired and was reset.
 			 */
-			posix_cpu_timer_del(&timer);
 			spin_unlock_irq(&timer.it_lock);
 			return 0;
 		}
@@ -1607,26 +1420,9 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		 * We were interrupted by a signal.
 		 */
 		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
-		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
-		if (!error) {
-			/*
-			 * Timer is now unarmed, deletion can not fail.
-			 */
-			posix_cpu_timer_del(&timer);
-		}
+		posix_cpu_timer_set(&timer, 0, &zero_it, it);
 		spin_unlock_irq(&timer.it_lock);
 
-		while (error == TIMER_RETRY) {
-			/*
-			 * We need to handle case when timer was or is in the
-			 * middle of firing. In other cases we already freed
-			 * resources.
-			 */
-			spin_lock_irq(&timer.it_lock);
-			error = posix_cpu_timer_del(&timer);
-			spin_unlock_irq(&timer.it_lock);
-		}
-
 		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
 			/*
			 * It actually did fire already.
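
[Illustrative note: the block removed above is the RT workaround that moved CPU-timer expiry out of hard interrupt context. run_posix_cpu_timers() only queued the task on the per-CPU posix_timer_tasklist -- a singly linked list whose last entry points to itself -- and woke a SCHED_FIFO kthread to do the real expiry. The following is a minimal userspace sketch of that hand-off pattern; the names and the pthread scaffolding are invented for this note and are not part of the kernel or of this patch.]

    /* Sketch of the "queue and wake a worker" pattern, userspace analogue. */
    #include <pthread.h>
    #include <stdio.h>

    struct work {
            struct work *next;      /* last entry points to itself */
            int payload;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static struct work *pending;    /* stands in for posix_timer_tasklist */
    static int stop;

    /* Producer side (the "IRQ" path): queue and wake, nothing more. */
    static void defer_work(struct work *w)
    {
            pthread_mutex_lock(&lock);
            if (!w->next) {                         /* not queued yet */
                    w->next = pending ? pending : w; /* self-terminate if first */
                    pending = w;
                    pthread_cond_signal(&cond);
            }
            pthread_mutex_unlock(&lock);
    }

    /* Worker side (the "posixcputmr" kthread): drain out of line. */
    static void *worker(void *unused)
    {
            for (;;) {
                    struct work *w, *next;

                    pthread_mutex_lock(&lock);
                    while (!pending && !stop)
                            pthread_cond_wait(&cond, &lock);
                    w = pending;
                    pending = NULL;
                    pthread_mutex_unlock(&lock);

                    if (!w)
                            return NULL;
                    for (;;) {
                            next = w->next;
                            w->next = NULL;         /* allow requeueing */
                            printf("expired item %d\n", w->payload);
                            if (next == w)          /* self-pointer ends list */
                                    break;
                            w = next;
                    }
            }
    }

    int main(void)
    {
            struct work a = { .payload = 1 }, b = { .payload = 2 };
            pthread_t t;

            pthread_create(&t, NULL, worker, NULL);
            defer_work(&a);
            defer_work(&b);

            pthread_mutex_lock(&lock);
            stop = 1;
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            return 0;
    }

The self-pointing terminator lets the producer distinguish "queued as last element" from "not queued" without a separate count, which is all the interrupt-side path has time to maintain.
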
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 1d5e435..69185ae 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -439,7 +439,6 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 static struct pid *good_sigevent(sigevent_t * event)
 {
 	struct task_struct *rtn = current->group_leader;
-	int sig = event->sigev_signo;
 
 	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
 		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
@@ -448,8 +447,7 @@ static struct pid *good_sigevent(sigevent_t * event)
 		return NULL;
 
 	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-	    (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
-	     sig_kernel_coredump(sig)))
+	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
 		return NULL;
 
 	return task_pid(rtn);
@@ -641,13 +639,6 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
 
-	/*
-	 * timer_t could be any type >= int and we want to make sure any
-	 * @timer_id outside positive int range fails lookup.
-	 */
-	if ((unsigned long long)timer_id > INT_MAX)
-		return NULL;
-
 	rcu_read_lock();
 	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
@@ -773,20 +764,6 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 	return overrun;
 }
 
-/*
- * Protected by RCU!
- */
-static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
-	if (kc->timer_set == common_timer_set)
-		hrtimer_wait_for_timer(&timr->it.real.timer);
-	else
-		/* FIXME: Whacky hack for posix-cpu-timers */
-		schedule_timeout(1);
-#endif
-}
-
 /* Set a POSIX.1b interval timer. */
 /* timr->it_lock is taken. */
 static int
@@ -864,7 +841,6 @@ retry:
 	if (!timr)
 		return -EINVAL;
 
-	rcu_read_lock();
 	kc = clockid_to_kclock(timr->it_clock);
 	if (WARN_ON_ONCE(!kc || !kc->timer_set))
 		error = -EINVAL;
@@ -873,12 +849,9 @@ retry:
 
 	unlock_timer(timr, flag);
 	if (error == TIMER_RETRY) {
-		timer_wait_for_callback(kc, timr);
 		rtn = NULL;	// We already got the old time...
-		rcu_read_unlock();
 		goto retry;
 	}
-	rcu_read_unlock();
 
 	if (old_setting && !error &&
 	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -916,15 +889,10 @@ retry_delete:
 	if (!timer)
 		return -EINVAL;
 
-	rcu_read_lock();
 	if (timer_delete_hook(timer) == TIMER_RETRY) {
 		unlock_timer(timer, flags);
-		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-					timer);
-		rcu_read_unlock();
 		goto retry_delete;
 	}
-	rcu_read_unlock();
 
 	spin_lock(&current->sighand->siglock);
 	list_del(&timer->list);
@@ -950,18 +918,8 @@ static void itimer_delete(struct k_itimer *timer)
 retry_delete:
 	spin_lock_irqsave(&timer->it_lock, flags);
 
-	/* On RT we can race with a deletion */
-	if (!timer->it_signal) {
-		unlock_timer(timer, flags);
-		return;
-	}
-
 	if (timer_delete_hook(timer) == TIMER_RETRY) {
-		rcu_read_lock();
 		unlock_timer(timer, flags);
-		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-					timer);
-		rcu_read_unlock();
 		goto retry_delete;
 	}
 	list_del(&timer->list);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 3321e2b..b26f5f1 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -275,8 +275,6 @@ static int create_image(int platform_mode)
 
 	local_irq_disable();
 
-	system_state = SYSTEM_SUSPEND;
-
 	error = syscore_suspend();
 	if (error) {
 		printk(KERN_ERR "PM: Some system devices failed to power down, "
@@ -304,7 +302,6 @@ static int create_image(int platform_mode)
 	syscore_resume();
 
  Enable_irqs:
-	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 
  Enable_cpus:
@@ -430,7 +427,6 @@ static int resume_target_kernel(bool platform_mode)
 		goto Enable_cpus;
 
 	local_irq_disable();
-	system_state = SYSTEM_SUSPEND;
 
 	error = syscore_suspend();
 	if (error)
@@ -464,7 +460,6 @@ static int resume_target_kernel(bool platform_mode)
 	syscore_resume();
 
  Enable_irqs:
-	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 
  Enable_cpus:
@@ -547,7 +542,6 @@ int hibernation_platform_enter(void)
 		goto Platform_finish;
 
 	local_irq_disable();
-	system_state = SYSTEM_SUSPEND;
 	syscore_suspend();
 	if (pm_wakeup_pending()) {
 		error = -EAGAIN;
@@ -560,7 +554,6 @@ int hibernation_platform_enter(void)
 
  Power_up:
 	syscore_resume();
-	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 	enable_nonboot_cpus();
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index ff2dade..c8b7446 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -165,8 +165,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
 
-	system_state = SYSTEM_SUSPEND;
-
 	error = syscore_suspend();
 	if (!error) {
 		*wakeup = pm_wakeup_pending();
@@ -177,8 +175,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 		syscore_resume();
 	}
 
-	system_state = SYSTEM_RUNNING;
-
 	arch_suspend_enable_irqs();
 	BUG_ON(irqs_disabled());
 
diff --git a/kernel/printk.c b/kernel/printk.c
index 6d52c34..267ce78 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -48,6 +48,13 @@
 #define CREATE_TRACE_POINTS
 #include 
 
+/*
+ * Architectures can override it:
+ */
+void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
+{
+}
+
 /* printk's without a loglevel use this.. */
 #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
 
@@ -749,62 +756,6 @@ module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to"
 	"print all kernel messages to the console.");
 
-#ifdef CONFIG_EARLY_PRINTK
-struct console *early_console;
-
-void early_vprintk(const char *fmt, va_list ap)
-{
-	if (early_console) {
-		char buf[512];
-		int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-
-		early_console->write(early_console, buf, n);
-	}
-}
-
-asmlinkage void early_printk(const char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	early_vprintk(fmt, ap);
-	va_end(ap);
-}
-
-/*
- * This is independent of any log levels - a global
- * kill switch that turns off all of printk.
- *
- * Used by the NMI watchdog if early-printk is enabled.
- */
-static bool __read_mostly printk_killswitch;
-
-static int __init force_early_printk_setup(char *str)
-{
-	printk_killswitch = true;
-	return 0;
-}
-early_param("force_early_printk", force_early_printk_setup);
-
-void printk_kill(void)
-{
-	printk_killswitch = true;
-}
-
-static int forced_early_printk(const char *fmt, va_list ap)
-{
-	if (!printk_killswitch)
-		return 0;
-	early_vprintk(fmt, ap);
-	return 1;
-}
-#else
-static inline int forced_early_printk(const char *fmt, va_list ap)
-{
-	return 0;
-}
-#endif
-
 #ifdef CONFIG_BOOT_PRINTK_DELAY
 
 static int boot_delay; /* msecs delay after each printk during bootup */
@@ -1072,7 +1023,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 {
 	char *text;
 	int len = 0;
-	int attempts = 0;
 
 	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
 	if (!text)
@@ -1084,14 +1034,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		u64 seq;
 		u32 idx;
 		enum log_flags prev;
-		int num_msg;
-try_again:
-		attempts++;
-		if (attempts > 10) {
-			len = -EBUSY;
-			goto out;
-		}
-		num_msg = 0;
+
 		if (clear_seq < log_first_seq) {
 			/* messages are gone, move to first available one */
 			clear_seq = log_first_seq;
@@ -1112,14 +1055,6 @@ try_again:
 			prev = msg->flags;
 			idx = log_next(idx);
 			seq++;
-			num_msg++;
-			if (num_msg > 5) {
-				num_msg = 0;
-				raw_spin_unlock_irq(&logbuf_lock);
-				raw_spin_lock_irq(&logbuf_lock);
-				if (clear_seq < log_first_seq)
-					goto try_again;
-			}
 		}
 
 		/* move first record forward until length fits into the buffer */
@@ -1133,14 +1068,6 @@ try_again:
 			prev = msg->flags;
 			idx = log_next(idx);
 			seq++;
-			num_msg++;
-			if (num_msg > 5) {
-				num_msg = 0;
-				raw_spin_unlock_irq(&logbuf_lock);
-				raw_spin_lock_irq(&logbuf_lock);
-				if (clear_seq < log_first_seq)
-					goto try_again;
-			}
 		}
 
 		/* last message fitting into this dump */
@@ -1182,7 +1109,6 @@ try_again:
 		clear_seq = log_next_seq;
 		clear_idx = log_next_idx;
 	}
-out:
 	raw_spin_unlock_irq(&logbuf_lock);
 
 	kfree(text);
@@ -1340,7 +1266,6 @@ static void call_console_drivers(int level, const char *text, size_t len)
 	if (!console_drivers)
 		return;
 
-	migrate_disable();
 	for_each_console(con) {
 		if (exclusive_console && con != exclusive_console)
 			continue;
@@ -1353,7 +1278,6 @@ static void call_console_drivers(int level, const char *text, size_t len)
 			continue;
 		con->write(con, text, len);
 	}
-	migrate_enable();
 }
 
 /*
@@ -1413,18 +1337,12 @@ static inline int can_use_console(unsigned int cpu)
 * interrupts disabled. It should return with 'logbuf_lock'
 * released but interrupts still disabled.
 */
-static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+static int console_trylock_for_printk(unsigned int cpu)
 	__releases(&logbuf_lock)
 {
 	int retval = 0, wake = 0;
-#ifdef CONFIG_PREEMPT_RT_FULL
-	int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
-		(preempt_count() <= 1);
-#else
-	int lock = 1;
-#endif
 
-	if (lock && console_trylock()) {
+	if (console_trylock()) {
 		retval = 1;
 
 		/*
@@ -1440,9 +1358,9 @@ static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
 		}
 	}
 	logbuf_cpu = UINT_MAX;
-	raw_spin_unlock(&logbuf_lock);
 	if (wake)
 		up(&console_sem);
+	raw_spin_unlock(&logbuf_lock);
 	return retval;
 }
 
@@ -1577,13 +1495,6 @@ asmlinkage int vprintk_emit(int facility, int level,
 	int this_cpu;
 	int printed_len = 0;
 
-	/*
-	 * Fall back to early_printk if a debugging subsystem has
-	 * killed printk output
-	 */
-	if (unlikely(forced_early_printk(fmt, args)))
-		return 1;
-
 	boot_delay_msec(level);
 	printk_delay();
 
@@ -1703,15 +1614,8 @@ asmlinkage int vprintk_emit(int facility, int level,
 	 * The console_trylock_for_printk() function will release 'logbuf_lock'
 	 * regardless of whether it actually gets the console semaphore or not.
 	 */
-	if (console_trylock_for_printk(this_cpu, flags)) {
-#ifndef CONFIG_PREEMPT_RT_FULL
-		console_unlock();
-#else
-		raw_local_irq_restore(flags);
+	if (console_trylock_for_printk(this_cpu))
 		console_unlock();
-		raw_local_irq_save(flags);
-#endif
-	}
 
 	lockdep_on();
 out_restore_irqs:
@@ -2070,8 +1974,8 @@ void printk_tick(void)
 
 int printk_needs_cpu(int cpu)
 {
-	if (unlikely(cpu_is_offline(cpu)))
-		__this_cpu_write(printk_pending, 0);
+	if (cpu_is_offline(cpu))
+		printk_tick();
 	return __this_cpu_read(printk_pending);
 }
 
@@ -2100,16 +2004,11 @@ static void console_cont_flush(char *text, size_t size)
 		goto out;
 
 	len = cont_print_text(text, size);
-#ifndef CONFIG_PREEMPT_RT_FULL
 	raw_spin_unlock(&logbuf_lock);
 	stop_critical_timings();
 	call_console_drivers(cont.level, text, len);
 	start_critical_timings();
 	local_irq_restore(flags);
-#else
-	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-	call_console_drivers(cont.level, text, len);
-#endif
 	return;
 out:
 	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
@@ -2192,17 +2091,12 @@ skip:
 		console_idx = log_next(console_idx);
 		console_seq++;
 		console_prev = msg->flags;
-
-#ifndef CONFIG_PREEMPT_RT_FULL
 		raw_spin_unlock(&logbuf_lock);
+
 		stop_critical_timings();	/* don't trace print latency */
 		call_console_drivers(level, text, len);
 		start_critical_timings();
 		local_irq_restore(flags);
-#else
-		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-		call_console_drivers(level, text, len);
-#endif
 	}
 	console_locked = 0;
 
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 8906a79..a2cf761 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -156,7 +156,6 @@ int debug_lockdep_rcu_enabled(void)
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
@@ -183,7 +182,6 @@ int rcu_read_lock_bh_held(void)
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
-#endif
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9bc2fe6..e7dce58 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -371,7 +371,6 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
@@ -381,4 +380,3 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
-#endif
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 14cae12..f85016a 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -26,7 +26,6 @@
 #include 
 #include 
 #include 
-#include 
 
 /* Global control variables for rcupdate callback mechanism. */
 struct rcu_ctrlblk {
@@ -261,7 +260,7 @@ static void show_tiny_preempt_stats(struct seq_file *m)
 
 /* Controls for rcu_kthread() kthread. */
 static struct task_struct *rcu_kthread_task;
-static DEFINE_SWAIT_HEAD(rcu_kthread_wq);
+static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
 static unsigned long have_rcu_kthread_work;
 
 /*
@@ -561,7 +560,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		rcu_preempt_cpu_qs();
 
 	/* Hardware IRQ handlers cannot block. */
-	if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
+	if (in_irq() || in_serving_softirq()) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -714,7 +713,7 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static DEFINE_SWAIT_HEAD(sync_rcu_preempt_exp_wq);
+static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
 static unsigned long sync_rcu_preempt_exp_count;
 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
@@ -736,7 +735,7 @@ static int rcu_preempted_readers_exp(void)
 */
 static void rcu_report_exp_done(void)
 {
-	swait_wake(&sync_rcu_preempt_exp_wq);
+	wake_up(&sync_rcu_preempt_exp_wq);
 }
 
 /*
@@ -788,8 +787,8 @@ void synchronize_rcu_expedited(void)
 	} else {
 		rcu_initiate_boost();
 		local_irq_restore(flags);
-		swait_event(sync_rcu_preempt_exp_wq,
-			    !rcu_preempted_readers_exp());
+		wait_event(sync_rcu_preempt_exp_wq,
+			   !rcu_preempted_readers_exp());
 	}
 
 	/* Clean up and exit. */
@@ -859,7 +858,7 @@ static void invoke_rcu_callbacks(void)
 {
 	have_rcu_kthread_work = 1;
 	if (rcu_kthread_task != NULL)
-		swait_wake(&rcu_kthread_wq);
+		wake_up(&rcu_kthread_wq);
 }
 
 #ifdef CONFIG_RCU_TRACE
@@ -889,8 +888,8 @@ static int rcu_kthread(void *arg)
 	unsigned long flags;
 
 	for (;;) {
-		swait_event_interruptible(rcu_kthread_wq,
-					  have_rcu_kthread_work != 0);
+		wait_event_interruptible(rcu_kthread_wq,
+					 have_rcu_kthread_work != 0);
 		morework = rcu_boost();
 		local_irq_save(flags);
 		work = have_rcu_kthread_work;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7ec834d..e441b77 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -181,14 +181,6 @@ void rcu_sched_qs(int cpu)
 	rdp->passed_quiesce = 1;
 }
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void rcu_preempt_qs(int cpu);
-
-void rcu_bh_qs(int cpu)
-{
-	rcu_preempt_qs(cpu);
-}
-#else
 void rcu_bh_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
@@ -197,7 +189,6 @@ void rcu_bh_qs(int cpu)
 	trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
 	rdp->passed_quiesce = 1;
 }
-#endif
 
 /*
 * Note a context switch.  This is a quiescent state for RCU-sched,
@@ -251,7 +242,6 @@ long rcu_batches_completed_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /*
 * Return the number of RCU BH batches processed thus far for debug & stats.
 */
@@ -269,7 +259,6 @@ void rcu_bh_force_quiescent_state(void)
 	force_quiescent_state(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
-#endif
 
 /*
 * Record the number of times rcutorture tests have been initiated and
@@ -1319,7 +1308,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 
 		/* Handle grace-period start. */
 		for (;;) {
-			swait_event_interruptible(rsp->gp_wq,
+			wait_event_interruptible(rsp->gp_wq,
 						 rsp->gp_flags & RCU_GP_FLAG_INIT);
 			if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
@@ -1338,7 +1327,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 		}
 		for (;;) {
 			rsp->jiffies_force_qs = jiffies + j;
-			ret = swait_event_interruptible_timeout(rsp->gp_wq,
+			ret = wait_event_interruptible_timeout(rsp->gp_wq,
 					(rsp->gp_flags & RCU_GP_FLAG_FQS) ||
 					(!ACCESS_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
@@ -1423,7 +1412,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	local_irq_restore(flags);
 
 	/* Wake up rcu_gp_kthread() to start the grace period. */
-	swait_wake(&rsp->gp_wq);
+	wake_up(&rsp->gp_wq);
 }
 
 /*
@@ -1438,7 +1427,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
-	swait_wake(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
+	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
 }
 
 /*
@@ -2003,8 +1992,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	}
 	rsp->gp_flags |= RCU_GP_FLAG_FQS;
 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
-	/* Memory barrier implied by wake_up() path. */
-	swait_wake(&rsp->gp_wq);
+	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
 }
 
 /*
@@ -2195,7 +2183,6 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /*
 * Queue an RCU callback for invocation after a quicker grace period.
 */
@@ -2204,7 +2191,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
-#endif
 
 /*
 * Because a context switch is a grace period for RCU-sched and RCU-bh,
@@ -2282,7 +2268,6 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
@@ -2309,7 +2294,6 @@ void synchronize_rcu_bh(void)
 	wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-#endif
 
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
@@ -2698,7 +2682,6 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	mutex_unlock(&rsp->barrier_mutex);
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
@@ -2707,7 +2690,6 @@ void rcu_barrier_bh(void)
 	_rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-#endif
 
 /**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
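
[Illustrative note: the swait_* primitives replaced in the hunks above are the RT patch set's "simple wait queue" API; the revert returns to the stock waitqueue calls. Both follow the same contract: the sleeper re-evaluates its condition on every wake-up, so a wake-up racing with the check cannot be lost. Below is a minimal kernel-style sketch of that contract, an assumed fragment for a kthread context; the demo_* names are invented here and nothing in it comes from this patch.]

    #include <linux/wait.h>
    #include <linux/kthread.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static unsigned long demo_work;

    static int demo_thread(void *unused)
    {
            while (!kthread_should_stop()) {
                    /* Sleeps until demo_work != 0; the condition is
                     * re-checked every time the task is woken. */
                    wait_event_interruptible(demo_wq, demo_work != 0);
                    demo_work = 0;
                    /* ... process the deferred work here ... */
            }
            return 0;
    }

    static void demo_kick(void)
    {
            demo_work = 1;
            wake_up(&demo_wq);      /* pairs with the re-check above */
    }
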
@@ -3000,7 +2982,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 	}
 
 	rsp->rda = rda;
-	init_swait_head(&rsp->gp_wq);
+	init_waitqueue_head(&rsp->gp_wq);
 	rnp = rsp->level[rcu_num_lvls - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 5cfdff9..4b69291 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -397,7 +397,7 @@ struct rcu_state {
 	unsigned long gpnum;			/* Current gp number. */
 	unsigned long completed;		/* # of last completed gp. */
 	struct task_struct *gp_kthread;		/* Task for grace periods. */
-	struct swait_head gp_wq;		/* Where GP task waits. */
+	wait_queue_head_t gp_wq;		/* Where GP task waits. */
 	int gp_flags;				/* Commands for GP task. */
 
 	/* End of fields guarded by root rcu_node's lock. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 778f138..c1cc7e1 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -351,7 +351,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Hardware IRQ handlers cannot block. */
-	if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
+	if (in_irq() || in_serving_softirq()) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -1519,7 +1519,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*
 * Check to see if any future RCU-related work will need to be done
@@ -1535,9 +1535,6 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 	*delta_jiffies = ULONG_MAX;
 	return rcu_cpu_has_callbacks(cpu);
 }
-#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
-
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
@@ -1654,7 +1651,6 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
 	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
@@ -1698,7 +1694,6 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 	}
 	return 0;
 }
-#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
 
 /*
 * Handler for smp_call_function_single().  The only point of this
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 93f8e8f..0d095dc 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -97,7 +97,7 @@ static const struct file_operations rcubarrier_fops = {
 	.open = rcubarrier_open,
 	.read = seq_read,
 	.llseek = no_llseek,
-	.release = single_release,
+	.release = seq_release,
 };
 
 #ifdef CONFIG_RCU_BOOST
@@ -208,7 +208,7 @@ static const struct file_operations rcuexp_fops = {
 	.open = rcuexp_open,
 	.read = seq_read,
 	.llseek = no_llseek,
-	.release = single_release,
+	.release = seq_release,
 };
 
 #ifdef CONFIG_RCU_BOOST
@@ -308,7 +308,7 @@ static const struct file_operations rcuhier_fops = {
 	.open = rcuhier_open,
 	.read = seq_read,
 	.llseek = no_llseek,
-	.release = single_release,
+	.release = seq_release,
 };
 
 static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
@@ -350,7 +350,7 @@ static const struct file_operations rcugp_fops = {
 	.open = rcugp_open,
 	.read = seq_read,
 	.llseek = no_llseek,
-	.release = single_release,
+	.release = seq_release,
 };
 
 static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
diff --git a/kernel/relay.c b/kernel/relay.c
index 56ba44f..e8cd202 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -340,10 +340,6 @@ static void wakeup_readers(unsigned long data)
 {
 	struct rchan_buf *buf = (struct rchan_buf *)data;
 	wake_up_interruptible(&buf->read_wait);
-	/*
-	 * Stupid polling for now:
-	 */
-	mod_timer(&buf->timer, jiffies + 1);
 }
 
 /**
@@ -361,7 +357,6 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
 		init_waitqueue_head(&buf->read_wait);
 		kref_init(&buf->kref);
 		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
-		mod_timer(&buf->timer, jiffies + 1);
 	} else
 		del_timer_sync(&buf->timer);
 
@@ -744,6 +739,15 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
 		else
 			buf->early_bytes += buf->chan->subbuf_size -
 					    buf->padding[old_subbuf];
+		smp_mb();
+		if (waitqueue_active(&buf->read_wait))
+			/*
+			 * Calling wake_up_interruptible() from here
+			 * will deadlock if we happen to be logging
+			 * from the scheduler (trying to re-grab
+			 * rq->lock), so defer it.
+			 */
+			mod_timer(&buf->timer, jiffies + 1);
 	}
 
 	old = buf->data;
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index cfecbee..ff55247 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -49,7 +49,7 @@ static int __res_counter_charge(struct res_counter *counter, unsigned long val,
 	r = ret = 0;
 	*limit_fail_at = NULL;
-	local_irq_save_nort(flags);
+	local_irq_save(flags);
 	for (c = counter; c != NULL; c = c->parent) {
 		spin_lock(&c->lock);
 		r = res_counter_charge_locked(c, val, force);
@@ -69,7 +69,7 @@ static int __res_counter_charge(struct res_counter *counter, unsigned long val,
 			spin_unlock(&u->lock);
 		}
 	}
-	local_irq_restore_nort(flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -103,7 +103,7 @@ u64 res_counter_uncharge_until(struct res_counter *counter,
 	struct res_counter *c;
 	u64 ret = 0;
 
-	local_irq_save_nort(flags);
+	local_irq_save(flags);
 	for (c = counter; c != top; c = c->parent) {
 		u64 r;
 		spin_lock(&c->lock);
@@ -112,7 +112,7 @@ u64 res_counter_uncharge_until(struct res_counter *counter,
 			ret = r;
 		spin_unlock(&c->lock);
 	}
-	local_irq_restore_nort(flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
diff --git a/kernel/rt.c b/kernel/rt.c
deleted file mode 100644
index 433ae42..0000000
--- a/kernel/rt.c
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * kernel/rt.c
- *
- * Real-Time Preemption Support
- *
- * started by Ingo Molnar:
- *
- *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar
- *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner
- *
- * historic credit for proving that Linux spinlocks can be implemented via
- * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
- * and others) who prototyped it on 2.4 and did lots of comparative
- * research and analysis; TimeSys, for proving that you can implement a
- * fully preemptible kernel via the use of IRQ threading and mutexes;
- * Bill Huey for persuasively arguing on lkml that the mutex model is the
- * right one; and to MontaVista, who ported pmutexes to 2.6.
- *
- * This code is a from-scratch implementation and is not based on pmutexes,
- * but the idea of converting spinlocks to mutexes is used here too.
- *
- * lock debugging, locking tree, deadlock detection:
- *
- *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
- *  Released under the General Public License (GPL).
- *
- * Includes portions of the generic R/W semaphore implementation from:
- *
- *  Copyright (c) 2001   David Howells (dhowells@redhat.com).
- *  - Derived partially from idea by Andrea Arcangeli
- *  - Derived also from comments by Linus
- *
- * Pending ownership of locks and ownership stealing:
- *
- *  Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
- *
- *   (also by Steven Rostedt)
- *    - Converted single pi_lock to individual task locks.
- *
- * By Esben Nielsen:
- *    Doing priority inheritance with help of the scheduler.
- *
- *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner
- *  - major rework based on Esben Nielsen's initial patch
- *  - replaced thread_info references by task_struct refs
- *  - removed task->pending_owner dependency
- *  - BKL drop/reacquire for semaphore style locks to avoid deadlocks
- *    in the scheduler return path as discussed with Steven Rostedt
- *
- *  Copyright (C) 2006, Kihon Technologies Inc.
- *    Steven Rostedt
- *  - debugged and patched Thomas Gleixner's rework.
- *  - added back the cmpxchg to the rework.
- *  - turned atomic require back on for SMP.
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "rtmutex_common.h" - -/* - * struct mutex functions - */ -void __mutex_do_init(struct mutex *mutex, const char *name, - struct lock_class_key *key) -{ -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* - * Make sure we are not reinitializing a held lock: - */ - debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); - lockdep_init_map(&mutex->dep_map, name, key, 0); -#endif - mutex->lock.save_state = 0; -} -EXPORT_SYMBOL(__mutex_do_init); - -void __lockfunc _mutex_lock(struct mutex *lock) -{ - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - rt_mutex_lock(&lock->lock); -} -EXPORT_SYMBOL(_mutex_lock); - -int __lockfunc _mutex_lock_interruptible(struct mutex *lock) -{ - int ret; - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_lock_interruptible(&lock->lock, 0); - if (ret) - mutex_release(&lock->dep_map, 1, _RET_IP_); - return ret; -} -EXPORT_SYMBOL(_mutex_lock_interruptible); - -int __lockfunc _mutex_lock_killable(struct mutex *lock) -{ - int ret; - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_lock_killable(&lock->lock, 0); - if (ret) - mutex_release(&lock->dep_map, 1, _RET_IP_); - return ret; -} -EXPORT_SYMBOL(_mutex_lock_killable); - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) -{ - mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); - rt_mutex_lock(&lock->lock); -} -EXPORT_SYMBOL(_mutex_lock_nested); - -void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) -{ - mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); - rt_mutex_lock(&lock->lock); -} -EXPORT_SYMBOL(_mutex_lock_nest_lock); - -int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) -{ - int ret; - - mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); - ret = rt_mutex_lock_interruptible(&lock->lock, 0); - if (ret) - mutex_release(&lock->dep_map, 1, _RET_IP_); - return ret; -} -EXPORT_SYMBOL(_mutex_lock_interruptible_nested); - -int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) -{ - int ret; - - mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - ret = rt_mutex_lock_killable(&lock->lock, 0); - if (ret) - mutex_release(&lock->dep_map, 1, _RET_IP_); - return ret; -} -EXPORT_SYMBOL(_mutex_lock_killable_nested); -#endif - -int __lockfunc _mutex_trylock(struct mutex *lock) -{ - int ret = rt_mutex_trylock(&lock->lock); - - if (ret) - mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); - - return ret; -} -EXPORT_SYMBOL(_mutex_trylock); - -void __lockfunc _mutex_unlock(struct mutex *lock) -{ - mutex_release(&lock->dep_map, 1, _RET_IP_); - rt_mutex_unlock(&lock->lock); -} -EXPORT_SYMBOL(_mutex_unlock); - -/* - * rwlock_t functions - */ -int __lockfunc rt_write_trylock(rwlock_t *rwlock) -{ - int ret = rt_mutex_trylock(&rwlock->lock); - - migrate_disable(); - if (ret) - rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); - else - migrate_enable(); - - return ret; -} -EXPORT_SYMBOL(rt_write_trylock); - -int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) -{ - int ret; - - *flags = 0; - migrate_disable(); - ret = rt_write_trylock(rwlock); - if (!ret) - migrate_enable(); - return ret; -} -EXPORT_SYMBOL(rt_write_trylock_irqsave); - -int __lockfunc rt_read_trylock(rwlock_t *rwlock) -{ - struct rt_mutex *lock = &rwlock->lock; - int ret = 1; - - /* - * recursive read locks succeed when 
current owns the lock, - * but not when read_depth == 0 which means that the lock is - * write locked. - */ - migrate_disable(); - if (rt_mutex_owner(lock) != current) { - ret = rt_mutex_trylock(lock); - if (ret) - rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); - } else if (!rwlock->read_depth) { - ret = 0; - } - - if (ret) - rwlock->read_depth++; - else - migrate_enable(); - - return ret; -} -EXPORT_SYMBOL(rt_read_trylock); - -void __lockfunc rt_write_lock(rwlock_t *rwlock) -{ - rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); - __rt_spin_lock(&rwlock->lock); -} -EXPORT_SYMBOL(rt_write_lock); - -void __lockfunc rt_read_lock(rwlock_t *rwlock) -{ - struct rt_mutex *lock = &rwlock->lock; - - /* - * recursive read locks succeed when current owns the lock - */ - if (rt_mutex_owner(lock) != current) { - rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); - __rt_spin_lock(lock); - } - rwlock->read_depth++; -} - -EXPORT_SYMBOL(rt_read_lock); - -void __lockfunc rt_write_unlock(rwlock_t *rwlock) -{ - /* NOTE: we always pass in '1' for nested, for simplicity */ - rwlock_release(&rwlock->dep_map, 1, _RET_IP_); - __rt_spin_unlock(&rwlock->lock); -} -EXPORT_SYMBOL(rt_write_unlock); - -void __lockfunc rt_read_unlock(rwlock_t *rwlock) -{ - /* Release the lock only when read_depth is down to 0 */ - if (--rwlock->read_depth == 0) { - rwlock_release(&rwlock->dep_map, 1, _RET_IP_); - __rt_spin_unlock(&rwlock->lock); - } -} -EXPORT_SYMBOL(rt_read_unlock); - -unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) -{ - rt_write_lock(rwlock); - - return 0; -} -EXPORT_SYMBOL(rt_write_lock_irqsave); - -unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) -{ - rt_read_lock(rwlock); - - return 0; -} -EXPORT_SYMBOL(rt_read_lock_irqsave); - -void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) -{ -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* - * Make sure we are not reinitializing a held lock: - */ - debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); - lockdep_init_map(&rwlock->dep_map, name, key, 0); -#endif - rwlock->lock.save_state = 1; - rwlock->read_depth = 0; -} -EXPORT_SYMBOL(__rt_rwlock_init); - -/* - * rw_semaphores - */ - -void rt_up_write(struct rw_semaphore *rwsem) -{ - rwsem_release(&rwsem->dep_map, 1, _RET_IP_); - rt_mutex_unlock(&rwsem->lock); -} -EXPORT_SYMBOL(rt_up_write); - -void rt_up_read(struct rw_semaphore *rwsem) -{ - if (--rwsem->read_depth == 0) { - rwsem_release(&rwsem->dep_map, 1, _RET_IP_); - rt_mutex_unlock(&rwsem->lock); - } -} -EXPORT_SYMBOL(rt_up_read); - -/* - * downgrade a write lock into a read lock - * - just wake up any readers at the front of the queue - */ -void rt_downgrade_write(struct rw_semaphore *rwsem) -{ - BUG_ON(rt_mutex_owner(&rwsem->lock) != current); - rwsem->read_depth = 1; -} -EXPORT_SYMBOL(rt_downgrade_write); - -int rt_down_write_trylock(struct rw_semaphore *rwsem) -{ - int ret = rt_mutex_trylock(&rwsem->lock); - - if (ret) - rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); - return ret; -} -EXPORT_SYMBOL(rt_down_write_trylock); - -void rt_down_write(struct rw_semaphore *rwsem) -{ - rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); - rt_mutex_lock(&rwsem->lock); -} -EXPORT_SYMBOL(rt_down_write); - -void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) -{ - rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); - rt_mutex_lock(&rwsem->lock); -} -EXPORT_SYMBOL(rt_down_write_nested); - -void rt_down_write_nested_lock(struct rw_semaphore *rwsem, - struct lockdep_map *nest) -{ - 
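
[Aside on read_depth, used by the rwlock and rwsem code above: recursion is permitted only for the owning task, tracked with a plain counter. A minimal single-process sketch of the scheme; the unlocked owner check is illustrative only, the kernel version makes it safe via rt_mutex_owner(), and all *_sketch names are hypothetical:]

    #include <pthread.h>
    #include <stdbool.h>

    struct rt_rwlock_sketch {
            pthread_mutex_t lock;
            pthread_t       owner;
            bool            owned;
            int             read_depth;
    };

    static void sketch_read_lock(struct rt_rwlock_sketch *rw)
    {
            /* Only the current owner may recurse; everyone else queues. */
            if (!(rw->owned && pthread_equal(rw->owner, pthread_self()))) {
                    pthread_mutex_lock(&rw->lock);
                    rw->owner = pthread_self();
                    rw->owned = true;
            }
            rw->read_depth++;                 /* recursion by the owner is free */
    }

    static void sketch_read_unlock(struct rt_rwlock_sketch *rw)
    {
            if (--rw->read_depth == 0) {      /* drop the lock at depth zero */
                    rw->owned = false;
                    pthread_mutex_unlock(&rw->lock);
            }
    }
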
rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); - rt_mutex_lock(&rwsem->lock); -} - -int rt_down_read_trylock(struct rw_semaphore *rwsem) -{ - struct rt_mutex *lock = &rwsem->lock; - int ret = 1; - - /* - * recursive read locks succeed when current owns the rwsem, - * but not when read_depth == 0 which means that the rwsem is - * write locked. - */ - if (rt_mutex_owner(lock) != current) { - ret = rt_mutex_trylock(&rwsem->lock); - if (ret) - rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); - } else if (!rwsem->read_depth) { - ret = 0; - } - - if (ret) - rwsem->read_depth++; - return ret; -} -EXPORT_SYMBOL(rt_down_read_trylock); - -static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) -{ - struct rt_mutex *lock = &rwsem->lock; - - if (rt_mutex_owner(lock) != current) { - rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); - rt_mutex_lock(&rwsem->lock); - } - rwsem->read_depth++; -} - -void rt_down_read(struct rw_semaphore *rwsem) -{ - __rt_down_read(rwsem, 0); -} -EXPORT_SYMBOL(rt_down_read); - -void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) -{ - __rt_down_read(rwsem, subclass); -} -EXPORT_SYMBOL(rt_down_read_nested); - -void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, - struct lock_class_key *key) -{ -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* - * Make sure we are not reinitializing a held lock: - */ - debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); - lockdep_init_map(&rwsem->dep_map, name, key, 0); -#endif - rwsem->read_depth = 0; - rwsem->lock.save_state = 0; -} -EXPORT_SYMBOL(__rt_rwsem_init); - -/** - * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 - * @cnt: the atomic which we are to dec - * @lock: the mutex to return holding if we dec to 0 - * - * return true and hold lock if we dec to 0, return false otherwise - */ -int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) -{ - /* dec if we can't possibly hit 0 */ - if (atomic_add_unless(cnt, -1, 1)) - return 0; - /* we might hit 0, so take the lock */ - mutex_lock(lock); - if (!atomic_dec_and_test(cnt)) { - /* when we actually did the dec, we didn't hit 0 */ - mutex_unlock(lock); - return 0; - } - /* we hit 0, and we hold the lock */ - return 1; -} -EXPORT_SYMBOL(atomic_dec_and_mutex_lock); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 20742e7..a242e69 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -8,12 +8,6 @@ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen * - * Adaptive Spinlocks: - * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, - * and Peter Morreale, - * Adaptive Spinlocks simplification: - * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt - * * See Documentation/rt-mutex-design.txt for details. 
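
[Aside on atomic_dec_and_mutex_lock() above: the pattern is "decrement lock-free while the count cannot reach zero, take the lock only for the final decrement". A minimal userspace sketch using C11 atomics and a pthread mutex; names are illustrative, not kernel API, and the boolean return replaces the kernel's 1/0 convention:]

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdbool.h>

    /* Decrement *cnt; return true holding *lock iff the count hit zero. */
    static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
    {
            int old = atomic_load(cnt);

            /* Fast path: counter is definitely > 1, no lock needed. */
            while (old > 1) {
                    if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                            return false;     /* didn't hit zero, lock not taken */
            }

            /* Slow path: we might hit zero, serialize with the lock. */
            pthread_mutex_lock(lock);
            if (atomic_fetch_sub(cnt, 1) == 1)
                    return true;              /* hit zero, caller holds the lock */
            pthread_mutex_unlock(lock);
            return false;
    }
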
*/ #include @@ -73,12 +67,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) clear_rt_mutex_waiters(lock); } -static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) -{ - return waiter && waiter != PI_WAKEUP_INPROGRESS && - waiter != PI_REQUEUE_INPROGRESS; -} - /* * We can speed up the acquire/release, if the architecture * supports cmpxchg and if there's no debugging state to be set up @@ -102,12 +90,6 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) } #endif -static inline void init_lists(struct rt_mutex *lock) -{ - if (unlikely(!lock->wait_list.node_list.prev)) - plist_head_init(&lock->wait_list); -} - /* * Calculate task priority from the waiter list priority * @@ -124,18 +106,6 @@ int rt_mutex_getprio(struct task_struct *task) } /* - * Called by sched_setscheduler() to check whether the priority change - * is overruled by a possible priority boosting. - */ -int rt_mutex_check_prio(struct task_struct *task, int newprio) -{ - if (!task_has_pi_waiters(task)) - return 0; - - return task_top_pi_waiter(task)->pi_list_entry.prio <= newprio; -} - -/* * Adjust the priority of a task, after its pi_waiters got modified. * * This can be both boosting and unboosting. task->pi_lock must be held. @@ -166,14 +136,6 @@ static void rt_mutex_adjust_prio(struct task_struct *task) raw_spin_unlock_irqrestore(&task->pi_lock, flags); } -static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) -{ - if (waiter->savestate) - wake_up_lock_sleeper(waiter->task); - else - wake_up_process(waiter->task); -} - /* * Max number of times we'll walk the boosting chain: */ @@ -234,7 +196,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * reached or the state of the chain has changed while we * dropped the locks. */ - if (!rt_mutex_real_waiter(waiter)) + if (!waiter) goto out_unlock_pi; /* @@ -285,15 +247,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* Release the task */ raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (!rt_mutex_owner(lock)) { - struct rt_mutex_waiter *lock_top_waiter; - /* * If the requeue above changed the top waiter, then we need * to wake the new top waiter up to try to get the lock. */ - lock_top_waiter = rt_mutex_top_waiter(lock); - if (top_waiter != lock_top_waiter) - rt_mutex_wake_waiter(lock_top_waiter); + + if (top_waiter != rt_mutex_top_waiter(lock)) + wake_up_process(rt_mutex_top_waiter(lock)->task); raw_spin_unlock(&lock->wait_lock); goto out_put_task; } @@ -338,25 +298,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, return ret; } - -#define STEAL_NORMAL 0 -#define STEAL_LATERAL 1 - -/* - * Note that RT tasks are excluded from lateral-steals to prevent the - * introduction of an unbounded latency - */ -static inline int lock_is_stealable(struct task_struct *task, - struct task_struct *pendowner, int mode) -{ - if (mode == STEAL_NORMAL || rt_task(task)) { - if (task->prio >= pendowner->prio) - return 0; - } else if (task->prio > pendowner->prio) - return 0; - return 1; -} - /* * Try to take an rt-mutex * @@ -366,9 +307,8 @@ static inline int lock_is_stealable(struct task_struct *task, * @task: the task which wants to acquire the lock * @waiter: the waiter that is queued to the lock's wait list. 
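
[Aside on the removed lateral-steal policy above: it reduces to a small predicate. A sketch, using the kernel convention that a numerically lower prio value is more urgent; the function name and parameters are hypothetical:]

    /* May 'task' steal the lock from the pending owner? */
    static int lock_is_stealable_sketch(int task_prio, int pendowner_prio,
                                        int task_is_rt, int lateral)
    {
            if (!lateral || task_is_rt)
                    return task_prio < pendowner_prio;  /* strict: must be more urgent */
            return task_prio <= pendowner_prio;         /* lateral: equal prio may steal */
    }

RT tasks are forced onto the strict branch precisely because an equal-priority steal could otherwise delay them without bound.
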
(could be NULL) */ -static int -__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter, int mode) +static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter) { /* * We have to be careful here if the atomic speedups are @@ -401,14 +341,12 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, * 3) it is top waiter */ if (rt_mutex_has_waiters(lock)) { - struct task_struct *pown = rt_mutex_top_waiter(lock)->task; - - if (task != pown && !lock_is_stealable(task, pown, mode)) - return 0; + if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) { + if (!waiter || waiter != rt_mutex_top_waiter(lock)) + return 0; + } } - /* We got the lock. */ - if (waiter || rt_mutex_has_waiters(lock)) { unsigned long flags; struct rt_mutex_waiter *top; @@ -433,6 +371,7 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, raw_spin_unlock_irqrestore(&task->pi_lock, flags); } + /* We got the lock. */ debug_rt_mutex_lock(lock); rt_mutex_set_owner(lock, task); @@ -442,13 +381,6 @@ __try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, return 1; } -static inline int -try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) -{ - return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); -} - /* * Task blocks on lock. * @@ -467,23 +399,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, int chain_walk = 0, res; raw_spin_lock_irqsave(&task->pi_lock, flags); - - /* - * In the case of futex requeue PI, this will be a proxy - * lock. The task will wake unaware that it is enqueueed on - * this lock. Avoid blocking on two locks and corrupting - * pi_blocked_on via the PI_WAKEUP_INPROGRESS - * flag. futex_wait_requeue_pi() sets this when it wakes up - * before requeue (due to a signal or timeout). Do not enqueue - * the task if PI_WAKEUP_INPROGRESS is set. - */ - if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return -EAGAIN; - } - - BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); - __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; @@ -508,7 +423,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); - if (rt_mutex_real_waiter(owner->pi_blocked_on)) + if (owner->pi_blocked_on) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } @@ -563,7 +478,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) raw_spin_unlock_irqrestore(¤t->pi_lock, flags); - rt_mutex_wake_waiter(waiter); + wake_up_process(waiter->task); } /* @@ -602,7 +517,7 @@ static void remove_waiter(struct rt_mutex *lock, } __rt_mutex_adjust_prio(owner); - if (rt_mutex_real_waiter(owner->pi_blocked_on)) + if (owner->pi_blocked_on) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); @@ -636,316 +551,23 @@ void rt_mutex_adjust_pi(struct task_struct *task) raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; - if (!rt_mutex_real_waiter(waiter) || - waiter->list_entry.prio == task->prio) { + if (!waiter || waiter->list_entry.prio == task->prio) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + /* gets dropped in rt_mutex_adjust_prio_chain()! 
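
[Aside on the priority-inheritance adjustment walked above: each step of the chain recomputes an owner's effective priority from its own static priority and its most urgent waiter. A sketch of that one recomputation, with hypothetical field names and INT_MAX standing in for "no waiter":]

    #include <limits.h>

    struct pi_task_sketch {
            int normal_prio;        /* static priority, lower = more urgent */
            int top_waiter_prio;    /* most urgent waiter, INT_MAX if none */
            int prio;               /* effective (possibly boosted) priority */
    };

    static void pi_adjust_prio_sketch(struct pi_task_sketch *t)
    {
            t->prio = t->top_waiter_prio < t->normal_prio
                    ? t->top_waiter_prio    /* inherit the waiter's urgency */
                    : t->normal_prio;       /* no waiter is more urgent */
    }
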
*/ get_task_struct(task); - raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); } -#ifdef CONFIG_PREEMPT_RT_FULL -/* - * preemptible spin_lock functions: - */ -static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, - void (*slowfn)(struct rt_mutex *lock)) -{ - might_sleep(); - - if (likely(rt_mutex_cmpxchg(lock, NULL, current))) - rt_mutex_deadlock_account_lock(lock, current); - else - slowfn(lock); -} - -static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, - void (*slowfn)(struct rt_mutex *lock)) -{ - if (likely(rt_mutex_cmpxchg(lock, current, NULL))) - rt_mutex_deadlock_account_unlock(current); - else - slowfn(lock); -} - -#ifdef CONFIG_SMP -/* - * Note that owner is a speculative pointer and dereferencing relies - * on rcu_read_lock() and the check against the lock owner. - */ -static int adaptive_wait(struct rt_mutex *lock, - struct task_struct *owner) -{ - int res = 0; - - rcu_read_lock(); - for (;;) { - if (owner != rt_mutex_owner(lock)) - break; - /* - * Ensure that owner->on_cpu is dereferenced _after_ - * checking the above to be valid. - */ - barrier(); - if (!owner->on_cpu) { - res = 1; - break; - } - cpu_relax(); - } - rcu_read_unlock(); - return res; -} -#else -static int adaptive_wait(struct rt_mutex *lock, - struct task_struct *orig_owner) -{ - return 1; -} -#endif - -# define pi_lock(lock) raw_spin_lock_irq(lock) -# define pi_unlock(lock) raw_spin_unlock_irq(lock) - -/* - * Slow path lock function spin_lock style: this variant is very - * careful not to miss any non-lock wakeups. - * - * We store the current state under p->pi_lock in p->saved_state and - * the try_to_wake_up() code handles this accordingly. - */ -static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) -{ - struct task_struct *lock_owner, *self = current; - struct rt_mutex_waiter waiter, *top_waiter; - int ret; - - rt_mutex_init_waiter(&waiter, true); - - raw_spin_lock(&lock->wait_lock); - init_lists(lock); - - if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { - raw_spin_unlock(&lock->wait_lock); - return; - } - - BUG_ON(rt_mutex_owner(lock) == self); - - /* - * We save whatever state the task is in and we'll restore it - * after acquiring the lock taking real wakeups into account - * as well. We are serialized via pi_lock against wakeups. See - * try_to_wake_up(). - */ - pi_lock(&self->pi_lock); - self->saved_state = self->state; - __set_current_state(TASK_UNINTERRUPTIBLE); - pi_unlock(&self->pi_lock); - - ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0); - BUG_ON(ret); - - for (;;) { - /* Try to acquire the lock again. */ - if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) - break; - - top_waiter = rt_mutex_top_waiter(lock); - lock_owner = rt_mutex_owner(lock); - - raw_spin_unlock(&lock->wait_lock); - - debug_rt_mutex_print_deadlock(&waiter); - - if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) - schedule_rt_mutex(lock); - - raw_spin_lock(&lock->wait_lock); - - pi_lock(&self->pi_lock); - __set_current_state(TASK_UNINTERRUPTIBLE); - pi_unlock(&self->pi_lock); - } - - /* - * Restore the task state to current->saved_state. We set it - * to the original state above and the try_to_wake_up() code - * has possibly updated it when a real (non-rtmutex) wakeup - * happened while we were blocked. Clear saved_state so - * try_to_wakeup() does not get confused. 
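
[Aside on adaptive_wait() above: spin only while the lock owner is executing on a CPU, otherwise block, since spinning on a sleeping owner wastes cycles. A userspace analogue with an atomic flag standing in for task_struct::on_cpu, which the kernel reads under RCU; all names are hypothetical:]

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <sched.h>

    struct owner_sketch {
            atomic_bool on_cpu;     /* stand-in for task_struct::on_cpu */
    };

    /* Returns true when the waiter should block, false to retry the lock. */
    static bool adaptive_wait_sketch(_Atomic(struct owner_sketch *) *lock_owner,
                                     struct owner_sketch *owner)
    {
            for (;;) {
                    if (atomic_load(lock_owner) != owner)
                            return false;   /* ownership changed: retry the lock */
                    if (!atomic_load(&owner->on_cpu))
                            return true;    /* owner is off-CPU: block, don't spin */
                    sched_yield();          /* userspace stand-in for cpu_relax() */
            }
    }
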
- */ - pi_lock(&self->pi_lock); - __set_current_state(self->saved_state); - self->saved_state = TASK_RUNNING; - pi_unlock(&self->pi_lock); - - /* - * try_to_take_rt_mutex() sets the waiter bit - * unconditionally. We might have to fix that up: - */ - fixup_rt_mutex_waiters(lock); - - BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); - BUG_ON(!plist_node_empty(&waiter.list_entry)); - - raw_spin_unlock(&lock->wait_lock); - - debug_rt_mutex_free_waiter(&waiter); -} - -/* - * Slow path to release a rt_mutex spin_lock style - */ -static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) -{ - raw_spin_lock(&lock->wait_lock); - - debug_rt_mutex_unlock(lock); - - rt_mutex_deadlock_account_unlock(current); - - if (!rt_mutex_has_waiters(lock)) { - lock->owner = NULL; - raw_spin_unlock(&lock->wait_lock); - return; - } - - wakeup_next_waiter(lock); - - raw_spin_unlock(&lock->wait_lock); - - /* Undo pi boosting.when necessary */ - rt_mutex_adjust_prio(current); -} - -void __lockfunc rt_spin_lock(spinlock_t *lock) -{ - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); - spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); -} -EXPORT_SYMBOL(rt_spin_lock); - -void __lockfunc __rt_spin_lock(struct rt_mutex *lock) -{ - rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); -} -EXPORT_SYMBOL(__rt_spin_lock); - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) -{ - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); - spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -} -EXPORT_SYMBOL(rt_spin_lock_nested); -#endif - -void __lockfunc rt_spin_unlock(spinlock_t *lock) -{ - /* NOTE: we always pass in '1' for nested, for simplicity */ - spin_release(&lock->dep_map, 1, _RET_IP_); - rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); -} -EXPORT_SYMBOL(rt_spin_unlock); - -void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) -{ - rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); -} -EXPORT_SYMBOL(__rt_spin_unlock); - -/* - * Wait for the lock to get unlocked: instead of polling for an unlock - * (like raw spinlocks do), we lock and unlock, to force the kernel to - * schedule if there's contention: - */ -void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) -{ - spin_lock(lock); - spin_unlock(lock); -} -EXPORT_SYMBOL(rt_spin_unlock_wait); - -int __lockfunc rt_spin_trylock(spinlock_t *lock) -{ - int ret = rt_mutex_trylock(&lock->lock); - - if (ret) - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - return ret; -} -EXPORT_SYMBOL(rt_spin_trylock); - -int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) -{ - int ret; - - local_bh_disable(); - ret = rt_mutex_trylock(&lock->lock); - if (ret) { - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - } else - local_bh_enable(); - return ret; -} -EXPORT_SYMBOL(rt_spin_trylock_bh); - -int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) -{ - int ret; - - *flags = 0; - migrate_disable(); - ret = rt_mutex_trylock(&lock->lock); - if (ret) - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - else - migrate_enable(); - return ret; -} -EXPORT_SYMBOL(rt_spin_trylock_irqsave); - -int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) -{ - /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ - if (atomic_add_unless(atomic, -1, 1)) - return 0; - migrate_disable(); - rt_spin_lock(lock); - if (atomic_dec_and_test(atomic)) - return 1; - rt_spin_unlock(lock); - migrate_enable(); - return 0; -} -EXPORT_SYMBOL(atomic_dec_and_spin_lock); - -void -__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) -{ -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* - * Make sure we are not reinitializing a held lock: - */ - debug_check_no_locks_freed((void *)lock, sizeof(*lock)); - lockdep_init_map(&lock->dep_map, name, key, 0); -#endif -} -EXPORT_SYMBOL(__rt_spin_lock_init); - -#endif /* PREEMPT_RT_FULL */ - /** * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take * @state: the state the task should block in (TASK_INTERRUPTIBLE - * or TASK_UNINTERRUPTIBLE) + * or TASK_UNINTERRUPTIBLE) * @timeout: the pre-initialized and started timer, or NULL for none * @waiter: the pre-initialized rt_mutex_waiter * @@ -1001,10 +623,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, struct rt_mutex_waiter waiter; int ret = 0; - rt_mutex_init_waiter(&waiter, false); + debug_rt_mutex_init_waiter(&waiter); raw_spin_lock(&lock->wait_lock); - init_lists(lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock, current, NULL)) { @@ -1057,7 +678,6 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) int ret = 0; raw_spin_lock(&lock->wait_lock); - init_lists(lock); if (likely(rt_mutex_owner(lock) != current)) { @@ -1171,12 +791,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); /** * rt_mutex_lock_interruptible - lock a rt_mutex interruptible * - * @lock: the rt_mutex to be locked + * @lock: the rt_mutex to be locked * @detect_deadlock: deadlock detection on/off * * Returns: - * 0 on success - * -EINTR when interrupted by a signal + * 0 on success + * -EINTR when interrupted by a signal * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, @@ -1190,38 +810,17 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); /** - * rt_mutex_lock_killable - lock a rt_mutex killable - * - * @lock: the rt_mutex to be locked - * @detect_deadlock: deadlock detection on/off - * - * Returns: - * 0 on success - * -EINTR when interrupted by a signal - * -EDEADLK when the lock would deadlock (when deadlock detection is on) - */ -int __sched rt_mutex_lock_killable(struct rt_mutex *lock, - int detect_deadlock) -{ - might_sleep(); - - return rt_mutex_fastlock(lock, TASK_KILLABLE, - detect_deadlock, rt_mutex_slowlock); -} -EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); - -/** * rt_mutex_timed_lock - lock a rt_mutex interruptible * the timeout structure is provided * by the caller * - * @lock: the rt_mutex to be locked + * @lock: the rt_mutex to be locked * @timeout: timeout structure or NULL (no timeout) * @detect_deadlock: deadlock detection on/off * * Returns: - * 0 on success - * -EINTR when interrupted by a signal + * 0 on success + * -EINTR when interrupted by a signal * -ETIMEDOUT when the timeout expired * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ @@ -1290,11 +889,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); void __rt_mutex_init(struct rt_mutex *lock, const char *name) { lock->owner = NULL; + raw_spin_lock_init(&lock->wait_lock); plist_head_init(&lock->wait_list); debug_rt_mutex_init(lock, name); } -EXPORT_SYMBOL(__rt_mutex_init); +EXPORT_SYMBOL_GPL(__rt_mutex_init); /** * rt_mutex_init_proxy_locked - 
initialize and lock a rt_mutex on behalf of a @@ -1309,7 +909,7 @@ EXPORT_SYMBOL(__rt_mutex_init); void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { - rt_mutex_init(lock); + __rt_mutex_init(lock, NULL); debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); rt_mutex_deadlock_account_lock(lock, proxy_owner); @@ -1358,35 +958,6 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, return 1; } -#ifdef CONFIG_PREEMPT_RT_FULL - /* - * In PREEMPT_RT there's an added race. - * If the task, that we are about to requeue, times out, - * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue - * to skip this task. But right after the task sets - * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then - * block on the spin_lock(&hb->lock), which in RT is an rtmutex. - * This will replace the PI_WAKEUP_INPROGRESS with the actual - * lock that it blocks on. We *must not* place this task - * on this proxy lock in that case. - * - * To prevent this race, we first take the task's pi_lock - * and check if it has updated its pi_blocked_on. If it has, - * we assume that it woke up and we return -EAGAIN. - * Otherwise, we set the task's pi_blocked_on to - * PI_REQUEUE_INPROGRESS, so that if the task is waking up - * it will know that we are in the process of requeuing it. - */ - raw_spin_lock_irq(&task->pi_lock); - if (task->pi_blocked_on) { - raw_spin_unlock_irq(&task->pi_lock); - raw_spin_unlock(&lock->wait_lock); - return -EAGAIN; - } - task->pi_blocked_on = PI_REQUEUE_INPROGRESS; - raw_spin_unlock_irq(&task->pi_lock); -#endif - ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); if (ret && !rt_mutex_owner(lock)) { diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h index 6ec3dc1..53a66c8 100644 --- a/kernel/rtmutex_common.h +++ b/kernel/rtmutex_common.h @@ -49,7 +49,6 @@ struct rt_mutex_waiter { struct plist_node pi_list_entry; struct task_struct *task; struct rt_mutex *lock; - bool savestate; #ifdef CONFIG_DEBUG_RT_MUTEXES unsigned long ip; struct pid *deadlock_task_pid; @@ -104,9 +103,6 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) /* * PI-futex support (proxy locking functions, etc.): */ -#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) -#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); @@ -127,12 +123,4 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, # include "rtmutex.h" #endif -static inline void -rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) -{ - debug_rt_mutex_init_waiter(waiter); - waiter->task = NULL; - waiter->savestate = savestate; -} - #endif diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index c3ae144..c685e31 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -176,36 +176,10 @@ static u64 sched_clock_remote(struct sched_clock_data *scd) u64 this_clock, remote_clock; u64 *ptr, old_val, val; -#if BITS_PER_LONG != 64 -again: - /* - * Careful here: The local and the remote clock values need to - * be read out atomic as we need to compare the values and - * then update either the local or the remote side. So the - * cmpxchg64 below only protects one readout. 
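
[Aside on the removed 32-bit path above: it reads a 64-bit clock atomically by compare-exchanging 0 with 0. The CAS almost always fails, but failure returns the current value as a single atomic 64-bit operation, so the readout cannot tear between the low and high halves. C11 sketch of the same trick, function name hypothetical:]

    #include <stdatomic.h>
    #include <stdint.h>

    static uint64_t atomic_read64_sketch(_Atomic uint64_t *p)
    {
            uint64_t expected = 0;

            /* On success the value was 0 and stays 0; on failure 'expected'
             * is updated to the current value.  Either way we obtain an
             * atomic snapshot, even where plain 64-bit loads may tear. */
            atomic_compare_exchange_strong(p, &expected, 0);
            return expected;
    }
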
- * - * We must reread via sched_clock_local() in the retry case on - * 32bit as an NMI could use sched_clock_local() via the - * tracer and hit between the readout of - * the low32bit and the high 32bit portion. - */ - this_clock = sched_clock_local(my_scd); - /* - * We must enforce atomic readout on 32bit, otherwise the - * update on the remote cpu can hit inbetween the readout of - * the low32bit and the high 32bit portion. - */ - remote_clock = cmpxchg64(&scd->clock, 0, 0); -#else - /* - * On 64bit the read of [my]scd->clock is atomic versus the - * update, so we can avoid the above 32bit dance. - */ sched_clock_local(my_scd); again: this_clock = my_scd->clock; remote_clock = scd->clock; -#endif /* * Use the opportunity that we have both locks diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 505e08f..26058d0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -272,11 +272,7 @@ late_initcall(sched_init_debug); * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ -#ifndef CONFIG_PREEMPT_RT_FULL const_debug unsigned int sysctl_sched_nr_migrate = 32; -#else -const_debug unsigned int sysctl_sched_nr_migrate = 8; -#endif /* * period over which we average the RT time consumption, measured @@ -493,7 +489,6 @@ static void init_rq_hrtick(struct rq *rq) hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rq->hrtick_timer.function = hrtick; - rq->hrtick_timer.irqsafe = 1; } #else /* CONFIG_SCHED_HRTICK */ static inline void hrtick_clear(struct rq *rq) @@ -543,37 +538,6 @@ void resched_task(struct task_struct *p) smp_send_reschedule(cpu); } -#ifdef CONFIG_PREEMPT_LAZY -void resched_task_lazy(struct task_struct *p) -{ - int cpu; - - if (!sched_feat(PREEMPT_LAZY)) { - resched_task(p); - return; - } - - assert_raw_spin_locked(&task_rq(p)->lock); - - if (test_tsk_need_resched(p)) - return; - - if (test_tsk_need_resched_lazy(p)) - return; - - set_tsk_need_resched_lazy(p); - - cpu = task_cpu(p); - if (cpu == smp_processor_id()) - return; - - /* NEED_RESCHED_LAZY must be visible before we test polling */ - smp_mb(); - if (!tsk_is_polling(p)) - smp_send_reschedule(cpu); -} -#endif - void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); @@ -690,17 +654,6 @@ void resched_task(struct task_struct *p) assert_raw_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); } -#ifdef CONFIG_PREEMPT_LAZY -void resched_task_lazy(struct task_struct *p) -{ - if (!sched_feat(PREEMPT_LAZY)) { - resched_task(p); - return; - } - assert_raw_spin_locked(&task_rq(p)->lock); - set_tsk_need_resched_lazy(p); -} -#endif #endif /* CONFIG_SMP */ #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ @@ -1083,8 +1036,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! */ while (task_running(rq, p)) { - if (match_state && unlikely(p->state != match_state) - && unlikely(p->saved_state != match_state)) + if (match_state && unlikely(p->state != match_state)) return 0; cpu_relax(); } @@ -1099,8 +1051,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) running = task_running(rq, p); on_rq = p->on_rq; ncsw = 0; - if (!match_state || p->state == match_state - || p->saved_state == match_state) + if (!match_state || p->state == match_state) ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &flags); @@ -1236,12 +1187,6 @@ out: } } - /* - * Clear PF_THREAD_BOUND, otherwise we wreckage - * migrate_disable/enable. 
See optimization for - * PF_THREAD_BOUND tasks there. - */ - p->flags &= ~PF_THREAD_BOUND; return dest_cpu; } @@ -1488,27 +1433,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) smp_wmb(); raw_spin_lock_irqsave(&p->pi_lock, flags); - if (!(p->state & state)) { - /* - * The task might be running due to a spinlock sleeper - * wakeup. Check the saved state and set it to running - * if the wakeup condition is true. - */ - if (!(wake_flags & WF_LOCK_SLEEPER)) { - if (p->saved_state & state) { - p->saved_state = TASK_RUNNING; - success = 1; - } - } + if (!(p->state & state)) goto out; - } - - /* - * If this is a regular wakeup, then we can unconditionally - * clear the saved state of a "lock sleeper". - */ - if (!(wake_flags & WF_LOCK_SLEEPER)) - p->saved_state = TASK_RUNNING; success = 1; /* we're going to change ->state */ cpu = task_cpu(p); @@ -1562,10 +1488,8 @@ static void try_to_wake_up_local(struct task_struct *p) { struct rq *rq = task_rq(p); - if (WARN_ON_ONCE(rq != this_rq()) || - WARN_ON_ONCE(p == current)) - return; - + BUG_ON(rq != this_rq()); + BUG_ON(p == current); lockdep_assert_held(&rq->lock); if (!raw_spin_trylock(&p->pi_lock)) { @@ -1604,18 +1528,6 @@ int wake_up_process(struct task_struct *p) } EXPORT_SYMBOL(wake_up_process); -/** - * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" - * @p: The process to be woken up. - * - * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate - * the nature of the wakeup. - */ -int wake_up_lock_sleeper(struct task_struct *p) -{ - return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); -} - int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); @@ -1762,9 +1674,6 @@ void sched_fork(struct task_struct *p) /* Want to start with kernel preemption disabled. */ task_thread_info(p)->preempt_count = 1; #endif -#ifdef CONFIG_HAVE_PREEMPT_LAZY - task_thread_info(p)->preempt_lazy_count = 0; -#endif #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); #endif @@ -1931,12 +1840,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) finish_arch_post_lock_switch(); fire_sched_in_preempt_notifiers(current); - /* - * We use mmdrop_delayed() here so we don't have to do the - * full __mmdrop() when we are the last user. 
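
[Aside on the saved_state handling removed from try_to_wake_up() above: a task blocked on a sleeping spinlock parks its real sleep state in saved_state; a regular wakeup that matches the parked state is only recorded there, and the lock slow path restores it afterwards. A state-machine sketch with hypothetical names and bit values following the kernel convention that RUNNING is 0:]

    #define ST_RUNNING          0u
    #define ST_INTERRUPTIBLE    (1u << 0)
    #define ST_UNINTERRUPTIBLE  (1u << 1)

    struct wake_sketch {
            unsigned int state;        /* what the task currently sleeps in */
            unsigned int saved_state;  /* state parked by a sleeping spinlock */
    };

    /* Returns 1 if the task is woken or marked runnable for later. */
    static int try_to_wake_up_sketch(struct wake_sketch *t,
                                     unsigned int wake_mask, int lock_sleeper)
    {
            if (!(t->state & wake_mask)) {
                    /* Blocked on a sleeping spinlock: a regular wakeup that
                     * matches the parked state is remembered for later. */
                    if (!lock_sleeper && (t->saved_state & wake_mask)) {
                            t->saved_state = ST_RUNNING;
                            return 1;
                    }
                    return 0;
            }
            if (!lock_sleeper)
                    t->saved_state = ST_RUNNING;  /* real wakeup clears parked state */
            t->state = ST_RUNNING;
            return 1;
    }
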
- */ if (mm) - mmdrop_delayed(mm); + mmdrop(mm); if (unlikely(prev_state == TASK_DEAD)) { /* * Remove function-return probe instances associated with this @@ -2836,13 +2741,8 @@ void __kprobes add_preempt_count(int val) DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK - 10); #endif - if (preempt_count() == val) { - unsigned long ip = get_parent_ip(CALLER_ADDR1); -#ifdef CONFIG_DEBUG_PREEMPT - current->preempt_disable_ip = ip; -#endif - trace_preempt_off(CALLER_ADDR0, ip); - } + if (preempt_count() == val) + trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); } EXPORT_SYMBOL(add_preempt_count); @@ -2885,13 +2785,6 @@ static noinline void __schedule_bug(struct task_struct *prev) print_modules(); if (irqs_disabled()) print_irqtrace_events(prev); -#ifdef CONFIG_DEBUG_PREEMPT - if (in_atomic_preempt_off()) { - pr_err("Preemption disabled at:"); - print_ip_sym(current->preempt_disable_ip); - pr_cont("\n"); - } -#endif dump_stack(); add_taint(TAINT_WARN); } @@ -2915,128 +2808,6 @@ static inline void schedule_debug(struct task_struct *prev) schedstat_inc(this_rq(), sched_count); } -#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) -#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */ -#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN) -#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN) - -static inline void update_migrate_disable(struct task_struct *p) -{ - const struct cpumask *mask; - - if (likely(!p->migrate_disable)) - return; - - /* Did we already update affinity? */ - if (unlikely(migrate_disabled_updated(p))) - return; - - /* - * Since this is always current we can get away with only locking - * rq->lock, the ->cpus_allowed value can normally only be changed - * while holding both p->pi_lock and rq->lock, but seeing that this - * is current, we cannot actually be waking up, so all code that - * relies on serialization against p->pi_lock is out of scope. - * - * Having rq->lock serializes us against things like - * set_cpus_allowed_ptr() that can still happen concurrently. - */ - mask = tsk_cpus_allowed(p); - - if (p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, mask); - p->nr_cpus_allowed = cpumask_weight(mask); - - /* Let migrate_enable know to fix things back up */ - p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; -} - -void migrate_disable(void) -{ - struct task_struct *p = current; - - if (in_atomic()) { -#ifdef CONFIG_SCHED_DEBUG - p->migrate_disable_atomic++; -#endif - return; - } - -#ifdef CONFIG_SCHED_DEBUG - WARN_ON_ONCE(p->migrate_disable_atomic); -#endif - - preempt_disable(); - if (p->migrate_disable) { - p->migrate_disable++; - preempt_enable(); - return; - } - - preempt_lazy_disable(); - pin_current_cpu(); - p->migrate_disable = 1; - preempt_enable(); -} -EXPORT_SYMBOL(migrate_disable); - -void migrate_enable(void) -{ - struct task_struct *p = current; - const struct cpumask *mask; - unsigned long flags; - struct rq *rq; - - if (in_atomic()) { -#ifdef CONFIG_SCHED_DEBUG - p->migrate_disable_atomic--; -#endif - return; - } - -#ifdef CONFIG_SCHED_DEBUG - WARN_ON_ONCE(p->migrate_disable_atomic); -#endif - WARN_ON_ONCE(p->migrate_disable <= 0); - - preempt_disable(); - if (migrate_disable_count(p) > 1) { - p->migrate_disable--; - preempt_enable(); - return; - } - - if (unlikely(migrate_disabled_updated(p))) { - /* - * Undo whatever update_migrate_disable() did, also see there - * about locking. 
- */ - rq = this_rq(); - raw_spin_lock_irqsave(&rq->lock, flags); - - /* - * Clearing migrate_disable causes tsk_cpus_allowed to - * show the tasks original cpu affinity. - */ - p->migrate_disable = 0; - mask = tsk_cpus_allowed(p); - if (p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, mask); - p->nr_cpus_allowed = cpumask_weight(mask); - raw_spin_unlock_irqrestore(&rq->lock, flags); - } else - p->migrate_disable = 0; - - unpin_current_cpu(); - preempt_enable(); - preempt_lazy_enable(); -} -EXPORT_SYMBOL(migrate_enable); -#else -static inline void update_migrate_disable(struct task_struct *p) { } -#define migrate_disabled_updated(p) 0 -#endif - static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->on_rq || rq->skip_clock_update < 0) @@ -3130,8 +2901,6 @@ need_resched: raw_spin_lock_irq(&rq->lock); - update_migrate_disable(prev); - switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely(signal_pending_state(prev->state, prev))) { @@ -3144,10 +2913,8 @@ need_resched: * If a worker went to sleep, notify and ask workqueue * whether it wants to wake up a task to maintain * concurrency. - * Only call wake up if prev isn't blocked on a sleeping - * spin lock. */ - if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { + if (prev->flags & PF_WQ_WORKER) { struct task_struct *to_wakeup; to_wakeup = wq_worker_sleeping(prev, cpu); @@ -3166,7 +2933,6 @@ need_resched: put_prev_task(rq, prev); next = pick_next_task(rq); clear_tsk_need_resched(prev); - clear_tsk_need_resched_lazy(prev); rq->skip_clock_update = 0; if (likely(prev != next)) { @@ -3303,26 +3069,9 @@ asmlinkage void __sched notrace preempt_schedule(void) if (likely(ti->preempt_count || irqs_disabled())) return; -#ifdef CONFIG_PREEMPT_LAZY - /* - * Check for lazy preemption - */ - if (ti->preempt_lazy_count && !test_thread_flag(TIF_NEED_RESCHED)) - return; -#endif - do { add_preempt_count_notrace(PREEMPT_ACTIVE); - /* - * The add/subtract must not be traced by the function - * tracer. But we still want to account for the - * preempt off latency tracer. Since the _notrace versions - * of add/subtract skip the accounting for latency tracer - * we must force it manually. 
- */ - start_critical_timings(); __schedule(); - stop_critical_timings(); sub_preempt_count_notrace(PREEMPT_ACTIVE); /* @@ -3491,10 +3240,10 @@ void complete(struct completion *x) { unsigned long flags; - raw_spin_lock_irqsave(&x->wait.lock, flags); + spin_lock_irqsave(&x->wait.lock, flags); x->done++; - __swait_wake_locked(&x->wait, TASK_NORMAL, 1); - raw_spin_unlock_irqrestore(&x->wait.lock, flags); + __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); + spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete); @@ -3511,10 +3260,10 @@ void complete_all(struct completion *x) { unsigned long flags; - raw_spin_lock_irqsave(&x->wait.lock, flags); + spin_lock_irqsave(&x->wait.lock, flags); x->done += UINT_MAX/2; - __swait_wake_locked(&x->wait, TASK_NORMAL, 0); - raw_spin_unlock_irqrestore(&x->wait.lock, flags); + __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); + spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete_all); @@ -3522,20 +3271,20 @@ static inline long __sched do_wait_for_common(struct completion *x, long timeout, int state) { if (!x->done) { - DEFINE_SWAITER(wait); + DECLARE_WAITQUEUE(wait, current); - swait_prepare_locked(&x->wait, &wait); + __add_wait_queue_tail_exclusive(&x->wait, &wait); do { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; break; } __set_current_state(state); - raw_spin_unlock_irq(&x->wait.lock); + spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); - raw_spin_lock_irq(&x->wait.lock); + spin_lock_irq(&x->wait.lock); } while (!x->done && timeout); - swait_finish_locked(&x->wait, &wait); + __remove_wait_queue(&x->wait, &wait); if (!x->done) return timeout; } @@ -3548,9 +3297,9 @@ wait_for_common(struct completion *x, long timeout, int state) { might_sleep(); - raw_spin_lock_irq(&x->wait.lock); + spin_lock_irq(&x->wait.lock); timeout = do_wait_for_common(x, timeout, state); - raw_spin_unlock_irq(&x->wait.lock); + spin_unlock_irq(&x->wait.lock); return timeout; } @@ -3681,12 +3430,12 @@ bool try_wait_for_completion(struct completion *x) unsigned long flags; int ret = 1; - raw_spin_lock_irqsave(&x->wait.lock, flags); + spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; else x->done--; - raw_spin_unlock_irqrestore(&x->wait.lock, flags); + spin_unlock_irqrestore(&x->wait.lock, flags); return ret; } EXPORT_SYMBOL(try_wait_for_completion); @@ -3704,10 +3453,10 @@ bool completion_done(struct completion *x) unsigned long flags; int ret = 1; - raw_spin_lock_irqsave(&x->wait.lock, flags); + spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; - raw_spin_unlock_irqrestore(&x->wait.lock, flags); + spin_unlock_irqrestore(&x->wait.lock, flags); return ret; } EXPORT_SYMBOL(completion_done); @@ -3768,8 +3517,7 @@ EXPORT_SYMBOL(sleep_on_timeout); * This function changes the 'effective' priority of a task. It does * not touch ->normal_prio like __setscheduler(). * - * Used by the rt_mutex code to implement priority inheritance - * logic. Call site only calls if the priority of the task changed. + * Used by the rt_mutex code to implement priority inheritance logic. */ void rt_mutex_setprio(struct task_struct *p, int prio) { @@ -3992,25 +3740,20 @@ static struct task_struct *find_process_by_pid(pid_t pid) return pid ? 
find_task_by_vpid(pid) : current; } -static void __setscheduler_params(struct task_struct *p, int policy, int prio) -{ - p->policy = policy; - p->rt_priority = prio; - p->normal_prio = normal_prio(p); - set_load_weight(p); -} - /* Actually do priority change: must hold rq lock. */ static void __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) { - __setscheduler_params(p, policy, prio); + p->policy = policy; + p->rt_priority = prio; + p->normal_prio = normal_prio(p); /* we are holding p->pi_lock already */ p->prio = rt_mutex_getprio(p); if (rt_prio(p->prio)) p->sched_class = &rt_sched_class; else p->sched_class = &fair_sched_class; + set_load_weight(p); } /* @@ -4032,7 +3775,6 @@ static bool check_same_owner(struct task_struct *p) static int __sched_setscheduler(struct task_struct *p, int policy, const struct sched_param *param, bool user) { - int newprio = MAX_RT_PRIO - 1 - param->sched_priority; int retval, oldprio, oldpolicy = -1, on_rq, running; unsigned long flags; const struct sched_class *prev_class; @@ -4128,13 +3870,10 @@ recheck: } /* - * If not changing anything there's no need to proceed - * further, but store a possible modification of - * reset_on_fork. + * If not changing anything there's no need to proceed further: */ if (unlikely(policy == p->policy && (!rt_policy(policy) || param->sched_priority == p->rt_priority))) { - p->sched_reset_on_fork = reset_on_fork; task_rq_unlock(rq, p, &flags); return 0; } @@ -4160,25 +3899,6 @@ recheck: task_rq_unlock(rq, p, &flags); goto recheck; } - - p->sched_reset_on_fork = reset_on_fork; - oldprio = p->prio; - - /* - * Special case for priority boosted tasks. - * - * If the new priority is lower or equal (user space view) - * than the current (boosted) priority, we just store the new - * normal parameters and do not touch the scheduler class and - * the runqueue. This will be done when the task deboost - * itself. - */ - if (rt_mutex_check_prio(p, newprio)) { - __setscheduler_params(p, policy, param->sched_priority); - task_rq_unlock(rq, p, &flags); - return 0; - } - on_rq = p->on_rq; running = task_current(rq, p); if (on_rq) @@ -4186,18 +3906,17 @@ recheck: if (running) p->sched_class->put_prev_task(rq, p); + p->sched_reset_on_fork = reset_on_fork; + + oldprio = p->prio; prev_class = p->sched_class; __setscheduler(rq, p, policy, param->sched_priority); if (running) p->sched_class->set_curr_task(rq); - if (on_rq) { - /* - * We enqueue to tail when the priority of a task is - * increased (user space view). - */ - enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); - } + if (on_rq) + enqueue_task(rq, p, 0); + check_class_changed(rq, p, prev_class, oldprio); task_rq_unlock(rq, p, &flags); @@ -4549,17 +4268,9 @@ static inline int should_resched(void) static void __cond_resched(void) { - do { - add_preempt_count(PREEMPT_ACTIVE); - __schedule(); - sub_preempt_count(PREEMPT_ACTIVE); - /* - * Check again in case we missed a preemption - * opportunity between schedule and now. 
- */ - barrier(); - - } while (need_resched()); + add_preempt_count(PREEMPT_ACTIVE); + __schedule(); + sub_preempt_count(PREEMPT_ACTIVE); } int __sched _cond_resched(void) @@ -4600,7 +4311,6 @@ int __cond_resched_lock(spinlock_t *lock) } EXPORT_SYMBOL(__cond_resched_lock); -#ifndef CONFIG_PREEMPT_RT_FULL int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); @@ -4614,7 +4324,6 @@ int __sched __cond_resched_softirq(void) return 0; } EXPORT_SYMBOL(__cond_resched_softirq); -#endif /** * yield - yield the current processor to other threads. @@ -4945,7 +4654,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) rcu_read_unlock(); rq->curr = rq->idle = idle; - idle->on_rq = 1; #if defined(CONFIG_SMP) idle->on_cpu = 1; #endif @@ -4953,9 +4661,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) /* Set the preempt count _outside_ the spinlocks! */ task_thread_info(idle)->preempt_count = 0; -#ifdef CONFIG_HAVE_PREEMPT_LAZY - task_thread_info(idle)->preempt_lazy_count = 0; -#endif + /* * The idle tasks have their own, simple scheduling class: */ @@ -4969,90 +4675,11 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) #ifdef CONFIG_SMP void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { - if (!migrate_disabled_updated(p)) { - if (p->sched_class && p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, new_mask); - p->nr_cpus_allowed = cpumask_weight(new_mask); - } - cpumask_copy(&p->cpus_allowed, new_mask); -} - -static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); -static DEFINE_MUTEX(sched_down_mutex); -static cpumask_t sched_down_cpumask; - -void tell_sched_cpu_down_begin(int cpu) -{ - mutex_lock(&sched_down_mutex); - cpumask_set_cpu(cpu, &sched_down_cpumask); - mutex_unlock(&sched_down_mutex); -} - -void tell_sched_cpu_down_done(int cpu) -{ - mutex_lock(&sched_down_mutex); - cpumask_clear_cpu(cpu, &sched_down_cpumask); - mutex_unlock(&sched_down_mutex); -} - -/** - * migrate_me - try to move the current task off this cpu - * - * Used by the pin_current_cpu() code to try to get tasks - * to move off the current CPU as it is going down. - * It will only move the task if the task isn't pinned to - * the CPU (with migrate_disable, affinity or THREAD_BOUND) - * and the task has to be in a RUNNING state. Otherwise the - * movement of the task will wake it up (change its state - * to running) when the task did not expect it. - * - * Returns 1 if it succeeded in moving the current task - * 0 otherwise. - */ -int migrate_me(void) -{ - struct task_struct *p = current; - struct migration_arg arg; - struct cpumask *cpumask; - struct cpumask *mask; - unsigned long flags; - unsigned int dest_cpu; - struct rq *rq; - - /* - * We can not migrate tasks bounded to a CPU or tasks not - * running. The movement of the task will wake it up. - */ - if (p->flags & PF_THREAD_BOUND || p->state) - return 0; - - mutex_lock(&sched_down_mutex); - rq = task_rq_lock(p, &flags); - - cpumask = &__get_cpu_var(sched_cpumasks); - mask = &p->cpus_allowed; - - cpumask_andnot(cpumask, mask, &sched_down_cpumask); - - if (!cpumask_weight(cpumask)) { - /* It's only on this CPU? 
*/ - task_rq_unlock(rq, p, &flags); - mutex_unlock(&sched_down_mutex); - return 0; - } - - dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); + if (p->sched_class && p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, new_mask); - arg.task = p; - arg.dest_cpu = dest_cpu; - - task_rq_unlock(rq, p, &flags); - - stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); - tlb_migrate_finish(p->mm); - mutex_unlock(&sched_down_mutex); - - return 1; + cpumask_copy(&p->cpus_allowed, new_mask); + p->nr_cpus_allowed = cpumask_weight(new_mask); } /* @@ -5103,7 +4730,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) do_set_cpus_allowed(p, new_mask); /* Can the task run on the task's current CPU? If so, we're done */ - if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) + if (cpumask_test_cpu(task_cpu(p), new_mask)) goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); @@ -5192,8 +4819,6 @@ static int migration_cpu_stop(void *data) #ifdef CONFIG_HOTPLUG_CPU -static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); - /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. @@ -5206,12 +4831,7 @@ void idle_task_exit(void) if (mm != &init_mm) switch_mm(mm, &init_mm, current); - - /* - * Defer the cleanup to an alive cpu. On RT we can neither - * call mmdrop() nor mmdrop_delayed() from here. - */ - per_cpu(idle_last_mm, smp_processor_id()) = mm; + mmdrop(mm); } /* @@ -5328,7 +4948,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep) } static int min_load_idx = 0; -static int max_load_idx = CPU_LOAD_IDX_MAX-1; +static int max_load_idx = CPU_LOAD_IDX_MAX; static void set_table_entry(struct ctl_table *entry, @@ -5528,10 +5148,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DEAD: calc_load_migrate(rq); - if (per_cpu(idle_last_mm, cpu)) { - mmdrop(per_cpu(idle_last_mm, cpu)); - per_cpu(idle_last_mm, cpu) = NULL; - } break; #endif } @@ -7384,8 +7000,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { - int nested = (preempt_count() & ~PREEMPT_ACTIVE) + - sched_rcu_preempt_depth(); + int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); return (nested == preempt_offset); } @@ -7395,8 +7010,7 @@ void __might_sleep(const char *file, int line, int preempt_offset) static unsigned long prev_jiffy; /* ratelimiting */ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ - if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && - !is_idle_task(current)) || + if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || system_state != SYSTEM_RUNNING || oops_in_progress) return; if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) @@ -7414,13 +7028,6 @@ void __might_sleep(const char *file, int line, int preempt_offset) debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); -#ifdef CONFIG_DEBUG_PREEMPT - if (!preempt_count_equals(preempt_offset)) { - pr_err("Preemption disabled at:"); - print_ip_sym(current->preempt_disable_ip); - pr_cont("\n"); - } -#endif dump_stack(); } EXPORT_SYMBOL(__might_sleep); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 2cac500..7ae4c4c 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -253,9 +253,6 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) P(rt_throttled); PN(rt_time); PN(rt_runtime); -#ifdef CONFIG_SMP - P(rt_nr_migratory); -#endif #undef PN #undef P @@ -510,10 +507,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) P(se.load.weight); P(policy); P(prio); -#ifdef CONFIG_PREEMPT_RT_FULL - P(migrate_disable); -#endif - P(nr_cpus_allowed); #undef PN #undef __PN #undef P diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 392fcf3..81fa536 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1827,7 +1827,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { - resched_task_lazy(rq_of(cfs_rq)->curr); + resched_task(rq_of(cfs_rq)->curr); /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. @@ -1851,7 +1851,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) - resched_task_lazy(rq_of(cfs_rq)->curr); + resched_task(rq_of(cfs_rq)->curr); } static void @@ -1971,7 +1971,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. */ if (queued) { - resched_task_lazy(rq_of(cfs_rq)->curr); + resched_task(rq_of(cfs_rq)->curr); return; } /* @@ -2160,7 +2160,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) - resched_task_lazy(rq_of(cfs_rq)->curr); + resched_task(rq_of(cfs_rq)->curr); } static __always_inline @@ -2745,7 +2745,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) - resched_task_lazy(p); + resched_task(p); return; } @@ -3577,7 +3577,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: - resched_task_lazy(curr); + resched_task(curr); /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved @@ -5772,7 +5772,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. 
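
[Aside on the resched_task_lazy() calls converted above: PREEMPT_LAZY splits "reschedule now" from "reschedule at the next convenient point", using a second thread-info flag. A flag-level sketch of the removed logic; macro and function names are hypothetical:]

    #define NEED_RESCHED       (1u << 0)   /* preempt at the next preemption point */
    #define NEED_RESCHED_LAZY  (1u << 1)   /* defer to return-to-user (RT only) */

    static void resched_task_lazy_sketch(unsigned int *ti_flags, int lazy_enabled)
    {
            if (!lazy_enabled) {
                    *ti_flags |= NEED_RESCHED;  /* behave like plain resched_task() */
                    return;
            }
            if (*ti_flags & (NEED_RESCHED | NEED_RESCHED_LAZY))
                    return;                     /* a reschedule is already pending */
            *ti_flags |= NEED_RESCHED_LAZY;
    }
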
*/ swap(curr->vruntime, se->vruntime); - resched_task_lazy(rq->curr); + resched_task(rq->curr); } se->vruntime -= cfs_rq->min_vruntime; @@ -5797,7 +5797,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) - resched_task_lazy(rq->curr); + resched_task(rq->curr); } else check_preempt_curr(rq, p, 0); } diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 771b529..1ad1d2b 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -57,18 +57,11 @@ SCHED_FEAT(OWNER_SPIN, true) */ SCHED_FEAT(NONTASK_POWER, true) -#ifndef CONFIG_PREEMPT_RT_FULL /* * Queue remote wakeups on the target CPU and process them * using the scheduler IPI. Reduces rq->lock contention/bounces. */ SCHED_FEAT(TTWU_QUEUE, true) -#else -SCHED_FEAT(TTWU_QUEUE, false) -# ifdef CONFIG_PREEMPT_LAZY -SCHED_FEAT(PREEMPT_LAZY, true) -# endif -#endif SCHED_FEAT(FORCE_SD_OVERLAP, false) SCHED_FEAT(RT_RUNTIME_SHARE, true) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 46faf69..4f02b28 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -41,7 +41,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - rt_b->rt_period_timer.irqsafe = 1; rt_b->rt_period_timer.function = sched_rt_period_timer; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d055951..fc88644 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -897,15 +897,6 @@ extern void init_sched_fair_class(void); extern void resched_task(struct task_struct *p); extern void resched_cpu(int cpu); -#ifdef CONFIG_PREEMPT_LAZY -extern void resched_task_lazy(struct task_struct *tsk); -#else -static inline void resched_task_lazy(struct task_struct *tsk) -{ - resched_task(tsk); -} -#endif - extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); diff --git a/kernel/signal.c b/kernel/signal.c index 03b6e8f..3d09cf6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -348,45 +348,13 @@ static bool task_participate_group_stop(struct task_struct *task) return false; } -#ifdef __HAVE_ARCH_CMPXCHG -static inline struct sigqueue *get_task_cache(struct task_struct *t) -{ - struct sigqueue *q = t->sigqueue_cache; - - if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) - return NULL; - return q; -} - -static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) -{ - if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) - return 0; - return 1; -} - -#else - -static inline struct sigqueue *get_task_cache(struct task_struct *t) -{ - return NULL; -} - -static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) -{ - return 1; -} - -#endif - /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an * appropriate lock must be held to stop the target task from exiting */ static struct sigqueue * -__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, - int override_rlimit, int fromslab) +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) { struct sigqueue *q = NULL; struct user_struct *user; @@ -403,10 +371,7 @@ __sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, if (override_rlimit || atomic_read(&user->sigpending) <= task_rlimit(t, RLIMIT_SIGPENDING)) { - if (!fromslab) - q = get_task_cache(t); - if (!q) - q = kmem_cache_alloc(sigqueue_cachep, flags); + q = 
kmem_cache_alloc(sigqueue_cachep, flags); } else { print_dropped_signal(sig); } @@ -423,13 +388,6 @@ __sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, return q; } -static struct sigqueue * -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, - int override_rlimit) -{ - return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); -} - static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) @@ -439,21 +397,6 @@ static void __sigqueue_free(struct sigqueue *q) kmem_cache_free(sigqueue_cachep, q); } -static void sigqueue_free_current(struct sigqueue *q) -{ - struct user_struct *up; - - if (q->flags & SIGQUEUE_PREALLOC) - return; - - up = q->user; - if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { - atomic_dec(&up->sigpending); - free_uid(up); - } else - __sigqueue_free(q); -} - void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; @@ -467,21 +410,6 @@ void flush_sigqueue(struct sigpending *queue) } /* - * Called from __exit_signal. Flush tsk->pending and - * tsk->sigqueue_cache - */ -void flush_task_sigqueue(struct task_struct *tsk) -{ - struct sigqueue *q; - - flush_sigqueue(&tsk->pending); - - q = get_task_cache(tsk); - if (q) - kmem_cache_free(sigqueue_cachep, q); -} - -/* * Flush all pending signals for a task. */ void __flush_signals(struct task_struct *t) @@ -557,9 +485,6 @@ flush_signal_handlers(struct task_struct *t, int force_default) if (force_default || ka->sa.sa_handler != SIG_IGN) ka->sa.sa_handler = SIG_DFL; ka->sa.sa_flags = 0; -#ifdef __ARCH_HAS_SA_RESTORER - ka->sa.sa_restorer = NULL; -#endif sigemptyset(&ka->sa.sa_mask); ka++; } @@ -633,7 +558,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); - sigqueue_free_current(first); + __sigqueue_free(first); } else { /* * Ok, it wasn't in the queue. This must be @@ -679,8 +604,6 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { int signr; - WARN_ON_ONCE(tsk != current); - /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ @@ -1302,8 +1225,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. */ -static int -do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +int +force_sig_info(int sig, struct siginfo *info, struct task_struct *t) { unsigned long int flags; int ret, blocked, ignored; @@ -1328,39 +1251,6 @@ do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) return ret; } -int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) -{ -/* - * On some archs, PREEMPT_RT has to delay sending a signal from a trap - * since it can not enable preemption, and the signal code's spin_locks - * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will - * send the signal on exit of the trap. 
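/*
 * [Illustrative sketch -- not part of this patch. The removed
 * get_task_cache()/put_task_cache() pair implements a lock-free,
 * single-slot per-task cache built on cmpxchg(). The same protocol in
 * portable C11, with malloc()/free() standing in for the slab cache;
 * the helper names below are invented for illustration.]
 */
#include <stdatomic.h>
#include <stdlib.h>

struct sigqueue { int sig; };

static _Atomic(struct sigqueue *) sigqueue_cache;

/* Take the cached object; NULL if the slot was empty or we lost a race. */
static struct sigqueue *get_cache(void)
{
	struct sigqueue *q = atomic_load(&sigqueue_cache);

	if (!q || !atomic_compare_exchange_strong(&sigqueue_cache, &q, NULL))
		return NULL;
	return q;
}

/* Park an object in the slot; returns 0 on success, 1 if already full. */
static int put_cache(struct sigqueue *q)
{
	struct sigqueue *expected = NULL;

	return atomic_compare_exchange_strong(&sigqueue_cache, &expected, q) ? 0 : 1;
}

struct sigqueue *alloc_sigqueue(void)
{
	struct sigqueue *q = get_cache();

	return q ? q : malloc(sizeof(*q));
}

void free_sigqueue(struct sigqueue *q)
{
	if (put_cache(q))
		free(q);
}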
- */ -#ifdef ARCH_RT_DELAYS_SIGNAL_SEND - if (in_atomic()) { - if (WARN_ON_ONCE(t != current)) - return 0; - if (WARN_ON_ONCE(t->forced_info.si_signo)) - return 0; - - if (is_si_special(info)) { - WARN_ON_ONCE(info != SEND_SIG_PRIV); - t->forced_info.si_signo = sig; - t->forced_info.si_errno = 0; - t->forced_info.si_code = SI_KERNEL; - t->forced_info.si_pid = 0; - t->forced_info.si_uid = 0; - } else { - t->forced_info = *info; - } - - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); - return 0; - } -#endif - return do_force_sig_info(sig, info, t); -} - /* * Nuke all other threads in the group. */ @@ -1391,12 +1281,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, struct sighand_struct *sighand; for (;;) { - local_irq_save_nort(*flags); + local_irq_save(*flags); rcu_read_lock(); sighand = rcu_dereference(tsk->sighand); if (unlikely(sighand == NULL)) { rcu_read_unlock(); - local_irq_restore_nort(*flags); + local_irq_restore(*flags); break; } @@ -1407,7 +1297,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, } spin_unlock(&sighand->siglock); rcu_read_unlock(); - local_irq_restore_nort(*flags); + local_irq_restore(*flags); } return sighand; @@ -1652,8 +1542,7 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { - /* Preallocated sigqueue objects always from the slabcache ! */ - struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); + struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); if (q) q->flags |= SIGQUEUE_PREALLOC; @@ -2010,7 +1899,15 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); + /* + * Don't want to allow preemption here, because + * sys_ptrace() needs this task to be inactive. + * + * XXX: implement read_unlock_no_resched(). + */ + preempt_disable(); read_unlock(&tasklist_lock); + preempt_enable_no_resched(); freezable_schedule(); } else { /* @@ -2980,7 +2877,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) static int do_tkill(pid_t tgid, pid_t pid, int sig) { - struct siginfo info = {}; + struct siginfo info; info.si_signo = sig; info.si_errno = 0; diff --git a/kernel/softirq.c b/kernel/softirq.c index 8447c8d..ed567ba 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -21,12 +21,10 @@ #include #include #include -#include #include #include #include #include -#include #define CREATE_TRACE_POINTS #include @@ -64,98 +62,6 @@ char *softirq_to_name[NR_SOFTIRQS] = { "TASKLET", "SCHED", "HRTIMER", "RCU" }; -#ifdef CONFIG_NO_HZ -# ifdef CONFIG_PREEMPT_RT_FULL - -struct softirq_runner { - struct task_struct *runner[NR_SOFTIRQS]; -}; - -static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); - -static inline void softirq_set_runner(unsigned int sirq) -{ - struct softirq_runner *sr = &__get_cpu_var(softirq_runners); - - sr->runner[sirq] = current; -} - -static inline void softirq_clr_runner(unsigned int sirq) -{ - struct softirq_runner *sr = &__get_cpu_var(softirq_runners); - - sr->runner[sirq] = NULL; -} - -/* - * On preempt-rt a softirq running context might be blocked on a - * lock. There might be no other runnable task on this CPU because the - * lock owner runs on some other CPU. So we have to go into idle with - * the pending bit set. Therefore we need to check this, otherwise we - * warn about false positives, which confuses users and defeats the - * whole purpose of this test. - * - * This code is called with interrupts disabled.
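/*
 * [Illustrative sketch -- not part of this patch. The removed
 * force_sig_info() wrapper above defers delivery when called from
 * atomic context on RT: it stashes the siginfo in the task and sets
 * TIF_NOTIFY_RESUME so the signal is sent on the way out of the trap.
 * A generic userspace model of "record now, act at the next safe
 * point"; the names below are invented for illustration.]
 */
#include <stdbool.h>
#include <stdio.h>

struct deferred_sig { bool pending; int signo; };

static struct deferred_sig forced_info;
static bool in_atomic_ctx;             /* stands in for in_atomic() */

static void send_signal_now(int signo) { printf("signal %d sent\n", signo); }

static void force_sig(int signo)
{
	if (in_atomic_ctx) {               /* may not take sleeping locks */
		forced_info.signo = signo;
		forced_info.pending = true;    /* like TIF_NOTIFY_RESUME */
		return;
	}
	send_signal_now(signo);
}

/* Called at the next safe point, e.g. on exit from the trap handler. */
static void notify_resume(void)
{
	if (forced_info.pending) {
		forced_info.pending = false;
		send_signal_now(forced_info.signo);
	}
}

int main(void)
{
	in_atomic_ctx = true;
	force_sig(11);        /* deferred */
	in_atomic_ctx = false;
	notify_resume();      /* delivered here */
	return 0;
}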
- */ -void softirq_check_pending_idle(void) -{ - static int rate_limit; - struct softirq_runner *sr = &__get_cpu_var(softirq_runners); - u32 warnpending; - int i; - - if (rate_limit >= 10) - return; - - warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; - for (i = 0; i < NR_SOFTIRQS; i++) { - struct task_struct *tsk = sr->runner[i]; - - /* - * The wakeup code in rtmutex.c wakes up the task - * _before_ it sets pi_blocked_on to NULL under - * tsk->pi_lock. So we need to check for both: state - * and pi_blocked_on. - */ - if (tsk) { - raw_spin_lock(&tsk->pi_lock); - if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { - /* Clear all bits pending in that task */ - warnpending &= ~(tsk->softirqs_raised); - warnpending &= ~(1 << i); - } - raw_spin_unlock(&tsk->pi_lock); - } - } - - if (warnpending) { - printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", - warnpending); - rate_limit++; - } -} -# else -/* - * On !PREEMPT_RT we just printk rate limited: - */ -void softirq_check_pending_idle(void) -{ - static int rate_limit; - - if (rate_limit < 10 && - (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { - printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", - local_softirq_pending()); - rate_limit++; - } -} -# endif - -#else /* !NO_HZ */ -static inline void softirq_set_runner(unsigned int sirq) { } -static inline void softirq_clr_runner(unsigned int sirq) { } -#endif - /* * we cannot loop indefinitely here to avoid userspace starvation, * but we also don't want to introduce a worst case 1/HZ latency @@ -171,57 +77,6 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } -static void handle_softirq(unsigned int vec_nr, int cpu, int need_rcu_bh_qs) -{ - struct softirq_action *h = softirq_vec + vec_nr; - unsigned int prev_count = preempt_count(); - - kstat_incr_softirqs_this_cpu(vec_nr); - trace_softirq_entry(vec_nr); - h->action(h); - trace_softirq_exit(vec_nr); - - if (unlikely(prev_count != preempt_count())) { - pr_err("softirq %u %s %p preempt count leak: %08x -> %08x\n", - vec_nr, softirq_to_name[vec_nr], h->action, - prev_count, (unsigned int) preempt_count()); - preempt_count() = prev_count; - } - if (need_rcu_bh_qs) - rcu_bh_qs(cpu); -} - -#ifndef CONFIG_PREEMPT_RT_FULL -static inline int ksoftirqd_softirq_pending(void) -{ - return local_softirq_pending(); -} - -static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) -{ - unsigned int vec_nr; - - local_irq_enable(); - for (vec_nr = 0; pending; vec_nr++, pending >>= 1) { - if (pending & 1) - handle_softirq(vec_nr, cpu, need_rcu_bh_qs); - } - local_irq_disable(); -} - -static void run_ksoftirqd(unsigned int cpu) -{ - local_irq_disable(); - if (ksoftirqd_softirq_pending()) { - __do_softirq(); - rcu_note_context_switch(cpu); - local_irq_enable(); - cond_resched(); - return; - } - local_irq_enable(); -} - /* * preempt_count and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving @@ -352,6 +207,7 @@ EXPORT_SYMBOL(local_bh_enable_ip); asmlinkage void __do_softirq(void) { + struct softirq_action *h; __u32 pending; int max_restart = MAX_SOFTIRQ_RESTART; int cpu; @@ -368,7 +224,7 @@ asmlinkage void __do_softirq(void) vtime_account_irq_enter(current); __local_bh_disable((unsigned long)__builtin_return_address(0), - SOFTIRQ_OFFSET); + SOFTIRQ_OFFSET); lockdep_softirq_enter(); cpu = smp_processor_id(); @@ -376,7 +232,36 @@ restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); - handle_pending_softirqs(pending, cpu, 1); + 
local_irq_enable(); + + h = softirq_vec; + + do { + if (pending & 1) { + unsigned int vec_nr = h - softirq_vec; + int prev_count = preempt_count(); + + kstat_incr_softirqs_this_cpu(vec_nr); + + trace_softirq_entry(vec_nr); + h->action(h); + trace_softirq_exit(vec_nr); + if (unlikely(prev_count != preempt_count())) { + printk(KERN_ERR "huh, entered softirq %u %s %p" + "with preempt_count %08x," + " exited with %08x?\n", vec_nr, + softirq_to_name[vec_nr], h->action, + prev_count, preempt_count()); + preempt_count() = prev_count; + } + + rcu_bh_qs(cpu); + } + h++; + pending >>= 1; + } while (pending); + + local_irq_disable(); pending = local_softirq_pending(); if (pending && --max_restart) @@ -415,259 +300,6 @@ asmlinkage void do_softirq(void) #endif /* - * This function must run with irqs disabled! - */ -void raise_softirq_irqoff(unsigned int nr) -{ - __raise_softirq_irqoff(nr); - - /* - * If we're in an interrupt or softirq, we're done - * (this also catches softirq-disabled code). We will - * actually run the softirq once we return from - * the irq or softirq. - * - * Otherwise we wake up ksoftirqd to make sure we - * schedule the softirq soon. - */ - if (!in_interrupt()) - wakeup_softirqd(); -} - -void __raise_softirq_irqoff(unsigned int nr) -{ - trace_softirq_raise(nr); - or_softirq_pending(1UL << nr); -} - -static inline void local_bh_disable_nort(void) { local_bh_disable(); } -static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } -static void ksoftirqd_set_sched_params(unsigned int cpu) { } -static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } - -#else /* !PREEMPT_RT_FULL */ - -/* - * On RT we serialize softirq execution with a cpu local lock per softirq - */ -static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); - -void __init softirq_early_init(void) -{ - int i; - - for (i = 0; i < NR_SOFTIRQS; i++) - local_irq_lock_init(local_softirq_locks[i]); -} - -static void lock_softirq(int which) -{ - __local_lock(&__get_cpu_var(local_softirq_locks[which])); -} - -static void unlock_softirq(int which) -{ - __local_unlock(&__get_cpu_var(local_softirq_locks[which])); -} - -static void do_single_softirq(int which, int need_rcu_bh_qs) -{ - unsigned long old_flags = current->flags; - - current->flags &= ~PF_MEMALLOC; - vtime_account(current); - current->flags |= PF_IN_SOFTIRQ; - lockdep_softirq_enter(); - local_irq_enable(); - handle_softirq(which, smp_processor_id(), need_rcu_bh_qs); - local_irq_disable(); - lockdep_softirq_exit(); - current->flags &= ~PF_IN_SOFTIRQ; - vtime_account(current); - tsk_restore_flags(current, old_flags, PF_MEMALLOC); -} - -/* - * Called with interrupts disabled. Process softirqs which were raised - * in current context (or on behalf of ksoftirqd). - */ -static void do_current_softirqs(int need_rcu_bh_qs) -{ - while (current->softirqs_raised) { - int i = __ffs(current->softirqs_raised); - unsigned int pending, mask = (1U << i); - - current->softirqs_raised &= ~mask; - local_irq_enable(); - - /* - * If the lock is contended, we boost the owner to - * process the softirq or leave the critical section - * now. - */ - lock_softirq(i); - local_irq_disable(); - softirq_set_runner(i); - /* - * Check with the local_softirq_pending() bits, - * whether we need to process this still or if someone - * else took care of it. 
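/*
 * [Illustrative sketch -- not part of this patch. The removed
 * do_current_softirqs() walks the softirqs_raised bitmask lowest bit
 * first with __ffs() and clears each bit before handling it. The same
 * loop shape in plain C, using the GCC/Clang __builtin_ctz builtin:]
 */
#include <stdio.h>

static void handle_softirq(int nr) { printf("running softirq %d\n", nr); }

static void run_raised(unsigned int raised)
{
	while (raised) {
		int i = __builtin_ctz(raised);  /* lowest pending vector */
		unsigned int mask = 1u << i;

		raised &= ~mask;                /* claim it before running */
		handle_softirq(i);
	}
}

int main(void)
{
	/* vectors 1, 3 and 6, i.e. TIMER, NET_RX and TASKLET in v3.8 */
	run_raised((1u << 1) | (1u << 3) | (1u << 6));
	return 0;
}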
- */ - pending = local_softirq_pending(); - if (pending & mask) { - set_softirq_pending(pending & ~mask); - do_single_softirq(i, need_rcu_bh_qs); - } - softirq_clr_runner(i); - unlock_softirq(i); - WARN_ON(current->softirq_nestcnt != 1); - } -} - -void local_bh_disable(void) -{ - migrate_disable(); - current->softirq_nestcnt++; -} -EXPORT_SYMBOL(local_bh_disable); - -void local_bh_enable(void) -{ - if (WARN_ON(current->softirq_nestcnt == 0)) - return; - - local_irq_disable(); - if (current->softirq_nestcnt == 1 && current->softirqs_raised) - do_current_softirqs(1); - local_irq_enable(); - - current->softirq_nestcnt--; - migrate_enable(); -} -EXPORT_SYMBOL(local_bh_enable); - -void local_bh_enable_ip(unsigned long ip) -{ - local_bh_enable(); -} -EXPORT_SYMBOL(local_bh_enable_ip); - -void _local_bh_enable(void) -{ - current->softirq_nestcnt--; - migrate_enable(); -} -EXPORT_SYMBOL(_local_bh_enable); - -int in_serving_softirq(void) -{ - return current->flags & PF_IN_SOFTIRQ; -} -EXPORT_SYMBOL(in_serving_softirq); - -/* Called with preemption disabled */ -static void run_ksoftirqd(unsigned int cpu) -{ - local_irq_disable(); - current->softirq_nestcnt++; - do_current_softirqs(1); - current->softirq_nestcnt--; - rcu_note_context_switch(cpu); - local_irq_enable(); -} - -/* - * Called from netif_rx_ni(). Preemption enabled, but migration - * disabled. So the cpu can't go away under us. - */ -void thread_do_softirq(void) -{ - if (!in_serving_softirq() && current->softirqs_raised) { - current->softirq_nestcnt++; - do_current_softirqs(0); - current->softirq_nestcnt--; - } -} - -static void do_raise_softirq_irqoff(unsigned int nr) -{ - trace_softirq_raise(nr); - or_softirq_pending(1UL << nr); - - /* - * If we are not in a hard interrupt and inside a bh disabled - * region, we simply raise the flag on current. local_bh_enable() - * will make sure that the softirq is executed. Otherwise we - * delegate it to ksoftirqd. - */ - if (!in_irq() && current->softirq_nestcnt) - current->softirqs_raised |= (1U << nr); - else if (__this_cpu_read(ksoftirqd)) - __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); -} - -void __raise_softirq_irqoff(unsigned int nr) -{ - do_raise_softirq_irqoff(nr); - if (!in_irq() && !current->softirq_nestcnt) - wakeup_softirqd(); -} - -/* - * This function must run with irqs disabled! - */ -void raise_softirq_irqoff(unsigned int nr) -{ - do_raise_softirq_irqoff(nr); - - /* - * If we're in a hard interrupt we let irq return code deal - * with the wakeup of ksoftirqd. - */ - if (in_irq()) - return; - - /* - * If we are in thread context but outside of a bh disabled - * region, we need to wake ksoftirqd as well. - * - * CHECKME: Some of the places which do that could be wrapped - * into local_bh_disable/enable pairs. Though it's unclear - * whether this is worth the effort. To find those places just - * raise a WARN() if the condition is met.
- */ - if (!current->softirq_nestcnt) - wakeup_softirqd(); -} - -static inline int ksoftirqd_softirq_pending(void) -{ - return current->softirqs_raised; -} - -static inline void local_bh_disable_nort(void) { } -static inline void _local_bh_enable_nort(void) { } - -static inline void ksoftirqd_set_sched_params(unsigned int cpu) -{ - struct sched_param param = { .sched_priority = 1 }; - - sched_setscheduler(current, SCHED_FIFO, &param); - /* Take over all pending softirqs when starting */ - local_irq_disable(); - current->softirqs_raised = local_softirq_pending(); - local_irq_enable(); -} - -static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) -{ - struct sched_param param = { .sched_priority = 0 }; - - sched_setscheduler(current, SCHED_NORMAL, &param); -} - -#endif /* PREEMPT_RT_FULL */ -/* * Enter an interrupt context. */ void irq_enter(void) @@ -680,9 +312,9 @@ void irq_enter(void) * Prevent raise_softirq from needlessly waking up ksoftirqd * here, as softirq will be serviced on return from interrupt. */ - local_bh_disable_nort(); + local_bh_disable(); tick_check_idle(cpu); - _local_bh_enable_nort(); + _local_bh_enable(); } __irq_enter(); @@ -690,7 +322,6 @@ static inline void invoke_softirq(void) { -#ifndef CONFIG_PREEMPT_RT_FULL if (!force_irqthreads) { #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED __do_softirq(); @@ -703,15 +334,6 @@ static inline void invoke_softirq(void) wakeup_softirqd(); __local_bh_enable(SOFTIRQ_OFFSET); } -#else /* PREEMPT_RT_FULL */ - unsigned long flags; - - local_irq_save(flags); - if (__this_cpu_read(ksoftirqd) && - __this_cpu_read(ksoftirqd)->softirqs_raised) - wakeup_softirqd(); - local_irq_restore(flags); -#endif } /* @@ -734,6 +356,26 @@ void irq_exit(void) sched_preempt_enable_no_resched(); } +/* + * This function must run with irqs disabled! + */ +inline void raise_softirq_irqoff(unsigned int nr) +{ + __raise_softirq_irqoff(nr); + + /* + * If we're in an interrupt or softirq, we're done + * (this also catches softirq-disabled code). We will + * actually run the softirq once we return from + * the irq or softirq. + * + * Otherwise we wake up ksoftirqd to make sure we + * schedule the softirq soon. + */ + if (!in_interrupt()) + wakeup_softirqd(); +} + void raise_softirq(unsigned int nr) { unsigned long flags; @@ -743,6 +385,12 @@ void raise_softirq(unsigned int nr) local_irq_restore(flags); } +void __raise_softirq_irqoff(unsigned int nr) +{ + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); +} + void open_softirq(int nr, void (*action)(struct softirq_action *)) { softirq_vec[nr].action = action; @@ -760,45 +408,15 @@ struct tasklet_head static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); -static void inline -__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) -{ - if (tasklet_trylock(t)) { -again: - /* We may have been preempted before tasklet_trylock - * and __tasklet_action may have already run. - * So double check the sched bit while the tasklet - * is locked before adding it to the list. - */ - if (test_bit(TASKLET_STATE_SCHED, &t->state)) { - t->next = NULL; - *head->tail = t; - head->tail = &(t->next); - raise_softirq_irqoff(nr); - tasklet_unlock(t); - } else { - /* This is subtle. If we hit the corner case above - * It is possible that we get preempted right here, - * and another task has successfully called - * tasklet_schedule(), then this function, and - * failed on the trylock.
Thus we must be sure - * before releasing the tasklet lock, that the - * SCHED_BIT is clear. Otherwise the tasklet - * may get its SCHED_BIT set, but not added to the - * list - */ - if (!tasklet_tryunlock(t)) - goto again; - } - } -} - void __tasklet_schedule(struct tasklet_struct *t) { unsigned long flags; local_irq_save(flags); - __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ); + t->next = NULL; + *__this_cpu_read(tasklet_vec.tail) = t; + __this_cpu_write(tasklet_vec.tail, &(t->next)); + raise_softirq_irqoff(TASKLET_SOFTIRQ); local_irq_restore(flags); } @@ -809,7 +427,10 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) unsigned long flags; local_irq_save(flags); - __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ); + t->next = NULL; + *__this_cpu_read(tasklet_hi_vec.tail) = t; + __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); + raise_softirq_irqoff(HI_SOFTIRQ); local_irq_restore(flags); } @@ -817,117 +438,48 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); void __tasklet_hi_schedule_first(struct tasklet_struct *t) { - __tasklet_hi_schedule(t); -} + BUG_ON(!irqs_disabled()); -EXPORT_SYMBOL(__tasklet_hi_schedule_first); - -void tasklet_enable(struct tasklet_struct *t) -{ - if (!atomic_dec_and_test(&t->count)) - return; - if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) - tasklet_schedule(t); + t->next = __this_cpu_read(tasklet_hi_vec.head); + __this_cpu_write(tasklet_hi_vec.head, t); + __raise_softirq_irqoff(HI_SOFTIRQ); } -EXPORT_SYMBOL(tasklet_enable); +EXPORT_SYMBOL(__tasklet_hi_schedule_first); -void tasklet_hi_enable(struct tasklet_struct *t) +static void tasklet_action(struct softirq_action *a) { - if (!atomic_dec_and_test(&t->count)) - return; - if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) - tasklet_hi_schedule(t); -} - -EXPORT_SYMBOL(tasklet_hi_enable); + struct tasklet_struct *list; -static void -__tasklet_action(struct softirq_action *a, struct tasklet_struct *list) -{ - int loops = 1000000; + local_irq_disable(); + list = __this_cpu_read(tasklet_vec.head); + __this_cpu_write(tasklet_vec.head, NULL); + __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); + local_irq_enable(); while (list) { struct tasklet_struct *t = list; list = list->next; - /* - * Should always succeed - after a tasklist got on the - * list (after getting the SCHED bit set from 0 to 1), - * nothing but the tasklet softirq it got queued to can - * lock it: - */ - if (!tasklet_trylock(t)) { - WARN_ON(1); - continue; - } - - t->next = NULL; - - /* - * If we cannot handle the tasklet because it's disabled, - * mark it as pending. tasklet_enable() will later - * re-schedule the tasklet. - */ - if (unlikely(atomic_read(&t->count))) { -out_disabled: - /* implicit unlock: */ - wmb(); - t->state = TASKLET_STATEF_PENDING; - continue; - } - - /* - * After this point on the tasklet might be rescheduled - * on another CPU, but it can only be added to another - * CPU's tasklet list if we unlock the tasklet (which we - * dont do yet). - */ - if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) - WARN_ON(1); - -again: - t->func(t->data); - - /* - * Try to unlock the tasklet. We must use cmpxchg, because - * another CPU might have scheduled or disabled the tasklet. - * We only allow the STATE_RUN -> 0 transition here. 
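/*
 * [Illustrative sketch -- not part of this patch. The removed
 * __tasklet_action() serializes each tasklet through a RUN bit:
 * tasklet_trylock() is a test-and-set of that bit and
 * tasklet_tryunlock() is a cmpxchg that permits only the RUN -> 0
 * transition, exactly as the comment above describes. The same bit
 * protocol with C11 atomics:]
 */
#include <stdatomic.h>
#include <stdbool.h>

enum {
	TASKLET_STATE_SCHED = 1u << 0,  /* queued for execution */
	TASKLET_STATE_RUN   = 1u << 1,  /* currently executing */
};

struct tasklet { _Atomic unsigned int state; };

static bool tasklet_trylock(struct tasklet *t)
{
	/* Succeeds only if RUN was clear beforehand. */
	return !(atomic_fetch_or(&t->state, TASKLET_STATE_RUN) &
		 TASKLET_STATE_RUN);
}

static bool tasklet_tryunlock(struct tasklet *t)
{
	/* Allow only RUN -> 0; fails if SCHED got set meanwhile. */
	unsigned int expected = TASKLET_STATE_RUN;

	return atomic_compare_exchange_strong(&t->state, &expected, 0);
}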
- */ - while (!tasklet_tryunlock(t)) { - /* - * If it got disabled meanwhile, bail out: - */ - if (atomic_read(&t->count)) - goto out_disabled; - /* - * If it got scheduled meanwhile, re-execute - * the tasklet function: - */ - if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) - goto again; - if (!--loops) { - printk("hm, tasklet state: %08lx\n", t->state); - WARN_ON(1); + if (tasklet_trylock(t)) { + if (!atomic_read(&t->count)) { + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + BUG(); + t->func(t->data); tasklet_unlock(t); - break; + continue; } + tasklet_unlock(t); } - } -} -static void tasklet_action(struct softirq_action *a) -{ - struct tasklet_struct *list; - - local_irq_disable(); - list = __get_cpu_var(tasklet_vec).head; - __get_cpu_var(tasklet_vec).head = NULL; - __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; - local_irq_enable(); - - __tasklet_action(a, list); + local_irq_disable(); + t->next = NULL; + *__this_cpu_read(tasklet_vec.tail) = t; + __this_cpu_write(tasklet_vec.tail, &(t->next)); + __raise_softirq_irqoff(TASKLET_SOFTIRQ); + local_irq_enable(); + } } static void tasklet_hi_action(struct softirq_action *a) @@ -940,7 +492,29 @@ static void tasklet_hi_action(struct softirq_action *a) __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); local_irq_enable(); - __tasklet_action(a, list); + while (list) { + struct tasklet_struct *t = list; + + list = list->next; + + if (tasklet_trylock(t)) { + if (!atomic_read(&t->count)) { + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + BUG(); + t->func(t->data); + tasklet_unlock(t); + continue; + } + tasklet_unlock(t); + } + + local_irq_disable(); + t->next = NULL; + *__this_cpu_read(tasklet_hi_vec.tail) = t; + __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); + __raise_softirq_irqoff(HI_SOFTIRQ); + local_irq_enable(); + } } @@ -963,7 +537,7 @@ void tasklet_kill(struct tasklet_struct *t) while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { do { - msleep(1); + yield(); } while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); @@ -1169,26 +743,22 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action); } -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) -void tasklet_unlock_wait(struct tasklet_struct *t) +static int ksoftirqd_should_run(unsigned int cpu) { - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { - /* - * Hack for now to avoid this busy-loop: - */ -#ifdef CONFIG_PREEMPT_RT_FULL - msleep(1); -#else - barrier(); -#endif - } + return local_softirq_pending(); } -EXPORT_SYMBOL(tasklet_unlock_wait); -#endif -static int ksoftirqd_should_run(unsigned int cpu) +static void run_ksoftirqd(unsigned int cpu) { - return ksoftirqd_softirq_pending(); + local_irq_disable(); + if (local_softirq_pending()) { + __do_softirq(); + rcu_note_context_switch(cpu); + local_irq_enable(); + cond_resched(); + return; + } + local_irq_enable(); } #ifdef CONFIG_HOTPLUG_CPU @@ -1271,8 +841,6 @@ static struct notifier_block __cpuinitdata cpu_nfb = { static struct smp_hotplug_thread softirq_threads = { .store = &ksoftirqd, - .setup = ksoftirqd_set_sched_params, - .cleanup = ksoftirqd_clr_sched_params, .thread_should_run = ksoftirqd_should_run, .thread_fn = run_ksoftirqd, .thread_comm = "ksoftirqd/%u", diff --git a/kernel/spinlock.c b/kernel/spinlock.c index da9775b..5cdd806 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -110,11 +110,8 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ * __[spin|read|write]_lock_bh() 
*/ BUILD_LOCK_OPS(spin, raw_spinlock); - -#ifndef CONFIG_PREEMPT_RT_FULL BUILD_LOCK_OPS(read, rwlock); BUILD_LOCK_OPS(write, rwlock); -#endif #endif @@ -198,8 +195,6 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif -#ifndef CONFIG_PREEMPT_RT_FULL - #ifndef CONFIG_INLINE_READ_TRYLOCK int __lockfunc _raw_read_trylock(rwlock_t *lock) { @@ -344,8 +339,6 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) EXPORT_SYMBOL(_raw_write_unlock_bh); #endif -#endif /* !PREEMPT_RT_FULL */ - #ifdef CONFIG_DEBUG_LOCK_ALLOC void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index e98c70b..2f194e9 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -29,12 +29,12 @@ struct cpu_stop_done { atomic_t nr_todo; /* nr left to execute */ bool executed; /* actually executed? */ int ret; /* collected return value */ - struct task_struct *waiter; /* woken when nr_todo reaches 0 */ + struct completion completion; /* fired if nr_todo reaches 0 */ }; /* the actual stopper, one per every possible cpu, enabled on online cpus */ struct cpu_stopper { - raw_spinlock_t lock; + spinlock_t lock; bool enabled; /* is this stopper enabled? */ struct list_head works; /* list of pending works */ struct task_struct *thread; /* stopper thread */ @@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) { memset(done, 0, sizeof(*done)); atomic_set(&done->nr_todo, nr_todo); - done->waiter = current; + init_completion(&done->completion); } /* signal completion unless @done is NULL */ @@ -56,10 +56,8 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed) if (done) { if (executed) done->executed = true; - if (atomic_dec_and_test(&done->nr_todo)) { - wake_up_process(done->waiter); - done->waiter = NULL; - } + if (atomic_dec_and_test(&done->nr_todo)) + complete(&done->completion); } } @@ -69,7 +67,7 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper, { unsigned long flags; - raw_spin_lock_irqsave(&stopper->lock, flags); + spin_lock_irqsave(&stopper->lock, flags); if (stopper->enabled) { list_add_tail(&work->list, &stopper->works); @@ -77,23 +75,7 @@ static void cpu_stop_queue_work(struct cpu_stopper *stopper, } else cpu_stop_signal_done(work->done, false); - raw_spin_unlock_irqrestore(&stopper->lock, flags); -} - -static void wait_for_stop_done(struct cpu_stop_done *done) -{ - set_current_state(TASK_UNINTERRUPTIBLE); - while (atomic_read(&done->nr_todo)) { - schedule(); - set_current_state(TASK_UNINTERRUPTIBLE); - } - /* - * We need to wait until cpu_stop_signal_done() has cleared - * done->waiter. - */ - while (done->waiter) - cpu_relax(); - set_current_state(TASK_RUNNING); + spin_unlock_irqrestore(&stopper->lock, flags); } /** @@ -127,7 +109,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) cpu_stop_init_done(&done, 1); cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work); - wait_for_stop_done(&done); + wait_for_completion(&done.completion); return done.executed ? 
done.ret : -ENOENT; } @@ -153,12 +135,11 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, /* static data for stop_cpus */ static DEFINE_MUTEX(stop_cpus_mutex); -static DEFINE_MUTEX(stopper_lock); static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work); static void queue_stop_cpus_work(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg, - struct cpu_stop_done *done, bool inactive) + struct cpu_stop_done *done) { struct cpu_stop_work *work; unsigned int cpu; @@ -172,19 +153,15 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask, } /* - * Make sure that all work is queued on all cpus before we - * any of the cpus can execute it. + * Disable preemption while queueing to avoid getting + * preempted by a stopper which might wait for other stoppers + * to enter @fn which can lead to deadlock. */ - if (!inactive) { - mutex_lock(&stopper_lock); - } else { - while (!mutex_trylock(&stopper_lock)) - cpu_relax(); - } + preempt_disable(); for_each_cpu(cpu, cpumask) cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &per_cpu(stop_cpus_work, cpu)); - mutex_unlock(&stopper_lock); + preempt_enable(); } static int __stop_cpus(const struct cpumask *cpumask, @@ -193,8 +170,8 @@ static int __stop_cpus(const struct cpumask *cpumask, struct cpu_stop_done done; cpu_stop_init_done(&done, cpumask_weight(cpumask)); - queue_stop_cpus_work(cpumask, fn, arg, &done, false); - wait_for_stop_done(&done); + queue_stop_cpus_work(cpumask, fn, arg, &done); + wait_for_completion(&done.completion); return done.executed ? done.ret : -ENOENT; } @@ -282,13 +259,13 @@ repeat: } work = NULL; - raw_spin_lock_irq(&stopper->lock); + spin_lock_irq(&stopper->lock); if (!list_empty(&stopper->works)) { work = list_first_entry(&stopper->works, struct cpu_stop_work, list); list_del_init(&work->list); } - raw_spin_unlock_irq(&stopper->lock); + spin_unlock_irq(&stopper->lock); if (work) { cpu_stop_fn_t fn = work->fn; @@ -298,16 +275,6 @@ repeat: __set_current_state(TASK_RUNNING); - /* - * Wait until the stopper finished scheduling on all - * cpus - */ - mutex_lock(&stopper_lock); - /* - * Let other cpu threads continue as well - */ - mutex_unlock(&stopper_lock); - /* cpu stop callbacks are not allowed to sleep */ preempt_disable(); @@ -322,13 +289,7 @@ repeat: kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL, ksym_buf), arg); - /* - * Make sure that the wakeup and setting done->waiter - * to NULL is atomic. 
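/*
 * [Illustrative sketch -- not part of this patch. The hunks above swap
 * the RT "remember the waiting task and wake_up_process() it" scheme
 * back to a struct completion. A completion is essentially a done flag
 * plus a wait queue; a minimal userspace equivalent with POSIX threads:]
 */
#include <pthread.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int             done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->wait, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)                /* sleeps; no busy waiting */
		pthread_cond_wait(&c->wait, &c->lock);
	pthread_mutex_unlock(&c->lock);
}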
- */ - local_irq_disable(); cpu_stop_signal_done(done, true); - local_irq_enable(); } else schedule(); @@ -356,7 +317,6 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, if (IS_ERR(p)) return notifier_from_errno(PTR_ERR(p)); get_task_struct(p); - p->flags |= PF_STOMPER; kthread_bind(p, cpu); sched_set_stop_task(cpu, p); stopper->thread = p; @@ -366,9 +326,9 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, /* strictly unnecessary, as first user will wake it */ wake_up_process(stopper->thread); /* mark enabled */ - raw_spin_lock_irq(&stopper->lock); + spin_lock_irq(&stopper->lock); stopper->enabled = true; - raw_spin_unlock_irq(&stopper->lock); + spin_unlock_irq(&stopper->lock); break; #ifdef CONFIG_HOTPLUG_CPU @@ -381,11 +341,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, /* kill the stopper */ kthread_stop(stopper->thread); /* drain remaining works */ - raw_spin_lock_irq(&stopper->lock); + spin_lock_irq(&stopper->lock); list_for_each_entry(work, &stopper->works, list) cpu_stop_signal_done(work->done, false); stopper->enabled = false; - raw_spin_unlock_irq(&stopper->lock); + spin_unlock_irq(&stopper->lock); /* release the stopper */ put_task_struct(stopper->thread); stopper->thread = NULL; @@ -416,7 +376,7 @@ static int __init cpu_stop_init(void) for_each_possible_cpu(cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - raw_spin_lock_init(&stopper->lock); + spin_lock_init(&stopper->lock); INIT_LIST_HEAD(&stopper->works); } @@ -606,11 +566,11 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, set_state(&smdata, STOPMACHINE_PREPARE); cpu_stop_init_done(&done, num_active_cpus()); queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata, - &done, true); + &done); ret = stop_machine_cpu_stop(&smdata); /* Busy wait for completion. 
*/ - while (atomic_read(&done.nr_todo)) + while (!completion_done(&done.completion)) cpu_relax(); mutex_unlock(&stop_cpus_mutex); diff --git a/kernel/sys.c b/kernel/sys.c index 47f1d1b..265b376 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -323,6 +323,7 @@ void kernel_restart_prepare(char *cmd) system_state = SYSTEM_RESTART; usermodehelper_disable(); device_shutdown(); + syscore_shutdown(); } /** @@ -368,7 +369,6 @@ void kernel_restart(char *cmd) { kernel_restart_prepare(cmd); disable_nonboot_cpus(); - syscore_shutdown(); if (!cmd) printk(KERN_EMERG "Restarting system.\n"); else @@ -394,7 +394,6 @@ static void kernel_shutdown_prepare(enum system_states state) void kernel_halt(void) { kernel_shutdown_prepare(SYSTEM_HALT); - disable_nonboot_cpus(); syscore_shutdown(); printk(KERN_EMERG "System halted.\n"); kmsg_dump(KMSG_DUMP_HALT); diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 0ddf3a0..5a63844 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -1194,10 +1194,9 @@ static ssize_t bin_dn_node_address(struct file *file, /* Convert the decnet address to binary */ result = -EIO; - nodep = strchr(buf, '.'); + nodep = strchr(buf, '.') + 1; if (!nodep) goto out; - ++nodep; area = simple_strtoul(buf, NULL, 10); node = simple_strtoul(nodep, NULL, 10); diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index ca9e113..7a925ba 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -67,8 +67,7 @@ static struct clocksource clocksource_jiffies = { .shift = JIFFIES_SHIFT, }; -__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); -__cacheline_aligned_in_smp seqcount_t jiffies_seq; +__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void) @@ -77,9 +76,9 @@ u64 get_jiffies_64(void) u64 ret; do { - seq = read_seqcount_begin(&jiffies_seq); + seq = read_seqbegin(&jiffies_lock); ret = jiffies_64; - } while (read_seqcount_retry(&jiffies_seq, seq)); + } while (read_seqretry(&jiffies_lock, seq)); return ret; } EXPORT_SYMBOL(get_jiffies_64); diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index bb1edfa..24174b4 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -22,7 +22,7 @@ * NTP timekeeping variables: */ -DEFINE_RAW_SPINLOCK(ntp_lock); +DEFINE_SPINLOCK(ntp_lock); /* USER_HZ period (usecs): */ @@ -347,7 +347,7 @@ void ntp_clear(void) { unsigned long flags; - raw_spin_lock_irqsave(&ntp_lock, flags); + spin_lock_irqsave(&ntp_lock, flags); time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; @@ -361,7 +361,7 @@ void ntp_clear(void) /* Clear PPS state variables */ pps_clear(); - raw_spin_unlock_irqrestore(&ntp_lock, flags); + spin_unlock_irqrestore(&ntp_lock, flags); } @@ -371,9 +371,9 @@ u64 ntp_tick_length(void) unsigned long flags; s64 ret; - raw_spin_lock_irqsave(&ntp_lock, flags); + spin_lock_irqsave(&ntp_lock, flags); ret = tick_length; - raw_spin_unlock_irqrestore(&ntp_lock, flags); + spin_unlock_irqrestore(&ntp_lock, flags); return ret; } @@ -394,7 +394,7 @@ int second_overflow(unsigned long secs) int leap = 0; unsigned long flags; - raw_spin_lock_irqsave(&ntp_lock, flags); + spin_lock_irqsave(&ntp_lock, flags); /* * Leap second processing. 
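/*
 * [Editor's note with an illustrative sketch -- not part of this patch.
 * In the bin_dn_node_address() hunk above, the restored code computes
 * "strchr(buf, '.') + 1" before testing for NULL: when no dot is
 * present, strchr() returns NULL, the +1 turns it into (char *)1, and
 * the NULL check can never fire. Checking before advancing keeps the
 * test meaningful; the helper name below is invented:]
 */
#include <stddef.h>
#include <string.h>

static int split_area_node(const char *buf, const char **nodep)
{
	const char *dot = strchr(buf, '.');

	if (!dot)             /* reject e.g. "12": no area.node separator */
		return -1;
	*nodep = dot + 1;     /* advance only once dot is known non-NULL */
	return 0;
}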
If in leap-insert state at the end of the @@ -478,7 +478,7 @@ int second_overflow(unsigned long secs) time_adjust = 0; out: - raw_spin_unlock_irqrestore(&ntp_lock, flags); + spin_unlock_irqrestore(&ntp_lock, flags); return leap; } @@ -660,7 +660,7 @@ int do_adjtimex(struct timex *txc) getnstimeofday(&ts); - raw_spin_lock_irq(&ntp_lock); + spin_lock_irq(&ntp_lock); if (txc->modes & ADJ_ADJTIME) { long save_adjust = time_adjust; @@ -702,7 +702,7 @@ int do_adjtimex(struct timex *txc) /* fill PPS status fields */ pps_fill_timex(txc); - raw_spin_unlock_irq(&ntp_lock); + spin_unlock_irq(&ntp_lock); txc->time.tv_sec = ts.tv_sec; txc->time.tv_usec = ts.tv_nsec; @@ -900,7 +900,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) pts_norm = pps_normalize_ts(*phase_ts); - raw_spin_lock_irqsave(&ntp_lock, flags); + spin_lock_irqsave(&ntp_lock, flags); /* clear the error bits, they will be set again if needed */ time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR); @@ -913,7 +913,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) * just start the frequency interval */ if (unlikely(pps_fbase.tv_sec == 0)) { pps_fbase = *raw_ts; - raw_spin_unlock_irqrestore(&ntp_lock, flags); + spin_unlock_irqrestore(&ntp_lock, flags); return; } @@ -928,7 +928,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) time_status |= STA_PPSJITTER; /* restart the frequency calibration interval */ pps_fbase = *raw_ts; - raw_spin_unlock_irqrestore(&ntp_lock, flags); + spin_unlock_irqrestore(&ntp_lock, flags); pr_err("hardpps: PPSJITTER: bad pulse\n"); return; } @@ -945,7 +945,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) hardpps_update_phase(pts_norm.nsec); - raw_spin_unlock_irqrestore(&ntp_lock, flags); + spin_unlock_irqrestore(&ntp_lock, flags); } EXPORT_SYMBOL(hardpps); diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 239a323..f113755 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -66,17 +66,12 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc) */ int tick_check_broadcast_device(struct clock_event_device *dev) { - struct clock_event_device *cur = tick_broadcast_device.evtdev; - - if ((dev->features & CLOCK_EVT_FEAT_DUMMY) || - (tick_broadcast_device.evtdev && + if ((tick_broadcast_device.evtdev && tick_broadcast_device.evtdev->rating >= dev->rating) || (dev->features & CLOCK_EVT_FEAT_C3STOP)) return 0; clockevents_exchange_device(tick_broadcast_device.evtdev, dev); - if (cur) - cur->event_handler = clockevents_handle_noop; tick_broadcast_device.evtdev = dev; if (!cpumask_empty(tick_get_broadcast_mask())) tick_broadcast_start_periodic(dev); diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 7e05657..b1600a6 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -63,15 +63,13 @@ int tick_is_oneshot_available(void) static void tick_periodic(int cpu) { if (tick_do_timer_cpu == cpu) { - raw_spin_lock(&jiffies_lock); - write_seqcount_begin(&jiffies_seq); + write_seqlock(&jiffies_lock); /* Keep track of the next tick event */ tick_next_period = ktime_add(tick_next_period, tick_period); do_timer(1); - write_seqcount_end(&jiffies_seq); - raw_spin_unlock(&jiffies_lock); + write_sequnlock(&jiffies_lock); } update_process_times(user_mode(get_irq_regs())); @@ -132,9 +130,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) ktime_t next; do { - seq = 
read_seqcount_begin(&jiffies_seq); + seq = read_seqbegin(&jiffies_lock); next = tick_next_period; - } while (read_seqcount_retry(&jiffies_seq, seq)); + } while (read_seqretry(&jiffies_lock, seq)); clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); @@ -325,7 +323,6 @@ static void tick_shutdown(unsigned int *cpup) */ dev->mode = CLOCK_EVT_MODE_UNUSED; clockevents_exchange_device(dev, NULL); - dev->event_handler = clockevents_handle_noop; td->evtdev = NULL; } raw_spin_unlock_irqrestore(&tick_device_lock, flags); diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index ad8edee..cf3e59e 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -4,9 +4,6 @@ #include #include -extern raw_spinlock_t jiffies_lock; -extern seqcount_t jiffies_seq; - #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD #define TICK_DO_TIMER_NONE -1 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 626b320f..d58e552 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -56,8 +56,7 @@ static void tick_do_update_jiffies64(ktime_t now) return; /* Reevalute with jiffies_lock held */ - raw_spin_lock(&jiffies_lock); - write_seqcount_begin(&jiffies_seq); + write_seqlock(&jiffies_lock); delta = ktime_sub(now, last_jiffies_update); if (delta.tv64 >= tick_period.tv64) { @@ -80,8 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now) /* Keep the tick_next_period variable up to date */ tick_next_period = ktime_add(last_jiffies_update, tick_period); } - write_seqcount_end(&jiffies_seq); - raw_spin_unlock(&jiffies_lock); + write_sequnlock(&jiffies_lock); } /* @@ -91,14 +89,12 @@ static ktime_t tick_init_jiffy_update(void) { ktime_t period; - raw_spin_lock(&jiffies_lock); - write_seqcount_begin(&jiffies_seq); + write_seqlock(&jiffies_lock); /* Did we start the jiffies update yet ? 
*/ if (last_jiffies_update.tv64 == 0) last_jiffies_update = tick_next_period; period = last_jiffies_update; - write_seqcount_end(&jiffies_seq); - raw_spin_unlock(&jiffies_lock); + write_sequnlock(&jiffies_lock); return period; } @@ -329,11 +325,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, /* Read jiffies and the time when jiffies were updated last */ do { - seq = read_seqcount_begin(&jiffies_seq); + seq = read_seqbegin(&jiffies_lock); last_update = last_jiffies_update; last_jiffies = jiffies; time_delta = timekeeping_max_deferment(); - } while (read_seqcount_retry(&jiffies_seq, seq)); + } while (read_seqretry(&jiffies_lock, seq)); if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || arch_needs_cpu(cpu)) { @@ -481,7 +477,14 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return false; if (unlikely(local_softirq_pending() && cpu_online(cpu))) { - softirq_check_pending_idle(); + static int ratelimit; + + if (ratelimit < 10 && + (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + (unsigned int) local_softirq_pending()); + ratelimit++; + } return false; } @@ -561,19 +564,14 @@ void tick_nohz_idle_enter(void) */ void tick_nohz_irq_exit(void) { - unsigned long flags; struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); if (!ts->inidle) return; - local_irq_save(flags); - - /* Cancel the timer because CPU already waken up from the C-states */ + /* Cancel the timer because CPU already waken up from the C-states*/ menu_hrtimer_cancel(); __tick_nohz_idle_enter(ts); - - local_irq_restore(flags); } /** @@ -860,7 +858,6 @@ void tick_setup_sched_timer(void) * Emulate tick processing via per-CPU hrtimers: */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - ts->sched_timer.irqsafe = 1; ts->sched_timer.function = tick_sched_timer; /* Get the next period (per cpu) */ diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index ea93e56..cbc6acb 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -23,12 +23,8 @@ #include #include -#include "tick-internal.h" static struct timekeeper timekeeper; -static DEFINE_RAW_SPINLOCK(timekeeper_lock); -static seqcount_t timekeeper_seq; -static struct timekeeper shadow_timekeeper; /* flag for if timekeeping is suspended */ int __read_mostly timekeeping_suspended; @@ -97,7 +93,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) old_clock = tk->clock; tk->clock = clock; - tk->cycle_last = clock->cycle_last = clock->read(clock); + clock->cycle_last = clock->read(clock); /* Do the ns -> cycle conversion first, using original mult */ tmp = NTP_INTERVAL_LENGTH; @@ -188,6 +184,8 @@ static void update_pvclock_gtod(struct timekeeper *tk) /** * pvclock_gtod_register_notifier - register a pvclock timedata update listener + * + * Must hold write on timekeeper.lock */ int pvclock_gtod_register_notifier(struct notifier_block *nb) { @@ -195,10 +193,11 @@ int pvclock_gtod_register_notifier(struct notifier_block *nb) unsigned long flags; int ret; - raw_spin_lock_irqsave(&timekeeper_lock, flags); + write_seqlock_irqsave(&tk->lock, flags); ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb); + /* update timekeeping data */ update_pvclock_gtod(tk); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); return ret; } @@ -207,22 +206,25 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier); /** * pvclock_gtod_unregister_notifier - 
unregister a pvclock * timedata update listener + * + * Must hold write on timekeeper.lock */ int pvclock_gtod_unregister_notifier(struct notifier_block *nb) { + struct timekeeper *tk = &timekeeper; unsigned long flags; int ret; - raw_spin_lock_irqsave(&timekeeper_lock, flags); + write_seqlock_irqsave(&tk->lock, flags); ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); return ret; } EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); -/* must hold timekeeper_lock */ -static void timekeeping_update(struct timekeeper *tk, bool clearntp, bool mirror) +/* must hold write on timekeeper.lock */ +static void timekeeping_update(struct timekeeper *tk, bool clearntp) { if (clearntp) { tk->ntp_error = 0; @@ -230,9 +232,6 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp, bool mirror } update_vsyscall(tk); update_pvclock_gtod(tk); - - if (mirror) - memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); } /** @@ -251,7 +250,7 @@ static void timekeeping_forward_now(struct timekeeper *tk) clock = tk->clock; cycle_now = clock->read(clock); cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; - tk->cycle_last = clock->cycle_last = cycle_now; + clock->cycle_last = cycle_now; tk->xtime_nsec += cycle_delta * tk->mult; @@ -279,12 +278,12 @@ void getnstimeofday(struct timespec *ts) WARN_ON(timekeeping_suspended); do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); ts->tv_sec = tk->xtime_sec; nsecs = timekeeping_get_ns(tk); - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); ts->tv_nsec = 0; timespec_add_ns(ts, nsecs); @@ -300,11 +299,11 @@ ktime_t ktime_get(void) WARN_ON(timekeeping_suspended); do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); /* * Use ktime_set/ktime_add_ns to create a proper ktime on * 32-bit architectures without CONFIG_KTIME_SCALAR. 
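/*
 * [Illustrative sketch -- not part of this patch. Every reader hunk
 * above has the same shape: sample a sequence number, copy the data,
 * and retry if a writer ran in between. A compact seqlock model in
 * C11, where an odd sequence number means a write is in flight. Real
 * implementations use relaxed accesses plus explicit memory barriers;
 * plain seq_cst atomics keep this sketch simple:]
 */
#include <stdatomic.h>

static _Atomic unsigned int seq;     /* odd while an update is in flight */
static _Atomic long long xtime_sec;  /* the protected datum */

static void writer_update(long long v)
{
	atomic_fetch_add(&seq, 1);   /* becomes odd: readers must retry */
	atomic_store(&xtime_sec, v);
	atomic_fetch_add(&seq, 1);   /* even again: update is published */
}

static long long reader_get(void)
{
	unsigned int s;
	long long v;

	do {
		s = atomic_load(&seq);
		v = atomic_load(&xtime_sec);
	} while ((s & 1) || atomic_load(&seq) != s);

	return v;
}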
@@ -331,12 +330,12 @@ void ktime_get_ts(struct timespec *ts) WARN_ON(timekeeping_suspended); do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); ts->tv_sec = tk->xtime_sec; nsec = timekeeping_get_ns(tk); tomono = tk->wall_to_monotonic; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); ts->tv_sec += tomono.tv_sec; ts->tv_nsec = 0; @@ -364,7 +363,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) WARN_ON_ONCE(timekeeping_suspended); do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); *ts_raw = tk->raw_time; ts_real->tv_sec = tk->xtime_sec; @@ -373,7 +372,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) nsecs_raw = timekeeping_get_ns_raw(tk); nsecs_real = timekeeping_get_ns(tk); - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); timespec_add_ns(ts_raw, nsecs_raw); timespec_add_ns(ts_real, nsecs_real); @@ -413,8 +412,7 @@ int do_settimeofday(const struct timespec *tv) if (!timespec_valid_strict(tv)) return -EINVAL; - raw_spin_lock_irqsave(&timekeeper_lock, flags); - write_seqcount_begin(&timekeeper_seq); + write_seqlock_irqsave(&tk->lock, flags); timekeeping_forward_now(tk); @@ -426,10 +424,9 @@ int do_settimeofday(const struct timespec *tv) tk_set_xtime(tk, tv); - timekeeping_update(tk, true, true); + timekeeping_update(tk, true); - write_seqcount_end(&timekeeper_seq); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); @@ -454,8 +451,7 @@ int timekeeping_inject_offset(struct timespec *ts) if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - raw_spin_lock_irqsave(&timekeeper_lock, flags); - write_seqcount_begin(&timekeeper_seq); + write_seqlock_irqsave(&tk->lock, flags); timekeeping_forward_now(tk); @@ -470,10 +466,9 @@ int timekeeping_inject_offset(struct timespec *ts) tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); error: /* even if we error out, we forwarded the time, so call update */ - timekeeping_update(tk, true, true); + timekeeping_update(tk, true); - write_seqcount_end(&timekeeper_seq); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); @@ -495,8 +490,7 @@ static int change_clocksource(void *data) new = (struct clocksource *) data; - raw_spin_lock_irqsave(&timekeeper_lock, flags); - write_seqcount_begin(&timekeeper_seq); + write_seqlock_irqsave(&tk->lock, flags); timekeeping_forward_now(tk); if (!new->enable || new->enable(new) == 0) { @@ -505,10 +499,9 @@ static int change_clocksource(void *data) if (old->disable) old->disable(old); } - timekeeping_update(tk, true, true); + timekeeping_update(tk, true); - write_seqcount_end(&timekeeper_seq); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); return 0; } @@ -558,11 +551,11 @@ void getrawmonotonic(struct timespec *ts) s64 nsecs; do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); nsecs = timekeeping_get_ns_raw(tk); *ts = tk->raw_time; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); timespec_add_ns(ts, nsecs); } @@ -578,11 +571,11 @@ int timekeeping_valid_for_hres(void) int ret; do { - seq = read_seqcount_begin(&timekeeper_seq); + seq 
= read_seqbegin(&tk->lock); ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); return ret; } @@ -597,11 +590,11 @@ u64 timekeeping_max_deferment(void) u64 ret; do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); ret = tk->clock->max_idle_ns; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); return ret; } @@ -662,10 +655,11 @@ void __init timekeeping_init(void) boot.tv_nsec = 0; } + seqlock_init(&tk->lock); + ntp_init(); - raw_spin_lock_irqsave(&timekeeper_lock, flags); - write_seqcount_begin(&timekeeper_seq); + write_seqlock_irqsave(&tk->lock, flags); clock = clocksource_default_clock(); if (clock->enable) clock->enable(clock); @@ -684,10 +678,7 @@ void __init timekeeping_init(void) tmp.tv_nsec = 0; tk_set_sleep_time(tk, tmp); - memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); - - write_seqcount_end(&timekeeper_seq); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); } /* time in seconds when suspend began */ @@ -734,17 +725,15 @@ void timekeeping_inject_sleeptime(struct timespec *delta) if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) return; - raw_spin_lock_irqsave(&timekeeper_lock, flags); - write_seqcount_begin(&timekeeper_seq); + write_seqlock_irqsave(&tk->lock, flags); timekeeping_forward_now(tk); __timekeeping_inject_sleeptime(tk, delta); - timekeeping_update(tk, true, true); + timekeeping_update(tk, true); - write_seqcount_end(&timekeeper_seq); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); @@ -768,20 +757,18 @@ static void timekeeping_resume(void) clockevents_resume(); clocksource_resume(); - raw_spin_lock_irqsave(&timekeeper_lock, flags); - write_seqcount_begin(&timekeeper_seq); + write_seqlock_irqsave(&tk->lock, flags); if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { ts = timespec_sub(ts, timekeeping_suspend_time); __timekeeping_inject_sleeptime(tk, &ts); } /* re-base the last cycle value */ - tk->cycle_last = tk->clock->cycle_last = tk->clock->read(tk->clock); + tk->clock->cycle_last = tk->clock->read(tk->clock); tk->ntp_error = 0; timekeeping_suspended = 0; - timekeeping_update(tk, false, true); - write_seqcount_end(&timekeeper_seq); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + timekeeping_update(tk, false); + write_sequnlock_irqrestore(&tk->lock, flags); touch_softlockup_watchdog(); @@ -800,8 +787,7 @@ static int timekeeping_suspend(void) read_persistent_clock(&timekeeping_suspend_time); - raw_spin_lock_irqsave(&timekeeper_lock, flags); - write_seqcount_begin(&timekeeper_seq); + write_seqlock_irqsave(&tk->lock, flags); timekeeping_forward_now(tk); timekeeping_suspended = 1; @@ -824,8 +810,7 @@ static int timekeeping_suspend(void) timekeeping_suspend_time = timespec_add(timekeeping_suspend_time, delta_delta); } - write_seqcount_end(&timekeeper_seq); - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); clocksource_suspend(); @@ -1092,16 +1077,15 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk) static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, u32 shift) { - cycle_t interval = tk->cycle_interval << shift; u64 raw_nsecs; /* If the offset is smaller then a 
shifted interval, do nothing */ - if (offset < interval) + if (offset < tk->cycle_interval<<shift) return offset; /* Accumulate one shifted interval */ - offset -= interval; - tk->cycle_last += interval; + offset -= tk->cycle_interval << shift; + tk->clock->cycle_last += tk->cycle_interval << shift; tk->xtime_nsec += tk->xtime_interval << shift; accumulate_nsecs_to_secs(tk); @@ -1158,28 +1142,27 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk) static void update_wall_time(void) { struct clocksource *clock; - struct timekeeper *real_tk = &timekeeper; - struct timekeeper *tk = &shadow_timekeeper; + struct timekeeper *tk = &timekeeper; cycle_t offset; int shift = 0, maxshift; unsigned long flags; - raw_spin_lock_irqsave(&timekeeper_lock, flags); + write_seqlock_irqsave(&tk->lock, flags); /* Make sure we're fully resumed: */ if (unlikely(timekeeping_suspended)) goto out; - clock = real_tk->clock; + clock = tk->clock; #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET - offset = real_tk->cycle_interval; + offset = tk->cycle_interval; #else offset = (clock->read(clock) - clock->cycle_last) & clock->mask; #endif /* Check if there's really nothing to do */ - if (offset < real_tk->cycle_interval) + if (offset < tk->cycle_interval) goto out; /* @@ -1216,24 +1199,11 @@ static void update_wall_time(void) */ accumulate_nsecs_to_secs(tk); - write_seqcount_begin(&timekeeper_seq); - /* Update clock->cycle_last with the new value */ - clock->cycle_last = tk->cycle_last; - /* - * Update the real timekeeper. - * - * We could avoid this memcpy by switching pointers, but that - * requires changes to all other timekeeper usage sites as - * well, i.e. move the timekeeper pointer getter into the - * spinlocked/seqcount protected sections. And we trade this - * memcpy under the timekeeper_seq against one before we start - * updating. - */ - memcpy(real_tk, tk, sizeof(*tk)); - timekeeping_update(real_tk, false, false); - write_seqcount_end(&timekeeper_seq); + timekeeping_update(tk, false); + out: - raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); + } /** @@ -1280,13 +1250,13 @@ void get_monotonic_boottime(struct timespec *ts) WARN_ON(timekeeping_suspended); do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); ts->tv_sec = tk->xtime_sec; nsec = timekeeping_get_ns(tk); tomono = tk->wall_to_monotonic; sleep = tk->total_sleep_time; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); ts->tv_sec += tomono.tv_sec + sleep.tv_sec; ts->tv_nsec = 0; @@ -1345,10 +1315,10 @@ struct timespec current_kernel_time(void) unsigned long seq; do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); now = tk_xtime(tk); - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); return now; } @@ -1361,11 +1331,11 @@ struct timespec get_monotonic_coarse(void) unsigned long seq; do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); now = tk_xtime(tk); mono = tk->wall_to_monotonic; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, now.tv_nsec + mono.tv_nsec); @@ -1396,11 +1366,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, unsigned long seq; do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); *xtim = tk_xtime(tk); *wtom = tk->wall_to_monotonic; *sleep = tk->total_sleep_time; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while
(read_seqretry(&tk->lock, seq)); } #ifdef CONFIG_HIGH_RES_TIMERS @@ -1420,14 +1390,14 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) u64 secs, nsecs; do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); secs = tk->xtime_sec; nsecs = timekeeping_get_ns(tk); *offs_real = tk->offs_real; *offs_boot = tk->offs_boot; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); now = ktime_add_ns(ktime_set(secs, 0), nsecs); now = ktime_sub(now, *offs_real); @@ -1445,9 +1415,9 @@ ktime_t ktime_get_monotonic_offset(void) struct timespec wtom; do { - seq = read_seqcount_begin(&timekeeper_seq); + seq = read_seqbegin(&tk->lock); wtom = tk->wall_to_monotonic; - } while (read_seqcount_retry(&timekeeper_seq, seq)); + } while (read_seqretry(&tk->lock, seq)); return timespec_to_ktime(wtom); } @@ -1461,9 +1431,7 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); */ void xtime_update(unsigned long ticks) { - raw_spin_lock(&jiffies_lock); - write_seqcount_begin(&jiffies_seq); + write_seqlock(&jiffies_lock); do_timer(ticks); - write_seqcount_end(&jiffies_seq); - raw_spin_unlock(&jiffies_lock); + write_sequnlock(&jiffies_lock); } diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl index 3f42652..eb51d76 100644 --- a/kernel/timeconst.pl +++ b/kernel/timeconst.pl @@ -369,8 +369,10 @@ if ($hz eq '--can') { die "Usage: $0 HZ\n"; } - $cv = $canned_values{$hz}; - @val = defined($cv) ? @$cv : compute_values($hz); + @val = @{$canned_values{$hz}}; + if (!defined(@val)) { + @val = compute_values($hz); + } output($hz, @val); } exit 0; diff --git a/kernel/timer.c b/kernel/timer.c index 374e7b1..367d008 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -76,7 +76,6 @@ struct tvec_root { struct tvec_base { spinlock_t lock; struct timer_list *running_timer; - wait_queue_head_t wait_for_running_timer; unsigned long timer_jiffies; unsigned long next_timer; unsigned long active_timers; @@ -717,36 +716,6 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer, } } -#ifndef CONFIG_PREEMPT_RT_FULL -static inline struct tvec_base *switch_timer_base(struct timer_list *timer, - struct tvec_base *old, - struct tvec_base *new) -{ - /* See the comment in lock_timer_base() */ - timer_set_base(timer, NULL); - spin_unlock(&old->lock); - spin_lock(&new->lock); - timer_set_base(timer, new); - return new; -} -#else -static inline struct tvec_base *switch_timer_base(struct timer_list *timer, - struct tvec_base *old, - struct tvec_base *new) -{ - /* - * We cannot do the above because we might be preempted and - * then the preempter would see NULL and loop forever. - */ - if (spin_trylock(&new->lock)) { - timer_set_base(timer, new); - spin_unlock(&old->lock); - return new; - } - return old; -} -#endif - static inline int __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only, int pinned) @@ -766,15 +735,12 @@ __mod_timer(struct timer_list *timer, unsigned long expires, debug_activate(timer, expires); - preempt_disable_rt(); cpu = smp_processor_id(); #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) cpu = get_nohz_timer_target(); #endif - preempt_enable_rt(); - new_base = per_cpu(tvec_bases, cpu); if (base != new_base) { @@ -785,8 +751,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, * handler yet has not finished. This also guarantees that * the timer is serialized wrt itself. 
*/ - if (likely(base->running_timer != timer)) - base = switch_timer_base(timer, base, new_base); + if (likely(base->running_timer != timer)) { + /* See the comment in lock_timer_base() */ + timer_set_base(timer, NULL); + spin_unlock(&base->lock); + base = new_base; + spin_lock(&base->lock); + timer_set_base(timer, base); + } } timer->expires = expires; @@ -969,29 +941,6 @@ void add_timer_on(struct timer_list *timer, int cpu) } EXPORT_SYMBOL_GPL(add_timer_on); -#ifdef CONFIG_PREEMPT_RT_FULL -/* - * Wait for a running timer - */ -static void wait_for_running_timer(struct timer_list *timer) -{ - struct tvec_base *base = timer->base; - - if (base->running_timer == timer) - wait_event(base->wait_for_running_timer, - base->running_timer != timer); -} - -# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer) -#else -static inline void wait_for_running_timer(struct timer_list *timer) -{ - cpu_relax(); -} - -# define wakeup_timer_waiters(b) do { } while (0) -#endif - /** * del_timer - deactive a timer. * @timer: the timer to be deactivated @@ -1049,7 +998,7 @@ int try_to_del_timer_sync(struct timer_list *timer) } EXPORT_SYMBOL(try_to_del_timer_sync); -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) +#ifdef CONFIG_SMP /** * del_timer_sync - deactivate a timer and wait for the handler to finish. * @timer: the timer to be deactivated @@ -1109,7 +1058,7 @@ int del_timer_sync(struct timer_list *timer) int ret = try_to_del_timer_sync(timer); if (ret >= 0) return ret; - wait_for_running_timer(timer); + cpu_relax(); } } EXPORT_SYMBOL(del_timer_sync); @@ -1226,17 +1175,15 @@ static inline void __run_timers(struct tvec_base *base) if (irqsafe) { spin_unlock(&base->lock); call_timer_fn(timer, fn, data); - base->running_timer = NULL; spin_lock(&base->lock); } else { spin_unlock_irq(&base->lock); call_timer_fn(timer, fn, data); - base->running_timer = NULL; spin_lock_irq(&base->lock); } } } - wake_up(&base->wait_for_running_timer); + base->running_timer = NULL; spin_unlock_irq(&base->lock); } @@ -1376,31 +1323,17 @@ unsigned long get_next_timer_interrupt(unsigned long now) if (cpu_is_offline(smp_processor_id())) return expires; -#ifdef CONFIG_PREEMPT_RT_FULL - /* - * On PREEMPT_RT we cannot sleep here. If the trylock does not - * succeed then we return the worst-case 'expires in 1 tick' - * value. We use the rt functions here directly to avoid a - * migrate_disable() call. - */ - if (!spin_do_trylock(&base->lock)) - return now + 1; -#else spin_lock(&base->lock); -#endif if (base->active_timers) { if (time_before_eq(base->next_timer, base->timer_jiffies)) base->next_timer = __next_timer_interrupt(base); expires = base->next_timer; } -#ifdef CONFIG_PREEMPT_RT_FULL - rt_spin_unlock(&base->lock); -#else spin_unlock(&base->lock); -#endif if (time_before_eq(expires, now)) return now; + return cmp_next_hrtimer_event(now, expires); } #endif @@ -1416,13 +1349,14 @@ void update_process_times(int user_tick) /* Note: this timer irq context must be accounted for as well. 
*/ account_process_tick(p, user_tick); - scheduler_tick(); run_local_timers(); rcu_check_callbacks(cpu, user_tick); -#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) + printk_tick(); +#ifdef CONFIG_IRQ_WORK if (in_irq()) irq_work_run(); #endif + scheduler_tick(); run_posix_cpu_timers(p); } @@ -1433,11 +1367,6 @@ static void run_timer_softirq(struct softirq_action *h) { struct tvec_base *base = __this_cpu_read(tvec_bases); -#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) - irq_work_run(); -#endif - - printk_tick(); hrtimer_run_pending(); if (time_after_eq(jiffies, base->timer_jiffies)) @@ -1755,7 +1684,6 @@ static int __cpuinit init_timers_cpu(int cpu) } spin_lock_init(&base->lock); - init_waitqueue_head(&base->wait_for_running_timer); for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); @@ -1794,7 +1722,7 @@ static void __cpuinit migrate_timers(int cpu) BUG_ON(cpu_online(cpu)); old_base = per_cpu(tvec_bases, cpu); - new_base = get_local_var(tvec_bases); + new_base = get_cpu_var(tvec_bases); /* * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. @@ -1815,7 +1743,7 @@ static void __cpuinit migrate_timers(int cpu) spin_unlock(&old_base->lock); spin_unlock_irq(&new_base->lock); - put_local_var(tvec_bases); + put_cpu_var(tvec_bases); } #endif /* CONFIG_HOTPLUG_CPU */ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index c872f5f..5d89335 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -202,24 +202,6 @@ config IRQSOFF_TRACER enabled. This option and the preempt-off timing option can be used together or separately.) -config INTERRUPT_OFF_HIST - bool "Interrupts-off Latency Histogram" - depends on IRQSOFF_TRACER - help - This option generates continuously updated histograms (one per cpu) - of the duration of time periods with interrupts disabled. The - histograms are disabled by default. To enable them, write a non-zero - number to - - /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff - - If PREEMPT_OFF_HIST is also selected, additional histograms (one - per cpu) are generated that accumulate the duration of time periods - when both interrupts and preemption are disabled. The histogram data - will be located in the debug file system at - - /sys/kernel/debug/tracing/latency_hist/irqsoff - config PREEMPT_TRACER bool "Preemption-off Latency Tracer" default n @@ -242,24 +224,6 @@ config PREEMPT_TRACER enabled. This option and the irqs-off timing option can be used together or separately.) -config PREEMPT_OFF_HIST - bool "Preemption-off Latency Histogram" - depends on PREEMPT_TRACER - help - This option generates continuously updated histograms (one per cpu) - of the duration of time periods with preemption disabled. The - histograms are disabled by default. To enable them, write a non-zero - number to - - /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff - - If INTERRUPT_OFF_HIST is also selected, additional histograms (one - per cpu) are generated that accumulate the duration of time periods - when both interrupts and preemption are disabled. The histogram data - will be located in the debug file system at - - /sys/kernel/debug/tracing/latency_hist/preemptoff - config SCHED_TRACER bool "Scheduling Latency Tracer" select GENERIC_TRACER @@ -269,74 +233,6 @@ config SCHED_TRACER This tracer tracks the latency of the highest priority task to be scheduled in, starting from the point it has woken up. 
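
As a quick illustration of the facility kept by SCHED_TRACER: the wakeup
tracer is driven entirely from debugfs at run time. A rough sketch of a
measurement session, assuming debugfs is mounted at /sys/kernel/debug:

  # cd /sys/kernel/debug/tracing
  # echo wakeup > current_tracer     (or wakeup_rt for RT tasks only)
  # echo 0 > tracing_max_latency
  # echo 1 > tracing_on
  ... run the workload under test ...
  # echo 0 > tracing_on
  # cat tracing_max_latency
  # cat trace

tracing_max_latency holds the worst-case wakeup latency observed so far,
in microseconds; writing 0 to it starts a fresh measurement.
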
-config WAKEUP_LATENCY_HIST - bool "Scheduling Latency Histogram" - depends on SCHED_TRACER - help - This option generates continuously updated histograms (one per cpu) - of the scheduling latency of the highest priority task. - The histograms are disabled by default. To enable them, write a - non-zero number to - - /sys/kernel/debug/tracing/latency_hist/enable/wakeup - - Two different algorithms are used, one to determine the latency of - processes that exclusively use the highest priority of the system and - another one to determine the latency of processes that share the - highest system priority with other processes. The former is used to - improve hardware and system software, the latter to optimize the - priority design of a given system. The histogram data will be - located in the debug file system at - - /sys/kernel/debug/tracing/latency_hist/wakeup - - and - - /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio - - If both Scheduling Latency Histogram and Missed Timer Offsets - Histogram are selected, additional histogram data will be collected - that contain, in addition to the wakeup latency, the timer latency, in - case the wakeup was triggered by an expired timer. These histograms - are available in the - - /sys/kernel/debug/tracing/latency_hist/timerandwakeup - - directory. They reflect the apparent interrupt and scheduling latency - and are best suitable to determine the worst-case latency of a given - system. To enable these histograms, write a non-zero number to - - /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup - -config MISSED_TIMER_OFFSETS_HIST - depends on HIGH_RES_TIMERS - select GENERIC_TRACER - bool "Missed Timer Offsets Histogram" - help - Generate a histogram of missed timer offsets in microseconds. The - histograms are disabled by default. To enable them, write a non-zero - number to - - /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets - - The histogram data will be located in the debug file system at - - /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets - - If both Scheduling Latency Histogram and Missed Timer Offsets - Histogram are selected, additional histogram data will be collected - that contain, in addition to the wakeup latency, the timer latency, in - case the wakeup was triggered by an expired timer. These histograms - are available in the - - /sys/kernel/debug/tracing/latency_hist/timerandwakeup - - directory. They reflect the apparent interrupt and scheduling latency - and are best suitable to determine the worst-case latency of a given - system. To enable these histograms, write a non-zero number to - - /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup - config ENABLE_DEFAULT_TRACERS bool "Trace process context switches and events" depends on !GENERIC_TRACER @@ -520,28 +416,24 @@ config PROBE_EVENTS def_bool n config DYNAMIC_FTRACE - bool "enable/disable function tracing dynamically" + bool "enable/disable ftrace tracepoints dynamically" depends on FUNCTION_TRACER depends on HAVE_DYNAMIC_FTRACE default y help - This option will modify all the calls to function tracing - dynamically (will patch them out of the binary image and - replace them with a No-Op instruction) on boot up. During - compile time, a table is made of all the locations that ftrace - can function trace, and this table is linked into the kernel - image. When this is enabled, functions can be individually - enabled, and the functions not enabled will not affect - performance of the system. 
- - See the files in /sys/kernel/debug/tracing: - available_filter_functions - set_ftrace_filter - set_ftrace_notrace + This option will modify all the calls to ftrace dynamically + (will patch them out of the binary image and replace them + with a No-Op instruction) as they are called. A table is + created to dynamically enable them again. This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise has native performance as long as no tracing is active. + The changes to the code are done by a kernel thread that + wakes up once a second and checks to see if any ftrace calls + were made. If so, it runs stop_machine (stops all CPUS) + and modifies the code to jump over the call to ftrace. + config FUNCTION_PROFILER bool "Kernel function profiler" depends on FUNCTION_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index f5e0243..d7e2068 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -34,10 +34,6 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o -obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o -obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o -obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o -obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 03dbc77..41473b4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -650,7 +650,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); - for (i = 1; i < pages; i++) { + for (i = 0; i < pages; i++) { pg->next = (void *)get_zeroed_page(GFP_KERNEL); if (!pg->next) goto out_free; @@ -668,6 +668,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) free_page(tmp); } + free_page((unsigned long)stat->pages); stat->pages = NULL; stat->start = NULL; @@ -1027,19 +1028,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) static struct pid * const ftrace_swapper_pid = &init_struct_pid; -loff_t -ftrace_filter_lseek(struct file *file, loff_t offset, int whence) -{ - loff_t ret; - - if (file->f_mode & FMODE_READ) - ret = seq_lseek(file, offset, whence); - else - file->f_pos = ret = 1; - - return ret; -} - #ifdef CONFIG_DYNAMIC_FTRACE #ifndef CONFIG_FTRACE_MCOUNT_RECORD @@ -2602,7 +2590,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash) * routine, you can use ftrace_filter_write() for the write * routine if @flag has FTRACE_ITER_FILTER set, or * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. - * ftrace_filter_lseek() should be used as the lseek routine, and + * ftrace_regex_lseek() should be used as the lseek routine, and * release must call ftrace_regex_release(). 
*/ int @@ -2686,6 +2674,19 @@ ftrace_notrace_open(struct inode *inode, struct file *file) inode, file); } +loff_t +ftrace_regex_lseek(struct file *file, loff_t offset, int whence) +{ + loff_t ret; + + if (file->f_mode & FMODE_READ) + ret = seq_lseek(file, offset, whence); + else + file->f_pos = ret = 1; + + return ret; +} + static int ftrace_match(char *str, char *regex, int len, int type) { int matched = 0; @@ -3081,8 +3082,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, continue; } - hlist_del_rcu(&entry->node); - call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu); + hlist_del(&entry->node); + call_rcu(&entry->rcu, ftrace_free_entry_rcu); } } __disable_ftrace_function_probe(); @@ -3548,7 +3549,7 @@ static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, .write = ftrace_filter_write, - .llseek = ftrace_filter_lseek, + .llseek = ftrace_regex_lseek, .release = ftrace_regex_release, }; @@ -3556,7 +3557,7 @@ static const struct file_operations ftrace_notrace_fops = { .open = ftrace_notrace_open, .read = seq_read, .write = ftrace_notrace_write, - .llseek = ftrace_filter_lseek, + .llseek = ftrace_regex_lseek, .release = ftrace_regex_release, }; @@ -3714,8 +3715,7 @@ out: if (fail) return -EINVAL; - ftrace_graph_filter_enabled = !!(*idx); - + ftrace_graph_filter_enabled = 1; return 0; } @@ -3762,8 +3762,8 @@ static const struct file_operations ftrace_graph_fops = { .open = ftrace_graph_open, .read = seq_read, .write = ftrace_graph_write, - .llseek = ftrace_filter_lseek, .release = ftrace_graph_release, + .llseek = seq_lseek, }; #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ @@ -3970,51 +3970,37 @@ static void ftrace_init_module(struct module *mod, ftrace_process_locs(mod, start, end); } -static int ftrace_module_notify_enter(struct notifier_block *self, - unsigned long val, void *data) +static int ftrace_module_notify(struct notifier_block *self, + unsigned long val, void *data) { struct module *mod = data; - if (val == MODULE_STATE_COMING) + switch (val) { + case MODULE_STATE_COMING: ftrace_init_module(mod, mod->ftrace_callsites, mod->ftrace_callsites + mod->num_ftrace_callsites); - return 0; -} - -static int ftrace_module_notify_exit(struct notifier_block *self, - unsigned long val, void *data) -{ - struct module *mod = data; - - if (val == MODULE_STATE_GOING) + break; + case MODULE_STATE_GOING: ftrace_release_mod(mod); + break; + } return 0; } #else -static int ftrace_module_notify_enter(struct notifier_block *self, - unsigned long val, void *data) -{ - return 0; -} -static int ftrace_module_notify_exit(struct notifier_block *self, - unsigned long val, void *data) +static int ftrace_module_notify(struct notifier_block *self, + unsigned long val, void *data) { return 0; } #endif /* CONFIG_MODULES */ -struct notifier_block ftrace_module_enter_nb = { - .notifier_call = ftrace_module_notify_enter, +struct notifier_block ftrace_module_nb = { + .notifier_call = ftrace_module_notify, .priority = INT_MAX, /* Run before anything that can use kprobes */ }; -struct notifier_block ftrace_module_exit_nb = { - .notifier_call = ftrace_module_notify_exit, - .priority = INT_MIN, /* Run after anything that can remove kprobes */ -}; - extern unsigned long __start_mcount_loc[]; extern unsigned long __stop_mcount_loc[]; @@ -4046,13 +4032,9 @@ void __init ftrace_init(void) __start_mcount_loc, __stop_mcount_loc); - ret = register_module_notifier(&ftrace_module_enter_nb); - if (ret) - pr_warning("Failed to register trace ftrace module 
enter notifier\n"); - - ret = register_module_notifier(&ftrace_module_exit_nb); + ret = register_module_notifier(&ftrace_module_nb); if (ret) - pr_warning("Failed to register trace ftrace module exit notifier\n"); + pr_warning("Failed to register trace ftrace module notifier\n"); set_ftrace_early_filters(); @@ -4421,7 +4403,7 @@ static const struct file_operations ftrace_pid_fops = { .open = ftrace_pid_open, .write = ftrace_pid_write, .read = seq_read, - .llseek = ftrace_filter_lseek, + .llseek = seq_lseek, .release = ftrace_pid_release, }; @@ -4537,8 +4519,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, ftrace_startup_sysctl(); /* we are starting ftrace again */ - if (ftrace_ops_list != &ftrace_list_end) - update_ftrace_function(); + if (ftrace_ops_list != &ftrace_list_end) { + if (ftrace_ops_list->next == &ftrace_list_end) + ftrace_trace_function = ftrace_ops_list->func; + else + ftrace_trace_function = ftrace_ops_list_func; + } } else { /* stopping ftrace calls (just send to ftrace_stub) */ diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c deleted file mode 100644 index 6a4c869..0000000 --- a/kernel/trace/latency_hist.c +++ /dev/null @@ -1,1176 +0,0 @@ -/* - * kernel/trace/latency_hist.c - * - * Add support for histograms of preemption-off latency and - * interrupt-off latency and wakeup latency, it depends on - * Real-Time Preemption Support. - * - * Copyright (C) 2005 MontaVista Software, Inc. - * Yi Yang - * - * Converted to work with the new latency tracer. - * Copyright (C) 2008 Red Hat, Inc. - * Steven Rostedt - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "trace.h" -#include - -#define NSECS_PER_USECS 1000L - -#define CREATE_TRACE_POINTS -#include - -enum { - IRQSOFF_LATENCY = 0, - PREEMPTOFF_LATENCY, - PREEMPTIRQSOFF_LATENCY, - WAKEUP_LATENCY, - WAKEUP_LATENCY_SHAREDPRIO, - MISSED_TIMER_OFFSETS, - TIMERANDWAKEUP_LATENCY, - MAX_LATENCY_TYPE, -}; - -#define MAX_ENTRY_NUM 10240 - -struct hist_data { - atomic_t hist_mode; /* 0 log, 1 don't log */ - long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ - long min_lat; - long max_lat; - unsigned long long below_hist_bound_samples; - unsigned long long above_hist_bound_samples; - long long accumulate_lat; - unsigned long long total_samples; - unsigned long long hist_array[MAX_ENTRY_NUM]; -}; - -struct enable_data { - int latency_type; - int enabled; -}; - -static char *latency_hist_dir_root = "latency_hist"; - -#ifdef CONFIG_INTERRUPT_OFF_HIST -static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); -static char *irqsoff_hist_dir = "irqsoff"; -static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); -static DEFINE_PER_CPU(int, hist_irqsoff_counting); -#endif - -#ifdef CONFIG_PREEMPT_OFF_HIST -static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); -static char *preemptoff_hist_dir = "preemptoff"; -static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); -static DEFINE_PER_CPU(int, hist_preemptoff_counting); -#endif - -#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) -static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); -static char *preemptirqsoff_hist_dir = "preemptirqsoff"; -static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); -static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); -#endif - -#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) -static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); -static struct enable_data 
preemptirqsoff_enabled_data = { - .latency_type = PREEMPTIRQSOFF_LATENCY, - .enabled = 0, -}; -#endif - -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -struct maxlatproc_data { - char comm[FIELD_SIZEOF(struct task_struct, comm)]; - char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; - int pid; - int current_pid; - int prio; - int current_prio; - long latency; - long timeroffset; - cycle_t timestamp; -}; -#endif - -#ifdef CONFIG_WAKEUP_LATENCY_HIST -static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); -static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); -static char *wakeup_latency_hist_dir = "wakeup"; -static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; -static notrace void probe_wakeup_latency_hist_start(void *v, - struct task_struct *p, int success); -static notrace void probe_wakeup_latency_hist_stop(void *v, - struct task_struct *prev, struct task_struct *next); -static notrace void probe_sched_migrate_task(void *, - struct task_struct *task, int cpu); -static struct enable_data wakeup_latency_enabled_data = { - .latency_type = WAKEUP_LATENCY, - .enabled = 0, -}; -static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); -static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); -static DEFINE_PER_CPU(struct task_struct *, wakeup_task); -static DEFINE_PER_CPU(int, wakeup_sharedprio); -static unsigned long wakeup_pid; -#endif - -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); -static char *missed_timer_offsets_dir = "missed_timer_offsets"; -static notrace void probe_hrtimer_interrupt(void *v, int cpu, - long long offset, struct task_struct *curr, struct task_struct *task); -static struct enable_data missed_timer_offsets_enabled_data = { - .latency_type = MISSED_TIMER_OFFSETS, - .enabled = 0, -}; -static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); -static unsigned long missed_timer_offsets_pid; -#endif - -#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); -static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; -static struct enable_data timerandwakeup_enabled_data = { - .latency_type = TIMERANDWAKEUP_LATENCY, - .enabled = 0, -}; -static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); -#endif - -void notrace latency_hist(int latency_type, int cpu, long latency, - long timeroffset, cycle_t stop, - struct task_struct *p) -{ - struct hist_data *my_hist; -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) - struct maxlatproc_data *mp = NULL; -#endif - - if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 || - latency_type >= MAX_LATENCY_TYPE) - return; - - switch (latency_type) { -#ifdef CONFIG_INTERRUPT_OFF_HIST - case IRQSOFF_LATENCY: - my_hist = &per_cpu(irqsoff_hist, cpu); - break; -#endif -#ifdef CONFIG_PREEMPT_OFF_HIST - case PREEMPTOFF_LATENCY: - my_hist = &per_cpu(preemptoff_hist, cpu); - break; -#endif -#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) - case PREEMPTIRQSOFF_LATENCY: - my_hist = &per_cpu(preemptirqsoff_hist, cpu); - break; -#endif -#ifdef CONFIG_WAKEUP_LATENCY_HIST - case WAKEUP_LATENCY: - my_hist = &per_cpu(wakeup_latency_hist, cpu); - mp = &per_cpu(wakeup_maxlatproc, cpu); - break; - case WAKEUP_LATENCY_SHAREDPRIO: - my_hist = &per_cpu(wakeup_latency_hist_sharedprio, 
cpu); - mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); - break; -#endif -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - case MISSED_TIMER_OFFSETS: - my_hist = &per_cpu(missed_timer_offsets, cpu); - mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); - break; -#endif -#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) - case TIMERANDWAKEUP_LATENCY: - my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); - mp = &per_cpu(timerandwakeup_maxlatproc, cpu); - break; -#endif - - default: - return; - } - - latency += my_hist->offset; - - if (atomic_read(&my_hist->hist_mode) == 0) - return; - - if (latency < 0 || latency >= MAX_ENTRY_NUM) { - if (latency < 0) - my_hist->below_hist_bound_samples++; - else - my_hist->above_hist_bound_samples++; - } else - my_hist->hist_array[latency]++; - - if (unlikely(latency > my_hist->max_lat || - my_hist->min_lat == LONG_MAX)) { -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) - if (latency_type == WAKEUP_LATENCY || - latency_type == WAKEUP_LATENCY_SHAREDPRIO || - latency_type == MISSED_TIMER_OFFSETS || - latency_type == TIMERANDWAKEUP_LATENCY) { - strncpy(mp->comm, p->comm, sizeof(mp->comm)); - strncpy(mp->current_comm, current->comm, - sizeof(mp->current_comm)); - mp->pid = task_pid_nr(p); - mp->current_pid = task_pid_nr(current); - mp->prio = p->prio; - mp->current_prio = current->prio; - mp->latency = latency; - mp->timeroffset = timeroffset; - mp->timestamp = stop; - } -#endif - my_hist->max_lat = latency; - } - if (unlikely(latency < my_hist->min_lat)) - my_hist->min_lat = latency; - my_hist->total_samples++; - my_hist->accumulate_lat += latency; -} - -static void *l_start(struct seq_file *m, loff_t *pos) -{ - loff_t *index_ptr = NULL; - loff_t index = *pos; - struct hist_data *my_hist = m->private; - - if (index == 0) { - char minstr[32], avgstr[32], maxstr[32]; - - atomic_dec(&my_hist->hist_mode); - - if (likely(my_hist->total_samples)) { - long avg = (long) div64_s64(my_hist->accumulate_lat, - my_hist->total_samples); - snprintf(minstr, sizeof(minstr), "%ld", - my_hist->min_lat - my_hist->offset); - snprintf(avgstr, sizeof(avgstr), "%ld", - avg - my_hist->offset); - snprintf(maxstr, sizeof(maxstr), "%ld", - my_hist->max_lat - my_hist->offset); - } else { - strcpy(minstr, ""); - strcpy(avgstr, minstr); - strcpy(maxstr, minstr); - } - - seq_printf(m, "#Minimum latency: %s microseconds\n" - "#Average latency: %s microseconds\n" - "#Maximum latency: %s microseconds\n" - "#Total samples: %llu\n" - "#There are %llu samples lower than %ld" - " microseconds.\n" - "#There are %llu samples greater or equal" - " than %ld microseconds.\n" - "#usecs\t%16s\n", - minstr, avgstr, maxstr, - my_hist->total_samples, - my_hist->below_hist_bound_samples, - -my_hist->offset, - my_hist->above_hist_bound_samples, - MAX_ENTRY_NUM - my_hist->offset, - "samples"); - } - if (index < MAX_ENTRY_NUM) { - index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); - if (index_ptr) - *index_ptr = index; - } - - return index_ptr; -} - -static void *l_next(struct seq_file *m, void *p, loff_t *pos) -{ - loff_t *index_ptr = p; - struct hist_data *my_hist = m->private; - - if (++*pos >= MAX_ENTRY_NUM) { - atomic_inc(&my_hist->hist_mode); - return NULL; - } - *index_ptr = *pos; - return index_ptr; -} - -static void l_stop(struct seq_file *m, void *p) -{ - kfree(p); -} - -static int l_show(struct seq_file *m, void *p) -{ - int index = *(loff_t *) p; - struct hist_data *my_hist = m->private; - - seq_printf(m, "%6ld\t%16llu\n", 
index - my_hist->offset, - my_hist->hist_array[index]); - return 0; -} - -static struct seq_operations latency_hist_seq_op = { - .start = l_start, - .next = l_next, - .stop = l_stop, - .show = l_show -}; - -static int latency_hist_open(struct inode *inode, struct file *file) -{ - int ret; - - ret = seq_open(file, &latency_hist_seq_op); - if (!ret) { - struct seq_file *seq = file->private_data; - seq->private = inode->i_private; - } - return ret; -} - -static struct file_operations latency_hist_fops = { - .open = latency_hist_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -static void clear_maxlatprocdata(struct maxlatproc_data *mp) -{ - mp->comm[0] = mp->current_comm[0] = '\0'; - mp->prio = mp->current_prio = mp->pid = mp->current_pid = - mp->latency = mp->timeroffset = -1; - mp->timestamp = 0; -} -#endif - -static void hist_reset(struct hist_data *hist) -{ - atomic_dec(&hist->hist_mode); - - memset(hist->hist_array, 0, sizeof(hist->hist_array)); - hist->below_hist_bound_samples = 0ULL; - hist->above_hist_bound_samples = 0ULL; - hist->min_lat = LONG_MAX; - hist->max_lat = LONG_MIN; - hist->total_samples = 0ULL; - hist->accumulate_lat = 0LL; - - atomic_inc(&hist->hist_mode); -} - -static ssize_t -latency_hist_reset(struct file *file, const char __user *a, - size_t size, loff_t *off) -{ - int cpu; - struct hist_data *hist = NULL; -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) - struct maxlatproc_data *mp = NULL; -#endif - off_t latency_type = (off_t) file->private_data; - - for_each_online_cpu(cpu) { - - switch (latency_type) { -#ifdef CONFIG_PREEMPT_OFF_HIST - case PREEMPTOFF_LATENCY: - hist = &per_cpu(preemptoff_hist, cpu); - break; -#endif -#ifdef CONFIG_INTERRUPT_OFF_HIST - case IRQSOFF_LATENCY: - hist = &per_cpu(irqsoff_hist, cpu); - break; -#endif -#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) - case PREEMPTIRQSOFF_LATENCY: - hist = &per_cpu(preemptirqsoff_hist, cpu); - break; -#endif -#ifdef CONFIG_WAKEUP_LATENCY_HIST - case WAKEUP_LATENCY: - hist = &per_cpu(wakeup_latency_hist, cpu); - mp = &per_cpu(wakeup_maxlatproc, cpu); - break; - case WAKEUP_LATENCY_SHAREDPRIO: - hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); - mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); - break; -#endif -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - case MISSED_TIMER_OFFSETS: - hist = &per_cpu(missed_timer_offsets, cpu); - mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); - break; -#endif -#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) - case TIMERANDWAKEUP_LATENCY: - hist = &per_cpu(timerandwakeup_latency_hist, cpu); - mp = &per_cpu(timerandwakeup_maxlatproc, cpu); - break; -#endif - } - - hist_reset(hist); -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) - if (latency_type == WAKEUP_LATENCY || - latency_type == WAKEUP_LATENCY_SHAREDPRIO || - latency_type == MISSED_TIMER_OFFSETS || - latency_type == TIMERANDWAKEUP_LATENCY) - clear_maxlatprocdata(mp); -#endif - } - - return size; -} - -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -static ssize_t -show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -{ - char buf[64]; - int r; - unsigned long *this_pid = file->private_data; - - r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); - return 
simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -} - -static ssize_t do_pid(struct file *file, const char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - char buf[64]; - unsigned long pid; - unsigned long *this_pid = file->private_data; - - if (cnt >= sizeof(buf)) - return -EINVAL; - - if (copy_from_user(&buf, ubuf, cnt)) - return -EFAULT; - - buf[cnt] = '\0'; - - if (strict_strtoul(buf, 10, &pid)) - return(-EINVAL); - - *this_pid = pid; - - return cnt; -} -#endif - -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -static ssize_t -show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -{ - int r; - struct maxlatproc_data *mp = file->private_data; - int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); - unsigned long long t; - unsigned long usecs, secs; - char *buf; - - if (mp->pid == -1 || mp->current_pid == -1) { - buf = "(none)\n"; - return simple_read_from_buffer(ubuf, cnt, ppos, buf, - strlen(buf)); - } - - buf = kmalloc(strmaxlen, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - t = ns2usecs(mp->timestamp); - usecs = do_div(t, USEC_PER_SEC); - secs = (unsigned long) t; - r = snprintf(buf, strmaxlen, - "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, - MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, - mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, - secs, usecs); - r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); - kfree(buf); - return r; -} -#endif - -static ssize_t -show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -{ - char buf[64]; - struct enable_data *ed = file->private_data; - int r; - - r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -} - -static ssize_t -do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) -{ - char buf[64]; - long enable; - struct enable_data *ed = file->private_data; - - if (cnt >= sizeof(buf)) - return -EINVAL; - - if (copy_from_user(&buf, ubuf, cnt)) - return -EFAULT; - - buf[cnt] = 0; - - if (strict_strtol(buf, 10, &enable)) - return(-EINVAL); - - if ((enable && ed->enabled) || (!enable && !ed->enabled)) - return cnt; - - if (enable) { - int ret; - - switch (ed->latency_type) { -#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) - case PREEMPTIRQSOFF_LATENCY: - ret = register_trace_preemptirqsoff_hist( - probe_preemptirqsoff_hist, NULL); - if (ret) { - pr_info("wakeup trace: Couldn't assign " - "probe_preemptirqsoff_hist " - "to trace_preemptirqsoff_hist\n"); - return ret; - } - break; -#endif -#ifdef CONFIG_WAKEUP_LATENCY_HIST - case WAKEUP_LATENCY: - ret = register_trace_sched_wakeup( - probe_wakeup_latency_hist_start, NULL); - if (ret) { - pr_info("wakeup trace: Couldn't assign " - "probe_wakeup_latency_hist_start " - "to trace_sched_wakeup\n"); - return ret; - } - ret = register_trace_sched_wakeup_new( - probe_wakeup_latency_hist_start, NULL); - if (ret) { - pr_info("wakeup trace: Couldn't assign " - "probe_wakeup_latency_hist_start " - "to trace_sched_wakeup_new\n"); - unregister_trace_sched_wakeup( - probe_wakeup_latency_hist_start, NULL); - return ret; - } - ret = register_trace_sched_switch( - probe_wakeup_latency_hist_stop, NULL); - if (ret) { - pr_info("wakeup trace: Couldn't assign " - "probe_wakeup_latency_hist_stop " - "to trace_sched_switch\n"); - unregister_trace_sched_wakeup( - probe_wakeup_latency_hist_start, NULL); - unregister_trace_sched_wakeup_new( - 
probe_wakeup_latency_hist_start, NULL); - return ret; - } - ret = register_trace_sched_migrate_task( - probe_sched_migrate_task, NULL); - if (ret) { - pr_info("wakeup trace: Couldn't assign " - "probe_sched_migrate_task " - "to trace_sched_migrate_task\n"); - unregister_trace_sched_wakeup( - probe_wakeup_latency_hist_start, NULL); - unregister_trace_sched_wakeup_new( - probe_wakeup_latency_hist_start, NULL); - unregister_trace_sched_switch( - probe_wakeup_latency_hist_stop, NULL); - return ret; - } - break; -#endif -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - case MISSED_TIMER_OFFSETS: - ret = register_trace_hrtimer_interrupt( - probe_hrtimer_interrupt, NULL); - if (ret) { - pr_info("wakeup trace: Couldn't assign " - "probe_hrtimer_interrupt " - "to trace_hrtimer_interrupt\n"); - return ret; - } - break; -#endif -#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) - case TIMERANDWAKEUP_LATENCY: - if (!wakeup_latency_enabled_data.enabled || - !missed_timer_offsets_enabled_data.enabled) - return -EINVAL; - break; -#endif - default: - break; - } - } else { - switch (ed->latency_type) { -#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) - case PREEMPTIRQSOFF_LATENCY: - { - int cpu; - - unregister_trace_preemptirqsoff_hist( - probe_preemptirqsoff_hist, NULL); - for_each_online_cpu(cpu) { -#ifdef CONFIG_INTERRUPT_OFF_HIST - per_cpu(hist_irqsoff_counting, - cpu) = 0; -#endif -#ifdef CONFIG_PREEMPT_OFF_HIST - per_cpu(hist_preemptoff_counting, - cpu) = 0; -#endif -#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) - per_cpu(hist_preemptirqsoff_counting, - cpu) = 0; -#endif - } - } - break; -#endif -#ifdef CONFIG_WAKEUP_LATENCY_HIST - case WAKEUP_LATENCY: - { - int cpu; - - unregister_trace_sched_wakeup( - probe_wakeup_latency_hist_start, NULL); - unregister_trace_sched_wakeup_new( - probe_wakeup_latency_hist_start, NULL); - unregister_trace_sched_switch( - probe_wakeup_latency_hist_stop, NULL); - unregister_trace_sched_migrate_task( - probe_sched_migrate_task, NULL); - - for_each_online_cpu(cpu) { - per_cpu(wakeup_task, cpu) = NULL; - per_cpu(wakeup_sharedprio, cpu) = 0; - } - } -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - timerandwakeup_enabled_data.enabled = 0; -#endif - break; -#endif -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - case MISSED_TIMER_OFFSETS: - unregister_trace_hrtimer_interrupt( - probe_hrtimer_interrupt, NULL); -#ifdef CONFIG_WAKEUP_LATENCY_HIST - timerandwakeup_enabled_data.enabled = 0; -#endif - break; -#endif - default: - break; - } - } - ed->enabled = enable; - return cnt; -} - -static const struct file_operations latency_hist_reset_fops = { - .open = tracing_open_generic, - .write = latency_hist_reset, -}; - -static const struct file_operations enable_fops = { - .open = tracing_open_generic, - .read = show_enable, - .write = do_enable, -}; - -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -static const struct file_operations pid_fops = { - .open = tracing_open_generic, - .read = show_pid, - .write = do_pid, -}; - -static const struct file_operations maxlatproc_fops = { - .open = tracing_open_generic, - .read = show_maxlatproc, -}; -#endif - -#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -static notrace void probe_preemptirqsoff_hist(void *v, int reason, - int starthist) -{ - int cpu = raw_smp_processor_id(); - int time_set = 0; - - if (starthist) { - cycle_t uninitialized_var(start); - - if (!preempt_count() && 
!irqs_disabled()) - return; - -#ifdef CONFIG_INTERRUPT_OFF_HIST - if ((reason == IRQS_OFF || reason == TRACE_START) && - !per_cpu(hist_irqsoff_counting, cpu)) { - per_cpu(hist_irqsoff_counting, cpu) = 1; - start = ftrace_now(cpu); - time_set++; - per_cpu(hist_irqsoff_start, cpu) = start; - } -#endif - -#ifdef CONFIG_PREEMPT_OFF_HIST - if ((reason == PREEMPT_OFF || reason == TRACE_START) && - !per_cpu(hist_preemptoff_counting, cpu)) { - per_cpu(hist_preemptoff_counting, cpu) = 1; - if (!(time_set++)) - start = ftrace_now(cpu); - per_cpu(hist_preemptoff_start, cpu) = start; - } -#endif - -#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) - if (per_cpu(hist_irqsoff_counting, cpu) && - per_cpu(hist_preemptoff_counting, cpu) && - !per_cpu(hist_preemptirqsoff_counting, cpu)) { - per_cpu(hist_preemptirqsoff_counting, cpu) = 1; - if (!time_set) - start = ftrace_now(cpu); - per_cpu(hist_preemptirqsoff_start, cpu) = start; - } -#endif - } else { - cycle_t uninitialized_var(stop); - -#ifdef CONFIG_INTERRUPT_OFF_HIST - if ((reason == IRQS_ON || reason == TRACE_STOP) && - per_cpu(hist_irqsoff_counting, cpu)) { - cycle_t start = per_cpu(hist_irqsoff_start, cpu); - - stop = ftrace_now(cpu); - time_set++; - if (start) { - long latency = ((long) (stop - start)) / - NSECS_PER_USECS; - - latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, - stop, NULL); - } - per_cpu(hist_irqsoff_counting, cpu) = 0; - } -#endif - -#ifdef CONFIG_PREEMPT_OFF_HIST - if ((reason == PREEMPT_ON || reason == TRACE_STOP) && - per_cpu(hist_preemptoff_counting, cpu)) { - cycle_t start = per_cpu(hist_preemptoff_start, cpu); - - if (!(time_set++)) - stop = ftrace_now(cpu); - if (start) { - long latency = ((long) (stop - start)) / - NSECS_PER_USECS; - - latency_hist(PREEMPTOFF_LATENCY, cpu, latency, - 0, stop, NULL); - } - per_cpu(hist_preemptoff_counting, cpu) = 0; - } -#endif - -#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) - if ((!per_cpu(hist_irqsoff_counting, cpu) || - !per_cpu(hist_preemptoff_counting, cpu)) && - per_cpu(hist_preemptirqsoff_counting, cpu)) { - cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); - - if (!time_set) - stop = ftrace_now(cpu); - if (start) { - long latency = ((long) (stop - start)) / - NSECS_PER_USECS; - - latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, - latency, 0, stop, NULL); - } - per_cpu(hist_preemptirqsoff_counting, cpu) = 0; - } -#endif - } -} -#endif - -#ifdef CONFIG_WAKEUP_LATENCY_HIST -static DEFINE_RAW_SPINLOCK(wakeup_lock); -static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, - int cpu) -{ - int old_cpu = task_cpu(task); - - if (cpu != old_cpu) { - unsigned long flags; - struct task_struct *cpu_wakeup_task; - - raw_spin_lock_irqsave(&wakeup_lock, flags); - - cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); - if (task == cpu_wakeup_task) { - put_task_struct(cpu_wakeup_task); - per_cpu(wakeup_task, old_cpu) = NULL; - cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; - get_task_struct(cpu_wakeup_task); - } - - raw_spin_unlock_irqrestore(&wakeup_lock, flags); - } -} - -static notrace void probe_wakeup_latency_hist_start(void *v, - struct task_struct *p, int success) -{ - unsigned long flags; - struct task_struct *curr = current; - int cpu = task_cpu(p); - struct task_struct *cpu_wakeup_task; - - raw_spin_lock_irqsave(&wakeup_lock, flags); - - cpu_wakeup_task = per_cpu(wakeup_task, cpu); - - if (wakeup_pid) { - if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || - p->prio == curr->prio) - 
per_cpu(wakeup_sharedprio, cpu) = 1; - if (likely(wakeup_pid != task_pid_nr(p))) - goto out; - } else { - if (likely(!rt_task(p)) || - (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || - p->prio > curr->prio) - goto out; - if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || - p->prio == curr->prio) - per_cpu(wakeup_sharedprio, cpu) = 1; - } - - if (cpu_wakeup_task) - put_task_struct(cpu_wakeup_task); - cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; - get_task_struct(cpu_wakeup_task); - cpu_wakeup_task->preempt_timestamp_hist = - ftrace_now(raw_smp_processor_id()); -out: - raw_spin_unlock_irqrestore(&wakeup_lock, flags); -} - -static notrace void probe_wakeup_latency_hist_stop(void *v, - struct task_struct *prev, struct task_struct *next) -{ - unsigned long flags; - int cpu = task_cpu(next); - long latency; - cycle_t stop; - struct task_struct *cpu_wakeup_task; - - raw_spin_lock_irqsave(&wakeup_lock, flags); - - cpu_wakeup_task = per_cpu(wakeup_task, cpu); - - if (cpu_wakeup_task == NULL) - goto out; - - /* Already running? */ - if (unlikely(current == cpu_wakeup_task)) - goto out_reset; - - if (next != cpu_wakeup_task) { - if (next->prio < cpu_wakeup_task->prio) - goto out_reset; - - if (next->prio == cpu_wakeup_task->prio) - per_cpu(wakeup_sharedprio, cpu) = 1; - - goto out; - } - - if (current->prio == cpu_wakeup_task->prio) - per_cpu(wakeup_sharedprio, cpu) = 1; - - /* - * The task we are waiting for is about to be switched to. - * Calculate latency and store it in histogram. - */ - stop = ftrace_now(raw_smp_processor_id()); - - latency = ((long) (stop - next->preempt_timestamp_hist)) / - NSECS_PER_USECS; - - if (per_cpu(wakeup_sharedprio, cpu)) { - latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, - next); - per_cpu(wakeup_sharedprio, cpu) = 0; - } else { - latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - if (timerandwakeup_enabled_data.enabled) { - latency_hist(TIMERANDWAKEUP_LATENCY, cpu, - next->timer_offset + latency, next->timer_offset, - stop, next); - } -#endif - } - -out_reset: -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST - next->timer_offset = 0; -#endif - put_task_struct(cpu_wakeup_task); - per_cpu(wakeup_task, cpu) = NULL; -out: - raw_spin_unlock_irqrestore(&wakeup_lock, flags); -} -#endif - -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -static notrace void probe_hrtimer_interrupt(void *v, int cpu, - long long latency_ns, struct task_struct *curr, struct task_struct *task) -{ - if (latency_ns <= 0 && task != NULL && rt_task(task) && - (task->prio < curr->prio || - (task->prio == curr->prio && - !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { - long latency; - cycle_t now; - - if (missed_timer_offsets_pid) { - if (likely(missed_timer_offsets_pid != - task_pid_nr(task))) - return; - } - - now = ftrace_now(cpu); - latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); - latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, - task); -#ifdef CONFIG_WAKEUP_LATENCY_HIST - task->timer_offset = latency; -#endif - } -} -#endif - -static __init int latency_hist_init(void) -{ - struct dentry *latency_hist_root = NULL; - struct dentry *dentry; -#ifdef CONFIG_WAKEUP_LATENCY_HIST - struct dentry *dentry_sharedprio; -#endif - struct dentry *entry; - struct dentry *enable_root; - int i = 0; - struct hist_data *my_hist; - char name[64]; - char *cpufmt = "CPU%d"; -#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ - defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) - char *cpufmt_maxlatproc = "max_latency-CPU%d"; - struct 
maxlatproc_data *mp = NULL; -#endif - - dentry = tracing_init_dentry(); - latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); - enable_root = debugfs_create_dir("enable", latency_hist_root); - -#ifdef CONFIG_INTERRUPT_OFF_HIST - dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); - for_each_possible_cpu(i) { - sprintf(name, cpufmt, i); - entry = debugfs_create_file(name, 0444, dentry, - &per_cpu(irqsoff_hist, i), &latency_hist_fops); - my_hist = &per_cpu(irqsoff_hist, i); - atomic_set(&my_hist->hist_mode, 1); - my_hist->min_lat = LONG_MAX; - } - entry = debugfs_create_file("reset", 0644, dentry, - (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); -#endif - -#ifdef CONFIG_PREEMPT_OFF_HIST - dentry = debugfs_create_dir(preemptoff_hist_dir, - latency_hist_root); - for_each_possible_cpu(i) { - sprintf(name, cpufmt, i); - entry = debugfs_create_file(name, 0444, dentry, - &per_cpu(preemptoff_hist, i), &latency_hist_fops); - my_hist = &per_cpu(preemptoff_hist, i); - atomic_set(&my_hist->hist_mode, 1); - my_hist->min_lat = LONG_MAX; - } - entry = debugfs_create_file("reset", 0644, dentry, - (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); -#endif - -#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) - dentry = debugfs_create_dir(preemptirqsoff_hist_dir, - latency_hist_root); - for_each_possible_cpu(i) { - sprintf(name, cpufmt, i); - entry = debugfs_create_file(name, 0444, dentry, - &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); - my_hist = &per_cpu(preemptirqsoff_hist, i); - atomic_set(&my_hist->hist_mode, 1); - my_hist->min_lat = LONG_MAX; - } - entry = debugfs_create_file("reset", 0644, dentry, - (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); -#endif - -#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) - entry = debugfs_create_file("preemptirqsoff", 0644, - enable_root, (void *)&preemptirqsoff_enabled_data, - &enable_fops); -#endif - -#ifdef CONFIG_WAKEUP_LATENCY_HIST - dentry = debugfs_create_dir(wakeup_latency_hist_dir, - latency_hist_root); - dentry_sharedprio = debugfs_create_dir( - wakeup_latency_hist_dir_sharedprio, dentry); - for_each_possible_cpu(i) { - sprintf(name, cpufmt, i); - - entry = debugfs_create_file(name, 0444, dentry, - &per_cpu(wakeup_latency_hist, i), - &latency_hist_fops); - my_hist = &per_cpu(wakeup_latency_hist, i); - atomic_set(&my_hist->hist_mode, 1); - my_hist->min_lat = LONG_MAX; - - entry = debugfs_create_file(name, 0444, dentry_sharedprio, - &per_cpu(wakeup_latency_hist_sharedprio, i), - &latency_hist_fops); - my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); - atomic_set(&my_hist->hist_mode, 1); - my_hist->min_lat = LONG_MAX; - - sprintf(name, cpufmt_maxlatproc, i); - - mp = &per_cpu(wakeup_maxlatproc, i); - entry = debugfs_create_file(name, 0444, dentry, mp, - &maxlatproc_fops); - clear_maxlatprocdata(mp); - - mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); - entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, - &maxlatproc_fops); - clear_maxlatprocdata(mp); - } - entry = debugfs_create_file("pid", 0644, dentry, - (void *)&wakeup_pid, &pid_fops); - entry = debugfs_create_file("reset", 0644, dentry, - (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); - entry = debugfs_create_file("reset", 0644, dentry_sharedprio, - (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); - entry = debugfs_create_file("wakeup", 0644, - enable_root, (void *)&wakeup_latency_enabled_data, - &enable_fops); -#endif - -#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST 
-	dentry = debugfs_create_dir(missed_timer_offsets_dir,
-	    latency_hist_root);
-	for_each_possible_cpu(i) {
-		sprintf(name, cpufmt, i);
-		entry = debugfs_create_file(name, 0444, dentry,
-		    &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
-		my_hist = &per_cpu(missed_timer_offsets, i);
-		atomic_set(&my_hist->hist_mode, 1);
-		my_hist->min_lat = LONG_MAX;
-
-		sprintf(name, cpufmt_maxlatproc, i);
-		mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
-		entry = debugfs_create_file(name, 0444, dentry, mp,
-		    &maxlatproc_fops);
-		clear_maxlatprocdata(mp);
-	}
-	entry = debugfs_create_file("pid", 0644, dentry,
-	    (void *)&missed_timer_offsets_pid, &pid_fops);
-	entry = debugfs_create_file("reset", 0644, dentry,
-	    (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
-	entry = debugfs_create_file("missed_timer_offsets", 0644,
-	    enable_root, (void *)&missed_timer_offsets_enabled_data,
-	    &enable_fops);
-#endif
-
-#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
-	defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
-	dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
-	    latency_hist_root);
-	for_each_possible_cpu(i) {
-		sprintf(name, cpufmt, i);
-		entry = debugfs_create_file(name, 0444, dentry,
-		    &per_cpu(timerandwakeup_latency_hist, i),
-		    &latency_hist_fops);
-		my_hist = &per_cpu(timerandwakeup_latency_hist, i);
-		atomic_set(&my_hist->hist_mode, 1);
-		my_hist->min_lat = LONG_MAX;
-
-		sprintf(name, cpufmt_maxlatproc, i);
-		mp = &per_cpu(timerandwakeup_maxlatproc, i);
-		entry = debugfs_create_file(name, 0444, dentry, mp,
-		    &maxlatproc_fops);
-		clear_maxlatprocdata(mp);
-	}
-	entry = debugfs_create_file("reset", 0644, dentry,
-	    (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
-	entry = debugfs_create_file("timerandwakeup", 0644,
-	    enable_root, (void *)&timerandwakeup_enabled_data,
-	    &enable_fops);
-#endif
-	return 0;
-}
-
-__initcall(latency_hist_init);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1616217..3c13e46 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -703,7 +703,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct ring_buffer *buf;
+	struct ring_buffer *buf = tr->buffer;
 
 	if (trace_stop_count)
 		return;
@@ -715,7 +715,6 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	}
 
 	arch_spin_lock(&ftrace_max_lock);
-	buf = tr->buffer;
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
@@ -1167,7 +1166,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 	struct task_struct *tsk = current;
 
 	entry->preempt_count = pc & 0xff;
-	entry->preempt_lazy_count = preempt_lazy_count();
 	entry->pid = (tsk) ? tsk->pid : 0;
 	entry->padding = 0;
 	entry->flags =
@@ -1178,10 +1176,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 #endif
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
-		(need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
-		(need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0);
-
-	entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
@@ -2034,17 +2029,14 @@ get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *e
 
 static void print_lat_help_header(struct seq_file *m)
 {
-	seq_puts(m, "# _--------=> CPU# \n");
-	seq_puts(m, "# / _-------=> irqs-off \n");
-	seq_puts(m, "# | / _------=> need-resched \n");
-	seq_puts(m, "# || / _-----=> need-resched_lazy \n");
-	seq_puts(m, "# ||| / _----=> hardirq/softirq \n");
-	seq_puts(m, "# |||| / _---=> preempt-depth \n");
-	seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n");
-	seq_puts(m, "# |||||| / _-=> migrate-disable \n");
-	seq_puts(m, "# ||||||| / delay \n");
-	seq_puts(m, "# cmd pid |||||||| time | caller \n");
-	seq_puts(m, "# \\ / |||||||| \\ | / \n");
+	seq_puts(m, "# _------=> CPU# \n");
+	seq_puts(m, "# / _-----=> irqs-off \n");
+	seq_puts(m, "# | / _----=> need-resched \n");
+	seq_puts(m, "# || / _---=> hardirq/softirq \n");
+	seq_puts(m, "# ||| / _--=> preempt-depth \n");
+	seq_puts(m, "# |||| / delay \n");
+	seq_puts(m, "# cmd pid ||||| time | caller \n");
+	seq_puts(m, "# \\ / ||||| \\ | / \n");
 }
 
 static void print_event_info(struct trace_array *tr, struct seq_file *m)
@@ -2068,16 +2060,13 @@ static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
 static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
 {
 	print_event_info(tr, m);
-	seq_puts(m, "# _-------=> irqs-off \n");
-	seq_puts(m, "# / _------=> need-resched \n");
-	seq_puts(m, "# |/ _-----=> need-resched_lazy \n");
-	seq_puts(m, "# ||/ _----=> hardirq/softirq \n");
-	seq_puts(m, "# |||/ _---=> preempt-depth \n");
-	seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n");
-	seq_puts(m, "# ||||| / _-=> migrate-disable \n");
-	seq_puts(m, "# |||||| / delay\n");
-	seq_puts(m, "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n");
-	seq_puts(m, "# | | | ||||||| | |\n");
+	seq_puts(m, "# _-----=> irqs-off\n");
+	seq_puts(m, "# / _----=> need-resched\n");
+	seq_puts(m, "# | / _---=> hardirq/softirq\n");
+	seq_puts(m, "# || / _--=> preempt-depth\n");
+	seq_puts(m, "# ||| / delay\n");
+	seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
+	seq_puts(m, "# | | | |||| | |\n");
 }
 
 void
@@ -2846,25 +2835,11 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 	return -EINVAL;
 }
 
-/* Some tracers require overwrite to stay enabled */
-int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
-{
-	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
-		return -1;
-
-	return 0;
-}
-
-int set_tracer_flag(unsigned int mask, int enabled)
+static void set_tracer_flags(unsigned int mask, int enabled)
 {
 	/* do nothing if flag is already set */
 	if (!!(trace_flags & mask) == !!enabled)
-		return 0;
-
-	/* Give the tracer a chance to approve the change */
-	if (current_trace->flag_changed)
-		if (current_trace->flag_changed(current_trace, mask, !!enabled))
-			return -EINVAL;
+		return;
 
 	if (enabled)
 		trace_flags |= mask;
@@ -2874,24 +2849,18 @@ int set_tracer_flag(unsigned int mask, int enabled)
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
 
-	if (mask == TRACE_ITER_OVERWRITE) {
+	if (mask == TRACE_ITER_OVERWRITE)
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
-#ifdef CONFIG_TRACER_MAX_TRACE
-		ring_buffer_change_overwrite(max_tr.buffer, enabled);
-#endif
-	}
 
 	if (mask == TRACE_ITER_PRINTK)
 		trace_printk_start_stop_comm(enabled);
-
-	return 0;
 }
 
 static int trace_set_options(char *option)
 {
 	char *cmp;
 	int neg = 0;
-	int ret = -ENODEV;
+	int ret = 0;
 	int i;
 
 	cmp = strstrip(option);
@@ -2901,20 +2870,19 @@ static int trace_set_options(char *option)
 		cmp += 2;
 	}
 
-	mutex_lock(&trace_types_lock);
-
 	for (i = 0; trace_options[i]; i++) {
 		if (strcmp(cmp, trace_options[i]) == 0) {
-			ret = set_tracer_flag(1 << i, !neg);
+			set_tracer_flags(1 << i, !neg);
 			break;
 		}
 	}
 
 	/* If no option could be set, test the specific tracer options */
-	if (!trace_options[i])
+	if (!trace_options[i]) {
+		mutex_lock(&trace_types_lock);
 		ret = set_tracer_option(current_trace, cmp, neg);
-
-	mutex_unlock(&trace_types_lock);
+		mutex_unlock(&trace_types_lock);
+	}
 
 	return ret;
 }
@@ -2924,7 +2892,6 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
-	int ret;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2934,9 +2901,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 	buf[cnt] = 0;
 
-	ret = trace_set_options(buf);
-	if (ret < 0)
-		return ret;
+	trace_set_options(buf);
 
 	*ppos += cnt;
 
@@ -3242,9 +3207,6 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 
 	trace_branch_disable();
-
-	current_trace->enabled = false;
-
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
 	if (current_trace && current_trace->use_max_tr) {
@@ -3276,7 +3238,6 @@ static int tracing_set_tracer(const char *buf)
 	}
 
 	current_trace = t;
-	current_trace->enabled = true;
 	trace_branch_enable(tr);
 out:
 	mutex_unlock(&trace_types_lock);
@@ -4679,13 +4640,7 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	if (val != 0 && val != 1)
 		return -EINVAL;
-
-	mutex_lock(&trace_types_lock);
-	ret = set_tracer_flag(1 << index, val);
-	mutex_unlock(&trace_types_lock);
-
-	if (ret < 0)
-		return ret;
+	set_tracer_flags(1 << index, val);
 
 	*ppos += cnt;
 
@@ -4895,8 +4850,6 @@ static __init int tracer_init_debugfs(void)
 	trace_access_lock_init();
 
 	d_tracer = tracing_init_dentry();
-	if (!d_tracer)
-		return 0;
 
 	trace_create_file("trace_options", 0644, d_tracer,
 			NULL, &tracing_iter_fops);
@@ -5030,32 +4983,36 @@ void trace_init_global_iter(struct trace_iterator *iter)
 	iter->cpu_file = TRACE_PIPE_ALL_CPU;
 }
 
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
-	static atomic_t dump_running;
 	unsigned int old_userobj;
+	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
 
-	/* Only allow one dump user at a time. */
-	if (atomic_inc_return(&dump_running) != 1) {
-		atomic_dec(&dump_running);
-		return;
-	}
+	/* only one dump */
+	local_irq_save(flags);
+	arch_spin_lock(&ftrace_dump_lock);
+	if (dump_ran)
+		goto out;
+
+	dump_ran = 1;
 
-	/*
-	 * Always turn off tracing when we dump.
-	 * We don't need to show trace output of what happens
-	 * between multiple crashes.
-	 *
-	 * If the user does a sysrq-z, then they can re-enable
-	 * tracing with echo 1 > tracing_on.
-	 */
 	tracing_off();
 
-	local_irq_save(flags);
+	/* Did function tracer already get disabled? */
+	if (ftrace_is_dead()) {
+		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
+	}
+
+	if (disable_tracing)
+		ftrace_kill();
 
 	trace_init_global_iter(&iter);
 
@@ -5088,12 +5045,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 
 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
-	/* Did function tracer already get disabled? */
-	if (ftrace_is_dead()) {
-		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
-		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
-	}
-
 	/*
 	 * We need to stop all tracing on all CPUS to read the
 	 * the next buffer. This is a bit expensive, but is
@@ -5133,14 +5084,26 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 	printk(KERN_TRACE "---------------------------------\n");
 
  out_enable:
-	trace_flags |= old_userobj;
+	/* Re-enable tracing if requested */
+	if (!disable_tracing) {
+		trace_flags |= old_userobj;
 
-	for_each_tracing_cpu(cpu) {
-		atomic_dec(&iter.tr->data[cpu]->disabled);
+		for_each_tracing_cpu(cpu) {
+			atomic_dec(&iter.tr->data[cpu]->disabled);
+		}
+		tracing_on();
 	}
 
-	atomic_dec(&dump_running);
+
+ out:
+	arch_spin_unlock(&ftrace_dump_lock);
 	local_irq_restore(flags);
 }
+
+/* By default: disable tracing after the dump */
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+{
+	__ftrace_dump(true, oops_dump_mode);
+}
 EXPORT_SYMBOL_GPL(ftrace_dump);
 
 __init static int tracer_alloc_buffers(void)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 15f4a31..c75d798 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -116,7 +116,6 @@ struct uprobe_trace_entry_head {
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
- *  NEED_RESCHED_LAZY	- lazy reschedule is requested
 */
 enum trace_flag_type {
 	TRACE_FLAG_IRQS_OFF = 0x01,
@@ -124,7 +123,6 @@ enum trace_flag_type {
 	TRACE_FLAG_NEED_RESCHED = 0x04,
 	TRACE_FLAG_HARDIRQ = 0x08,
 	TRACE_FLAG_SOFTIRQ = 0x10,
-	TRACE_FLAG_NEED_RESCHED_LAZY = 0x20,
 };
 
 #define TRACE_BUF_SIZE 1024
@@ -285,14 +283,10 @@ struct tracer {
 	enum print_line_t (*print_line)(struct trace_iterator *iter);
 	/* If you handled the flag setting, return 0 */
 	int (*set_flag)(u32 old_flags, u32 bit, int set);
-	/* Return 0 if OK with change, else return non-zero */
-	int (*flag_changed)(struct tracer *tracer,
-			u32 mask, int set);
 	struct tracer *next;
 	struct tracer_flags *flags;
 	bool print_max;
 	bool use_max_tr;
-	bool enabled;
 };
 
@@ -841,8 +835,6 @@ extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
-int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
-int set_tracer_flag(unsigned int mask, int enabled);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index a45a22d..880073d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -116,8 +116,7 @@ static int trace_define_common_fields(void)
 	__common_field(unsigned char, flags);
 	__common_field(unsigned char, preempt_count);
 	__common_field(int, pid);
-	__common_field(unsigned short, migrate_disable);
-	__common_field(unsigned short, padding);
+	__common_field(int, padding);
 
 	return ret;
 }
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index f283bd0..713a2ca 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -17,7 +17,6 @@
 #include
 
 #include "trace.h"
-#include
 
 static struct trace_array *irqsoff_trace __read_mostly;
 static int tracer_enabled __read_mostly;
@@ -33,7 +32,7 @@ enum {
 
 static int trace_type __read_mostly;
 
-static int save_flags;
+static int save_lat_flag;
 
 static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -439,13 +438,11 @@ void start_critical_timings(void)
 {
 	if (preempt_trace() || irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-	trace_preemptirqsoff_hist(TRACE_START, 1);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
-	trace_preemptirqsoff_hist(TRACE_STOP, 0);
 	if (preempt_trace() || irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
@@ -455,7 +452,6 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
 #ifdef CONFIG_PROVE_LOCKING
 void time_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-	trace_preemptirqsoff_hist(IRQS_ON, 0);
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(a0, a1);
 }
@@ -464,7 +460,6 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(a0, a1);
-	trace_preemptirqsoff_hist(IRQS_OFF, 1);
 }
 
 #else /* !CONFIG_PROVE_LOCKING */
@@ -490,7 +485,6 @@ inline void print_irqtrace_events(struct task_struct *curr)
 */
 void trace_hardirqs_on(void)
 {
-	trace_preemptirqsoff_hist(IRQS_ON, 0);
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
@@ -500,13 +494,11 @@ void trace_hardirqs_off(void)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-	trace_preemptirqsoff_hist(IRQS_OFF, 1);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
-	trace_preemptirqsoff_hist(IRQS_ON, 0);
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
@@ -516,7 +508,6 @@ void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, caller_addr);
-	trace_preemptirqsoff_hist(IRQS_OFF, 1);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
@@ -526,14 +517,12 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #ifdef CONFIG_PREEMPT_TRACER
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	trace_preemptirqsoff_hist(PREEMPT_ON, 0);
 	if (preempt_trace() && !irq_trace())
 		stop_critical_timing(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	trace_preemptirqsoff_hist(PREEMPT_ON, 1);
 	if (preempt_trace() && !irq_trace())
 		start_critical_timing(a0, a1);
 }
@@ -569,11 +558,8 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
-	save_flags = trace_flags;
-
-	/* non overwrite screws up the latency tracers */
-	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
-	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+	trace_flags |= TRACE_ITER_LATENCY_FMT;
 
 	tracing_max_latency = 0;
 	irqsoff_trace = tr;
@@ -587,13 +573,10 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
-	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
-	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
-
 	stop_irqsoff_tracer(tr, is_graph());
 
-	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
-	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+	if (!save_lat_flag)
+		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
@@ -626,7 +609,6 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.print_line = irqsoff_print_line,
 	.flags = &tracer_flags,
 	.set_flag = irqsoff_set_flag,
-	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_irqsoff,
 #endif
@@ -660,7 +642,6 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.print_line = irqsoff_print_line,
 	.flags = &tracer_flags,
 	.set_flag = irqsoff_set_flag,
-	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_preemptoff,
 #endif
@@ -696,7 +677,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.print_line = irqsoff_print_line,
 	.flags = &tracer_flags,
 	.set_flag = irqsoff_set_flag,
-	.flag_changed = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_preemptirqsoff,
 #endif
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 2b0aea4..194d796 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -564,7 +564,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 {
 	char hardsoft_irq;
 	char need_resched;
-	char need_resched_lazy;
 	char irqs_off;
 	int hardirq;
 	int softirq;
@@ -579,17 +578,14 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 		'.';
 	need_resched =
 		(entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
-	need_resched_lazy =
-		(entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
 	hardsoft_irq =
 		(hardirq && softirq) ? 'H' :
 		hardirq ? 'h' :
 		softirq ? 's' :
 		'.';
 
-	if (!trace_seq_printf(s, "%c%c%c%c",
-			      irqs_off, need_resched, need_resched_lazy,
-			      hardsoft_irq))
+	if (!trace_seq_printf(s, "%c%c%c",
+			      irqs_off, need_resched, hardsoft_irq))
 		return 0;
 
 	if (entry->preempt_count)
@@ -597,16 +593,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 	else
 		ret = trace_seq_putc(s, '.');
 
-	if (entry->preempt_lazy_count)
-		ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count);
-	else
-		ret = trace_seq_putc(s, '.');
-
-	if (entry->migrate_disable)
-		ret = trace_seq_printf(s, "%x", entry->migrate_disable);
-	else
-		ret = trace_seq_putc(s, '.');
-
 	return ret;
 }
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 17bfec6..9fe45fc 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
 static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
-static int save_flags;
+static int save_lat_flag;
 
 #define TRACE_DISPLAY_GRAPH 1
 
@@ -540,11 +540,8 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
-	save_flags = trace_flags;
-
-	/* non overwrite screws up the latency tracers */
-	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
-	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+	trace_flags |= TRACE_ITER_LATENCY_FMT;
 
 	tracing_max_latency = 0;
 	wakeup_trace = tr;
@@ -566,15 +563,12 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
-	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
-	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
-
 	stop_wakeup_tracer(tr);
 	/* make sure we put back any tasks we are tracing */
 	wakeup_reset(tr);
 
-	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
-	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+	if (!save_lat_flag)
+		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
@@ -600,7 +594,6 @@ static struct tracer wakeup_tracer __read_mostly =
 	.print_line = wakeup_print_line,
 	.flags = &tracer_flags,
 	.set_flag = wakeup_set_flag,
-	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_wakeup,
 #endif
@@ -622,7 +615,6 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.print_line = wakeup_print_line,
 	.flags = &tracer_flags,
 	.set_flag = wakeup_set_flag,
-	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_wakeup,
 #endif
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 81f6275..4762316 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -452,6 +452,7 @@ trace_selftest_function_recursion(void)
 	char *func_name;
 	int len;
 	int ret;
+	int cnt;
 
 	/* The previous test PASSED */
 	pr_cont("PASSED\n");
@@ -509,10 +510,19 @@ trace_selftest_function_recursion(void)
 
 	unregister_ftrace_function(&test_recsafe_probe);
 
+	/*
+	 * If arch supports all ftrace features, and no other task
+	 * was on the list, we should be fine.
+	 */
+	if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
+		cnt = 2; /* Should have recursed */
+	else
+		cnt = 1;
+
 	ret = -1;
-	if (trace_selftest_recursion_cnt != 2) {
-		pr_cont("*callback not called expected 2 times (%d)* ",
-			trace_selftest_recursion_cnt);
+	if (trace_selftest_recursion_cnt != cnt) {
+		pr_cont("*callback not called expected %d times (%d)* ",
+			cnt, trace_selftest_recursion_cnt);
 		goto out;
 	}
 
@@ -702,6 +712,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 /* Maximum number of functions to trace before diagnosing a hang */
 #define GRAPH_MAX_FUNC_TEST 100000000
 
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
 static unsigned int graph_hang_thresh;
 
 /* Wrap the real function entry probe to avoid possible hanging */
@@ -711,11 +723,8 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 		ftrace_graph_stop();
 		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
-		if (ftrace_dump_on_oops) {
-			ftrace_dump(DUMP_ALL);
-			/* ftrace_dump() disables tracing */
-			tracing_on();
-		}
+		if (ftrace_dump_on_oops)
+			__ftrace_dump(false, DUMP_ALL);
 		return 0;
 	}
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index b20428c..42ca822 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -20,24 +20,13 @@
 
 #define STACK_TRACE_ENTRIES 500
 
-#ifdef CC_USING_FENTRY
-# define fentry		1
-#else
-# define fentry		0
-#endif
-
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
 	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
 static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
 
-/*
- * Reserve one entry for the passed in ip. This will allow
- * us to remove most or all of the stack size overhead
- * added by the stack tracer itself.
- */
 static struct stack_trace max_stack_trace = {
-	.max_entries		= STACK_TRACE_ENTRIES - 1,
-	.entries		= &stack_dump_trace[1],
+	.max_entries		= STACK_TRACE_ENTRIES,
+	.entries		= stack_dump_trace,
 };
 
 static unsigned long max_stack_size;
@@ -50,34 +39,25 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void
-check_stack(unsigned long ip, unsigned long *stack)
+static inline void check_stack(void)
 {
 	unsigned long this_size, flags;
 	unsigned long *p, *top, *start;
-	static int tracer_frame;
-	int frame_size = ACCESS_ONCE(tracer_frame);
 	int i;
 
-	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
+	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
 	this_size = THREAD_SIZE - this_size;
-	/* Remove the frame of the tracer */
-	this_size -= frame_size;
 
 	if (this_size <= max_stack_size)
 		return;
 
 	/* we do not handle interrupt stacks yet */
-	if (!object_is_on_stack(stack))
+	if (!object_is_on_stack(&this_size))
 		return;
 
 	local_irq_save(flags);
 	arch_spin_lock(&max_stack_lock);
 
-	/* In case another CPU set the tracer_frame on us */
-	if (unlikely(!frame_size))
-		this_size -= tracer_frame;
-
 	/* a race could have already updated it */
 	if (this_size <= max_stack_size)
 		goto out;
@@ -90,18 +70,10 @@ check_stack(unsigned long ip, unsigned long *stack)
 	save_stack_trace(&max_stack_trace);
 
 	/*
-	 * Add the passed in ip from the function tracer.
-	 * Searching for this on the stack will skip over
-	 * most of the overhead from the stack tracer itself.
-	 */
-	stack_dump_trace[0] = ip;
-	max_stack_trace.nr_entries++;
-
-	/*
 	 * Now find where in the stack these are.
 	 */
 	i = 0;
-	start = stack;
+	start = &this_size;
 	top = (unsigned long *)
 		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
@@ -125,18 +97,6 @@ check_stack(unsigned long ip, unsigned long *stack)
 				found = 1;
 				/* Start the search from here */
 				start = p + 1;
-				/*
-				 * We do not want to show the overhead
-				 * of the stack tracer stack in the
-				 * max stack. If we haven't figured
-				 * out what that is, then figure it out
-				 * now.
-				 */
-				if (unlikely(!tracer_frame) && i == 1) {
-					tracer_frame = (p - stack) *
-						sizeof(unsigned long);
-					max_stack_size -= tracer_frame;
-				}
 			}
 		}
 
@@ -153,7 +113,6 @@ static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
 		 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	unsigned long stack;
 	int cpu;
 
 	preempt_disable_notrace();
@@ -163,26 +122,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	if (per_cpu(trace_active, cpu)++ != 0)
 		goto out;
 
-	/*
-	 * When fentry is used, the traced function does not get
-	 * its stack frame set up, and we lose the parent.
-	 * The ip is pretty useless because the function tracer
-	 * was called before that function set up its stack frame.
-	 * In this case, we use the parent ip.
-	 *
-	 * By adding the return address of either the parent ip
-	 * or the current ip we can disregard most of the stack usage
-	 * caused by the stack tracer itself.
-	 *
-	 * The function tracer always reports the address of where the
-	 * mcount call was, but the stack will hold the return address.
-	 */
-	if (fentry)
-		ip = parent_ip;
-	else
-		ip += MCOUNT_INSN_SIZE;
-
-	check_stack(ip, &stack);
+	check_stack();
 
  out:
 	per_cpu(trace_active, cpu)--;
@@ -382,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {
 	.open = stack_trace_filter_open,
 	.read = seq_read,
 	.write = ftrace_filter_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = ftrace_regex_lseek,
 	.release = ftrace_regex_release,
 };
 
@@ -431,8 +371,6 @@ static __init int stack_trace_init(void)
 	struct dentry *d_tracer;
 
 	d_tracer = tracing_init_dentry();
-	if (!d_tracer)
-		return 0;
 
 	trace_create_file("stack_max_size", 0644, d_tracer,
 			&max_stack_size, &stack_max_size_fops);
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 847f88a..96cffb2 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -307,8 +307,6 @@ static int tracing_stat_init(void)
 	struct dentry *d_tracing;
 
 	d_tracing = tracing_init_dentry();
-	if (!d_tracing)
-		return 0;
 
 	stat_dir = debugfs_create_dir("trace_stat", d_tracing);
 	if (!stat_dir)
diff --git a/kernel/user.c b/kernel/user.c
index 68b70d7..33acb5e 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -53,8 +53,6 @@ struct user_namespace init_user_ns = {
 	.owner = GLOBAL_ROOT_UID,
 	.group = GLOBAL_ROOT_GID,
 	.proc_inum = PROC_USER_INIT_INO,
-	.may_mount_sysfs = true,
-	.may_mount_proc = true,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
@@ -159,11 +157,11 @@ void free_uid(struct user_struct *up)
 	if (!up)
 		return;
 
-	local_irq_save_nort(flags);
+	local_irq_save(flags);
 	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
 		free_user(up, flags);
 	else
-		local_irq_restore_nort(flags);
+		local_irq_restore(flags);
 }
 
 struct user_struct *alloc_uid(kuid_t uid)
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index f359dc7..2b042c4 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -21,12 +21,10 @@
 #include
 #include
 #include
-#include
 
 static struct kmem_cache *user_ns_cachep __read_mostly;
 
-static bool new_idmap_permitted(const struct file *file,
-				struct user_namespace *ns, int cap_setid,
+static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
 				struct uid_gid_map *map);
 
 static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
@@ -62,15 +60,6 @@ int create_user_ns(struct cred *new)
 	kgid_t group = new->egid;
 	int ret;
 
-	/*
-	 * Verify that we can not violate the policy of which files
-	 * may be accessed that is specified by the root directory,
-	 * by verifing that the root directory is at the root of the
-	 * mount namespace which allows all files to be accessed.
-	 */
-	if (current_chrooted())
-		return -EPERM;
-
 	/* The creator needs a mapping in the parent user namespace
 	 * or else we won't be able to reasonably tell userspace who
 	 * created a user_namespace.
@@ -97,8 +86,6 @@ int create_user_ns(struct cred *new)
 
 	set_cred_user_ns(new, ns);
 
-	update_mnt_policy(ns);
-
 	return 0;
 }
 
@@ -576,10 +563,10 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	if (map->nr_extents != 0)
 		goto out;
 
-	/*
-	 * Adjusting namespace settings requires capabilities on the target.
+	/* Require the appropriate privilege CAP_SETUID or CAP_SETGID
 	 * over the user namespace in order to set the id mapping.
 	 */
-	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
+	if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid))
 		goto out;
 
 	/* Get a buffer */
@@ -667,7 +654,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	ret = -EPERM;
 	/* Validate the user is allowed to use user id's mapped to. */
-	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
+	if (!new_idmap_permitted(ns, cap_setid, &new_map))
 		goto out;
 
 	/* Map the lower ids from the parent user namespace to the
@@ -754,8 +741,7 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t
 			 &ns->projid_map,
 			 &ns->parent->projid_map);
 }
 
-static bool new_idmap_permitted(const struct file *file,
-				struct user_namespace *ns, int cap_setid,
+static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
 				struct uid_gid_map *new_map)
 {
 	/* Allow mapping to your own filesystem ids */
@@ -763,12 +749,12 @@ static bool new_idmap_permitted(const struct file *file,
 		u32 id = new_map->extent[0].lower_first;
 		if (cap_setid == CAP_SETUID) {
 			kuid_t uid = make_kuid(ns->parent, id);
-			if (uid_eq(uid, file->f_cred->fsuid))
+			if (uid_eq(uid, current_fsuid()))
 				return true;
 		} else if (cap_setid == CAP_SETGID) {
 			kgid_t gid = make_kgid(ns->parent, id);
-			if (gid_eq(gid, file->f_cred->fsgid))
+			if (gid_eq(gid, current_fsgid()))
 				return true;
 		}
 	}
@@ -779,10 +765,8 @@ static bool new_idmap_permitted(const struct file *file,
 
 	/* Allow the specified ids if we have the appropriate capability
 	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
-	 * And the opener of the id file also had the approprpiate capability.
 	 */
-	if (ns_capable(ns->parent, cap_setid) &&
-	    file_ns_capable(file, ns->parent, cap_setid))
+	if (ns_capable(ns->parent, cap_setid))
 		return true;
 
 	return false;
@@ -819,9 +803,6 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
 	if (atomic_read(&current->mm->mm_users) > 1)
 		return -EINVAL;
 
-	if (current->fs->users != 1)
-		return -EINVAL;
-
 	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
 		return -EPERM;
 
diff --git a/kernel/wait-simple.c b/kernel/wait-simple.c
deleted file mode 100644
index 4b9a0b5..0000000
--- a/kernel/wait-simple.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Simple waitqueues without fancy flags and callbacks
- *
- * (C) 2011 Thomas Gleixner
- *
- * Based on kernel/wait.c
- *
- * For licencing details see kernel-base/COPYING
- */
-#include
-#include
-#include
-#include
-
-/* Adds w to head->list. Must be called with head->lock locked. */
-static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
-{
-	list_add(&w->node, &head->list);
-}
-
-/* Removes w from head->list. Must be called with head->lock locked. */
-static inline void __swait_dequeue(struct swaiter *w)
-{
-	list_del_init(&w->node);
-}
-
-/* Check whether a head has waiters enqueued */
-static inline bool swait_head_has_waiters(struct swait_head *h)
-{
-	return !list_empty(&h->list);
-}
-
-void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
-{
-	raw_spin_lock_init(&head->lock);
-	lockdep_set_class(&head->lock, key);
-	INIT_LIST_HEAD(&head->list);
-}
-EXPORT_SYMBOL(__init_swait_head);
-
-void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
-{
-	w->task = current;
-	if (list_empty(&w->node))
-		__swait_enqueue(head, w);
-}
-
-void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&head->lock, flags);
-	swait_prepare_locked(head, w);
-	__set_current_state(state);
-	raw_spin_unlock_irqrestore(&head->lock, flags);
-}
-EXPORT_SYMBOL(swait_prepare);
-
-void swait_finish_locked(struct swait_head *head, struct swaiter *w)
-{
-	__set_current_state(TASK_RUNNING);
-	if (w->task)
-		__swait_dequeue(w);
-}
-
-void swait_finish(struct swait_head *head, struct swaiter *w)
-{
-	unsigned long flags;
-
-	__set_current_state(TASK_RUNNING);
-	if (w->task) {
-		raw_spin_lock_irqsave(&head->lock, flags);
-		__swait_dequeue(w);
-		raw_spin_unlock_irqrestore(&head->lock, flags);
-	}
-}
-EXPORT_SYMBOL(swait_finish);
-
-unsigned int
-__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
-{
-	struct swaiter *curr, *next;
-	int woken = 0;
-
-	list_for_each_entry_safe(curr, next, &head->list, node) {
-		if (wake_up_state(curr->task, state)) {
-			__swait_dequeue(curr);
-			/*
-			 * The waiting task can free the waiter as
-			 * soon as curr->task = NULL is written,
-			 * without taking any locks. A memory barrier
-			 * is required here to prevent the following
-			 * store to curr->task from getting ahead of
-			 * the dequeue operation.
-			 */
-			smp_wmb();
-			curr->task = NULL;
-			if (++woken == num)
-				break;
-		}
-	}
-	return woken;
-}
-
-unsigned int
-__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
-{
-	unsigned long flags;
-	int woken;
-
-	if (!swait_head_has_waiters(head))
-		return 0;
-
-	raw_spin_lock_irqsave(&head->lock, flags);
-	woken = __swait_wake_locked(head, state, num);
-	raw_spin_unlock_irqrestore(&head->lock, flags);
-	return woken;
-}
-EXPORT_SYMBOL(__swait_wake);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7bbc18a..75a2ab3 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -206,8 +206,6 @@ static int is_softlockup(unsigned long touch_ts)
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 
-static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
-
 static struct perf_event_attr wd_hw_attr = {
 	.type		= PERF_TYPE_HARDWARE,
 	.config		= PERF_COUNT_HW_CPU_CYCLES,
@@ -242,19 +240,10 @@ static void watchdog_overflow_callback(struct perf_event *event,
 		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
-		/*
-		 * If early-printk is enabled then make sure we do not
-		 * lock up in printk() and kill console logging:
-		 */
-		printk_kill();
-
-		if (hardlockup_panic) {
+		if (hardlockup_panic)
 			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-		} else {
-			raw_spin_lock(&watchdog_output_lock);
+		else
 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-			raw_spin_unlock(&watchdog_output_lock);
-		}
 
 		__this_cpu_write(hard_watchdog_warn, true);
 		return;
@@ -358,7 +347,6 @@ static void watchdog_enable(unsigned int cpu)
 	/* kick off the timer for the hardlockup detector */
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
-	hrtimer->irqsafe = 1;
 
 	if (!watchdog_enabled) {
 		kthread_park(current);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 11285e4..fbc6576 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,7 +41,6 @@
 #include
 #include
 #include
-#include
 
 #include "workqueue_sched.h"
 
@@ -139,7 +138,6 @@ struct worker {
 	};
 
 	struct work_struct	*current_work;	/* L: work being processed */
-	work_func_t		current_func;	/* L: current_work's fn */
 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 	struct list_head	scheduled;	/* L: scheduled works */
 	struct task_struct	*task;		/* I: worker task */
@@ -279,8 +277,6 @@ EXPORT_SYMBOL_GPL(system_unbound_wq);
 struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_freezable_wq);
 
-static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
-
 #define CREATE_TRACE_POINTS
 #include
 
@@ -914,8 +910,7 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 	struct hlist_node *tmp;
 
 	hlist_for_each_entry(worker, tmp, bwh, hentry)
-		if (worker->current_work == work &&
-		    worker->current_func == work->func)
+		if (worker->current_work == work)
 			return worker;
 
 	return NULL;
 }
@@ -925,27 +920,9 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
- * Find a worker which is executing @work on @gcwq by searching
- * @gcwq->busy_hash which is keyed by the address of @work. For a worker
- * to match, its current execution should match the address of @work and
- * its work function. This is to avoid unwanted dependency between
- * unrelated work executions through a work item being recycled while still
- * being executed.
- *
- * This is a bit tricky. A work item may be freed once its execution
- * starts and nothing prevents the freed area from being recycled for
- * another work item. If the same work item address ends up being reused
- * before the original execution finishes, workqueue will identify the
- * recycled work item as currently executing and make it wait until the
- * current execution finishes, introducing an unwanted dependency.
- *
- * This function checks the work item address, work function and workqueue
- * to avoid false positives. Note that this isn't complete as one may
- * construct a work function which can introduce dependency onto itself
- * through a recycled work item. Well, if somebody wants to shoot oneself
- * in the foot that badly, there's only so much we can do, and if such
- * deadlock actually occurs, it should be easy to locate the culprit work
- * function.
+ * Find a worker which is executing @work on @gcwq. This function is
+ * identical to __find_worker_executing_work() except that this
+ * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
@@ -1095,7 +1072,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 {
 	struct global_cwq *gcwq;
 
-	local_lock_irqsave(pendingb_lock, *flags);
+	local_irq_save(*flags);
 
 	/* try to steal the timer if it exists */
 	if (is_dwork) {
@@ -1154,7 +1131,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 	}
 	spin_unlock(&gcwq->lock);
 fail:
-	local_unlock_irqrestore(pendingb_lock, *flags);
+	local_irq_restore(*flags);
 	if (work_is_canceling(work))
 		return -ENOENT;
 	cpu_relax();
@@ -1249,7 +1226,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	 * queued or lose PENDING. Grabbing PENDING and queueing should
 	 * happen with IRQ disabled.
 	 */
-	WARN_ON_ONCE_NONRT(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 
 	debug_work_activate(work);
 
@@ -1339,14 +1316,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
 	bool ret = false;
 	unsigned long flags;
 
-	local_lock_irqsave(pendingb_lock,flags);
+	local_irq_save(flags);
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 		__queue_work(cpu, wq, work);
 		ret = true;
 	}
 
-	local_unlock_irqrestore(pendingb_lock, flags);
+	local_irq_restore(flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work_on);
@@ -1375,7 +1352,7 @@ void delayed_work_timer_fn(unsigned long __data)
 	/* should have been called from irqsafe timer with irq already off */
 	__queue_work(dwork->cpu, cwq->wq, &dwork->work);
 }
-EXPORT_SYMBOL(delayed_work_timer_fn);
+EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
 
 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 				struct delayed_work *dwork, unsigned long delay)
@@ -1454,14 +1431,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	unsigned long flags;
 
 	/* read the comment in __queue_work() */
-	local_lock_irqsave(pendingb_lock, flags);
+	local_irq_save(flags);
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 		__queue_delayed_work(cpu, wq, dwork, delay);
 		ret = true;
 	}
 
-	local_unlock_irqrestore(pendingb_lock, flags);
+	local_irq_restore(flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
@@ -1511,7 +1488,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 	if (likely(ret >= 0)) {
 		__queue_delayed_work(cpu, wq, dwork, delay);
-		local_unlock_irqrestore(pendingb_lock, flags);
+		local_irq_restore(flags);
 	}
 
 	/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -2191,6 +2168,7 @@ __acquires(&gcwq->lock)
 	struct global_cwq *gcwq = pool->gcwq;
 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
+	work_func_t f = work->func;
 	int work_color;
 	struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -2230,7 +2208,6 @@ __acquires(&gcwq->lock)
 	debug_work_deactivate(work);
 	hlist_add_head(&worker->hentry, bwh);
 	worker->current_work = work;
-	worker->current_func = work->func;
 	worker->current_cwq = cwq;
 	work_color = get_work_color(work);
 
@@ -2263,7 +2240,7 @@ __acquires(&gcwq->lock)
 	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
-	worker->current_func(work);
+	f(work);
 	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
@@ -2275,8 +2252,7 @@ __acquires(&gcwq->lock)
 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
 		       "     last function: %pf\n",
-		       current->comm, preempt_count(), task_pid_nr(current),
-		       worker->current_func);
+		       current->comm, preempt_count(), task_pid_nr(current), f);
 		debug_show_held_locks(current);
 		dump_stack();
 	}
@@ -2290,7 +2266,6 @@ __acquires(&gcwq->lock)
 	/* we're done with it, release */
 	hlist_del_init(&worker->hentry);
 	worker->current_work = NULL;
-	worker->current_func = NULL;
 	worker->current_cwq = NULL;
 	cwq_dec_nr_in_flight(cwq, work_color);
 }
@@ -2939,7 +2914,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 
 	/* tell other tasks trying to grab @work to back off */
 	mark_work_canceling(work);
-	local_unlock_irqrestore(pendingb_lock, flags);
+	local_irq_restore(flags);
 
 	flush_work(work);
 	clear_work_data(work);
@@ -2984,11 +2959,11 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
 */
 bool flush_delayed_work(struct delayed_work *dwork)
 {
-	local_lock_irq(pendingb_lock);
+	local_irq_disable();
 	if (del_timer_sync(&dwork->timer))
 		__queue_work(dwork->cpu,
 			     get_work_cwq(&dwork->work)->wq, &dwork->work);
-	local_unlock_irq(pendingb_lock);
+	local_irq_enable();
 	return flush_work(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work);
@@ -3018,7 +2993,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
 		return false;
 
 	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
-	local_unlock_irqrestore(pendingb_lock, flags);
+	local_irq_restore(flags);
 	return ret;
 }
 EXPORT_SYMBOL(cancel_delayed_work);
diff --git a/lib/Kconfig b/lib/Kconfig
index 7669d65..75cdb77 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -315,7 +315,6 @@ config CHECK_SIGNATURE
 
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
-	depends on !PREEMPT_RT_FULL
 	help
 	  Use dynamic allocation for cpumask_var_t, instead of putting
 	  them on the stack. This is a bit more expensive, but avoids
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 24b60ba..67604e5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -164,7 +164,7 @@ config DEBUG_KERNEL
 
 config DEBUG_SHIRQ
 	bool "Debug shared IRQ handlers"
-	depends on DEBUG_KERNEL && GENERIC_HARDIRQS && !PREEMPT_RT_BASE
+	depends on DEBUG_KERNEL && GENERIC_HARDIRQS
 	help
 	  Enable this to generate a spurious interrupt as soon as a shared
 	  interrupt handler is registered, and just before one is deregistered.
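The workqueue hunks above drop worker->current_func and return to matching a busy worker by the work item's address alone; the kernel-doc text they delete describes the recycled-item hazard that the address+function pair match was added to close. A minimal user-space sketch of the two matching rules follows; every name in it is illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>

typedef void (*work_func_t)(void *);

struct busy_slot {
	void *current_work;        /* address of the item being executed */
	work_func_t current_func;  /* callback it was started with */
};

/* Address-only match: a freed allocation recycled at the same address
 * is mistaken for the still-running execution. */
static bool match_by_addr(const struct busy_slot *s, const void *work)
{
	return s->current_work == work;
}

/* Address + function match: a recycled item only collides if it also
 * reuses the same callback, which removes most false positives. */
static bool match_by_addr_and_func(const struct busy_slot *s,
				   const void *work, work_func_t fn)
{
	return s->current_work == work && s->current_func == fn;
}

static void fn_a(void *p) { (void)p; }
static void fn_b(void *p) { (void)p; }

int main(void)
{
	char buf[16]; /* stands in for a work item that is freed and reused */
	struct busy_slot slot = { buf, fn_a };

	/* same address, different callback: a recycled item */
	printf("addr-only match: %d\n", match_by_addr(&slot, buf));
	printf("addr+func match: %d\n",
	       match_by_addr_and_func(&slot, buf, fn_b));
	return 0;
}

Compiled stand-alone, the address-only rule reports a match for the recycled allocation even though it carries a different callback; the pair rule does not, which is the false positive the reverted-away check was guarding against.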
diff --git a/lib/Makefile b/lib/Makefile index 7e961f1..02ed6c0 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -38,11 +38,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o - -ifneq ($(CONFIG_PREEMPT_RT_FULL),y) lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o -endif lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) diff --git a/lib/debugobjects.c b/lib/debugobjects.c index cf5f02f..d11808c 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -309,10 +309,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) struct debug_obj *obj; unsigned long flags; -#ifdef CONFIG_PREEMPT_RT_FULL - if (preempt_count() == 0 && !irqs_disabled()) -#endif - fill_pool(); + fill_pool(); db = get_bucket((unsigned long) addr); diff --git a/lib/idr.c b/lib/idr.c index ca5aa00..6482390 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -625,14 +625,7 @@ void *idr_get_next(struct idr *idp, int *nextidp) return p; } - /* - * Proceed to the next layer at the current level. Unlike - * idr_for_each(), @id isn't guaranteed to be aligned to - * layer boundary at this point and adding 1 << n may - * incorrectly skip IDs. Make sure we jump to the - * beginning of the next layer using round_up(). - */ - id = round_up(id + 1, 1 << n); + id += 1 << n; while (n < fls(id)) { n += IDR_BITS; p = *--paa; diff --git a/lib/kobject.c b/lib/kobject.c index a654866..e07ee1f 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -529,13 +529,6 @@ struct kobject *kobject_get(struct kobject *kobj) return kobj; } -static struct kobject *kobject_get_unless_zero(struct kobject *kobj) -{ - if (!kref_get_unless_zero(&kobj->kref)) - kobj = NULL; - return kobj; -} - /* * kobject_cleanup - free kobject resources. * @kobj: object to cleanup @@ -758,7 +751,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name) list_for_each_entry(k, &kset->list, entry) { if (kobject_name(k) && !strcmp(kobject_name(k), name)) { - ret = kobject_get_unless_zero(k); + ret = kobject_get(k); break; } } diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 23b8564..7aae0f2 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -47,10 +47,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose); * Normal standalone locks, for the circular and irq-context * dependency tests: */ -static DEFINE_RAW_SPINLOCK(lock_A); -static DEFINE_RAW_SPINLOCK(lock_B); -static DEFINE_RAW_SPINLOCK(lock_C); -static DEFINE_RAW_SPINLOCK(lock_D); +static DEFINE_SPINLOCK(lock_A); +static DEFINE_SPINLOCK(lock_B); +static DEFINE_SPINLOCK(lock_C); +static DEFINE_SPINLOCK(lock_D); static DEFINE_RWLOCK(rwlock_A); static DEFINE_RWLOCK(rwlock_B); @@ -73,12 +73,12 @@ static DECLARE_RWSEM(rwsem_D); * but X* and Y* are different classes. 
We do this so that * we do not trigger a real lockup: */ -static DEFINE_RAW_SPINLOCK(lock_X1); -static DEFINE_RAW_SPINLOCK(lock_X2); -static DEFINE_RAW_SPINLOCK(lock_Y1); -static DEFINE_RAW_SPINLOCK(lock_Y2); -static DEFINE_RAW_SPINLOCK(lock_Z1); -static DEFINE_RAW_SPINLOCK(lock_Z2); +static DEFINE_SPINLOCK(lock_X1); +static DEFINE_SPINLOCK(lock_X2); +static DEFINE_SPINLOCK(lock_Y1); +static DEFINE_SPINLOCK(lock_Y2); +static DEFINE_SPINLOCK(lock_Z1); +static DEFINE_SPINLOCK(lock_Z2); static DEFINE_RWLOCK(rwlock_X1); static DEFINE_RWLOCK(rwlock_X2); @@ -107,10 +107,10 @@ static DECLARE_RWSEM(rwsem_Z2); */ #define INIT_CLASS_FUNC(class) \ static noinline void \ -init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \ - struct mutex *mutex, struct rw_semaphore *rwsem)\ +init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \ + struct rw_semaphore *rwsem) \ { \ - raw_spin_lock_init(lock); \ + spin_lock_init(lock); \ rwlock_init(rwlock); \ mutex_init(mutex); \ init_rwsem(rwsem); \ @@ -168,10 +168,10 @@ static void init_shared_classes(void) * Shortcuts for lock/unlock API variants, to keep * the testcases compact: */ -#define L(x) raw_spin_lock(&lock_##x) -#define U(x) raw_spin_unlock(&lock_##x) +#define L(x) spin_lock(&lock_##x) +#define U(x) spin_unlock(&lock_##x) #define LU(x) L(x); U(x) -#define SI(x) raw_spin_lock_init(&lock_##x) +#define SI(x) spin_lock_init(&lock_##x) #define WL(x) write_lock(&rwlock_##x) #define WU(x) write_unlock(&rwlock_##x) @@ -911,7 +911,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) #define I2(x) \ do { \ - raw_spin_lock_init(&lock_##x); \ + spin_lock_init(&lock_##x); \ rwlock_init(&rwlock_##x); \ mutex_init(&mutex_##x); \ init_rwsem(&rwsem_##x); \ @@ -1175,7 +1175,6 @@ void locking_selftest(void) printk(" --------------------------------------------------------------------------\n"); -#ifndef CONFIG_PREEMPT_RT_FULL /* * irq-context testcases: */ @@ -1188,28 +1187,6 @@ void locking_selftest(void) DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); -#else - /* On -rt, we only do hardirq context test for raw spinlock */ - DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); - DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); - - DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); - DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); - - DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); - - DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); - DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); -#endif if (unexpected_testcase_failures) { printk("-----------------------------------------------------------------\n"); diff --git a/lib/oid_registry.c b/lib/oid_registry.c index 318f382..d8de11f 100644 --- 
a/lib/oid_registry.c +++ b/lib/oid_registry.c @@ -9,7 +9,6 @@ * 2 of the Licence, or (at your option) any later version. */ -#include #include #include #include @@ -17,10 +16,6 @@ #include #include "oid_registry_data.c" -MODULE_DESCRIPTION("OID Registry"); -MODULE_AUTHOR("Red Hat, Inc."); -MODULE_LICENSE("GPL"); - /** * look_up_OID - Find an OID registration for the specified data * @data: Binary representation of the OID diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c index 2db0f42..652a8ee 100644 --- a/lib/percpu-rwsem.c +++ b/lib/percpu-rwsem.c @@ -84,12 +84,8 @@ void percpu_down_read(struct percpu_rw_semaphore *brw) down_read(&brw->rw_sem); atomic_inc(&brw->slow_read_ctr); -#ifdef CONFIG_PREEMPT_RT_FULL - up_read(&brw->rw_sem); -#else /* avoid up_read()->rwsem_release() */ __up_read(&brw->rw_sem); -#endif } void percpu_up_read(struct percpu_rw_semaphore *brw) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 63bac7d..e796429 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -215,13 +215,12 @@ radix_tree_node_alloc(struct radix_tree_root *root) * succeed in getting a node here (and never reach * kmem_cache_alloc) */ - rtp = &get_cpu_var(radix_tree_preloads); + rtp = &__get_cpu_var(radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes[rtp->nr - 1]; rtp->nodes[rtp->nr - 1] = NULL; rtp->nr--; } - put_cpu_var(radix_tree_preloads); } if (ret == NULL) ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); @@ -256,7 +255,6 @@ radix_tree_node_free(struct radix_tree_node *node) call_rcu(&node->rcu_head, radix_tree_node_rcu_free); } -#ifndef CONFIG_PREEMPT_RT_FULL /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On @@ -291,7 +289,6 @@ out: return ret; } EXPORT_SYMBOL(radix_tree_preload); -#endif /* * Return the maximum key which can be store into a diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 43603ee..7874b01 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -499,7 +499,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { - WARN_ON_ONCE(!pagefault_disabled()); + WARN_ON_ONCE(preemptible()); kunmap_atomic(miter->addr); } else kunmap(miter->page); @@ -539,7 +539,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, sg_miter_start(&miter, sgl, nents, sg_flags); - local_irq_save_nort(flags); + local_irq_save(flags); while (sg_miter_next(&miter) && offset < buflen) { unsigned int len; @@ -556,7 +556,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, sg_miter_stop(&miter); - local_irq_restore_nort(flags); + local_irq_restore(flags); return offset; } diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index dbb1570..4c0d0e5 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void) if (!printk_ratelimit()) goto out_enable; - printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " - "code: %s/%d\n", preempt_count() - 1, - __migrate_disabled(current), current->comm, current->pid); + printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " + "code: %s/%d\n", + preempt_count() - 1, current->comm, current->pid); print_symbol("caller is %s\n", (long)__builtin_return_address(0)); dump_stack(); diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 9497033..0374a59 100644 --- a/lib/spinlock_debug.c +++ 
b/lib/spinlock_debug.c @@ -31,7 +31,6 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, EXPORT_SYMBOL(__raw_spin_lock_init); -#ifndef CONFIG_PREEMPT_RT_FULL void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key) { @@ -49,7 +48,6 @@ void __rwlock_init(rwlock_t *lock, const char *name, } EXPORT_SYMBOL(__rwlock_init); -#endif static void spin_dump(raw_spinlock_t *lock, const char *msg) { @@ -161,7 +159,6 @@ void do_raw_spin_unlock(raw_spinlock_t *lock) arch_spin_unlock(&lock->raw_lock); } -#ifndef CONFIG_PREEMPT_RT_FULL static void rwlock_bug(rwlock_t *lock, const char *msg) { if (!debug_locks_off()) @@ -303,5 +300,3 @@ void do_raw_write_unlock(rwlock_t *lock) debug_write_unlock(lock); arch_write_unlock(&lock->raw_lock); } - -#endif diff --git a/localversion-rt b/localversion-rt deleted file mode 100644 index 22746d6..0000000 --- a/localversion-rt +++ /dev/null @@ -1 +0,0 @@ --rt9 diff --git a/mm/Kconfig b/mm/Kconfig index c6ceefc..278e3ab 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -353,7 +353,7 @@ config NOMMU_INITIAL_TRIM_EXCESS config TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" - depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL + depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE select COMPACTION help Transparent Hugepages allows the kernel to use huge pages and diff --git a/mm/bounce.c b/mm/bounce.c index 1e78ef7..0420867 100644 --- a/mm/bounce.c +++ b/mm/bounce.c @@ -51,11 +51,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) unsigned long flags; unsigned char *vto; - local_irq_save_nort(flags); + local_irq_save(flags); vto = kmap_atomic(to->bv_page); memcpy(vto + to->bv_offset, vfrom, to->bv_len); kunmap_atomic(vto); - local_irq_restore_nort(flags); + local_irq_restore(flags); } #else /* CONFIG_HIGHMEM */ diff --git a/mm/fadvise.c b/mm/fadvise.c index 909ec55..a47f0f5 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -17,7 +17,6 @@ #include #include #include -#include #include @@ -121,22 +120,9 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice) start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; end_index = (endbyte >> PAGE_CACHE_SHIFT); - if (end_index >= start_index) { - unsigned long count = invalidate_mapping_pages(mapping, - start_index, end_index); - - /* - * If fewer pages were invalidated than expected then - * it is possible that some of the pages were on - * a per-cpu pagevec for a remote CPU. Drain all - * pagevecs and try again. - */ - if (count < (end_index - start_index + 1)) { - lru_add_drain_all(); - invalidate_mapping_pages(mapping, start_index, + if (end_index >= start_index) + invalidate_mapping_pages(mapping, start_index, end_index); - } - } break; default: ret = -EINVAL; diff --git a/mm/filemap.c b/mm/filemap.c index cb81968..83efee7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1955,7 +1955,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, char *kaddr; size_t copied; - BUG_ON(!pagefault_disabled()); + BUG_ON(!in_atomic()); kaddr = kmap_atomic(page); if (likely(i->nr_segs == 1)) { int left; diff --git a/mm/highmem.c b/mm/highmem.c index b1c7d43..b32b70c 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -29,11 +29,10 @@ #include #include -#ifndef CONFIG_PREEMPT_RT_FULL + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) DEFINE_PER_CPU(int, __kmap_atomic_idx); #endif -#endif /* * Virtual_count is not a pure "count". 
@@ -48,9 +47,8 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx); unsigned long totalhigh_pages __read_mostly; EXPORT_SYMBOL(totalhigh_pages); -#ifndef CONFIG_PREEMPT_RT_FULL + EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); -#endif unsigned int nr_free_highpages (void) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 88eb939..546db81 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2127,12 +2127,8 @@ int hugetlb_report_node_meminfo(int nid, char *buf) /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ unsigned long hugetlb_total_pages(void) { - struct hstate *h; - unsigned long nr_total_pages = 0; - - for_each_hstate(h) - nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); - return nr_total_pages; + struct hstate *h = &default_hstate; + return h->nr_huge_pages * pages_per_huge_page(h); } static int hugetlb_acct_memory(struct hstate *h, long delta) @@ -2965,17 +2961,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, break; } - /* - * We need call hugetlb_fault for both hugepages under migration - * (in which case hugetlb_fault waits for the migration,) and - * hwpoisoned hugepages (in which case we need to prevent the - * caller from accessing to them.) In order to do this, we use - * here is_swap_pte instead of is_hugetlb_entry_migration and - * is_hugetlb_entry_hwpoisoned. This is because it simply covers - * both cases, and because we can't follow correct pages - * directly from any kind of swap entries. - */ - if (absent || is_swap_pte(huge_ptep_get(pte)) || + if (absent || ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { int ret; diff --git a/mm/memory.c b/mm/memory.c index 23b82ee..bb1369f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -212,7 +212,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) tlb->mm = mm; tlb->fullmm = fullmm; - tlb->need_flush_all = 0; tlb->start = -1UL; tlb->end = 0; tlb->need_flush = 0; @@ -2358,53 +2357,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(remap_pfn_range); -/** - * vm_iomap_memory - remap memory to userspace - * @vma: user vma to map to - * @start: start of area - * @len: size of area - * - * This is a simplified io_remap_pfn_range() for common driver use. The - * driver just needs to give us the physical memory range to be mapped, - * we'll figure out the rest from the vma information. - * - * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get - * whatever write-combining details or similar. - */ -int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) -{ - unsigned long vm_len, pfn, pages; - - /* Check that the physical memory area passed in looks valid */ - if (start + len < start) - return -EINVAL; - /* - * You *really* shouldn't map things that aren't page-aligned, - * but we've historically allowed it because IO memory might - * just have smaller alignment. - */ - len += start & ~PAGE_MASK; - pfn = start >> PAGE_SHIFT; - pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; - if (pfn + pages < pfn) - return -EINVAL; - - /* We start the mapping 'vm_pgoff' pages into the area */ - if (vma->vm_pgoff > pages) - return -EINVAL; - pfn += vma->vm_pgoff; - pages -= vma->vm_pgoff; - - /* Can we fit all of the mapping? 
*/ - vm_len = vma->vm_end - vma->vm_start; - if (vm_len >> PAGE_SHIFT > pages) - return -EINVAL; - - /* Ok, let it rip */ - return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); -} -EXPORT_SYMBOL(vm_iomap_memory); - static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) @@ -3717,32 +3669,6 @@ unlock: return 0; } -#ifdef CONFIG_PREEMPT_RT_FULL -void pagefault_disable(void) -{ - migrate_disable(); - current->pagefault_disabled++; - /* - * make sure to have issued the store before a pagefault - * can hit. - */ - barrier(); -} -EXPORT_SYMBOL(pagefault_disable); - -void pagefault_enable(void) -{ - /* - * make sure to issue those last loads/stores before enabling - * the pagefault handler again. - */ - barrier(); - current->pagefault_disabled--; - migrate_enable(); -} -EXPORT_SYMBOL(pagefault_enable); -#endif - /* * By the time we get here, we already hold the mm semaphore */ @@ -4314,35 +4240,3 @@ void copy_user_huge_page(struct page *dst, struct page *src, } } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ - -#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0) -/* - * Heinous hack, relies on the caller doing something like: - * - * pte = alloc_pages(PGALLOC_GFP, 0); - * if (pte) - * pgtable_page_ctor(pte); - * return pte; - * - * This ensures we release the page and return NULL when the - * lock allocation fails. - */ -struct page *pte_lock_init(struct page *page) -{ - page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); - if (page->ptl) { - spin_lock_init(__pte_lockptr(page)); - } else { - __free_page(page); - page = NULL; - } - return page; -} - -void pte_lock_deinit(struct page *page) -{ - kfree(page->ptl); - page->mapping = NULL; -} - -#endif diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3df6d12..e2df1c1 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2386,8 +2386,8 @@ restart: *mpol_new = *n->policy; atomic_set(&mpol_new->refcnt, 1); sp_node_init(n_new, n->end, end, mpol_new); - n->end = start; sp_insert(sp, n_new); + n->end = start; n_new = NULL; mpol_new = NULL; break; diff --git a/mm/mmap.c b/mm/mmap.c index e6beac4..d1e4124 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1296,20 +1296,15 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, file = fget(fd); if (!file) goto out; - if (is_file_hugepages(file)) - len = ALIGN(len, huge_page_size(hstate_file(file))); } else if (flags & MAP_HUGETLB) { struct user_struct *user = NULL; - - len = ALIGN(len, huge_page_size(hstate_sizelog( - (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK))); /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called * A dummy user value is used because we are not locking * memory so no accounting is necessary */ - file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, + file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len, VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); @@ -1927,7 +1922,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) /* Check the cache first. */ /* (Cache hit rate is typically around 35%.) 
*/ - vma = ACCESS_ONCE(mm->mmap_cache); + vma = mm->mmap_cache; if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { struct rb_node *rb_node; @@ -2174,28 +2169,9 @@ int expand_downwards(struct vm_area_struct *vma, return error; } -/* - * Note how expand_stack() refuses to expand the stack all the way to - * abut the next virtual mapping, *unless* that mapping itself is also - * a stack mapping. We want to leave room for a guard page, after all - * (the guard page itself is not added here, that is done by the - * actual page faulting logic) - * - * This matches the behavior of the guard page logic (see mm/memory.c: - * check_stack_guard_page()), which only allows the guard page to be - * removed under these circumstances. - */ #ifdef CONFIG_STACK_GROWSUP int expand_stack(struct vm_area_struct *vma, unsigned long address) { - struct vm_area_struct *next; - - address &= PAGE_MASK; - next = vma->vm_next; - if (next && next->vm_start == address + PAGE_SIZE) { - if (!(next->vm_flags & VM_GROWSUP)) - return -ENOMEM; - } return expand_upwards(vma, address); } @@ -2218,14 +2194,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) #else int expand_stack(struct vm_area_struct *vma, unsigned long address) { - struct vm_area_struct *prev; - - address &= PAGE_MASK; - prev = vma->vm_prev; - if (prev && prev->vm_end == address) { - if (!(prev->vm_flags & VM_GROWSDOWN)) - return -ENOMEM; - } return expand_downwards(vma, address); } @@ -2294,7 +2262,7 @@ static void unmap_region(struct mm_struct *mm, update_hiwater_rss(mm); unmap_vmas(&tlb, vma, start, end); free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, - next ? next->vm_start : USER_PGTABLES_CEILING); + next ? next->vm_start : 0); tlb_finish_mmu(&tlb, start, end); } @@ -2672,7 +2640,7 @@ void exit_mmap(struct mm_struct *mm) /* Use -1 here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, vma, 0, -1); - free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); + free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); tlb_finish_mmu(&tlb, 0, -1); /* diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 1385e48..3dcfaf4 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -26,7 +26,6 @@ void use_mm(struct mm_struct *mm) struct task_struct *tsk = current; task_lock(tsk); - preempt_disable_rt(); active_mm = tsk->active_mm; if (active_mm != mm) { atomic_inc(&mm->mm_count); @@ -34,7 +33,6 @@ void use_mm(struct mm_struct *mm) } tsk->mm = mm; switch_mm(active_mm, mm, tsk); - preempt_enable_rt(); task_unlock(tsk); if (active_mm != mm) diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index f5c3d96..8a5ac8c 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -37,51 +37,49 @@ static struct srcu_struct srcu; void __mmu_notifier_release(struct mm_struct *mm) { struct mmu_notifier *mn; + struct hlist_node *n; int id; /* - * srcu_read_lock() here will block synchronize_srcu() in - * mmu_notifier_unregister() until all registered - * ->release() callouts this function makes have - * returned. + * SRCU here will block mmu_notifier_unregister until + * ->release returns. */ id = srcu_read_lock(&srcu); + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) + /* + * if ->release runs before mmu_notifier_unregister it + * must be handled as it's the only way for the driver + * to flush all existing sptes and stop the driver + * from establishing any more sptes before all the + * pages in the mm are freed. 
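 * (What makes this ordering safe is plain SRCU pairing: every callout
 *  runs inside a read-side section, so a concurrent unregister cannot
 *  get past synchronize_srcu() while a callout is in flight.
 *  Schematically -- a restatement of the surrounding code, not new
 *  logic:
 *
 *	id = srcu_read_lock(&srcu);
 *	if (mn->ops->release)
 *		mn->ops->release(mn, mm);
 *	srcu_read_unlock(&srcu, id);
 *	...
 *	synchronize_srcu(&srcu);	// returns only after the unlock
 *
 *  The same pairing protects mmu_notifier_unregister() further down.)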
+ */ + if (mn->ops->release) + mn->ops->release(mn, mm); + srcu_read_unlock(&srcu, id); + spin_lock(&mm->mmu_notifier_mm->lock); while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { mn = hlist_entry(mm->mmu_notifier_mm->list.first, struct mmu_notifier, hlist); - /* - * Unlink. This will prevent mmu_notifier_unregister() - * from also making the ->release() callout. + * We arrived before mmu_notifier_unregister so + * mmu_notifier_unregister will do nothing other than + * to wait ->release to finish and + * mmu_notifier_unregister to return. */ hlist_del_init_rcu(&mn->hlist); - spin_unlock(&mm->mmu_notifier_mm->lock); - - /* - * Clear sptes. (see 'release' description in mmu_notifier.h) - */ - if (mn->ops->release) - mn->ops->release(mn, mm); - - spin_lock(&mm->mmu_notifier_mm->lock); } spin_unlock(&mm->mmu_notifier_mm->lock); /* - * All callouts to ->release() which we have done are complete. - * Allow synchronize_srcu() in mmu_notifier_unregister() to complete - */ - srcu_read_unlock(&srcu, id); - - /* - * mmu_notifier_unregister() may have unlinked a notifier and may - * still be calling out to it. Additionally, other notifiers - * may have been active via vmtruncate() et. al. Block here - * to ensure that all notifier callouts for this mm have been - * completed and the sptes are really cleaned up before returning - * to exit_mmap(). + * synchronize_srcu here prevents mmu_notifier_release to + * return to exit_mmap (which would proceed freeing all pages + * in the mm) until the ->release method returns, if it was + * invoked by mmu_notifier_unregister. + * + * The mmu_notifier_mm can't go away from under us because one + * mm_count is hold by exit_mmap. */ synchronize_srcu(&srcu); } @@ -296,31 +294,31 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) { BUG_ON(atomic_read(&mm->mm_count) <= 0); - spin_lock(&mm->mmu_notifier_mm->lock); if (!hlist_unhashed(&mn->hlist)) { + /* + * SRCU here will force exit_mmap to wait ->release to finish + * before freeing the pages. + */ int id; + id = srcu_read_lock(&srcu); /* - * Ensure we synchronize up with __mmu_notifier_release(). + * exit_mmap will block in mmu_notifier_release to + * guarantee ->release is called before freeing the + * pages. */ - id = srcu_read_lock(&srcu); - - hlist_del_rcu(&mn->hlist); - spin_unlock(&mm->mmu_notifier_mm->lock); - if (mn->ops->release) mn->ops->release(mn, mm); - - /* - * Allow __mmu_notifier_release() to complete. - */ srcu_read_unlock(&srcu, id); - } else + + spin_lock(&mm->mmu_notifier_mm->lock); + hlist_del_rcu(&mn->hlist); spin_unlock(&mm->mmu_notifier_mm->lock); + } /* - * Wait for any running method to finish, including ->release() if it - * was run by __mmu_notifier_release() instead of us. + * Wait any running method to finish, of course including + * ->release if it was run by mmu_notifier_relase instead of us. 
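 * ("mmu_notifier_relase" in this restored v3.8 text means
 *  __mmu_notifier_release(), i.e. the exit_mmap() path above.  The
 *  same read-side discipline covers the other notifier callouts too;
 *  paraphrasing the invalidate hook as one example:
 *
 *	id = srcu_read_lock(&srcu);
 *	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
 *		if (mn->ops->invalidate_page)
 *			mn->ops->invalidate_page(mn, mm, address);
 *	srcu_read_unlock(&srcu, id);
 *
 *  so the synchronize_srcu() just below flushes those as well before
 *  the notifier structure may be freed.)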
*/ synchronize_srcu(&srcu); diff --git a/mm/nommu.c b/mm/nommu.c index bbe1f3f..79c3cac 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -819,7 +819,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) struct vm_area_struct *vma; /* check the cache first */ - vma = ACCESS_ONCE(mm->mmap_cache); + vma = mm->mmap_cache; if (vma && vma->vm_start <= addr && vma->vm_end > addr) return vma; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1734913..6a83cd3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -58,7 +58,6 @@ #include #include #include -#include #include #include @@ -220,18 +219,6 @@ EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif -static DEFINE_LOCAL_IRQ_LOCK(pa_lock); - -#ifdef CONFIG_PREEMPT_RT_BASE -# define cpu_lock_irqsave(cpu, flags) \ - local_lock_irqsave_on(pa_lock, flags, cpu) -# define cpu_unlock_irqrestore(cpu, flags) \ - local_unlock_irqrestore_on(pa_lock, flags, cpu) -#else -# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) -# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) -#endif - int page_group_by_mobility_disabled __read_mostly; void set_pageblock_migratetype(struct page *page, int migratetype) @@ -625,7 +612,7 @@ static inline int free_pages_check(struct page *page) } /* - * Frees a number of pages which have been collected from the pcp lists. + * Frees a number of pages from the PCP lists * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. * @@ -636,50 +623,16 @@ static inline int free_pages_check(struct page *page) * pinned" detection logic. */ static void free_pcppages_bulk(struct zone *zone, int count, - struct list_head *list) + struct per_cpu_pages *pcp) { + int migratetype = 0; + int batch_free = 0; int to_free = count; - unsigned long flags; - spin_lock_irqsave(&zone->lock, flags); + spin_lock(&zone->lock); zone->all_unreclaimable = 0; zone->pages_scanned = 0; - while (!list_empty(list)) { - struct page *page = list_first_entry(list, struct page, lru); - int mt; /* migratetype of the to-be-freed page */ - - /* must delete as __free_one_page list manipulates */ - list_del(&page->lru); - - mt = get_freepage_migratetype(page); - /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ - __free_one_page(page, zone, 0, mt); - trace_mm_page_pcpu_drain(page, 0, mt); - if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) { - __mod_zone_page_state(zone, NR_FREE_PAGES, 1); - if (is_migrate_cma(mt)) - __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); - } - - to_free--; - } - WARN_ON(to_free != 0); - spin_unlock_irqrestore(&zone->lock, flags); -} - -/* - * Moves a number of pages from the PCP lists to free list which - * is freed outside of the locked region. - * - * Assumes all pages on list are in same zone, and of same order. - * count is the number of pages to free. - */ -static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, - struct list_head *dst) -{ - int migratetype = 0, batch_free = 0; - while (to_free) { struct page *page; struct list_head *list; @@ -695,7 +648,7 @@ static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, batch_free++; if (++migratetype == MIGRATE_PCPTYPES) migratetype = 0; - list = &src->lists[migratetype]; + list = &pcp->lists[migratetype]; } while (list_empty(list)); /* This is the only non-empty list. Free them all. 
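 * (A worked example of the round-robin above: with MIGRATE_PCPTYPES
 *  equal to 3, batch_free grows by one for each list inspected, so
 *  the loop frees 1 page from list 0, then 2 from list 1, then 3 from
 *  list 2, then 1 from list 0 again, and so on; one busy migratetype
 *  cannot monopolise the bulk free.  Both the RT split -- isolate
 *  under the lock, free afterwards -- and the plain v3.8 form
 *  restored here preserve that pattern.)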
*/ @@ -703,26 +656,36 @@ static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, batch_free = to_free; do { - page = list_last_entry(list, struct page, lru); + int mt; /* migratetype of the to-be-freed page */ + + page = list_entry(list->prev, struct page, lru); + /* must delete as __free_one_page list manipulates */ list_del(&page->lru); - list_add(&page->lru, dst); + mt = get_freepage_migratetype(page); + /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ + __free_one_page(page, zone, 0, mt); + trace_mm_page_pcpu_drain(page, 0, mt); + if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) { + __mod_zone_page_state(zone, NR_FREE_PAGES, 1); + if (is_migrate_cma(mt)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); + } } while (--to_free && --batch_free && !list_empty(list)); } + spin_unlock(&zone->lock); } static void free_one_page(struct zone *zone, struct page *page, int order, int migratetype) { - unsigned long flags; - - spin_lock_irqsave(&zone->lock, flags); + spin_lock(&zone->lock); zone->all_unreclaimable = 0; zone->pages_scanned = 0; __free_one_page(page, zone, order, migratetype); if (unlikely(migratetype != MIGRATE_ISOLATE)) __mod_zone_freepage_state(zone, 1 << order, migratetype); - spin_unlock_irqrestore(&zone->lock, flags); + spin_unlock(&zone->lock); } static bool free_pages_prepare(struct page *page, unsigned int order) @@ -759,12 +722,12 @@ static void __free_pages_ok(struct page *page, unsigned int order) if (!free_pages_prepare(page, order)) return; - local_lock_irqsave(pa_lock, flags); + local_irq_save(flags); __count_vm_events(PGFREE, 1 << order); migratetype = get_pageblock_migratetype(page); set_freepage_migratetype(page, migratetype); free_one_page(page_zone(page), page, order, migratetype); - local_unlock_irqrestore(pa_lock, flags); + local_irq_restore(flags); } /* @@ -1204,20 +1167,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { unsigned long flags; - LIST_HEAD(dst); int to_drain; - local_lock_irqsave(pa_lock, flags); + local_irq_save(flags); if (pcp->count >= pcp->batch) to_drain = pcp->batch; else to_drain = pcp->count; if (to_drain > 0) { - isolate_pcp_pages(to_drain, pcp, &dst); + free_pcppages_bulk(zone, to_drain, pcp); pcp->count -= to_drain; } - local_unlock_irqrestore(pa_lock, flags); - free_pcppages_bulk(zone, to_drain, &dst); + local_irq_restore(flags); } #endif @@ -1236,21 +1197,16 @@ static void drain_pages(unsigned int cpu) for_each_populated_zone(zone) { struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; - LIST_HEAD(dst); - int count; - cpu_lock_irqsave(cpu, flags); + local_irq_save(flags); pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; - count = pcp->count; - if (count) { - isolate_pcp_pages(count, pcp, &dst); + if (pcp->count) { + free_pcppages_bulk(zone, pcp->count, pcp); pcp->count = 0; } - cpu_unlock_irqrestore(cpu, flags); - if (count) - free_pcppages_bulk(zone, count, &dst); + local_irq_restore(flags); } } @@ -1303,12 +1259,7 @@ void drain_all_pages(void) else cpumask_clear_cpu(cpu, &cpus_with_pcps); } -#ifndef CONFIG_PREEMPT_RT_BASE on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1); -#else - for_each_cpu(cpu, &cpus_with_pcps) - drain_pages(cpu); -#endif } #ifdef CONFIG_HIBERNATION @@ -1363,7 +1314,7 @@ void free_hot_cold_page(struct page *page, int cold) migratetype = get_pageblock_migratetype(page); set_freepage_migratetype(page, migratetype); - local_lock_irqsave(pa_lock, flags); + 
local_irq_save(flags); __count_vm_event(PGFREE); /* @@ -1388,19 +1339,12 @@ void free_hot_cold_page(struct page *page, int cold) list_add(&page->lru, &pcp->lists[migratetype]); pcp->count++; if (pcp->count >= pcp->high) { - LIST_HEAD(dst); - int count; - - isolate_pcp_pages(pcp->batch, pcp, &dst); + free_pcppages_bulk(zone, pcp->batch, pcp); pcp->count -= pcp->batch; - count = pcp->batch; - local_unlock_irqrestore(pa_lock, flags); - free_pcppages_bulk(zone, count, &dst); - return; } out: - local_unlock_irqrestore(pa_lock, flags); + local_irq_restore(flags); } /* @@ -1529,7 +1473,7 @@ again: struct per_cpu_pages *pcp; struct list_head *list; - local_lock_irqsave(pa_lock, flags); + local_irq_save(flags); pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; if (list_empty(list)) { @@ -1561,20 +1505,18 @@ again: */ WARN_ON_ONCE(order > 1); } - local_spin_lock_irqsave(pa_lock, &zone->lock, flags); + spin_lock_irqsave(&zone->lock, flags); page = __rmqueue(zone, order, migratetype); - if (!page) { - spin_unlock(&zone->lock); + spin_unlock(&zone->lock); + if (!page) goto failed; - } __mod_zone_freepage_state(zone, -(1 << order), get_pageblock_migratetype(page)); - spin_unlock(&zone->lock); } __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); - local_unlock_irqrestore(pa_lock, flags); + local_irq_restore(flags); VM_BUG_ON(bad_range(zone, page)); if (prep_new_page(page, order, gfp_flags)) @@ -1582,7 +1524,7 @@ again: return page; failed: - local_unlock_irqrestore(pa_lock, flags); + local_irq_restore(flags); return NULL; } @@ -2204,8 +2146,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct page *page; /* Page migration frees to the PCP lists but we want merging */ - drain_pages(get_cpu_light()); - put_cpu_light(); + drain_pages(get_cpu()); + put_cpu(); page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, @@ -5191,7 +5133,6 @@ static int page_alloc_cpu_notify(struct notifier_block *self, void __init page_alloc_init(void) { hotcpu_notifier(page_alloc_cpu_notify, 0); - local_irq_lock_init(pa_lock); } /* @@ -6015,23 +5956,21 @@ static int __meminit __zone_pcp_update(void *data) { struct zone *zone = data; int cpu; - unsigned long flags; + unsigned long batch = zone_batchsize(zone), flags; for_each_possible_cpu(cpu) { struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; - LIST_HEAD(dst); pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; - cpu_lock_irqsave(cpu, flags); - if (pcp->count > 0) { - isolate_pcp_pages(pcp->count, pcp, &dst); - free_pcppages_bulk(zone, pcp->count, &dst); - } + local_irq_save(flags); + if (pcp->count > 0) + free_pcppages_bulk(zone, pcp->count, pcp); drain_zonestat(zone, pset); - cpu_unlock_irqrestore(cpu, flags); + setup_pageset(pset, batch); + local_irq_restore(flags); } return 0; } @@ -6049,7 +5988,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ - local_lock_irqsave(pa_lock, flags); + local_irq_save(flags); if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); @@ -6058,7 +5997,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } - local_unlock_irqrestore(pa_lock, flags); + local_irq_restore(flags); } #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 98caeee..6d757e3a 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -13,14 +13,6 @@ static 
unsigned long total_usage; -static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages) -{ -#ifdef CONFIG_PREEMPT_RT_BASE - for (; nr_pages; nr_pages--, pc++) - spin_lock_init(&pc->pcg_lock); -#endif -} - #if !defined(CONFIG_SPARSEMEM) @@ -68,7 +60,6 @@ static int __init alloc_node_page_cgroup(int nid) return -ENOMEM; NODE_DATA(nid)->node_page_cgroup = base; total_usage += table_size; - page_cgroup_lock_init(base, nr_pages); return 0; } @@ -159,8 +150,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) return -ENOMEM; } - page_cgroup_lock_init(base, PAGES_PER_SECTION); - /* * The passed "pfn" may not be aligned to SECTION. For the calculation * we need to apply a mask. diff --git a/mm/page_io.c b/mm/page_io.c index 6182870..78eee32 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -214,7 +214,6 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) kiocb.ki_left = PAGE_SIZE; kiocb.ki_nbytes = PAGE_SIZE; - set_page_writeback(page); unlock_page(page); ret = mapping->a_ops->direct_IO(KERNEL_WRITE, &kiocb, &iov, @@ -223,23 +222,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) if (ret == PAGE_SIZE) { count_vm_event(PSWPOUT); ret = 0; - } else { - /* - * In the case of swap-over-nfs, this can be a - * temporary failure if the system has limited - * memory for allocating transmit buffers. - * Mark the page dirty and avoid - * rotate_reclaimable_page but rate-limit the - * messages but do not flag PageError like - * the normal direct-to-bio case as it could - * be temporary. - */ - set_page_dirty(page); - ClearPageReclaim(page); - pr_err_ratelimited("Write error on dio swapfile (%Lu)\n", - page_file_offset(page)); } - end_page_writeback(page); return ret; } diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index fd26d04..926b466 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -429,6 +429,12 @@ compat_process_vm_rw(compat_pid_t pid, if (flags != 0) return -EINVAL; + if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec))) + goto out; + + if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec))) + goto out; + if (vm_write) rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV, iovstack_l, @@ -453,6 +459,8 @@ free_iovecs: kfree(iov_r); if (iov_l != iovstack_l) kfree(iov_l); + +out: return rc; } diff --git a/mm/shmem.c b/mm/shmem.c index efd0b3a..5dd56f6 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2487,7 +2487,6 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) unsigned long inodes; int error = -EINVAL; - config.mpol = NULL; if (shmem_parse_options(data, &config, true)) return error; @@ -2512,13 +2511,8 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) sbinfo->max_inodes = config.max_inodes; sbinfo->free_inodes = config.max_inodes - inodes; - /* - * Preserve previous mempolicy unless mpol remount option was specified. 
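 * (Dropping the check below, together with the "config.mpol = NULL"
 *  initialisation removed above, restores the v3.8 behaviour: every
 *  remount overwrites sbinfo->mpol with config.mpol, yet config.mpol
 *  is only assigned when an "mpol=" option is actually parsed.  On a
 *  remount without that option -- e.g. "mount -o remount,size=2G" on
 *  a tmpfs -- the previously set policy is no longer preserved.)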
- */ - if (config.mpol) { - mpol_put(sbinfo->mpol); - sbinfo->mpol = config.mpol; /* transfers initial ref */ - } + mpol_put(sbinfo->mpol); + sbinfo->mpol = config.mpol; /* transfers initial ref */ out: spin_unlock(&sbinfo->stat_lock); return error; diff --git a/mm/slab.c b/mm/slab.c index 6604ced..e7667a3 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -116,7 +116,6 @@ #include #include #include -#include #include @@ -697,78 +696,12 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) #endif static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); -static DEFINE_PER_CPU(struct list_head, slab_free_list); -static DEFINE_LOCAL_IRQ_LOCK(slab_lock); - -#ifndef CONFIG_PREEMPT_RT_BASE -# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1) -#else -/* - * execute func() for all CPUs. On PREEMPT_RT we dont actually have - * to run on the remote CPUs - we only have to take their CPU-locks. - * (This is a rare operation, so cacheline bouncing is not an issue.) - */ -static void -slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg) -{ - unsigned int i; - - get_cpu_light(); - for_each_online_cpu(i) - func(arg, i); - put_cpu_light(); -} - -static void lock_slab_on(unsigned int cpu) -{ - local_lock_irq_on(slab_lock, cpu); -} - -static void unlock_slab_on(unsigned int cpu) -{ - local_unlock_irq_on(slab_lock, cpu); -} -#endif - -static void free_delayed(struct list_head *h) -{ - while(!list_empty(h)) { - struct page *page = list_first_entry(h, struct page, lru); - - list_del(&page->lru); - __free_pages(page, page->index); - } -} - -static void unlock_l3_and_free_delayed(spinlock_t *list_lock) -{ - LIST_HEAD(tmp); - - list_splice_init(&__get_cpu_var(slab_free_list), &tmp); - local_spin_unlock_irq(slab_lock, list_lock); - free_delayed(&tmp); -} - -static void unlock_slab_and_free_delayed(unsigned long flags) -{ - LIST_HEAD(tmp); - - list_splice_init(&__get_cpu_var(slab_free_list), &tmp); - local_unlock_irqrestore(slab_lock, flags); - free_delayed(&tmp); -} static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) { return cachep->array[smp_processor_id()]; } -static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep, - int cpu) -{ - return cachep->array[cpu]; -} - static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags) { @@ -1238,10 +1171,9 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) if (l3->alien) { struct array_cache *ac = l3->alien[node]; - if (ac && ac->avail && - local_spin_trylock_irq(slab_lock, &ac->lock)) { + if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { __drain_alien_cache(cachep, ac, node); - local_spin_unlock_irq(slab_lock, &ac->lock); + spin_unlock_irq(&ac->lock); } } } @@ -1256,9 +1188,9 @@ static void drain_alien_cache(struct kmem_cache *cachep, for_each_online_node(i) { ac = alien[i]; if (ac) { - local_spin_lock_irqsave(slab_lock, &ac->lock, flags); + spin_lock_irqsave(&ac->lock, flags); __drain_alien_cache(cachep, ac, i); - local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags); + spin_unlock_irqrestore(&ac->lock, flags); } } } @@ -1337,11 +1269,11 @@ static int init_cache_nodelists_node(int node) cachep->nodelists[node] = l3; } - local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock); + spin_lock_irq(&cachep->nodelists[node]->list_lock); cachep->nodelists[node]->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; - local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock); + 
spin_unlock_irq(&cachep->nodelists[node]->list_lock); } return 0; } @@ -1366,7 +1298,7 @@ static void __cpuinit cpuup_canceled(long cpu) if (!l3) goto free_array_cache; - local_spin_lock_irq(slab_lock, &l3->list_lock); + spin_lock_irq(&l3->list_lock); /* Free limit for this kmem_list3 */ l3->free_limit -= cachep->batchcount; @@ -1374,7 +1306,7 @@ static void __cpuinit cpuup_canceled(long cpu) free_block(cachep, nc->entry, nc->avail, node); if (!cpumask_empty(mask)) { - unlock_l3_and_free_delayed(&l3->list_lock); + spin_unlock_irq(&l3->list_lock); goto free_array_cache; } @@ -1388,7 +1320,7 @@ static void __cpuinit cpuup_canceled(long cpu) alien = l3->alien; l3->alien = NULL; - unlock_l3_and_free_delayed(&l3->list_lock); + spin_unlock_irq(&l3->list_lock); kfree(shared); if (alien) { @@ -1462,7 +1394,7 @@ static int __cpuinit cpuup_prepare(long cpu) l3 = cachep->nodelists[node]; BUG_ON(!l3); - local_spin_lock_irq(slab_lock, &l3->list_lock); + spin_lock_irq(&l3->list_lock); if (!l3->shared) { /* * We are serialised from CPU_DEAD or @@ -1477,7 +1409,7 @@ static int __cpuinit cpuup_prepare(long cpu) alien = NULL; } #endif - local_spin_unlock_irq(slab_lock, &l3->list_lock); + spin_unlock_irq(&l3->list_lock); kfree(shared); free_alien_cache(alien); if (cachep->flags & SLAB_DEBUG_OBJECTS) @@ -1680,10 +1612,6 @@ void __init kmem_cache_init(void) if (num_possible_nodes() == 1) use_alien_caches = 0; - local_irq_lock_init(slab_lock); - for_each_possible_cpu(i) - INIT_LIST_HEAD(&per_cpu(slab_free_list, i)); - for (i = 0; i < NUM_INIT_LISTS; i++) kmem_list3_init(&initkmem_list3[i]); @@ -1984,14 +1912,12 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) /* * Interface to system's page release. */ -static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed) +static void kmem_freepages(struct kmem_cache *cachep, void *addr) { unsigned long i = (1 << cachep->gfporder); - struct page *page, *basepage = virt_to_page(addr); + struct page *page = virt_to_page(addr); const unsigned long nr_freed = i; - page = basepage; - kmemcheck_free_shadow(page, cachep->gfporder); if (cachep->flags & SLAB_RECLAIM_ACCOUNT) @@ -2010,12 +1936,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed) memcg_release_pages(cachep, cachep->gfporder); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; - if (!delayed) { - free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); - } else { - basepage->index = cachep->gfporder; - list_add(&basepage->lru, &__get_cpu_var(slab_free_list)); - } + free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); } static void kmem_rcu_free(struct rcu_head *head) @@ -2023,7 +1944,7 @@ static void kmem_rcu_free(struct rcu_head *head) struct slab_rcu *slab_rcu = (struct slab_rcu *)head; struct kmem_cache *cachep = slab_rcu->cachep; - kmem_freepages(cachep, slab_rcu->addr, false); + kmem_freepages(cachep, slab_rcu->addr); if (OFF_SLAB(cachep)) kmem_cache_free(cachep->slabp_cache, slab_rcu); } @@ -2242,8 +2163,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab * Before calling the slab must have been unlinked from the cache. The * cache-lock is not held/needed. 
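 * (Context for the "delayed" argument deleted below: under PREEMPT_RT
 *  the caller could reach this point in a context where handing pages
 *  straight back to the buddy allocator was not allowed, so
 *  kmem_freepages() could park them instead, roughly
 *
 *	basepage->index = cachep->gfporder;
 *	list_add(&basepage->lru, &__get_cpu_var(slab_free_list));
 *
 *  with free_delayed() emptying that per-CPU list once the locks were
 *  dropped.  Plain v3.8, restored here, always frees immediately.)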
*/ -static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp, - bool delayed) +static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) { void *addr = slabp->s_mem - slabp->colouroff; @@ -2256,7 +2176,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp, slab_rcu->addr = addr; call_rcu(&slab_rcu->head, kmem_rcu_free); } else { - kmem_freepages(cachep, addr, delayed); + kmem_freepages(cachep, addr); if (OFF_SLAB(cachep)) kmem_cache_free(cachep->slabp_cache, slabp); } @@ -2613,7 +2533,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) #if DEBUG static void check_irq_off(void) { - BUG_ON_NONRT(!irqs_disabled()); + BUG_ON(!irqs_disabled()); } static void check_irq_on(void) @@ -2648,43 +2568,26 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, struct array_cache *ac, int force, int node); -static void __do_drain(void *arg, unsigned int cpu) +static void do_drain(void *arg) { struct kmem_cache *cachep = arg; struct array_cache *ac; - int node = cpu_to_mem(cpu); + int node = numa_mem_id(); - ac = cpu_cache_get_on_cpu(cachep, cpu); + check_irq_off(); + ac = cpu_cache_get(cachep); spin_lock(&cachep->nodelists[node]->list_lock); free_block(cachep, ac->entry, ac->avail, node); spin_unlock(&cachep->nodelists[node]->list_lock); ac->avail = 0; } -#ifndef CONFIG_PREEMPT_RT_BASE -static void do_drain(void *arg) -{ - __do_drain(arg, smp_processor_id()); -} -#else -static void do_drain(void *arg, int cpu) -{ - LIST_HEAD(tmp); - - lock_slab_on(cpu); - __do_drain(arg, cpu); - list_splice_init(&per_cpu(slab_free_list, cpu), &tmp); - unlock_slab_on(cpu); - free_delayed(&tmp); -} -#endif - static void drain_cpu_caches(struct kmem_cache *cachep) { struct kmem_list3 *l3; int node; - slab_on_each_cpu(do_drain, cachep); + on_each_cpu(do_drain, cachep, 1); check_irq_on(); for_each_online_node(node) { l3 = cachep->nodelists[node]; @@ -2715,10 +2618,10 @@ static int drain_freelist(struct kmem_cache *cache, nr_freed = 0; while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { - local_spin_lock_irq(slab_lock, &l3->list_lock); + spin_lock_irq(&l3->list_lock); p = l3->slabs_free.prev; if (p == &l3->slabs_free) { - local_spin_unlock_irq(slab_lock, &l3->list_lock); + spin_unlock_irq(&l3->list_lock); goto out; } @@ -2732,8 +2635,8 @@ static int drain_freelist(struct kmem_cache *cache, * to the cache. */ l3->free_objects -= cache->num; - local_spin_unlock_irq(slab_lock, &l3->list_lock); - slab_destroy(cache, slabp, false); + spin_unlock_irq(&l3->list_lock); + slab_destroy(cache, slabp); nr_freed++; } out: @@ -3007,7 +2910,7 @@ static int cache_grow(struct kmem_cache *cachep, offset *= cachep->colour_off; if (local_flags & __GFP_WAIT) - local_unlock_irq(slab_lock); + local_irq_enable(); /* * The test for missing atomic flag is performed here, rather than @@ -3037,7 +2940,7 @@ static int cache_grow(struct kmem_cache *cachep, cache_init_objs(cachep, slabp); if (local_flags & __GFP_WAIT) - local_lock_irq(slab_lock); + local_irq_disable(); check_irq_off(); spin_lock(&l3->list_lock); @@ -3048,10 +2951,10 @@ static int cache_grow(struct kmem_cache *cachep, spin_unlock(&l3->list_lock); return 1; opps1: - kmem_freepages(cachep, objp, false); + kmem_freepages(cachep, objp); failed: if (local_flags & __GFP_WAIT) - local_lock_irq(slab_lock); + local_irq_disable(); return 0; } @@ -3465,11 +3368,11 @@ retry: * set and go into memory reserves if necessary. 
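 * (The code below is the usual may-sleep window, the same shape as in
 *  cache_grow() above: IRQs -- or, in the RT tree, slab_lock -- are
 *  released across an allocation that can block, then retaken before
 *  per-CPU slab state is touched again:
 *
 *	if (local_flags & __GFP_WAIT)
 *		local_irq_enable();	// kmem_getpages() may sleep
 *	obj = kmem_getpages(cache, local_flags, numa_mem_id());
 *	if (local_flags & __GFP_WAIT)
 *		local_irq_disable();	// back to the IRQs-off invariant
 *
 *  Sleeping with interrupts disabled would be a bug, hence the window.)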
*/ if (local_flags & __GFP_WAIT) - local_unlock_irq(slab_lock); + local_irq_enable(); kmem_flagcheck(cache, flags); obj = kmem_getpages(cache, local_flags, numa_mem_id()); if (local_flags & __GFP_WAIT) - local_lock_irq(slab_lock); + local_irq_disable(); if (obj) { /* * Insert into the appropriate per node queues @@ -3589,7 +3492,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, cachep = memcg_kmem_get_cache(cachep, flags); cache_alloc_debugcheck_before(cachep, flags); - local_lock_irqsave(slab_lock, save_flags); + local_irq_save(save_flags); if (nodeid == NUMA_NO_NODE) nodeid = slab_node; @@ -3614,7 +3517,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, /* ___cache_alloc_node can fall back to other nodes */ ptr = ____cache_alloc_node(cachep, flags, nodeid); out: - local_unlock_irqrestore(slab_lock, save_flags); + local_irq_restore(save_flags); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, flags); @@ -3676,9 +3579,9 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) cachep = memcg_kmem_get_cache(cachep, flags); cache_alloc_debugcheck_before(cachep, flags); - local_lock_irqsave(slab_lock, save_flags); + local_irq_save(save_flags); objp = __do_cache_alloc(cachep, flags); - local_unlock_irqrestore(slab_lock, save_flags); + local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, flags); @@ -3729,7 +3632,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, * a different cache, refer to comments before * alloc_slabmgmt. */ - slab_destroy(cachep, slabp, true); + slab_destroy(cachep, slabp); } else { list_add(&slabp->list, &l3->slabs_free); } @@ -3992,12 +3895,12 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) if (!cachep) return; + local_irq_save(flags); debug_check_no_locks_freed(objp, cachep->object_size); if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(objp, cachep->object_size); - local_lock_irqsave(slab_lock, flags); __cache_free(cachep, objp, _RET_IP_); - unlock_slab_and_free_delayed(flags); + local_irq_restore(flags); trace_kmem_cache_free(_RET_IP_, objp); } @@ -4021,14 +3924,14 @@ void kfree(const void *objp) if (unlikely(ZERO_OR_NULL_PTR(objp))) return; + local_irq_save(flags); kfree_debugcheck(objp); c = virt_to_cache(objp); debug_check_no_locks_freed(objp, c->object_size); debug_check_no_obj_freed(objp, c->object_size); - local_lock_irqsave(slab_lock, flags); __cache_free(c, (void *)objp, _RET_IP_); - unlock_slab_and_free_delayed(flags); + local_irq_restore(flags); } EXPORT_SYMBOL(kfree); @@ -4065,7 +3968,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) if (l3) { struct array_cache *shared = l3->shared; - local_spin_lock_irq(slab_lock, &l3->list_lock); + spin_lock_irq(&l3->list_lock); if (shared) free_block(cachep, shared->entry, @@ -4078,8 +3981,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) } l3->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; - unlock_l3_and_free_delayed(&l3->list_lock); - + spin_unlock_irq(&l3->list_lock); kfree(shared); free_alien_cache(new_alien); continue; @@ -4126,29 +4028,18 @@ struct ccupdate_struct { struct array_cache *new[0]; }; -static void __do_ccupdate_local(void *info, int cpu) +static void do_ccupdate_local(void *info) { struct ccupdate_struct *new = 
info; struct array_cache *old; - old = cpu_cache_get_on_cpu(new->cachep, cpu); + check_irq_off(); + old = cpu_cache_get(new->cachep); - new->cachep->array[cpu] = new->new[cpu]; - new->new[cpu] = old; + new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; + new->new[smp_processor_id()] = old; } -#ifndef CONFIG_PREEMPT_RT_BASE -static void do_ccupdate_local(void *info) -{ - __do_ccupdate_local(info, smp_processor_id()); -} -#else -static void do_ccupdate_local(void *info, int cpu) -{ - __do_ccupdate_local(info, cpu); -} -#endif - /* Always called with the slab_mutex held */ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared, gfp_t gfp) @@ -4173,7 +4064,7 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, } new->cachep = cachep; - slab_on_each_cpu(do_ccupdate_local, (void *)new); + on_each_cpu(do_ccupdate_local, (void *)new, 1); check_irq_on(); cachep->batchcount = batchcount; @@ -4184,11 +4075,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, struct array_cache *ccold = new->new[i]; if (!ccold) continue; - local_spin_lock_irq(slab_lock, - &cachep->nodelists[cpu_to_mem(i)]->list_lock); + spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); - - unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock); + spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); kfree(ccold); } kfree(new); @@ -4303,7 +4192,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, if (ac->touched && !force) { ac->touched = 0; } else { - local_spin_lock_irq(slab_lock, &l3->list_lock); + spin_lock_irq(&l3->list_lock); if (ac->avail) { tofree = force ? ac->avail : (ac->limit + 4) / 5; if (tofree > ac->avail) @@ -4313,7 +4202,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); } - local_spin_unlock_irq(slab_lock, &l3->list_lock); + spin_unlock_irq(&l3->list_lock); } } @@ -4406,7 +4295,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) continue; check_irq_on(); - local_spin_lock_irq(slab_lock, &l3->list_lock); + spin_lock_irq(&l3->list_lock); list_for_each_entry(slabp, &l3->slabs_full, list) { if (slabp->inuse != cachep->num && !error) @@ -4431,7 +4320,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) if (l3->shared) shared_avail += l3->shared->avail; - local_spin_unlock_irq(slab_lock, &l3->list_lock); + spin_unlock_irq(&l3->list_lock); } num_slabs += active_slabs; num_objs = num_slabs * cachep->num; @@ -4631,13 +4520,13 @@ static int leaks_show(struct seq_file *m, void *p) continue; check_irq_on(); - local_spin_lock_irq(slab_lock, &l3->list_lock); + spin_lock_irq(&l3->list_lock); list_for_each_entry(slabp, &l3->slabs_full, list) handle_slab(n, cachep, slabp); list_for_each_entry(slabp, &l3->slabs_partial, list) handle_slab(n, cachep, slabp); - local_spin_unlock_irq(slab_lock, &l3->list_lock); + spin_unlock_irq(&l3->list_lock); } name = cachep->name; if (n[0] == n[1]) { diff --git a/mm/slub.c b/mm/slub.c index f6871c5..ba2ca53 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1070,7 +1070,7 @@ static noinline struct kmem_cache_node *free_debug_processing( { struct kmem_cache_node *n = get_node(s, page_to_nid(page)); - raw_spin_lock_irqsave(&n->list_lock, *flags); + spin_lock_irqsave(&n->list_lock, *flags); slab_lock(page); if (!check_slab(s, page)) @@ -1118,7 +1118,7 @@ 
out: fail: slab_unlock(page); - raw_spin_unlock_irqrestore(&n->list_lock, *flags); + spin_unlock_irqrestore(&n->list_lock, *flags); slab_fix(s, "Object at 0x%p not freed", object); return NULL; } @@ -1253,12 +1253,6 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) {} #endif /* CONFIG_SLUB_DEBUG */ -struct slub_free_list { - raw_spinlock_t lock; - struct list_head list; -}; -static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); - /* * Slab allocation and freeing */ @@ -1280,15 +1274,10 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) struct page *page; struct kmem_cache_order_objects oo = s->oo; gfp_t alloc_gfp; - bool enableirqs; flags &= gfp_allowed_mask; - enableirqs = (flags & __GFP_WAIT) != 0; -#ifdef CONFIG_PREEMPT_RT_FULL - enableirqs |= system_state == SYSTEM_RUNNING; -#endif - if (enableirqs) + if (flags & __GFP_WAIT) local_irq_enable(); flags |= s->allocflags; @@ -1328,7 +1317,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) kmemcheck_mark_unallocated_pages(page, pages); } - if (enableirqs) + if (flags & __GFP_WAIT) local_irq_disable(); if (!page) return NULL; @@ -1346,10 +1335,8 @@ static void setup_object(struct kmem_cache *s, struct page *page, void *object) { setup_object_debug(s, page, object); -#ifndef CONFIG_PREEMPT_RT_FULL if (unlikely(s->ctor)) s->ctor(object); -#endif } static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) @@ -1427,16 +1414,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page) __free_memcg_kmem_pages(page, order); } -static void free_delayed(struct kmem_cache *s, struct list_head *h) -{ - while(!list_empty(h)) { - struct page *page = list_first_entry(h, struct page, lru); - - list_del(&page->lru); - __free_slab(s, page); - } -} - #define need_reserve_slab_rcu \ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) @@ -1471,12 +1448,6 @@ static void free_slab(struct kmem_cache *s, struct page *page) } call_rcu(head, rcu_free_slab); - } else if (irqs_disabled()) { - struct slub_free_list *f = &__get_cpu_var(slub_free_list); - - raw_spin_lock(&f->lock); - list_add(&page->lru, &f->list); - raw_spin_unlock(&f->lock); } else __free_slab(s, page); } @@ -1578,7 +1549,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, if (!n || !n->nr_partial) return NULL; - raw_spin_lock(&n->list_lock); + spin_lock(&n->list_lock); list_for_each_entry_safe(page, page2, &n->partial, lru) { void *t; int available; @@ -1603,7 +1574,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, break; } - raw_spin_unlock(&n->list_lock); + spin_unlock(&n->list_lock); return object; } @@ -1845,7 +1816,7 @@ redo: * that acquire_slab() will see a slab page that * is frozen */ - raw_spin_lock(&n->list_lock); + spin_lock(&n->list_lock); } } else { m = M_FULL; @@ -1856,7 +1827,7 @@ redo: * slabs from diagnostic functions will not see * any frozen slabs. 
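 * (Every raw_spin_*() -> spin_*() change in this file is the same
 *  mechanical revert: on PREEMPT_RT a spinlock_t becomes a sleeping
 *  lock, so the RT tree declared the per-node list lock raw, e.g.
 *
 *	struct kmem_cache_node {
 *		raw_spinlock_t	list_lock;	// RT tree: must truly spin
 *		unsigned long	nr_partial;
 *		struct list_head partial;
 *		...
 *	};
 *
 *  v3.8 keeps spinlock_t, which a !RT build compiles down to the same
 *  code anyway.)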
*/ - raw_spin_lock(&n->list_lock); + spin_lock(&n->list_lock); } } @@ -1891,7 +1862,7 @@ redo: goto redo; if (lock) - raw_spin_unlock(&n->list_lock); + spin_unlock(&n->list_lock); if (m == M_FREE) { stat(s, DEACTIVATE_EMPTY); @@ -1922,10 +1893,10 @@ static void unfreeze_partials(struct kmem_cache *s, n2 = get_node(s, page_to_nid(page)); if (n != n2) { if (n) - raw_spin_unlock(&n->list_lock); + spin_unlock(&n->list_lock); n = n2; - raw_spin_lock(&n->list_lock); + spin_lock(&n->list_lock); } do { @@ -1954,7 +1925,7 @@ static void unfreeze_partials(struct kmem_cache *s, } if (n) - raw_spin_unlock(&n->list_lock); + spin_unlock(&n->list_lock); while (discard_page) { page = discard_page; @@ -1990,21 +1961,14 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) pobjects = oldpage->pobjects; pages = oldpage->pages; if (drain && pobjects > s->cpu_partial) { - struct slub_free_list *f; unsigned long flags; - LIST_HEAD(tofree); /* * partial array is full. Move the existing * set to the per node partial list. */ local_irq_save(flags); unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); - f = &__get_cpu_var(slub_free_list); - raw_spin_lock(&f->lock); - list_splice_init(&f->list, &tofree); - raw_spin_unlock(&f->lock); local_irq_restore(flags); - free_delayed(s, &tofree); oldpage = NULL; pobjects = 0; pages = 0; @@ -2067,22 +2031,7 @@ static bool has_cpu_slab(int cpu, void *info) static void flush_all(struct kmem_cache *s) { - LIST_HEAD(tofree); - int cpu; - on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); - for_each_online_cpu(cpu) { - struct slub_free_list *f; - - if (!has_cpu_slab(cpu, s)) - continue; - - f = &per_cpu(slub_free_list, cpu); - raw_spin_lock_irq(&f->lock); - list_splice_init(&f->list, &tofree); - raw_spin_unlock_irq(&f->lock); - free_delayed(s, &tofree); - } } /* @@ -2092,7 +2041,7 @@ static void flush_all(struct kmem_cache *s) static inline int node_match(struct page *page, int node) { #ifdef CONFIG_NUMA - if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) + if (node != NUMA_NO_NODE && page_to_nid(page) != node) return 0; #endif return 1; @@ -2110,10 +2059,10 @@ static unsigned long count_partial(struct kmem_cache_node *n, unsigned long x = 0; struct page *page; - raw_spin_lock_irqsave(&n->list_lock, flags); + spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) x += get_count(page); - raw_spin_unlock_irqrestore(&n->list_lock, flags); + spin_unlock_irqrestore(&n->list_lock, flags); return x; } @@ -2256,11 +2205,9 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long addr, struct kmem_cache_cpu *c) { - struct slub_free_list *f; void *freelist; struct page *page; unsigned long flags; - LIST_HEAD(tofree); local_irq_save(flags); #ifdef CONFIG_PREEMPT @@ -2323,13 +2270,7 @@ load_freelist: VM_BUG_ON(!c->page->frozen); c->freelist = get_freepointer(s, freelist); c->tid = next_tid(c->tid); -out: - f = &__get_cpu_var(slub_free_list); - raw_spin_lock(&f->lock); - list_splice_init(&f->list, &tofree); - raw_spin_unlock(&f->lock); local_irq_restore(flags); - free_delayed(s, &tofree); return freelist; new_slab: @@ -2347,7 +2288,9 @@ new_slab: if (unlikely(!freelist)) { if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) slab_out_of_memory(s, gfpflags, node); - goto out; + + local_irq_restore(flags); + return NULL; } page = c->page; @@ -2361,7 +2304,8 @@ new_slab: deactivate_slab(s, page, 
get_freepointer(s, freelist)); c->page = NULL; c->freelist = NULL; - goto out; + local_irq_restore(flags); + return freelist; } /* @@ -2387,13 +2331,13 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, s = memcg_kmem_get_cache(s, gfpflags); redo: + /* - * Preemption is disabled for the retrieval of the tid because that - * must occur from the current processor. We cannot allow rescheduling - * on a different processor between the determination of the pointer - * and the retrieval of the tid. + * Must read kmem_cache cpu data via this cpu ptr. Preemption is + * enabled. We may switch back and forth between cpus while + * reading from one cpu area. That does not matter as long + * as we end up on the original cpu again when doing the cmpxchg. */ - preempt_disable(); c = __this_cpu_ptr(s->cpu_slab); /* @@ -2403,7 +2347,7 @@ redo: * linked list in between. */ tid = c->tid; - preempt_enable(); + barrier(); object = c->freelist; page = c->page; @@ -2439,10 +2383,6 @@ redo: if (unlikely(gfpflags & __GFP_ZERO) && object) memset(object, 0, s->object_size); -#ifdef CONFIG_PREEMPT_RT_FULL - if (unlikely(s->ctor) && object) - s->ctor(object); -#endif slab_post_alloc_hook(s, gfpflags, object); @@ -2537,7 +2477,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, do { if (unlikely(n)) { - raw_spin_unlock_irqrestore(&n->list_lock, flags); + spin_unlock_irqrestore(&n->list_lock, flags); n = NULL; } prior = page->freelist; @@ -2567,7 +2507,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, * Otherwise the list_lock will synchronize with * other processors updating the list of slabs. */ - raw_spin_lock_irqsave(&n->list_lock, flags); + spin_lock_irqsave(&n->list_lock, flags); } } @@ -2608,7 +2548,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, add_partial(n, page, DEACTIVATE_TO_TAIL); stat(s, FREE_ADD_PARTIAL); } - raw_spin_unlock_irqrestore(&n->list_lock, flags); + spin_unlock_irqrestore(&n->list_lock, flags); return; slab_empty: @@ -2622,7 +2562,7 @@ slab_empty: /* Slab must be on the full list */ remove_full(s, page); - raw_spin_unlock_irqrestore(&n->list_lock, flags); + spin_unlock_irqrestore(&n->list_lock, flags); stat(s, FREE_SLAB); discard_slab(s, page); } @@ -2654,11 +2594,10 @@ redo: * data is retrieved via this pointer. If we are on the same cpu * during the cmpxchg then the free will succedd. */ - preempt_disable(); c = __this_cpu_ptr(s->cpu_slab); tid = c->tid; - preempt_enable(); + barrier(); if (likely(page == c->page)) { set_freepointer(s, object, c->freelist); @@ -2824,7 +2763,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; - raw_spin_lock_init(&n->list_lock); + spin_lock_init(&n->list_lock); INIT_LIST_HEAD(&n->partial); #ifdef CONFIG_SLUB_DEBUG atomic_long_set(&n->nr_slabs, 0); @@ -3511,7 +3450,7 @@ int kmem_cache_shrink(struct kmem_cache *s) for (i = 0; i < objects; i++) INIT_LIST_HEAD(slabs_by_inuse + i); - raw_spin_lock_irqsave(&n->list_lock, flags); + spin_lock_irqsave(&n->list_lock, flags); /* * Build lists indexed by the items in use in each slab. 
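/*
 * (On the tid fastpath restored in slab_alloc_node()/slab_free()
 *  above, nothing actually depends on staying on one CPU: the
 *  transaction id is revalidated by a double cmpxchg.  Paraphrasing
 *  the v3.8 allocation hot path:
 *
 *	object = c->freelist;
 *	page = c->page;
 *	...
 *	if (unlikely(!this_cpu_cmpxchg_double(
 *			s->cpu_slab->freelist, s->cpu_slab->tid,
 *			object, tid,
 *			get_freepointer_safe(s, object), next_tid(tid))))
 *		goto redo;
 *
 *  a migration between reading c->tid and the cmpxchg makes the tid
 *  mismatch and the path simply retries; the RT tree pinned the task
 *  with preempt_disable() instead, which the revert removes.)
 */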
@@ -3532,7 +3471,7 @@ int kmem_cache_shrink(struct kmem_cache *s) for (i = objects - 1; i > 0; i--) list_splice(slabs_by_inuse + i, n->partial.prev); - raw_spin_unlock_irqrestore(&n->list_lock, flags); + spin_unlock_irqrestore(&n->list_lock, flags); /* Release empty slabs */ list_for_each_entry_safe(page, t, slabs_by_inuse, lru) @@ -3702,12 +3641,6 @@ void __init kmem_cache_init(void) boot_kmem_cache_node; int i; int caches = 2; - int cpu; - - for_each_possible_cpu(cpu) { - raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); - INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); - } if (debug_guardpage_minorder()) slub_max_order = 0; @@ -4099,7 +4032,7 @@ static int validate_slab_node(struct kmem_cache *s, struct page *page; unsigned long flags; - raw_spin_lock_irqsave(&n->list_lock, flags); + spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) { validate_slab_slab(s, page, map); @@ -4122,7 +4055,7 @@ static int validate_slab_node(struct kmem_cache *s, atomic_long_read(&n->nr_slabs)); out: - raw_spin_unlock_irqrestore(&n->list_lock, flags); + spin_unlock_irqrestore(&n->list_lock, flags); return count; } @@ -4312,12 +4245,12 @@ static int list_locations(struct kmem_cache *s, char *buf, if (!atomic_long_read(&n->nr_slabs)) continue; - raw_spin_lock_irqsave(&n->list_lock, flags); + spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) process_slab(&t, s, page, alloc, map); list_for_each_entry(page, &n->full, lru) process_slab(&t, s, page, alloc, map); - raw_spin_unlock_irqrestore(&n->list_lock, flags); + spin_unlock_irqrestore(&n->list_lock, flags); } for (i = 0; i < t.count; i++) { diff --git a/mm/swap.c b/mm/swap.c index 5812f96..6310dc2 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -30,7 +30,6 @@ #include #include #include -#include #include "internal.h" @@ -41,9 +40,6 @@ static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); -static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); -static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); - /* * This path almost never happens for VM activity - pages are normally * freed via pagevecs. But it gets used by networking. 
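/*
 * (The rotate_lock/swapvec_lock definitions removed above came from
 *  the RT tree's <linux/locallock.h>.  On a !RT configuration those
 *  local locks collapse to interrupt disabling, approximately:
 *
 *	#ifndef CONFIG_PREEMPT_RT_BASE
 *	# define local_lock_irqsave(lvar, flags)	local_irq_save(flags)
 *	# define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
 *	#endif
 *
 *  so on non-RT kernels the local_irq_save()/local_irq_restore()
 *  calls restored below are what effectively ran all along.)
 */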
@@ -358,11 +354,11 @@ void rotate_reclaimable_page(struct page *page) unsigned long flags; page_cache_get(page); - local_lock_irqsave(rotate_lock, flags); + local_irq_save(flags); pvec = &__get_cpu_var(lru_rotate_pvecs); if (!pagevec_add(pvec, page)) pagevec_move_tail(pvec); - local_unlock_irqrestore(rotate_lock, flags); + local_irq_restore(flags); } } @@ -407,13 +403,12 @@ static void activate_page_drain(int cpu) void activate_page(struct page *page) { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - struct pagevec *pvec = &get_locked_var(swapvec_lock, - activate_page_pvecs); + struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); page_cache_get(page); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, __activate_page, NULL); - put_locked_var(swapvec_lock, activate_page_pvecs); + put_cpu_var(activate_page_pvecs); } } @@ -461,13 +456,13 @@ EXPORT_SYMBOL(mark_page_accessed); */ void __lru_cache_add(struct page *page, enum lru_list lru) { - struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru]; + struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; page_cache_get(page); if (!pagevec_space(pvec)) __pagevec_lru_add(pvec, lru); pagevec_add(pvec, page); - put_locked_var(swapvec_lock, lru_add_pvecs); + put_cpu_var(lru_add_pvecs); } EXPORT_SYMBOL(__lru_cache_add); @@ -602,9 +597,9 @@ void lru_add_drain_cpu(int cpu) unsigned long flags; /* No harm done if a racing interrupt already did this */ - local_lock_irqsave(rotate_lock, flags); + local_irq_save(flags); pagevec_move_tail(pvec); - local_unlock_irqrestore(rotate_lock, flags); + local_irq_restore(flags); } pvec = &per_cpu(lru_deactivate_pvecs, cpu); @@ -632,19 +627,18 @@ void deactivate_page(struct page *page) return; if (likely(get_page_unless_zero(page))) { - struct pagevec *pvec = &get_locked_var(swapvec_lock, - lru_deactivate_pvecs); + struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); - put_locked_var(swapvec_lock, lru_deactivate_pvecs); + put_cpu_var(lru_deactivate_pvecs); } } void lru_add_drain(void) { - lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); - local_unlock_cpu(swapvec_lock); + lru_add_drain_cpu(get_cpu()); + put_cpu(); } static void lru_add_drain_per_cpu(struct work_struct *dummy) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f4b4fee..5123a16 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -782,7 +782,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask) struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; - int node, err, cpu; + int node, err; node = numa_node_id(); @@ -821,13 +821,12 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask) BUG_ON(err); radix_tree_preload_end(); - cpu = get_cpu_light(); - vbq = &__get_cpu_var(vmap_block_queue); + vbq = &get_cpu_var(vmap_block_queue); vb->vbq = vbq; spin_lock(&vbq->lock); list_add_rcu(&vb->free_list, &vbq->free); spin_unlock(&vbq->lock); - put_cpu_light(); + put_cpu_var(vmap_block_queue); return vb; } @@ -901,7 +900,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) struct vmap_block *vb; unsigned long addr = 0; unsigned int order; - int purge = 0, cpu; + int purge = 0; BUG_ON(size & ~PAGE_MASK); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); @@ -917,8 +916,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) again: rcu_read_lock(); - cpu = get_cpu_light(); - vbq = &__get_cpu_var(vmap_block_queue); + vbq = &get_cpu_var(vmap_block_queue); list_for_each_entry_rcu(vb, &vbq->free, free_list) { int i; 
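/*
 * (get_cpu_var(), restored in new_vmap_block()/vb_alloc() above, is
 *  the stock "pin to this CPU" accessor, roughly
 *
 *	#define get_cpu_var(var) \
 *		(*({ preempt_disable(); &__get_cpu_var(var); }))
 *
 *  with put_cpu_var() re-enabling preemption.  get_cpu_light(), the
 *  RT replacement being dropped, only disabled migration, so that the
 *  spin_lock(&vbq->lock) taken inside the section -- a sleeping lock
 *  on RT -- could still be acquired without running preemption-off.)
 */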
@@ -955,7 +953,7 @@ next: if (purge) purge_fragmented_blocks_thiscpu(); - put_cpu_light(); + put_cpu_var(vmap_block_queue); rcu_read_unlock(); if (!addr) { diff --git a/mm/vmstat.c b/mm/vmstat.c index a4dbd77..9800306 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -216,7 +216,6 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long x; long t; - preempt_disable_rt(); x = delta + __this_cpu_read(*p); t = __this_cpu_read(pcp->stat_threshold); @@ -226,7 +225,6 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, x = 0; } __this_cpu_write(*p, x); - preempt_enable_rt(); } EXPORT_SYMBOL(__mod_zone_page_state); @@ -259,7 +257,6 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) s8 __percpu *p = pcp->vm_stat_diff + item; s8 v, t; - preempt_disable_rt(); v = __this_cpu_inc_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v > t)) { @@ -268,7 +265,6 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) zone_page_state_add(v + overstep, zone, item); __this_cpu_write(*p, -overstep); } - preempt_enable_rt(); } void __inc_zone_page_state(struct page *page, enum zone_stat_item item) @@ -283,7 +279,6 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) s8 __percpu *p = pcp->vm_stat_diff + item; s8 v, t; - preempt_disable_rt(); v = __this_cpu_dec_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v < - t)) { @@ -292,7 +287,6 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) zone_page_state_add(v - overstep, zone, item); __this_cpu_write(*p, overstep); } - preempt_enable_rt(); } void __dec_zone_page_state(struct page *page, enum zone_stat_item item) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index acc74ad..a292e80 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -86,6 +86,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) grp = &vlan_info->grp; + /* Take it out of our own structures, but be sure to interlock with + * HW accelerating devices or SW vlan input packet processing if + * VLAN is not 0 (leave it there for 802.1p). + */ + if (vlan_id) + vlan_vid_del(real_dev, vlan_id); + grp->nr_vlan_devs--; if (vlan->flags & VLAN_FLAG_GVRP) @@ -101,13 +108,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) if (grp->nr_vlan_devs == 0) vlan_gvrp_uninit_applicant(real_dev); - /* Take it out of our own structures, but be sure to interlock with - * HW accelerating devices or SW vlan input packet processing if - * VLAN is not 0 (leave it there for 802.1p). 
- */ - if (vlan_id) - vlan_vid_del(real_dev, vlan_id); - /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); } diff --git a/net/atm/common.c b/net/atm/common.c index cf4b7e6..806fc0a 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -532,8 +532,6 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, struct sk_buff *skb; int copied, error = -EINVAL; - msg->msg_namelen = 0; - if (sock->state != SS_CONNECTED) return -ENOTCONN; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index d53a123..779095d 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1647,7 +1647,6 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, ax25_address src; const unsigned char *mac = skb_mac_header(skb); - memset(sax, 0, sizeof(struct full_sockaddr_ax25)); ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, &digi, NULL, NULL); sax->sax25_family = AF_AX25; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 1ee94d0..7d02ebd 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1298,8 +1298,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; /* unpack the aggregated packets and process them one by one */ - while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, - batadv_ogm_packet->tt_num_changes)) { + do { tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, @@ -1310,7 +1309,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, packet_pos = packet_buff + buff_pos; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; - } + } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, + batadv_ogm_packet->tt_num_changes)); kfree_skb(skb); return NET_RX_SUCCESS; diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index b04795e..5355df6 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -230,8 +230,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (flags & (MSG_OOB)) return -EOPNOTSUPP; - msg->msg_namelen = 0; - skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) @@ -239,6 +237,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, return err; } + msg->msg_namelen = 0; + copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 970fc13..ce3f665 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -610,7 +610,6 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); - msg->msg_namelen = 0; return 0; } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index cc16d1b..57f250c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -361,7 +361,6 @@ static void __sco_sock_close(struct sock *sk) sco_chan_del(sk, ECONNRESET); break; - case BT_CONNECT2: case BT_CONNECT: case BT_DISCONN: sco_chan_del(sk, ECONNRESET); @@ -667,7 +666,6 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { hci_conn_accept(pi->conn->hcon, 0); sk->sk_state = BT_CONFIG; - msg->msg_namelen = 0; release_sock(sk); return 0; diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 2897e40..acc9f4c 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -82,7 +82,6 @@ static int 
br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, port = p->port; if (port) { struct br_mdb_entry e; - memset(&e, 0, sizeof(e)); e.ifindex = port->dev->ifindex; e.state = p->state; if (p->addr.proto == htons(ETH_P_IP)) @@ -139,7 +138,6 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) break; bpm = nlmsg_data(nlh); - memset(bpm, 0, sizeof(*bpm)); bpm->ifindex = dev->ifindex; if (br_mdb_fill_info(skb, cb, dev) < 0) goto out; @@ -175,7 +173,6 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, return -EMSGSIZE; bpm = nlmsg_data(nlh); - memset(bpm, 0, sizeof(*bpm)); bpm->family = AF_BRIDGE; bpm->ifindex = dev->ifindex; nest = nla_nest_start(skb, MDBA_MDB); @@ -233,7 +230,6 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, { struct br_mdb_entry entry; - memset(&entry, 0, sizeof(entry)); entry.ifindex = port->dev->ifindex; entry.addr.proto = group->proto; entry.addr.u.ip4 = group->u.ip4; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 580e176..5dc66ab 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -28,7 +28,6 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_MODE */ + nla_total_size(1) /* IFLA_BRPORT_GUARD */ + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ - + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + 0; } diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index ff2ff3c..095259f 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -286,8 +286,6 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_flags&MSG_OOB) goto read_error; - m->msg_namelen = 0; - skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; diff --git a/net/can/gw.c b/net/can/gw.c index 28e7bdc..574dda78e 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -436,7 +436,7 @@ static int cgw_notifier(struct notifier_block *nb, if (gwj->src.dev == dev || gwj->dst.dev == dev) { hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kmem_cache_free(cgw_cache, gwj); + kfree(gwj); } } } @@ -829,7 +829,7 @@ static void cgw_remove_all_jobs(void) hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) { hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kmem_cache_free(cgw_cache, gwj); + kfree(gwj); } } @@ -885,7 +885,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) hlist_del(&gwj->list); cgw_unregister_filter(gwj); - kmem_cache_free(cgw_cache, gwj); + kfree(gwj); err = 0; break; } diff --git a/net/core/dev.c b/net/core/dev.c index 2593a482..f64e439b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -203,7 +203,7 @@ static struct list_head offload_base __read_mostly; DEFINE_RWLOCK(dev_base_lock); EXPORT_SYMBOL(dev_base_lock); -DEFINE_MUTEX(devnet_rename_mutex); +seqcount_t devnet_rename_seq; static inline void dev_base_seq_inc(struct net *net) { @@ -225,14 +225,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS - raw_spin_lock(&sd->input_pkt_queue.raw_lock); + spin_lock(&sd->input_pkt_queue.lock); #endif } static inline void rps_unlock(struct softnet_data *sd) { #ifdef CONFIG_RPS - raw_spin_unlock(&sd->input_pkt_queue.raw_lock); + spin_unlock(&sd->input_pkt_queue.lock); #endif } @@ -1093,11 +1093,10 @@ int dev_change_name(struct net_device *dev, const char *newname) if (dev->flags & IFF_UP) return -EBUSY; - - mutex_lock(&devnet_rename_mutex); + 
write_seqcount_begin(&devnet_rename_seq); if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { - mutex_unlock(&devnet_rename_mutex); + write_seqcount_end(&devnet_rename_seq); return 0; } @@ -1105,7 +1104,7 @@ int dev_change_name(struct net_device *dev, const char *newname) err = dev_get_valid_name(net, dev, newname); if (err < 0) { - mutex_unlock(&devnet_rename_mutex); + write_seqcount_end(&devnet_rename_seq); return err; } @@ -1113,11 +1112,11 @@ rollback: ret = device_rename(&dev->dev, dev->name); if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); - mutex_unlock(&devnet_rename_mutex); + write_seqcount_end(&devnet_rename_seq); return ret; } - mutex_unlock(&devnet_rename_mutex); + write_seqcount_end(&devnet_rename_seq); write_lock_bh(&dev_base_lock); hlist_del_rcu(&dev->name_hlist); @@ -1136,7 +1135,7 @@ rollback: /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; - mutex_lock(&devnet_rename_mutex); + write_seqcount_begin(&devnet_rename_seq); memcpy(dev->name, oldname, IFNAMSIZ); goto rollback; } else { @@ -1592,6 +1591,7 @@ void net_enable_timestamp(void) return; } #endif + WARN_ON(in_interrupt()); static_key_slow_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); @@ -1738,7 +1738,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) skb->mark = 0; secpath_reset(skb); nf_reset(skb); - nf_reset_trace(skb); return netif_rx(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); @@ -1947,7 +1946,6 @@ static inline void __netif_reschedule(struct Qdisc *q) sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); - preempt_check_resched_rt(); } void __netif_schedule(struct Qdisc *q) @@ -1969,7 +1967,6 @@ void dev_kfree_skb_irq(struct sk_buff *skb) sd->completion_queue = skb; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); - preempt_check_resched_rt(); } } EXPORT_SYMBOL(dev_kfree_skb_irq); @@ -2021,9 +2018,6 @@ static void skb_warn_bad_offload(const struct sk_buff *skb) struct net_device *dev = skb->dev; const char *driver = ""; - if (!net_ratelimit()) - return; - if (dev && dev->dev.parent) driver = dev_driver_string(dev->dev.parent); @@ -3058,7 +3052,6 @@ enqueue: rps_unlock(sd); local_irq_restore(flags); - preempt_check_resched_rt(); atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); @@ -3096,7 +3089,7 @@ int netif_rx(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; - migrate_disable(); + preempt_disable(); rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); @@ -3106,13 +3099,13 @@ int netif_rx(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); - migrate_enable(); + preempt_enable(); } else #endif { unsigned int qtail; - ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); - put_cpu_light(); + ret = enqueue_to_backlog(skb, get_cpu(), &qtail); + put_cpu(); } return ret; } @@ -3122,44 +3115,16 @@ int netif_rx_ni(struct sk_buff *skb) { int err; - local_bh_disable(); + preempt_disable(); err = netif_rx(skb); - local_bh_enable(); + if (local_softirq_pending()) + do_softirq(); + preempt_enable(); return err; } EXPORT_SYMBOL(netif_rx_ni); -#ifdef CONFIG_PREEMPT_RT_FULL -/* - * RT runs ksoftirqd as a real time thread and the root_lock is a - * "sleeping spinlock". If the trylock fails then we can go into an - * infinite loop when ksoftirqd preempted the task which actually - * holds the lock, because we requeue q and raise NET_TX softirq - * causing ksoftirqd to loop forever. 
- * - * It's safe to use spin_lock on RT here as softirqs run in thread - * context and cannot deadlock against the thread which is holding - * root_lock. - * - * On !RT the trylock might fail, but there we bail out from the - * softirq loop after 10 attempts which we can't do on RT. And the - * task holding root_lock cannot be preempted, so the only downside of - * that trylock is that we need 10 loops to decide that we should have - * given up in the first one :) - */ -static inline int take_root_lock(spinlock_t *lock) -{ - spin_lock(lock); - return 1; -} -#else -static inline int take_root_lock(spinlock_t *lock) -{ - return spin_trylock(lock); -} -#endif - static void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = &__get_cpu_var(softnet_data); @@ -3198,7 +3163,7 @@ static void net_tx_action(struct softirq_action *h) head = head->next_sched; root_lock = qdisc_lock(q); - if (take_root_lock(root_lock)) { + if (spin_trylock(root_lock)) { smp_mb__before_clear_bit(); clear_bit(__QDISC_STATE_SCHED, &q->state); @@ -3312,7 +3277,6 @@ int netdev_rx_handler_register(struct net_device *dev, if (dev->rx_handler) return -EBUSY; - /* Note: rx_handler_data must be set before rx_handler */ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); rcu_assign_pointer(dev->rx_handler, rx_handler); @@ -3333,11 +3297,6 @@ void netdev_rx_handler_unregister(struct net_device *dev) ASSERT_RTNL(); RCU_INIT_POINTER(dev->rx_handler, NULL); - /* a reader seeing a non NULL rx_handler in a rcu_read_lock() - * section has a guarantee to see a non NULL rx_handler_data - * as well. - */ - synchronize_net(); RCU_INIT_POINTER(dev->rx_handler_data, NULL); } EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); @@ -3460,7 +3419,6 @@ ncls: } switch (rx_handler(&skb)) { case RX_HANDLER_CONSUMED: - ret = NET_RX_SUCCESS; goto unlock; case RX_HANDLER_ANOTHER: goto another_round; @@ -3569,7 +3527,7 @@ static void flush_backlog(void *arg) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->input_pkt_queue); - __skb_queue_tail(&sd->tofree_queue, skb); + kfree_skb(skb); input_queue_head_incr(sd); } } @@ -3578,13 +3536,10 @@ static void flush_backlog(void *arg) skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->process_queue); - __skb_queue_tail(&sd->tofree_queue, skb); + kfree_skb(skb); input_queue_head_incr(sd); } } - - if (!skb_queue_empty(&sd->tofree_queue)) - raise_softirq_irqoff(NET_RX_SOFTIRQ); } static int napi_gro_complete(struct sk_buff *skb) @@ -3943,7 +3898,6 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) } else #endif local_irq_enable(); - preempt_check_resched_rt(); } static int process_backlog(struct napi_struct *napi, int quota) @@ -4016,7 +3970,6 @@ void __napi_schedule(struct napi_struct *n) local_irq_save(flags); ____napi_schedule(&__get_cpu_var(softnet_data), n); local_irq_restore(flags); - preempt_check_resched_rt(); } EXPORT_SYMBOL(__napi_schedule); @@ -4091,17 +4044,10 @@ static void net_rx_action(struct softirq_action *h) struct softnet_data *sd = &__get_cpu_var(softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; - struct sk_buff *skb; void *have; local_irq_disable(); - while ((skb = __skb_dequeue(&sd->tofree_queue))) { - local_irq_enable(); - kfree_skb(skb); - local_irq_disable(); - } - while (!list_empty(&sd->poll_list)) { struct napi_struct *n; int work, weight; @@ -4224,6 +4170,7 @@ static int dev_ifname(struct net *net, struct ifreq __user 
*arg) { struct net_device *dev; struct ifreq ifr; + unsigned seq; /* * Fetch the caller's info block. @@ -4232,18 +4179,19 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; - mutex_lock(&devnet_rename_mutex); +retry: + seq = read_seqcount_begin(&devnet_rename_seq); rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); if (!dev) { rcu_read_unlock(); - mutex_unlock(&devnet_rename_mutex); return -ENODEV; } strcpy(ifr.ifr_name, dev->name); rcu_read_unlock(); - mutex_unlock(&devnet_rename_mutex); + if (read_seqcount_retry(&devnet_rename_seq, seq)) + goto retry; if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) return -EFAULT; @@ -6571,7 +6519,6 @@ static int dev_cpu_callback(struct notifier_block *nfb, raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); - preempt_check_resched_rt(); /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->process_queue))) { @@ -6582,9 +6529,6 @@ static int dev_cpu_callback(struct notifier_block *nfb, netif_rx(skb); input_queue_head_incr(oldsd); } - while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { - kfree_skb(skb); - } return NOTIFY_OK; } @@ -6857,9 +6801,8 @@ static int __init net_dev_init(void) struct softnet_data *sd = &per_cpu(softnet_data, i); memset(sd, 0, sizeof(*sd)); - skb_queue_head_init_raw(&sd->input_pkt_queue); - skb_queue_head_init_raw(&sd->process_queue); - skb_queue_head_init_raw(&sd->tofree_queue); + skb_queue_head_init(&sd->input_pkt_queue); + skb_queue_head_init(&sd->process_queue); sd->completion_queue = NULL; INIT_LIST_HEAD(&sd->poll_list); sd->output_queue = NULL; diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 7841d87..b079c7b 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -38,7 +38,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, ha->type = addr_type; ha->refcount = 1; ha->global_use = global; - ha->synced = 0; + ha->synced = false; list_add_tail_rcu(&ha->list, &list->list); list->count++; @@ -166,7 +166,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, addr_len, ha->type); if (err) break; - ha->synced++; + ha->synced = true; ha->refcount++; } else if (ha->refcount == 1) { __hw_addr_del(to_list, ha->addr, addr_len, ha->type); @@ -187,7 +187,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, if (ha->synced) { __hw_addr_del(to_list, ha->addr, addr_len, ha->type); - ha->synced--; + ha->synced = false; __hw_addr_del(from_list, ha->addr, addr_len, ha->type); } diff --git a/net/core/dst.c b/net/core/dst.c index 35fd12f..ee6153e 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -179,7 +179,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev, dst_init_metrics(dst, dst_default_metrics, true); dst->expires = 0UL; dst->path = dst; - dst->from = NULL; #ifdef CONFIG_XFRM dst->xfrm = NULL; #endif diff --git a/net/core/flow.c b/net/core/flow.c index 3bad824..b0901ee 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -329,7 +329,7 @@ static void flow_cache_flush_per_cpu(void *data) struct flow_flush_info *info = data; struct tasklet_struct *tasklet; - tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet; + tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); tasklet->data = (unsigned long)info; tasklet_schedule(tasklet); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 055fb13..1868625 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -976,7 +976,6 @@ static int 
rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, * report anything. */ ivi.spoofchk = -1; - memset(ivi.mac, 0, sizeof(ivi.mac)); if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) break; vf_mac.vf = @@ -1068,7 +1067,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); cb->seq = net->dev_base_seq; - if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, + if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) @@ -1924,7 +1923,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) u32 ext_filter_mask = 0; u16 min_ifinfo_dump_size = 0; - if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, + if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); @@ -2539,7 +2538,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); while (RTA_OK(attr, attrlen)) { - unsigned int flavor = attr->rta_type & NLA_TYPE_MASK; + unsigned int flavor = attr->rta_type; if (flavor) { if (flavor > rta_max[sz_idx]) return -EINVAL; diff --git a/net/core/scm.c b/net/core/scm.c index 2dc6cda..905dcc6 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -24,7 +24,6 @@ #include #include #include -#include <linux/pid_namespace.h> #include #include #include @@ -53,8 +52,7 @@ static __inline__ int scm_check_creds(struct ucred *creds) if (!uid_valid(uid) || !gid_valid(gid)) return -EINVAL; - if ((creds->pid == task_tgid_vnr(current) || - ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && + if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 39b45c0..32443eb 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -60,7 +60,6 @@ #include #include #include -#include <linux/locallock.h> #include #include @@ -348,7 +347,6 @@ struct netdev_alloc_cache { unsigned int pagecnt_bias; }; static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); -static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); #define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768) #define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) @@ -361,7 +359,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) int order; unsigned long flags; - local_lock_irqsave(netdev_alloc_lock, flags); + local_irq_save(flags); nc = &__get_cpu_var(netdev_alloc_cache); if (unlikely(!nc->frag.page)) { refill: @@ -395,7 +393,7 @@ recycle: nc->frag.offset += fragsz; nc->pagecnt_bias--; end: - local_unlock_irqrestore(netdev_alloc_lock, flags); + local_irq_restore(flags); return data; } diff --git a/net/core/sock.c b/net/core/sock.c index 2754c99..bc131d4 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -571,6 +571,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval, struct net *net = sock_net(sk); struct net_device *dev; char devname[IFNAMSIZ]; + unsigned seq; if (sk->sk_bound_dev_if == 0) { len = 0; @@ -581,19 +582,20 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval, if (len < IFNAMSIZ) goto out; - mutex_lock(&devnet_rename_mutex); +retry: + seq = read_seqcount_begin(&devnet_rename_seq); rcu_read_lock(); dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); ret =
-ENODEV; if (!dev) { rcu_read_unlock(); - mutex_unlock(&devnet_rename_mutex); goto out; } strcpy(devname, dev->name); rcu_read_unlock(); - mutex_unlock(&devnet_rename_mutex); + if (read_seqcount_retry(&devnet_rename_seq, seq)) + goto retry; len = strlen(devname) + 1; @@ -2285,11 +2287,12 @@ void lock_sock_nested(struct sock *sk, int subclass) if (sk->sk_lock.owned) __lock_sock(sk); sk->sk_lock.owned = 1; - spin_unlock_bh(&sk->sk_lock.slock); + spin_unlock(&sk->sk_lock.slock); /* * The sk_lock has mutex_lock() semantics here: */ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); + local_bh_enable(); } EXPORT_SYMBOL(lock_sock_nested); diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 750f44f..602cd63 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -121,9 +121,6 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (nlmsg_len(nlh) < sizeof(*req)) return -EINVAL; - if (req->sdiag_family >= AF_MAX) - return -EINVAL; - hndl = sock_diag_lock_handler(req->sdiag_family); if (hndl == NULL) err = -ENOENT; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 21291f1..1b588e2 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -284,7 +284,6 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; - memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); @@ -1043,7 +1042,6 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getets) { struct ieee_ets ets; - memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) @@ -1052,7 +1050,6 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getmaxrate) { struct ieee_maxrate maxrate; - memset(&maxrate, 0, sizeof(maxrate)); err = ops->ieee_getmaxrate(netdev, &maxrate); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, @@ -1064,7 +1061,6 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_getpfc) { struct ieee_pfc pfc; - memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) @@ -1098,7 +1094,6 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) /* get peer info if available */ if (ops->ieee_peer_getets) { struct ieee_ets ets; - memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) @@ -1107,7 +1102,6 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; - memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) @@ -1286,7 +1280,6 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; - memset(&pg, 0, sizeof(pg)); err = ops->cee_peer_getpg(netdev, &pg); if (!err && nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) @@ -1295,7 +1288,6 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) if (ops->cee_peer_getpfc) { struct cee_pfc pfc; - memset(&pfc, 0, sizeof(pfc)); err = ops->cee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, 
DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 76c3d0a..f651da6 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -1234,7 +1234,7 @@ static inline int __init lowpan_netlink_init(void) return rtnl_link_register(&lowpan_link_ops); } -static inline void lowpan_netlink_fini(void) +static inline void __init lowpan_netlink_fini(void) { rtnl_link_unregister(&lowpan_link_ops); } diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index bba5f83..8c2251f 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -84,7 +84,7 @@ (memcmp(addr1, addr2, length >> 3) == 0) /* local link, i.e. FE80::/10 */ -#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80)) +#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE) /* * check whether we can compress the IID to 16 bits, diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index fcf104e..24b384b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -248,12 +248,8 @@ EXPORT_SYMBOL(inet_listen); u32 inet_ehash_secret __read_mostly; EXPORT_SYMBOL(inet_ehash_secret); -u32 ipv6_hash_secret __read_mostly; -EXPORT_SYMBOL(ipv6_hash_secret); - /* - * inet_ehash_secret must be set exactly once, and to a non nul value - * ipv6_hash_secret must be set exactly once. + * inet_ehash_secret must be set exactly once */ void build_ehash_secret(void) { @@ -263,8 +259,7 @@ void build_ehash_secret(void) get_random_bytes(&rnd, sizeof(rnd)); } while (rnd == 0); - if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) - get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); + cmpxchg(&inet_ehash_secret, 0, rnd); } EXPORT_SYMBOL(build_ehash_secret); @@ -1595,7 +1590,7 @@ static const struct net_offload udp_offload = { static const struct net_protocol icmp_protocol = { .handler = icmp_rcv, - .err_handler = icmp_err, + .err_handler = ping_err, .no_policy = 1, .netns_ok = 1, }; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 4cfe34d..3b4f0cd 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -139,6 +139,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) /* skb is pure payload to encrypt */ + err = -ENOMEM; + esp = x->data; aead = esp->aead; alen = crypto_aead_authsize(aead); @@ -174,10 +176,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); - if (!tmp) { - err = -ENOMEM; + if (!tmp) goto error; - } seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index d8bbe94..17ff9fd 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -69,7 +69,6 @@ #include #include #include -#include <linux/sysrq.h> #include #include #include @@ -769,30 +768,6 @@ static void icmp_redirect(struct sk_buff *skb) } /* - * 32bit and 64bit have different timestamp length, so we check for - * the cookie at offset 20 and verify it is repeated at offset 50 - */ -#define CO_POS0 20 -#define CO_POS1 50 -#define CO_SIZE sizeof(int) -#define ICMP_SYSRQ_SIZE 57 - -/* - * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie - * pattern and if it matches send the next byte as a trigger to sysrq.
- */ -static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) -{ - int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); - char *p = skb->data; - - if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && - !memcmp(&cookie, p + CO_POS1, CO_SIZE) && - p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) - handle_sysrq(p[CO_POS0 + CO_SIZE]); -} - -/* * Handle ICMP_ECHO ("ping") requests. * * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo @@ -819,11 +794,6 @@ static void icmp_echo(struct sk_buff *skb) icmp_param.data_len = skb->len; icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); - - if (skb->len == ICMP_SYSRQ_SIZE && - net->ipv4.sysctl_icmp_echo_sysrq) { - icmp_check_sysrq(net, skb); - } } } @@ -964,29 +934,6 @@ error: goto drop; } -void icmp_err(struct sk_buff *skb, u32 info) -{ - struct iphdr *iph = (struct iphdr *)skb->data; - struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); - int type = icmp_hdr(skb)->type; - int code = icmp_hdr(skb)->code; - struct net *net = dev_net(skb->dev); - - /* - * Use ping_err to handle all icmp errors except those - * triggered by ICMP_ECHOREPLY which sent from kernel. - */ - if (icmph->type != ICMP_ECHOREPLY) { - ping_err(skb, info); - return; - } - - if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) - ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0); - else if (type == ICMP_REDIRECT) - ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0); -} - /* * This table is the definition of how we handle ICMP. */ diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 03f5af7..4750d2b 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -21,7 +21,6 @@ #include #include -#include #include static void inet_frag_secret_rebuild(unsigned long dummy) @@ -277,7 +276,6 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, { struct inet_frag_queue *q; struct hlist_node *n; - int depth = 0; hlist_for_each_entry(q, n, &f->hash[hash], list) { if (q->net == nf && f->match(q, key)) { @@ -285,25 +283,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, read_unlock(&f->lock); return q; } - depth++; } read_unlock(&f->lock); - if (depth <= INETFRAGS_MAXDEPTH) - return inet_frag_create(nf, f, key); - else - return ERR_PTR(-ENOBUFS); + return inet_frag_create(nf, f, key); } EXPORT_SYMBOL(inet_frag_find); - -void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, - const char *prefix) -{ - static const char msg[] = "inet_frag_find: Fragment hash bucket" - " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) - ". 
Dropping fragment.\n"; - - if (PTR_ERR(q) == -ENOBUFS) - LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); -} -EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 0fcfee3..eb9d63a 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -255,7 +255,8 @@ static void ip_expire(unsigned long arg) if (!head->dev) goto out_rcu_unlock; - /* skb has no dst, perform route lookup again */ + /* skb dst is stale, drop it, and perform route lookup again */ + skb_dst_drop(head); iph = ip_hdr(head); err = ip_route_input_noref(head, iph->daddr, iph->saddr, iph->tos, head->dev); @@ -298,11 +299,14 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); - return NULL; - } + if (q == NULL) + goto out_nomem; + return container_of(q, struct ipq, q); + +out_nomem: + LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); + return NULL; } /* Is the fragment too far ahead to be part of ipq? */ @@ -524,16 +528,8 @@ found: qp->q.max_size = skb->len + ihl; if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && - qp->q.meat == qp->q.len) { - unsigned long orefdst = skb->_skb_refdst; - - skb->_skb_refdst = 0UL; - err = ip_frag_reasm(qp, prev, dev); - skb->_skb_refdst = orefdst; - return err; - } - - skb_dst_drop(skb); + qp->q.meat == qp->q.len) + return ip_frag_reasm(qp, prev, dev); write_lock(&ip4_frags.lock); list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index a85062b..e81b1ca 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -761,7 +761,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev if (dev->header_ops && dev->type == ARPHRD_IPGRE) { gre_hlen = 0; - tiph = (const struct iphdr *)skb->data; + if (skb->protocol == htons(ETH_P_IP)) + tiph = (const struct iphdr *)skb->data; + else + tiph = &tunnel->parms.iph; } else { gre_hlen = tunnel->hlen; tiph = &tunnel->parms.iph; diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 9100fe0..f6289bf 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -370,6 +370,7 @@ int ip_options_compile(struct net *net, } switch (optptr[3]&0xF) { case IPOPT_TS_TSONLY: + opt->ts = optptr - iph; if (skb) timeptr = &optptr[optptr[2]-1]; opt->ts_needtime = 1; @@ -380,6 +381,7 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 2; goto error; } + opt->ts = optptr - iph; if (rt) { spec_dst_fill(&spec_dst, skb); memcpy(&optptr[optptr[2]-1], &spec_dst, 4); @@ -394,6 +396,7 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 2; goto error; } + opt->ts = optptr - iph; { __be32 addr; memcpy(&addr, &optptr[optptr[2]-1], 4); @@ -426,12 +429,12 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 3; goto error; } + opt->ts = optptr - iph; if (skb) { optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); opt->is_changed = 1; } } - opt->ts = optptr - iph; break; case IPOPT_RA: if (optlen < 4) { diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 253692b..3e98ed2 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1508,8 +1508,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, if (IS_ERR(rt)) return; - get_cpu_light(); - inet = &__get_cpu_var(unicast_sock); + inet = 
&get_cpu_var(unicast_sock); inet->tos = arg->tos; sk = &inet->sk; @@ -1533,7 +1532,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, ip_push_pending_frames(sk, &fl4); } - put_cpu_light(); + put_cpu_var(unicast_sock); ip_rt_put(rt); } diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index c49dcd0..c301300 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -66,12 +66,6 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4, return dev_match; } -static bool rpfilter_is_local(const struct sk_buff *skb) -{ - const struct rtable *rt = skb_rtable(skb); - return rt && (rt->rt_flags & RTCF_LOCAL); -} - static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_rpfilter_info *info; @@ -82,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) info = par->matchinfo; invert = info->flags & XT_RPFILTER_INVERT; - if (rpfilter_is_local(skb)) + if (par->in->flags & IFF_LOOPBACK) return true ^ invert; iph = ip_hdr(skb); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index dc454cc..6f9c072 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -322,8 +322,8 @@ void ping_err(struct sk_buff *skb, u32 info) struct iphdr *iph = (struct iphdr *)skb->data; struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); struct inet_sock *inet_sock; - int type = icmp_hdr(skb)->type; - int code = icmp_hdr(skb)->code; + int type = icmph->type; + int code = icmph->code; struct net *net = dev_net(skb->dev); struct sock *sk; int harderr; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index f962f19..b236ef0 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -348,8 +348,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, * hasn't changed since we received the original syn, but I see * no easy way to do this. */ - flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, - RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, + flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), + RT_SCOPE_UNIVERSE, IPPROTO_TCP, inet_sk_flowi_flags(sk), (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, ireq->loc_addr, th->source, th->dest); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 44bf3b0..d84400b 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -815,13 +815,6 @@ static struct ctl_table ipv4_net_table[] = { .proc_handler = proc_dointvec }, { - .procname = "icmp_echo_sysrq", - .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { .procname = "icmp_ignore_bogus_error_responses", .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, .maxlen = sizeof(int), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 45b63ca..2aa69c8 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -773,7 +773,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) * Make sure that we have exactly size bytes * available to the caller, no more, no less. 
*/ - skb->reserved_tailroom = skb->end - skb->tail - size; + skb->avail_size = size; return skb; } __kfree_skb(skb); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b4e8b79..ad70a96 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -116,7 +116,6 @@ int sysctl_tcp_early_retrans __read_mostly = 2; #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ -#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) @@ -2065,8 +2064,11 @@ void tcp_enter_loss(struct sock *sk, int how) if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); - tp->undo_marker = tp->snd_una; - if (how) { + if (!how) { + /* Push undo marker, if it was plain RTO and nothing + * was retransmitted. */ + tp->undo_marker = tp->snd_una; + } else { tp->sacked_out = 0; tp->fackets_out = 0; } @@ -3573,27 +3575,6 @@ static void tcp_send_challenge_ack(struct sock *sk) } } -static void tcp_store_ts_recent(struct tcp_sock *tp) -{ - tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; - tp->rx_opt.ts_recent_stamp = get_seconds(); -} - -static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) -{ - if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { - /* PAWS bug workaround wrt. ACK frames, the PAWS discard - * extra check below makes sure this can only happen - * for pure ACK frames. -DaveM - * - * Not only, also it occurs for expired timestamps. - */ - - if (tcp_paws_check(&tp->rx_opt, 0)) - tcp_store_ts_recent(tp); - } -} - /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) { @@ -3646,12 +3627,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) prior_fackets = tp->fackets_out; prior_in_flight = tcp_packets_in_flight(tp); - /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - if (flag & FLAG_UPDATE_TS_RECENT) - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. * No more checks are required. @@ -3968,6 +3943,27 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif +static inline void tcp_store_ts_recent(struct tcp_sock *tp) +{ + tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; + tp->rx_opt.ts_recent_stamp = get_seconds(); +} + +static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) +{ + if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { + /* PAWS bug workaround wrt. ACK frames, the PAWS discard + * extra check below makes sure this can only happen + * for pure ACK frames. -DaveM + * + * Not only, also it occurs for expired timestamps. + */ + + if (tcp_paws_check(&tp->rx_opt, 0)) + tcp_store_ts_recent(tp); + } +} + /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM * * It is not fatal. If this ACK does _not_ change critical state (seqs, window) @@ -5502,9 +5498,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, if (tcp_checksum_complete_user(sk, skb)) goto csum_error; - if ((int)skb->truesize > sk->sk_forward_alloc) - goto step5; - /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. 
* Hence, check seq<=rcv_wup reduces to: @@ -5516,6 +5509,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_rcv_rtt_measure_ts(sk, skb); + if ((int)skb->truesize > sk->sk_forward_alloc) + goto step5; + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ @@ -5563,9 +5559,14 @@ slow_path: return 0; step5: - if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) + if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ @@ -5999,8 +6000,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* step 5: check the ACK field */ if (true) { - int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | - FLAG_UPDATE_TS_RECENT) > 0; + int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; switch (sk->sk_state) { case TCP_SYN_RECV: @@ -6151,6 +6151,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } } + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + /* step 6: check the URG bit */ tcp_urg(sk, skb, th); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d9130a9..eadb693 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -274,6 +274,13 @@ static void tcp_v4_mtu_reduced(struct sock *sk) struct inet_sock *inet = inet_sk(sk); u32 mtu = tcp_sk(sk)->mtu_info; + /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs + * send out by Linux are always <576bytes so they should go through + * unfragmented). + */ + if (sk->sk_state == TCP_LISTEN) + return; + dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; @@ -401,13 +408,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ - /* We are not interested in TCP_LISTEN and open_requests - * (SYN-ACKs send out by Linux are always <576bytes so - * they should go through unfragmented). - */ - if (sk->sk_state == TCP_LISTEN) - goto out; - tp->mtu_info = info; if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a9f50ee..5d45159 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1298,6 +1298,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) eat = min_t(int, len, skb_headlen(skb)); if (eat) { __skb_pull(skb, eat); + skb->avail_size -= eat; len -= eat; if (!len) return; @@ -1350,8 +1351,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) return 0; } -/* Calculate MSS not accounting any TCP options. */ -static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) +/* Calculate MSS. Not accounting for SACKs here. */ +int tcp_mtu_to_mss(struct sock *sk, int pmtu) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1380,15 +1381,11 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) /* Then reserve room for full set of TCP options and 8 bytes of data */ if (mss_now < 48) mss_now = 48; - return mss_now; -} -/* Calculate MSS. Not accounting for SACKs here. 
*/ -int tcp_mtu_to_mss(struct sock *sk, int pmtu) -{ - /* Subtract TCP options size, not including SACKs */ - return __tcp_mtu_to_mss(sk, pmtu) - - (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); + /* Now subtract TCP options size, not including SACKs */ + mss_now -= tp->tcp_header_len - sizeof(struct tcphdr); + + return mss_now; } /* Inverse of above */ @@ -1809,11 +1806,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) goto send_now; } - /* Ok, it looks like it is advisable to defer. - * Do not rearm the timer if already set to not break TCP ACK clocking. - */ - if (!tp->tso_deferred) - tp->tso_deferred = 1 | (jiffies << 1); + /* Ok, it looks like it is advisable to defer. */ + tp->tso_deferred = 1 | (jiffies << 1); return true; @@ -2388,12 +2382,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) */ TCP_SKB_CB(skb)->when = tcp_time_stamp; - /* make sure skb->data is aligned on arches that require it - * and check if ack-trimming & collapsing extended the headroom - * beyond what csum_start can cover. - */ - if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || - skb_headroom(skb) >= 0xFFFF)) { + /* make sure skb->data is aligned on arches that require it */ + if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : @@ -2940,7 +2930,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) */ if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; - space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - + space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - MAX_TCP_OPTION_SPACE; syn_data = skb_copy_expand(syn, skb_headroom(syn), space, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e8676c2..1b5d8cb 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2525,9 +2525,6 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) static void init_loopback(struct net_device *dev) { struct inet6_dev *idev; - struct net_device *sp_dev; - struct inet6_ifaddr *sp_ifa; - struct rt6_info *sp_rt; /* ::1 */ @@ -2539,30 +2536,6 @@ static void init_loopback(struct net_device *dev) } add_addr(idev, &in6addr_loopback, 128, IFA_HOST); - - /* Add routes to other interface's IPv6 addresses */ - for_each_netdev(dev_net(dev), sp_dev) { - if (!strcmp(sp_dev->name, dev->name)) - continue; - - idev = __in6_dev_get(sp_dev); - if (!idev) - continue; - - read_lock_bh(&idev->lock); - list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { - - if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) - continue; - - sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); - - /* Failure cases are ignored */ - if (!IS_ERR(sp_rt)) - ip6_ins_rt(sp_rt); - } - read_unlock_bh(&idev->lock); - } } static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) @@ -4814,20 +4787,26 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev) static int __net_init addrconf_init_net(struct net *net) { - int err = -ENOMEM; + int err; struct ipv6_devconf *all, *dflt; - all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL); - if (all == NULL) - goto err_alloc_all; + err = -ENOMEM; + all = &ipv6_devconf; + dflt = &ipv6_devconf_dflt; - dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); - if (dflt == NULL) - goto err_alloc_dflt; + if (!net_eq(net, &init_net)) { + all = kmemdup(all, 
sizeof(ipv6_devconf), GFP_KERNEL); + if (all == NULL) + goto err_alloc_all; - /* these will be inherited by all namespaces */ - dflt->autoconf = ipv6_defaults.autoconf; - dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; + dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); + if (dflt == NULL) + goto err_alloc_dflt; + } else { + /* these will be inherited by all namespaces */ + dflt->autoconf = ipv6_defaults.autoconf; + dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; + } net->ipv6.devconf_all = all; net->ipv6.devconf_dflt = dflt; diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index dee9964..a52d864 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -118,27 +118,6 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt ipv6_addr_loopback(&hdr->daddr)) goto err; - /* RFC4291 Errata ID: 3480 - * Interface-Local scope spans only a single interface on a - * node and is useful only for loopback transmission of - * multicast. Packets with interface-local scope received - * from another node must be discarded. - */ - if (!(skb->pkt_type == PACKET_LOOPBACK || - dev->flags & IFF_LOOPBACK) && - ipv6_addr_is_multicast(&hdr->daddr) && - IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) - goto err; - - /* RFC4291 2.7 - * Nodes must not originate a packet to a multicast address whose scope - * field contains the reserved value 0; if such a packet is received, it - * must be silently dropped. - */ - if (ipv6_addr_is_multicast(&hdr->daddr) && - IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0) - goto err; - /* * RFC4291 2.7 * Multicast addresses must not be used as source addresses in IPv6 @@ -291,8 +270,7 @@ int ip6_mc_input(struct sk_buff *skb) * IPv6 multicast router mode is now supported ;) */ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && - !(ipv6_addr_type(&hdr->daddr) & - (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && + !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 0ea43c7..83acc14 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c @@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt, if (pfx_len - i >= 32) mask = 0; else - mask = htonl((1 << (i - pfx_len + 32)) - 1); + mask = htonl(~((1 << (pfx_len - i)) - 1)); idx = i / 32; addr->s6_addr32[idx] &= mask; diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index e0983f3..5060d54 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -71,12 +71,6 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb, return ret; } -static bool rpfilter_is_local(const struct sk_buff *skb) -{ - const struct rt6_info *rt = (const void *) skb_dst(skb); - return rt && (rt->rt6i_flags & RTF_LOCAL); -} - static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_rpfilter_info *info = par->matchinfo; @@ -84,7 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) struct ipv6hdr *iph; bool invert = info->flags & XT_RPFILTER_INVERT; - if (rpfilter_is_local(skb)) + if (par->in->flags & IFF_LOOPBACK) return true ^ invert; iph = ipv6_hdr(skb); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 2f3a018..3dacecc 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ 
b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -14,8 +14,6 @@ * 2 of the License, or (at your option) any later version. */ -#define pr_fmt(fmt) "IPv6-nf: " fmt - #include #include #include @@ -182,11 +180,13 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id, q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); local_bh_enable(); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); - return NULL; - } + if (q == NULL) + goto oom; + return container_of(q, struct frag_queue, q); + +oom: + return NULL; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 7a610a6..e5253ec 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -26,9 +26,6 @@ * YOSHIFUJI,H. @USAGI Always remove fragment header to * calculate ICV correctly. */ - -#define pr_fmt(fmt) "IPv6: " fmt - #include #include #include @@ -200,10 +197,9 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + if (q == NULL) return NULL; - } + return container_of(q, struct frag_queue, q); } @@ -342,17 +338,8 @@ found: } if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && - fq->q.meat == fq->q.len) { - int res; - unsigned long orefdst = skb->_skb_refdst; - - skb->_skb_refdst = 0UL; - res = ip6_frag_reasm(fq, prev, dev); - skb->_skb_refdst = orefdst; - return res; - } - - skb_dst_drop(skb); + fq->q.meat == fq->q.len) + return ip6_frag_reasm(fq, prev, dev); write_lock(&ip6_frags.lock); list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 5845613..363d8b7 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -300,7 +300,6 @@ static void ip6_dst_destroy(struct dst_entry *dst) { struct rt6_info *rt = (struct rt6_info *)dst; struct inet6_dev *idev = rt->rt6i_idev; - struct dst_entry *from = dst->from; if (rt->n) neigh_release(rt->n); @@ -313,8 +312,8 @@ static void ip6_dst_destroy(struct dst_entry *dst) in6_dev_put(idev); } - dst->from = NULL; - dst_release(from); + if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) + dst_release(dst->from); if (rt6_has_peer(rt)) { struct inet_peer *peer = rt6_peer_ptr(rt); @@ -1055,6 +1054,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori rt->rt6i_gateway = ort->rt6i_gateway; rt->rt6i_flags = ort->rt6i_flags; + rt6_clean_expires(rt); rt->rt6i_metric = 0; memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); @@ -1859,6 +1859,8 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == (RTF_DEFAULT | RTF_ADDRCONF)) rt6_set_from(rt, ort); + else + rt6_clean_expires(rt); rt->rt6i_metric = 0; #ifdef CONFIG_IPV6_SUBTREES @@ -1990,8 +1992,7 @@ void rt6_purge_dflt_routers(struct net *net) restart: read_lock_bh(&table->tb6_lock); for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { - if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) && - (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) { + if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) { dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); ip6_del_rt(rt); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 89dfedd..4f435371 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -386,17 +386,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (dst) 
dst->ops->redirect(dst, sk, skb); - goto out; } if (type == ICMPV6_PKT_TOOBIG) { - /* We are not interested in TCP_LISTEN and open_requests - * (SYN-ACKs send out by Linux are always <576bytes so - * they should go through unfragmented). - */ - if (sk->sk_state == TCP_LISTEN) - goto out; - tp->mtu_info = ntohl(info); if (!sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk); diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 8f32718..c984413 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -236,8 +236,6 @@ static void xfrm6_dst_destroy(struct dst_entry *dst) { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - if (likely(xdst->u.rt6.n)) - neigh_release(xdst->u.rt6.n); if (likely(xdst->u.rt6.rt6i_idev)) in6_dev_put(xdst->u.rt6.rt6i_idev); dst_destroy_metrics_generic(dst); diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 3c9bd59..b833677 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1386,8 +1386,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); - msg->msg_namelen = 0; - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) @@ -2586,10 +2584,8 @@ bed: NULL, NULL, NULL); /* Check if the we got some results */ - if (!self->cachedaddr) { - err = -EAGAIN; /* Didn't find any devices */ - goto out; - } + if (!self->cachedaddr) + return -EAGAIN; /* Didn't find any devices */ daddr = self->cachedaddr; /* Cleanup */ self->cachedaddr = 0; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 625bc50..cd6f7a9 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1331,8 +1331,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; - msg->msg_namelen = 0; - if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) && diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 9e1822e..8ee4a86 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -684,7 +684,6 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, lsa->l2tp_addr = ipv6_hdr(skb)->saddr; lsa->l2tp_flowinfo = 0; lsa->l2tp_scope_id = 0; - lsa->l2tp_conn_id = 0; if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) lsa->l2tp_scope_id = IP6CB(skb)->iif; } diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 044e9e1..716605c 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -355,7 +355,6 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh l2tp_xmit_skb(session, skb, session->hdr_len); sock_put(ps->tunnel_sock); - sock_put(sk); return error; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 48aaa89..8870988 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -720,8 +720,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, int target; /* Read at least this many bytes */ long timeo; - msg->msg_namelen = 0; - lock_sock(sk); copied = -ENOTCONN; if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 49c48c6..0479c64 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2499,7 +2499,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, list_del(&dep->list); mutex_unlock(&local->mtx); - ieee80211_roc_notify_destroy(dep, true); + ieee80211_roc_notify_destroy(dep); return 0; } @@ -2539,7 +2539,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 49c48c6..0479c64 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2499,7 +2499,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
 			list_del(&dep->list);
 			mutex_unlock(&local->mtx);

-			ieee80211_roc_notify_destroy(dep, true);
+			ieee80211_roc_notify_destroy(dep);

 			return 0;
 		}
@@ -2539,7 +2539,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
 		ieee80211_start_next_roc(local);
 		mutex_unlock(&local->mtx);

-		ieee80211_roc_notify_destroy(found, true);
+		ieee80211_roc_notify_destroy(found);
 	} else {
 		/* work may be pending so use it all the time */
 		found->abort = true;
@@ -2549,8 +2549,6 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,

 		/* work will clean up etc */
 		flush_delayed_work(&found->work);
-		WARN_ON(!found->to_be_freed);
-		kfree(found);
 	}

 	return 0;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 55d8f89..2ed065c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -346,7 +346,6 @@ struct ieee80211_roc_work {
 	struct ieee80211_channel *chan;

 	bool started, abort, hw_begun, notified;
-	bool to_be_freed;

 	unsigned long hw_start_time;

@@ -1364,7 +1363,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local);
 void ieee80211_roc_setup(struct ieee80211_local *local);
 void ieee80211_start_next_roc(struct ieee80211_local *local);
 void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
 void ieee80211_sw_roc_work(struct work_struct *work);
 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a1a7997..5107248 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1812,8 +1812,6 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
 			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
 			       transmit_frame, frame_buf);
 	ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
-	ieee80211_wake_queues_by_reason(&sdata->local->hw,
-					IEEE80211_QUEUE_STOP_REASON_CSA);
 	mutex_unlock(&ifmgd->mtx);

 	/*
@@ -1858,6 +1856,8 @@ static void ieee80211_csa_connection_drop_work(struct work_struct *work)
 		container_of(work, struct ieee80211_sub_if_data,
 			     u.mgd.csa_connection_drop_work);

+	ieee80211_wake_queues_by_reason(&sdata->local->hw,
+					IEEE80211_QUEUE_STOP_REASON_CSA);
 	__ieee80211_disconnect(sdata, true);
 }

@@ -3401,10 +3401,6 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	ret = 0;

 out:
-	/* don't print the message below for VHT mismatch if VHT is disabled */
-	if (ret & IEEE80211_STA_DISABLE_VHT)
-		vht_chandef = *chandef;
-
 	while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
 					IEEE80211_CHAN_DISABLED)) {
 		if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
@@ -3723,16 +3719,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
 	/* prep auth_data so we don't go into idle on disassoc */
 	ifmgd->auth_data = auth_data;

-	if (ifmgd->associated) {
-		u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
-
-		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
-				       WLAN_REASON_UNSPECIFIED,
-				       false, frame_buf);
-
-		__cfg80211_send_deauth(sdata->dev, frame_buf,
-				       sizeof(frame_buf));
-	}
+	if (ifmgd->associated)
+		ieee80211_set_disassoc(sdata, 0, 0, false, NULL);

 	sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);

@@ -3791,16 +3779,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,

 	mutex_lock(&ifmgd->mtx);

-	if (ifmgd->associated) {
-		u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
-
-		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
-				       WLAN_REASON_UNSPECIFIED,
-				       false, frame_buf);
-
-		__cfg80211_send_deauth(sdata->dev, frame_buf,
-				       sizeof(frame_buf));
-	}
+	if (ifmgd->associated)
+		ieee80211_set_disassoc(sdata, 0, 0, false, NULL);

 	if (ifmgd->auth_data && !ifmgd->auth_data->done) {
 		err = -EBUSY;
@@ -4092,17 +4072,6 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

-	/*
-	 * Make sure some work items will not run after this,
-	 * they will not do anything but might not have been
-	 * cancelled when disconnecting.
-	 */
-	cancel_work_sync(&ifmgd->monitor_work);
-	cancel_work_sync(&ifmgd->beacon_connection_loss_work);
-	cancel_work_sync(&ifmgd->request_smps_work);
-	cancel_work_sync(&ifmgd->csa_connection_drop_work);
-	cancel_work_sync(&ifmgd->chswitch_work);
-
 	mutex_lock(&ifmgd->mtx);
 	if (ifmgd->assoc_data)
 		ieee80211_destroy_assoc_data(sdata, false);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 7acbdaa..a3ad4c3 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -299,13 +299,10 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
 	}
 }

-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
 {
 	struct ieee80211_roc_work *dep, *tmp;

-	if (WARN_ON(roc->to_be_freed))
-		return;
-
 	/* was never transmitted */
 	if (roc->frame) {
 		cfg80211_mgmt_tx_status(&roc->sdata->wdev,
@@ -321,12 +318,9 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)
 				GFP_KERNEL);

 	list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
-		ieee80211_roc_notify_destroy(dep, true);
+		ieee80211_roc_notify_destroy(dep);

-	if (free)
-		kfree(roc);
-	else
-		roc->to_be_freed = true;
+	kfree(roc);
 }

 void ieee80211_sw_roc_work(struct work_struct *work)
@@ -339,9 +333,6 @@ void ieee80211_sw_roc_work(struct work_struct *work)

 	mutex_lock(&local->mtx);

-	if (roc->to_be_freed)
-		goto out_unlock;
-
 	if (roc->abort)
 		goto finish;

@@ -381,7 +372,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 finish:
 		list_del(&roc->list);
 		started = roc->started;
-		ieee80211_roc_notify_destroy(roc, !roc->abort);
+		ieee80211_roc_notify_destroy(roc);

 		if (started) {
 			drv_flush(local, false);
@@ -421,7 +412,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work)

 	list_del(&roc->list);

-	ieee80211_roc_notify_destroy(roc, true);
+	ieee80211_roc_notify_destroy(roc);

 	/* if there's another roc, start it now */
 	ieee80211_start_next_roc(local);
@@ -471,14 +462,12 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
 	list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
 		if (local->ops->remain_on_channel) {
 			list_del(&roc->list);
-			ieee80211_roc_notify_destroy(roc, true);
+			ieee80211_roc_notify_destroy(roc);
 		} else {
 			ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);

 			/* work will clean up etc */
 			flush_delayed_work(&roc->work);
-			WARN_ON(!roc->to_be_freed);
-			kfree(roc);
 		}
 	}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 64619f4..79a48f3 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -52,8 +52,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 	ieee80211_stop_queues_by_reason(hw,
 			IEEE80211_QUEUE_STOP_REASON_SUSPEND);

-	/* flush out all packets and station cleanup call_rcu()s */
-	rcu_barrier();
+	/* flush out all packets */
+	synchronize_net();

 	drv_flush(local, false);
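The pm.c hunk above swaps rcu_barrier() back to synchronize_net(). The distinction matters: synchronize_net() only waits for one RCU grace period, while rcu_barrier() additionally waits for already-queued call_rcu() callbacks to finish executing. A sketch of the difference, using the generic kernel RCU API on an illustrative struct:

    struct item {
        struct rcu_head rcu;
        /* ... payload ... */
    };

    static void item_free_rcu(struct rcu_head *head)
    {
        kfree(container_of(head, struct item, rcu));
    }

    static void teardown(struct item *p)
    {
        call_rcu(&p->rcu, item_free_rcu);  /* deferred free */
        synchronize_rcu();   /* readers are gone; callback may still be queued */
        rcu_barrier();       /* item_free_rcu() has now definitely run */
    }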
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c58f3cd..580704e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3144,7 +3144,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

-	WARN_ON_ONCE_NONRT(softirq_count() == 0);
+	WARN_ON_ONCE(softirq_count() == 0);

 	if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
 		goto drop;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c8b32a0..ca9fde1 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -756,7 +756,6 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
 	struct ieee80211_local *local;
 	struct ieee80211_sub_if_data *sdata;
 	int ret, i;
-	bool have_key = false;

 	might_sleep();

@@ -784,19 +783,12 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
 	list_del_rcu(&sta->list);

 	mutex_lock(&local->key_mtx);
-	for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+	for (i = 0; i < NUM_DEFAULT_KEYS; i++)
 		__ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
-		have_key = true;
-	}
-	if (sta->ptk) {
+	if (sta->ptk)
 		__ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
-		have_key = true;
-	}
 	mutex_unlock(&local->key_mtx);

-	if (!have_key)
-		synchronize_net();
-
 	sta->dead = true;

 	local->num_sta--;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index c646ec8..a9c488b 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -20,17 +20,11 @@
 #include <linux/proc_fs.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/locallock.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>

 #include "nf_internals.h"

-#ifdef CONFIG_PREEMPT_RT_BASE
-DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
-EXPORT_PER_CPU_SYMBOL(xt_write_lock);
-#endif
-
 static DEFINE_MUTEX(afinfo_mutex);

 const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 38ca630..6d6d8f2 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1470,8 +1470,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
 	if (ret == -EAGAIN)
 		ret = 1;

-	return (ret < 0 && ret != -ENOTEMPTY) ? ret :
-		ret > 0 ? 0 : -IPSET_ERR_EXIST;
+	return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
 }

 /* Get headed data of a set */
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 09c744a..8371c2b 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -174,13 +174,9 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 {
 	const struct set_elem *e = list_set_elem(map, i);

-	if (e->id != IPSET_INVALID_ID) {
-		const struct set_elem *x = list_set_elem(map, map->size - 1);
-
-		/* Last element replaced or pushed off */
-		if (x->id != IPSET_INVALID_ID)
-			ip_set_put_byindex(x->id);
-	}
+	if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
+		/* Last element replaced: e.g. add new,before,last */
+		ip_set_put_byindex(e->id);
 	if (with_timeout(map->timeout))
 		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
 	else
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index e5920fb..12475ef 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -37,10 +37,14 @@ static int get_callid(const char *dptr, unsigned int dataoff,
 		if (ret > 0)
 			break;
 		if (!ret)
-			return -EINVAL;
+			return 0;
 		dataoff += *matchoff;
 	}

+	/* Empty callid is useless */
+	if (!*matchlen)
+		return -EINVAL;
+
 	/* Too large is useless */
 	if (*matchlen > IP_VS_PEDATA_MAXLEN)
 		return -EINVAL;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 91527d5..884f2b3 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -236,9 +236,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 		/* We only allow helper re-assignment of the same sort since
 		 * we cannot reallocate the helper extension area.
 		 */
-		struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);
-
-		if (tmp && tmp->help != helper->help) {
+		if (help->helper != helper) {
 			RCU_INIT_POINTER(help->helper, NULL);
 			goto out;
 		}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index a081915..627b0e5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1705,9 +1705,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
 			enum ip_conntrack_events events;

-			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
-				return -EINVAL;
-
 			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
 							&rtuple, u3);
 			if (IS_ERR(ct))
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index b4e0d1c..df8f4f2 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1547,7 +1547,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
 		msglen = origlen = end - dptr;
 		if (msglen > datalen)
-			return NF_ACCEPT;
+			return NF_DROP;

 		ret = process_sip_msg(skb, ct, protoff, dataoff,
 				      &dptr, &msglen);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 4bc2aaf..5f2f910 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -468,22 +468,33 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);
 struct nf_nat_proto_clean {
 	u8	l3proto;
 	u8	l4proto;
+	bool	hash;
 };

-/* kill conntracks with affected NAT section */
-static int nf_nat_proto_remove(struct nf_conn *i, void *data)
+/* Clear NAT section of all conntracks, in case we're loaded again. */
+static int nf_nat_proto_clean(struct nf_conn *i, void *data)
 {
 	const struct nf_nat_proto_clean *clean = data;
 	struct nf_conn_nat *nat = nfct_nat(i);

 	if (!nat)
 		return 0;
-
+	if (!(i->status & IPS_SRC_NAT_DONE))
+		return 0;
 	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
 	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
 		return 0;

-	return i->status & IPS_NAT_MASK ? 1 : 0;
+	if (clean->hash) {
+		spin_lock_bh(&nf_nat_lock);
+		hlist_del_rcu(&nat->bysource);
+		spin_unlock_bh(&nf_nat_lock);
+	} else {
+		memset(nat, 0, sizeof(*nat));
+		i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
+			       IPS_SEQ_ADJUST);
+	}
+	return 0;
 }

 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
@@ -495,8 +506,16 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 	struct net *net;

 	rtnl_lock();
+	/* Step 1 - remove from bysource hash */
+	clean.hash = true;
 	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+	synchronize_rcu();
+
+	/* Step 2 - clean NAT section */
+	clean.hash = false;
+	for_each_net(net)
+		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
 	rtnl_unlock();
 }

@@ -508,9 +527,16 @@ static void nf_nat_l3proto_clean(u8 l3proto)
 	struct net *net;

 	rtnl_lock();
+	/* Step 1 - remove from bysource hash */
+	clean.hash = true;
+	for_each_net(net)
+		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+	synchronize_rcu();

+	/* Step 2 - clean NAT section */
+	clean.hash = false;
 	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
 	rtnl_unlock();
 }

@@ -748,7 +774,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	struct nf_nat_proto_clean clean = {};

-	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+	nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
 	synchronize_rcu();
 	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
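The restored nf_nat code above is a two-pass RCU teardown: pass one unhashes entries with hlist_del_rcu() (clean->hash = true), synchronize_rcu() waits out any lockless lookups still walking the bysource hash, and only then does pass two scrub the NAT extension. Schematically (the helper names below are placeholders, not kernel symbols):

    static void unlink_all_entries(void); /* placeholder: hlist_del_rcu() per entry */
    static void scrub_all_entries(void);  /* placeholder: memset() per entry */

    static void two_pass_teardown(void)
    {
        unlink_all_entries();   /* pass 1: unhash under the lock */
        synchronize_rcu();      /* wait out lockless readers of the hash */
        scrub_all_entries();    /* pass 2: nothing can reach the entries now */
    }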
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 8a6c6ea..847d495 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1189,6 +1189,8 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 	struct netlbl_unlhsh_walk_arg cb_arg;
 	u32 skip_bkt = cb->args[0];
 	u32 skip_chain = cb->args[1];
+	u32 skip_addr4 = cb->args[2];
+	u32 skip_addr6 = cb->args[3];
 	u32 iter_bkt;
 	u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
 	struct netlbl_unlhsh_iface *iface;
@@ -1213,7 +1215,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 				continue;
 			netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
-				if (iter_addr4++ < cb->args[2])
+				if (iter_addr4++ < skip_addr4)
 					continue;
 				if (netlbl_unlabel_staticlist_gen(
 					      NLBL_UNLABEL_C_STATICLIST,
@@ -1229,7 +1231,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_IPV6)
 			netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
-				if (iter_addr6++ < cb->args[3])
+				if (iter_addr6++ < skip_addr6)
 					continue;
 				if (netlbl_unlabel_staticlist_gen(
 					      NLBL_UNLABEL_C_STATICLIST,
@@ -1248,10 +1250,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,

 unlabel_staticlist_return:
 	rcu_read_unlock();
-	cb->args[0] = iter_bkt;
-	cb->args[1] = iter_chain;
-	cb->args[2] = iter_addr4;
-	cb->args[3] = iter_addr6;
+	cb->args[0] = skip_bkt;
+	cb->args[1] = skip_chain;
+	cb->args[2] = skip_addr4;
+	cb->args[3] = skip_addr6;
 	return skb->len;
 }

@@ -1271,9 +1273,12 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 {
 	struct netlbl_unlhsh_walk_arg cb_arg;
 	struct netlbl_unlhsh_iface *iface;
-	u32 iter_addr4 = 0, iter_addr6 = 0;
+	u32 skip_addr4 = cb->args[0];
+	u32 skip_addr6 = cb->args[1];
+	u32 iter_addr4 = 0;
 	struct netlbl_af4list *addr4;
 #if IS_ENABLED(CONFIG_IPV6)
+	u32 iter_addr6 = 0;
 	struct netlbl_af6list *addr6;
 #endif

@@ -1287,7 +1292,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 		goto unlabel_staticlistdef_return;

 	netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
-		if (iter_addr4++ < cb->args[0])
+		if (iter_addr4++ < skip_addr4)
 			continue;
 		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
 						  iface,
@@ -1300,7 +1305,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
-		if (iter_addr6++ < cb->args[1])
+		if (iter_addr6++ < skip_addr6)
 			continue;
 		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
 						  iface,
@@ -1315,8 +1320,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,

 unlabel_staticlistdef_return:
 	rcu_read_unlock();
-	cb->args[0] = iter_addr4;
-	cb->args[1] = iter_addr6;
+	cb->args[0] = skip_addr4;
+	cb->args[1] = skip_addr6;
 	return skb->len;
 }
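The netlabel hunks above manipulate the standard netlink dump-resume protocol: a dump callback is invoked repeatedly until the table is exhausted, and cb->args[] is the only state carried between invocations, so the callback must skip entries it already emitted and checkpoint its position before returning. A condensed sketch (my_list, my_entry, and my_emit() are hypothetical):

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
        u32 skip = cb->args[0];   /* entries handled by earlier passes */
        u32 iter = 0;
        struct my_entry *entry;

        list_for_each_entry(entry, &my_list, list) {
            if (iter < skip) {
                iter++;           /* already sent in a previous pass */
                continue;
            }
            if (my_emit(skb, entry) < 0)
                break;            /* skb full: resume from here next pass */
            iter++;
        }
        cb->args[0] = iter;       /* checkpoint for the next invocation */
        return skb->len;
    }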
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 5a55be3..f2aabb6 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -142,7 +142,6 @@ int genl_register_mc_group(struct genl_family *family,
 	int err = 0;

 	BUG_ON(grp->name[0] == '\0');
-	BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);

 	genl_lock();
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 14c106b..7261eb8 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1177,7 +1177,6 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
 	}

 	if (sax != NULL) {
-		memset(sax, 0, sizeof(sax));
 		sax->sax25_family = AF_NETROM;
 		skb_copy_from_linear_data_offset(skb, 7,
 			sax->sax25_call.ax25_call, AX25_ADDR_LEN);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 48fb1de..fea22eb 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -644,8 +644,6 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,

 	pr_debug("%p %zu\n", sk, len);

-	msg->msg_namelen = 0;
-
 	lock_sock(sk);

 	if (sk->sk_state == LLCP_CLOSED &&
@@ -686,7 +684,6 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);

-		memset(&sockaddr, 0, sizeof(sockaddr));
 		sockaddr.sa_family = AF_NFC;
 		sockaddr.nfc_protocol = NFC_PROTO_NFC_DEP;
 		sockaddr.dsap = ui_cb->dsap;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 92a2359..c111bd0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -88,7 +88,6 @@
 #include <linux/virtio_net.h>
 #include <linux/errqueue.h>
 #include <linux/net_tstamp.h>
-#include <linux/delay.h>

 #ifdef CONFIG_INET
 #include <net/inet_common.h>
@@ -554,7 +553,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
 	if (BLOCK_NUM_PKTS(pbd)) {
 		while (atomic_read(&pkc->blk_fill_in_prog)) {
 			/* Waiting for skb_copy_bits to finish... */
-			cpu_chill();
+			cpu_relax();
 		}
 	}

@@ -808,7 +807,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 		if (!(status & TP_STATUS_BLK_TMO)) {
 			while (atomic_read(&pkc->blk_fill_in_prog)) {
 				/* Waiting for skb_copy_bits to finish... */
-				cpu_chill();
+				cpu_relax();
 			}
 		}
 		prb_close_block(pkc, pbd, po, status);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 5a44c6e..e8fdb17 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -34,7 +34,6 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/rculist.h>
-#include <linux/delay.h>

 #include "rds.h"
 #include "ib.h"
@@ -287,7 +286,7 @@ static inline void wait_clean_list_grace(void)
 	for_each_online_cpu(cpu) {
 		flag = &per_cpu(clean_list_grace, cpu);
 		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
-			cpu_chill();
+			cpu_relax();
 	}
 }
diff --git a/net/rds/message.c b/net/rds/message.c
index aff589c..f0a4658 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -197,9 +197,6 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
 {
 	struct rds_message *rm;

-	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
-		return NULL;
-
 	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
 	if (!rm)
 		goto out;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 7f645d1..c4719ce 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1257,7 +1257,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

 	if (srose != NULL) {
-		memset(srose, 0, msg->msg_namelen);
 		srose->srose_family = AF_ROSE;
 		srose->srose_addr   = rose->dest_addr;
 		srose->srose_call   = rose->dest_call;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index ced81a1..0e19948 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -962,11 +962,8 @@ cbq_dequeue(struct Qdisc *sch)
 		cbq_update(q);
 		if ((incr -= incr2) < 0)
 			incr = 0;
-		q->now += incr;
-	} else {
-		if (now > q->now)
-			q->now = now;
 	}
+	q->now += incr;
 	q->now_rt = now;

 	for (;;) {
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 5578628..4e606fc 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		flow->deficit = q->quantum;
 		flow->dropped = 0;
 	}
-	if (++sch->q.qlen <= sch->limit)
+	if (++sch->q.qlen < sch->limit)
 		return NET_XMIT_SUCCESS;

 	q->drop_overlimit++;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 67c6823..b45ed1f 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1080,7 +1080,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
 			transports) {

 		if (transport == active)
-			continue;
+			break;
 		list_for_each_entry(chunk, &transport->transmitted,
 				    transmitted_list) {
 			if (key == chunk->subh.data_hdr->tsn) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index de1a013..5131fcf 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
 	}

 	/* Delete the tempory new association. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
+	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());

 	/* Restore association pointer to provide SCTP command interpeter
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9ef5c73..cedd9bf 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5653,9 +5653,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;

-	/* Allow the struct to grow and fill in as much as possible */
-	len = min_t(size_t, len, sizeof(sas));
-
 	if (copy_from_user(&sas, optval, len))
 		return -EFAULT;

@@ -5689,6 +5686,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	/* Mark beginning of a new observation period */
 	asoc->stats.max_obs_rto = asoc->rto_min;

+	/* Allow the struct to grow and fill in as much as possible */
+	len = min_t(size_t, len, sizeof(sas));
+
 	if (put_user(len, optlen))
 		return -EFAULT;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 716aa41..507b5e8 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -511,7 +511,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
 	new = rpc_new_client(args, xprt);
 	if (IS_ERR(new)) {
 		err = PTR_ERR(new);
-		goto out_err;
+		goto out_put;
 	}

 	atomic_inc(&clnt->cl_count);
@@ -524,6 +524,8 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
 	new->cl_chatty = clnt->cl_chatty;
 	return new;

+out_put:
+	xprt_put(xprt);
 out_err:
 	dprintk("RPC: %s: returned error %d\n", __func__, err);
 	return ERR_PTR(err);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f8529fc..fb20f25 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -180,8 +180,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 	task->tk_waitqueue = queue;
 	queue->qlen++;
-	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
-	smp_wmb();
 	rpc_set_queued(task);

 	dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -432,11 +430,8 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
  */
 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-	if (RPC_IS_QUEUED(task)) {
-		smp_rmb();
-		if (task->tk_waitqueue == queue)
-			__rpc_do_wake_up_task(queue, task);
-	}
+	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
+		__rpc_do_wake_up_task(queue, task);
 }

 /*
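The sunrpc/sched.c hunk above removes a classic publish/observe barrier pairing. The enqueue side must make its write to tk_waitqueue visible before setting the QUEUED bit, and the waker may trust tk_waitqueue only after observing that bit; the reverted fix expressed this with smp_wmb()/smp_rmb(). The pairing, shown as fragments of the two paths exactly as in the deleted lines:

    /* enqueue side */
    task->tk_waitqueue = queue;   /* store A */
    smp_wmb();                    /* order store A before store B */
    rpc_set_queued(task);         /* store B: sets RPC_TASK_QUEUED */

    /* wakeup side */
    if (RPC_IS_QUEUED(task)) {    /* load B observes the bit */
        smp_rmb();                /* pairs with smp_wmb() above */
        if (task->tk_waitqueue == queue)  /* load A is now ordered after B */
            __rpc_do_wake_up_task(queue, task);
    }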
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2d34b6b..dbf12ac 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -515,6 +515,15 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);

 void svc_shutdown_net(struct svc_serv *serv, struct net *net)
 {
+	/*
+	 * The set of xprts (contained in the sv_tempsocks and
+	 * sv_permsocks lists) is now constant, since it is modified
+	 * only by accepting new sockets (done by service threads in
+	 * svc_recv) or aging old ones (done by sv_temptimer), or
+	 * configuration changes (excluded by whatever locking the
+	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
+	 * safe to traverse those lists and shut everything down:
+	 */
 	svc_close_net(serv, net);

 	if (serv->sv_shutdown)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index ca71056..b8e47fa 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -856,6 +856,7 @@ static void svc_age_temp_xprts(unsigned long closure)
 	struct svc_serv *serv = (struct svc_serv *)closure;
 	struct svc_xprt *xprt;
 	struct list_head *le, *next;
+	LIST_HEAD(to_be_aged);

 	dprintk("svc_age_temp_xprts\n");

@@ -876,15 +877,25 @@ static void svc_age_temp_xprts(unsigned long closure)
 		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
 		    test_bit(XPT_BUSY, &xprt->xpt_flags))
 			continue;
-		list_del_init(le);
+		svc_xprt_get(xprt);
+		list_move(le, &to_be_aged);
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
 		set_bit(XPT_DETACHED, &xprt->xpt_flags);
+	}
+	spin_unlock_bh(&serv->sv_lock);
+
+	while (!list_empty(&to_be_aged)) {
+		le = to_be_aged.next;
+		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
+		list_del_init(le);
+		xprt = list_entry(le, struct svc_xprt, xpt_list);
+
 		dprintk("queuing xprt %p for closing\n", xprt);

 		/* a thread will dequeue and close it soon */
 		svc_xprt_enqueue(xprt);
+		svc_xprt_put(xprt);
 	}
-	spin_unlock_bh(&serv->sv_lock);

 	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
 }
@@ -948,24 +959,21 @@ void svc_close_xprt(struct svc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);

-static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
 {
 	struct svc_xprt *xprt;
-	int ret = 0;

 	spin_lock(&serv->sv_lock);
 	list_for_each_entry(xprt, xprt_list, xpt_list) {
 		if (xprt->xpt_net != net)
 			continue;
-		ret++;
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		svc_xprt_enqueue(xprt);
+		set_bit(XPT_BUSY, &xprt->xpt_flags);
 	}
 	spin_unlock(&serv->sv_lock);
-	return ret;
 }

-static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
+static void svc_clear_pools(struct svc_serv *serv, struct net *net)
 {
 	struct svc_pool *pool;
 	struct svc_xprt *xprt;
@@ -980,46 +988,42 @@ static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
 			if (xprt->xpt_net != net)
 				continue;
 			list_del_init(&xprt->xpt_ready);
-			spin_unlock_bh(&pool->sp_lock);
-			return xprt;
 		}
 		spin_unlock_bh(&pool->sp_lock);
 	}
-	return NULL;
 }

-static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
+static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
 {
 	struct svc_xprt *xprt;
+	struct svc_xprt *tmp;
+	LIST_HEAD(victims);

-	while ((xprt = svc_dequeue_net(serv, net))) {
-		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		svc_delete_xprt(xprt);
+	spin_lock(&serv->sv_lock);
+	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+		if (xprt->xpt_net != net)
+			continue;
+		list_move(&xprt->xpt_list, &victims);
 	}
+	spin_unlock(&serv->sv_lock);
+
+	list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
+		svc_delete_xprt(xprt);
 }

-/*
- * Server threads may still be running (especially in the case where the
- * service is still running in other network namespaces).
- *
- * So we shut down sockets the same way we would on a running server, by
- * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
- * the close.  In the case there are no such other threads,
- * threads running, svc_clean_up_xprts() does a simple version of a
- * server's main event loop, and in the case where there are other
- * threads, we may need to wait a little while and then check again to
- * see if they're done.
- */
 void svc_close_net(struct svc_serv *serv, struct net *net)
 {
-	int delay = 0;
-
-	while (svc_close_list(serv, &serv->sv_permsocks, net) +
-	       svc_close_list(serv, &serv->sv_tempsocks, net)) {
-
-		svc_clean_up_xprts(serv, net);
-		msleep(delay++);
-	}
+	svc_close_list(serv, &serv->sv_tempsocks, net);
+	svc_close_list(serv, &serv->sv_permsocks, net);
+
+	svc_clear_pools(serv, net);
+	/*
+	 * At this point the sp_sockets lists will stay empty, since
+	 * svc_xprt_enqueue will not add new entries without taking the
+	 * sp_lock and checking XPT_BUSY.
+	 */
+	svc_clear_list(serv, &serv->sv_tempsocks, net);
+	svc_clear_list(serv, &serv->sv_permsocks, net);
 }

 /*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ab02588..33811db 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -485,17 +485,13 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
  * xprt_wait_for_buffer_space - wait for transport output buffer to clear
  * @task: task to be put to sleep
  * @action: function pointer to be executed after wait
- *
- * Note that we only set the timer for the case of RPC_IS_SOFT(), since
- * we don't in general want to force a socket disconnection due to
- * an incomplete RPC call transmission.
  */
 void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;

-	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
+	task->tk_timeout = req->rq_timeout;
 	rpc_sleep_on(&xprt->pending, task, action);
 }
 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index fc906d9..9b4e483 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -806,7 +806,6 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
 	if (addr) {
 		addr->family = AF_TIPC;
 		addr->addrtype = TIPC_ADDR_ID;
-		memset(&addr->addr, 0, sizeof(addr->addr));
 		addr->addr.id.ref = msg_origport(msg);
 		addr->addr.id.node = msg_orignode(msg);
 		addr->addr.name.domain = 0;	/* could leave uninitialized */
@@ -921,9 +920,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}

-	/* will be updated in set_orig_addr() if needed */
-	m->msg_namelen = 0;
-
 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 restart:

@@ -1033,9 +1029,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}

-	/* will be updated in set_orig_addr() if needed */
-	m->msg_namelen = 0;
-
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f347754..5b5c876 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -384,7 +384,7 @@ static void unix_sock_destructor(struct sock *sk)
 #endif
 }

-static void unix_release_sock(struct sock *sk, int embrion)
+static int unix_release_sock(struct sock *sk, int embrion)
 {
 	struct unix_sock *u = unix_sk(sk);
 	struct path path;
@@ -453,6 +453,8 @@ static void unix_release_sock(struct sock *sk, int embrion)

 	if (unix_tot_inflight)
 		unix_gc();		/* Garbage collect fds */
+
+	return 0;
 }

 static void init_peercred(struct sock *sk)
@@ -699,10 +701,9 @@ static int unix_release(struct socket *sock)
 	if (!sk)
 		return 0;

-	unix_release_sock(sk, 0);
 	sock->sk = NULL;

-	return 0;
+	return unix_release_sock(sk, 0);
 }

 static int unix_autobind(struct socket *sock)
@@ -1995,7 +1996,7 @@ again:
 			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
 			    (UNIXCB(skb).cred != siocb->scm->cred))
 				break;
-		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+		} else {
 			/* Copy credentials */
 			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
 			check_creds = 1;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 91ef82b..82c4fc7 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -883,7 +883,7 @@ static void handle_channel(struct wiphy *wiphy,
 			return;
 		REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
-		chan->flags |= IEEE80211_CHAN_DISABLED;
+		chan->flags = IEEE80211_CHAN_DISABLED;
 		return;
 	}
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index e253917..06ba4a7 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -8,7 +8,7 @@
 # ==========================================================================

 # called may set destination dir (when installing to asm/)
-_dst := $(if $(destination-y),$(destination-y),$(if $(dst),$(dst),$(obj)))
+_dst := $(or $(destination-y),$(dst),$(obj))

 # generated header directory
 gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))

@@ -48,14 +48,13 @@ all-files     := $(header-y) $(genhdr-y) $(wrapper-files)
 output-files  := $(addprefix $(installdir)/, $(all-files))

 input-files   := $(foreach hdr, $(header-y), \
-			$(if $(wildcard $(srcdir)/$(hdr)), \
+			$(or \
 				$(wildcard $(srcdir)/$(hdr)), \
-				$(if $(wildcard $(oldsrcdir)/$(hdr)), \
-					$(wildcard $(oldsrcdir)/$(hdr)), \
-					$(error Missing UAPI file $(srcdir)/$(hdr))) \
+				$(wildcard $(oldsrcdir)/$(hdr)), \
+				$(error Missing UAPI file $(srcdir)/$(hdr)) \
			)) \
		 $(foreach hdr, $(genhdr-y), \
-			$(if $(wildcard $(gendir)/$(hdr)), \
+			$(or \
 				$(wildcard $(gendir)/$(hdr)), \
 				$(error Missing generated UAPI file $(gendir)/$(hdr)) \
			))
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 68b85e1..3368939 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -156,6 +156,7 @@ sub read_kconfig {

     my $state = "NONE";
     my $config;
+    my @kconfigs;

     my $cont = 0;
     my $line;
@@ -189,13 +190,7 @@ sub read_kconfig {

 	# collect any Kconfig sources
 	if (/^source\s*"(.*)"/) {
-	    my $kconfig = $1;
-	    # prevent reading twice.
-	    if (!defined($read_kconfigs{$kconfig})) {
-		$read_kconfigs{$kconfig} = 1;
-		read_kconfig($kconfig);
-	    }
-	    next;
+	    $kconfigs[$#kconfigs+1] = $1;
 	}

 	# configs found
@@ -255,6 +250,14 @@ sub read_kconfig {
 	}
     }
     close($kinfile);
+
+    # read in any configs that were found.
+    foreach my $kconfig (@kconfigs) {
+	if (!defined($read_kconfigs{$kconfig})) {
+	    $read_kconfigs{$kconfig} = 1;
+	    read_kconfig($kconfig);
+	}
+    }
 }

 if ($kconfig) {
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 5f44009..f221ddf 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -4,8 +4,7 @@ TARGET=$1
 ARCH=$2
 SMP=$3
 PREEMPT=$4
-RT=$5
-CC=$6
+CC=$5

 vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }

@@ -58,7 +57,6 @@ UTS_VERSION="#$VERSION"
 CONFIG_FLAGS=""
 if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
 if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
-if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
 UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"

 # Truncate to maximum length
diff --git a/security/keys/compat.c b/security/keys/compat.c
index d65fa7f..1c26176 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -40,12 +40,12 @@ static long compat_keyctl_instantiate_key_iov(
 				   ARRAY_SIZE(iovstack),
 				   iovstack, &iov);
 	if (ret < 0)
-		goto err;
+		return ret;
 	if (ret == 0)
 		goto no_payload_free;

 	ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-err:
+
 	if (iov != iovstack)
 		kfree(iov);
 	return ret;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 42defae..20e4bf5 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -57,7 +57,7 @@ int install_user_keyrings(void)

 	kenter("%p{%u}", user, uid);

-	if (user->uid_keyring && user->session_keyring) {
+	if (user->uid_keyring) {
 		kleave(" = 0 [exist]");
 		return 0;
 	}
@@ -367,8 +367,6 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
 		switch (PTR_ERR(key_ref)) {
 		case -EAGAIN: /* no key */
-			if (ret)
-				break;
 		case -ENOKEY: /* negative key */
 			ret = key_ref;
 			break;
@@ -839,7 +837,7 @@ void key_change_session_keyring(struct callback_head *twork)
 	new-> sgid	= old-> sgid;
 	new->fsgid	= old->fsgid;
 	new->user	= get_uid(old->user);
-	new->user_ns	= get_user_ns(old->user_ns);
+	new->user_ns	= get_user_ns(new->user_ns);
 	new->group_info	= get_group_info(old->group_info);
 	new->securebits	= old->securebits;
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 8ab2951..48665ec 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,

 	if (old_ctx) {
 		new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
-				  GFP_ATOMIC);
+				  GFP_KERNEL);
 		if (!new_ctx)
 			return -ENOMEM;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f4aaf5a..09b4286 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3222,10 +3222,18 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
 			   struct vm_area_struct *area)
 {
-	struct snd_pcm_runtime *runtime = substream->runtime;;
+	long size;
+	unsigned long offset;

 	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
-	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
+	area->vm_flags |= VM_IO;
+	size = area->vm_end - area->vm_start;
+	offset = area->vm_pgoff << PAGE_SHIFT;
+	if (io_remap_pfn_range(area, area->vm_start,
+			       (substream->runtime->dma_addr + offset) >> PAGE_SHIFT,
+			       size, area->vm_page_prot))
+		return -EAGAIN;
+	return 0;
 }
 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
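In the pcm_native.c hunk above, the deleted line used vm_iomap_memory(), a bounds-checking helper that derives the page offset and size from the vma itself; the restored code open-codes the same mapping with io_remap_pfn_range(). Roughly (a sketch of the equivalence, not the exact driver code):

    static int mmap_iomem_sketch(struct vm_area_struct *area, dma_addr_t base,
                                 size_t bytes)
    {
        unsigned long offset = area->vm_pgoff << PAGE_SHIFT;
        long size = area->vm_end - area->vm_start;

        area->vm_flags |= VM_IO;
        if (io_remap_pfn_range(area, area->vm_start,
                               (base + offset) >> PAGE_SHIFT,
                               size, area->vm_page_prot))
            return -EAGAIN;
        return 0;
        /* vm_iomap_memory(area, base, bytes) performs the equivalent
         * mapping and additionally checks that the vma fits within
         * [base, base + bytes). */
    }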
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 24d44b2..160b1bd 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -290,10 +290,10 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
 			tid.device = SNDRV_TIMER_GLOBAL_SYSTEM;
 			err = snd_timer_open(&t, str, &tid, q->queue);
 		}
-	}
-	if (err < 0) {
-		snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err);
-		return err;
+		if (err < 0) {
+			snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err);
+			return err;
+		}
 	}
 	t->callback = snd_seq_timer_interrupt;
 	t->callback_data = q;
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 0097f36..8575861 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -213,10 +213,7 @@ static int slave_put(struct snd_kcontrol *kcontrol,
 	}
 	if (!changed)
 		return 0;
-	err = slave_put_val(slave, ucontrol);
-	if (err < 0)
-		return err;
-	return 1;
+	return slave_put_val(slave, ucontrol);
 }

 static int slave_tlv_cmd(struct snd_kcontrol *kcontrol,
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 64d5347..3d82232 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -286,14 +286,12 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
 		loopback_active_notify(dpcm);
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
 		spin_lock(&cable->lock);
 		cable->pause |= stream;
 		loopback_timer_stop(dpcm);
 		spin_unlock(&cable->lock);
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-	case SNDRV_PCM_TRIGGER_RESUME:
 		spin_lock(&cable->lock);
 		dpcm->last_jiffies = jiffies;
 		cable->pause &= ~stream;
@@ -565,8 +563,7 @@ static snd_pcm_uframes_t loopback_pointer(struct snd_pcm_substream *substream)
 static struct snd_pcm_hardware loopback_pcm_hardware = {
 	.info =		(SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP |
-			 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE |
-			 SNDRV_PCM_INFO_RESUME),
+			 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE),
 	.formats =	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
 			 SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE |
 			 SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE),
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index e760af9..136a393 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)

 	spin_lock(&codec->reg_lock);
 	if (!pvoice->running) {
-		spin_unlock(&codec->reg_lock);
+		spin_unlock_irq(&codec->reg_lock);
 		return 0;
 	}
 	outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR));
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 9febe55..cdd100d 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -836,8 +836,6 @@ static struct {
 	{0x7063, 0x2000}, /* pcHDTV HD-2000 TV */
 };

-static struct pci_driver driver;
-
 /* return the id of the card, or a negative value if it's blacklisted */
 static int snd_bt87x_detect_card(struct pci_dev *pci)
 {
@@ -964,24 +962,11 @@ static DEFINE_PCI_DEVICE_TABLE(snd_bt87x_default_ids) = {
 	{ }
 };

-static struct pci_driver driver = {
+static struct pci_driver bt87x_driver = {
 	.name = KBUILD_MODNAME,
 	.id_table = snd_bt87x_ids,
 	.probe = snd_bt87x_probe,
 	.remove = snd_bt87x_remove,
 };

-static int __init alsa_card_bt87x_init(void)
-{
-	if (load_all)
-		driver.id_table = snd_bt87x_default_ids;
-	return pci_register_driver(&driver);
-}
-
-static void __exit alsa_card_bt87x_exit(void)
-{
-	pci_unregister_driver(&driver);
-}
-
-module_init(alsa_card_bt87x_init)
-module_exit(alsa_card_bt87x_exit)
+module_pci_driver(bt87x_driver);
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index bdd888e..a7c296a 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -657,14 +657,14 @@ static int snd_emu10k1_cardbus_init(struct snd_emu10k1 *emu)
 	return 0;
 }

-static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu,
-				     const struct firmware *fw_entry)
+static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu)
 {
 	int n, i;
 	int reg;
 	int value;
 	unsigned int write_post;
 	unsigned long flags;
+	const struct firmware *fw_entry = emu->firmware;

 	if (!fw_entry)
 		return -EIO;
@@ -725,34 +725,9 @@ static int emu1010_firmware_thread(void *data)
 		/* Return to Audio Dock programming mode */
 		snd_printk(KERN_INFO "emu1010: Loading Audio Dock Firmware\n");
 		snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG,
				       EMU_HANA_FPGA_CONFIG_AUDIODOCK);
-
-		if (!emu->dock_fw) {
-			const char *filename = NULL;
-			switch (emu->card_capabilities->emu_model) {
-			case EMU_MODEL_EMU1010:
-				filename = DOCK_FILENAME;
-				break;
-			case EMU_MODEL_EMU1010B:
-				filename = MICRO_DOCK_FILENAME;
-				break;
-			case EMU_MODEL_EMU1616:
-				filename = MICRO_DOCK_FILENAME;
-				break;
-			}
-			if (filename) {
-				err = request_firmware(&emu->dock_fw,
-						       filename,
-						       &emu->pci->dev);
-				if (err)
-					continue;
-			}
-		}
-
-		if (emu->dock_fw) {
-			err = snd_emu1010_load_firmware(emu, emu->dock_fw);
-			if (err)
-				continue;
-		}
+		err = snd_emu1010_load_firmware(emu);
+		if (err != 0)
+			continue;

 		snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
 		snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &reg);
@@ -887,12 +862,6 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
 			 filename, emu->firmware->size);
 	}

-	err = snd_emu1010_load_firmware(emu, emu->firmware);
-	if (err != 0) {
-		snd_printk(KERN_INFO "emu1010: Loading Firmware failed\n");
-		return err;
-	}
-
 	/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
 	snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
 	if ((reg & 0x3f) != 0x15) {
@@ -1278,8 +1247,6 @@ static int snd_emu10k1_free(struct snd_emu10k1 *emu)
 		kthread_stop(emu->emu1010.firmware_thread);
 	if (emu->firmware)
 		release_firmware(emu->firmware);
-	if (emu->dock_fw)
-		release_firmware(emu->dock_fw);
 	if (emu->irq >= 0)
 		free_irq(emu->irq, emu);
 	/* remove reserved page */
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 622f726..822df97 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -173,7 +173,7 @@ const char *snd_hda_get_jack_type(u32 cfg)
 		"Line Out", "Speaker", "HP Out",
 		"CD", "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
 		"Line In", "Aux", "Mic", "Telephony",
-		"SPDIF In", "Digital In", "Reserved", "Other"
+		"SPDIF In", "Digitial In", "Reserved", "Other"
 	};

 	return jack_types[(cfg & AC_DEFCFG_DEVICE)
@@ -2160,12 +2160,11 @@ struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec,
 EXPORT_SYMBOL_HDA(snd_hda_find_mixer_ctl);

 static int find_empty_mixer_ctl_idx(struct hda_codec *codec, const char *name,
-				    int start_idx)
+				    int dev)
 {
-	int i, idx;
-	/* 16 ctlrs should be large enough */
-	for (i = 0, idx = start_idx; i < 16; i++, idx++) {
-		if (!find_mixer_ctl(codec, name, 0, idx))
+	int idx;
+	for (idx = 0; idx < 16; idx++) { /* 16 ctlrs should be large enough */
+		if (!find_mixer_ctl(codec, name, dev, idx))
 			return idx;
 	}
 	return -EBUSY;
@@ -2967,7 +2966,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
 	if (val & AC_DIG1_PROFESSIONAL)
 		sbits |= IEC958_AES0_PROFESSIONAL;
 	if (sbits & IEC958_AES0_PROFESSIONAL) {
-		if (val & AC_DIG1_EMPHASIS)
+		if (sbits & AC_DIG1_EMPHASIS)
 			sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
 	} else {
 		if (val & AC_DIG1_EMPHASIS)
@@ -3133,29 +3132,30 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
 	int err;
 	struct snd_kcontrol *kctl;
 	struct snd_kcontrol_new *dig_mix;
-	int idx = 0;
-	const int spdif_index = 16;
+	int idx, dev = 0;
+	const int spdif_pcm_dev = 1;
 	struct hda_spdif_out *spdif;
-	struct hda_bus *bus = codec->bus;

-	if (bus->primary_dig_out_type == HDA_PCM_TYPE_HDMI &&
+	if (codec->primary_dig_out_type == HDA_PCM_TYPE_HDMI &&
 	    type == HDA_PCM_TYPE_SPDIF) {
-		idx = spdif_index;
-	} else if (bus->primary_dig_out_type == HDA_PCM_TYPE_SPDIF &&
+		dev = spdif_pcm_dev;
+	} else if (codec->primary_dig_out_type == HDA_PCM_TYPE_SPDIF &&
 		   type == HDA_PCM_TYPE_HDMI) {
-		/* suppose a single SPDIF device */
-		for (dig_mix = dig_mixes; dig_mix->name; dig_mix++) {
-			kctl = find_mixer_ctl(codec, dig_mix->name, 0, 0);
-			if (!kctl)
-				break;
-			kctl->id.index = spdif_index;
+		for (idx = 0; idx < codec->spdif_out.used; idx++) {
+			spdif = snd_array_elem(&codec->spdif_out, idx);
+			for (dig_mix = dig_mixes; dig_mix->name; dig_mix++) {
+				kctl = find_mixer_ctl(codec, dig_mix->name, 0, idx);
+				if (!kctl)
+					break;
+				kctl->id.device = spdif_pcm_dev;
+			}
 		}
-		bus->primary_dig_out_type = HDA_PCM_TYPE_HDMI;
+		codec->primary_dig_out_type = HDA_PCM_TYPE_HDMI;
 	}
-	if (!bus->primary_dig_out_type)
-		bus->primary_dig_out_type = type;
+	if (!codec->primary_dig_out_type)
+		codec->primary_dig_out_type = type;

-	idx = find_empty_mixer_ctl_idx(codec, "IEC958 Playback Switch", idx);
+	idx = find_empty_mixer_ctl_idx(codec, "IEC958 Playback Switch", dev);
 	if (idx < 0) {
 		printk(KERN_ERR "hda_codec: too many IEC958 outputs\n");
 		return -EBUSY;
@@ -3165,6 +3165,7 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
 		kctl = snd_ctl_new1(dig_mix, codec);
 		if (!kctl)
 			return -ENOMEM;
+		kctl->id.device = dev;
 		kctl->id.index = idx;
 		kctl->private_value = codec->spdif_out.used - 1;
 		err = snd_hda_ctl_add(codec, associated_nid, kctl);
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index a35cf09..8665540 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -671,8 +671,6 @@ struct hda_bus {
 	unsigned int response_reset:1;	/* controller was reset */
 	unsigned int in_reset:1;	/* during reset operation */
 	unsigned int power_keep_link_on:1; /* don't power off HDA link */
-
-	int primary_dig_out_type;	/* primary digital out PCM type */
 };

 /*
@@ -839,6 +837,7 @@ struct hda_codec {
 	struct mutex hash_mutex;
 	struct snd_array spdif_out;
 	unsigned int spdif_in_enable;	/* SPDIF input enable? */
+	int primary_dig_out_type;	/* primary digital out PCM type */
 	const hda_nid_t *slave_dig_outs; /* optional digital out slave widgets */
 	struct snd_array init_pins;	/* initial (BIOS) pin configurations */
 	struct snd_array driver_pins;	/* pin configs set by codec parser */
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 86f6468..4c054f4 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -322,7 +322,7 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
 		     struct hda_codec *codec, hda_nid_t nid)
 {
 	int i;
-	int ret = 0;
+	int ret;
 	int size;
 	unsigned char *buf;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 63607da..c78286f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -3624,7 +3624,7 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
 	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
 	/* 5 Series/3400 */
 	{ PCI_DEVICE(0x8086, 0x3b56),
-	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
 	/* Poulsbo */
 	{ PCI_DEVICE(0x8086, 0x811b),
 	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 09fae16..009b77a 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1175,7 +1175,7 @@ static int patch_cxt5045(struct hda_codec *codec)
 	}

 	if (spec->beep_amp)
-		snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+		snd_hda_attach_beep_device(codec, spec->beep_amp);

 	return 0;
 }
@@ -1954,7 +1954,7 @@ static int patch_cxt5051(struct hda_codec *codec)
 	}

 	if (spec->beep_amp)
-		snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+		snd_hda_attach_beep_device(codec, spec->beep_amp);

 	return 0;
 }
@@ -3136,7 +3136,7 @@ static int patch_cxt5066(struct hda_codec *codec)
 	}

 	if (spec->beep_amp)
-		snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+		snd_hda_attach_beep_device(codec, spec->beep_amp);

 	return 0;
 }
@@ -4576,7 +4576,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
 	spec->capture_stream = &cx_auto_pcm_analog_capture;
 	codec->patch_ops = cx_auto_patch_ops;
 	if (spec->beep_amp)
-		snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+		snd_hda_attach_beep_device(codec, spec->beep_amp);

 	/* Some laptops with Conexant chips show stalls in S3 resume,
 	 * which falls into the single-cmd mode.
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index c690b2a..807a2aa 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -714,10 +714,9 @@ static void hdmi_setup_fake_chmap(unsigned char *map, int ca)

 static void hdmi_setup_channel_mapping(struct hda_codec *codec,
 				       hda_nid_t pin_nid, bool non_pcm, int ca,
-				       int channels, unsigned char *map,
-				       bool chmap_set)
+				       int channels, unsigned char *map)
 {
-	if (!non_pcm && chmap_set) {
+	if (!non_pcm && map) {
 		hdmi_manual_setup_channel_mapping(codec, pin_nid,
 						  channels, map);
 	} else {
@@ -906,8 +905,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
 			pin_nid, channels);
 		hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
-					   channels, per_pin->chmap,
-					   per_pin->chmap_set);
+					   channels, per_pin->chmap);
 		hdmi_stop_infoframe_trans(codec, pin_nid);
 		hdmi_fill_audio_infoframe(codec, pin_nid,
 					  ai.bytes, sizeof(ai));
@@ -917,8 +915,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
 		 * accordingly */
 		if (per_pin->non_pcm != non_pcm)
 			hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
-						   channels, per_pin->chmap,
-						   per_pin->chmap_set);
+						   channels, per_pin->chmap);
 	}

 	per_pin->non_pcm = non_pcm;
@@ -1103,12 +1100,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
 	if (!static_hdmi_pcm && eld->eld_valid) {
 		snd_hdmi_eld_update_pcm_info(eld, hinfo);
 		if (hinfo->channels_min > hinfo->channels_max ||
-		    !hinfo->rates || !hinfo->formats) {
-			per_cvt->assigned = 0;
-			hinfo->nid = 0;
-			snd_hda_spdif_ctls_unassign(codec, pin_idx);
+		    !hinfo->rates || !hinfo->formats)
 			return -ENODEV;
-		}
 	}

 	/* Store the updated parameters */
@@ -1172,7 +1165,6 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 		"HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
 		codec->addr, pin_nid, eld->monitor_present, eld_valid);

-	eld->eld_valid = false;
 	if (eld_valid) {
 		if (!snd_hdmi_get_eld(eld, codec, pin_nid))
 			snd_hdmi_show_eld(eld);
@@ -1573,9 +1565,6 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
 	if (pcmdev > 0)
 		sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
-	if (!is_jack_detectable(codec, per_pin->pin_nid))
-		strncat(hdmi_str, " Phantom",
-			sizeof(hdmi_str) - strlen(hdmi_str) - 1);

 	return snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str, 0);
 }
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7f45d48..5faaad2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5394,7 +5394,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
-	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),

 	/* All Apple entries are in codec SSIDs */
 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -5823,7 +5822,6 @@ enum {
 	ALC269_TYPE_ALC280,
 	ALC269_TYPE_ALC282,
 	ALC269_TYPE_ALC284,
-	ALC269_TYPE_ALC286,
 };

 /*
@@ -5847,7 +5845,6 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
 	case ALC269_TYPE_ALC269VB:
 	case ALC269_TYPE_ALC269VD:
 	case ALC269_TYPE_ALC282:
-	case ALC269_TYPE_ALC286:
 		ssids = alc269_ssids;
 		break;
 	default:
@@ -6453,9 +6450,6 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0292:
 		spec->codec_variant = ALC269_TYPE_ALC284;
 		break;
-	case 0x10ec0286:
-		spec->codec_variant = ALC269_TYPE_ALC286;
-		break;
 	}

 	/* automatic parse from the BIOS config */
@@ -6725,8 +6719,7 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
 	const hda_nid_t *ssids;

 	if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 ||
-	    codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 ||
-	    codec->vendor_id == 0x10ec0671)
+	    codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670)
 		ssids = alc663_ssids;
 	else
 		ssids = alc662_ssids;
@@ -7162,7 +7155,6 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
 	{ .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
 	{ .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
 	{ .id = 0x10ec0284, .name = "ALC284", .patch = patch_alc269 },
-	{ .id = 0x10ec0286, .name = "ALC286", .patch = patch_alc269 },
 	{ .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
 	{ .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
 	{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
@@ -7180,7 +7172,6 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
 	{ .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
 	{ .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
 	{ .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
-	{ .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
 	{ .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
 	{ .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
 	{ .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 806407a..2ffdc35 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -2594,8 +2594,6 @@ static int snd_ice1712_create(struct snd_card *card,
 	snd_ice1712_proc_init(ice);
 	synchronize_irq(pci->irq);

-	card->private_data = ice;
-
 	err = pci_request_regions(pci, "ICE1712");
 	if (err < 0) {
 		kfree(ice);
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 0ecd410..2450663 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream)
 	spin_lock_irq(&rme32->lock);
 	rme32->capture_substream = NULL;
 	rme32->capture_periodsize = 0;
-	spin_unlock_irq(&rme32->lock);
+	spin_unlock(&rme32->lock);
 	return 0;
 }
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 2899cb9..ef62c43 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -910,7 +910,7 @@ static int arizona_calc_fll(struct arizona_fll *fll,

 	cfg->n = target / (ratio * Fref);

-	if (target % (ratio * Fref)) {
+	if (target % Fref) {
 		gcd_fll = gcd(target, ratio * Fref);
 		arizona_fll_dbg(fll, "GCD=%u\n", gcd_fll);

@@ -922,15 +922,6 @@ static int arizona_calc_fll(struct arizona_fll *fll,
 		cfg->lambda = 0;
 	}

-	/* Round down to 16bit range with cost of accuracy lost.
-	 * Denominator must be bigger than numerator so we only
-	 * take care of it.
-	 */
-	while (cfg->lambda >= (1 << 16)) {
-		cfg->theta >>= 1;
-		cfg->lambda >>= 1;
-	}
-
 	arizona_fll_dbg(fll, "N=%x THETA=%x LAMBDA=%x\n",
 			cfg->n, cfg->theta, cfg->lambda);
 	arizona_fll_dbg(fll, "FRATIO=%x(%d) OUTDIV=%x REFCLK_DIV=%x\n",
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 5d36319..a4c16fd 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -2006,7 +2006,7 @@ static int max98088_probe(struct snd_soc_codec *codec)
 		       ret);
 		goto err_access;
 	}
-	dev_info(codec->dev, "revision %c\n", ret - 0x40 + 'A');
+	dev_info(codec->dev, "revision %c\n", ret + 'A');

 	snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV);
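A worked example for the max98088 hunk above, assuming the revision register reports 0x40 for an A-revision part (which is what the deleted expression implies): with ret == 0x42, the deleted code prints 'C' (0x42 - 0x40 + 'A' = 0x43), while the restored `ret + 'A'` yields 0xA3, a non-printable byte rather than a revision letter.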
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index d5371e0..d8c65f5 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -1126,9 +1126,9 @@ SOC_DOUBLE_R_TLV("IN3 Volume", WM2200_IN3L_CONTROL, WM2200_IN3R_CONTROL,

 SOC_DOUBLE_R("IN1 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
 	     WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_MUTE_SHIFT, 1, 1),
-SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_2L,
+SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
 	     WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_MUTE_SHIFT, 1, 1),
-SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_3L,
+SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
 	     WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_MUTE_SHIFT, 1, 1),

 SOC_DOUBLE_R_TLV("IN1 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_1L,
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index b54c2e8..1440b3f 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -576,7 +576,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
 			    struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
-	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	struct arizona *arizona = dev_get_drvdata(codec->dev);
 	struct regmap *regmap = codec->control_data;
 	const struct reg_default *patch = NULL;
 	int i, patch_size;
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index f8a31ad..134e41c 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1083,8 +1083,6 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
 	{ "ROP", NULL, "Right Speaker PGA" },
 	{ "RON", NULL, "Right Speaker PGA" },

-	{ "Charge Pump", NULL, "CLK_DSP" },
-
 	{ "Left Headphone Output PGA", NULL, "Charge Pump" },
 	{ "Right Headphone Output PGA", NULL, "Charge Pump" },
 	{ "Left Line Output PGA", NULL, "Charge Pump" },
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 63e453f..3b48042 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -496,8 +496,6 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97)

 	if (imx_ssi->ac97_reset)
 		imx_ssi->ac97_reset(ac97);
-	/* First read sometimes fails, do a dummy read */
-	imx_ssi_ac97_read(ac97, 0);
 }

 static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
@@ -506,9 +504,6 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)

 	if (imx_ssi->ac97_warm_reset)
 		imx_ssi->ac97_warm_reset(ac97);
-
-	/* First read sometimes fails, do a dummy read */
-	imx_ssi_ac97_read(ac97, 0);
 }

 struct snd_ac97_bus_ops soc_ac97_ops = {
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 1a8b03e..19eff8f 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
 	return 0;
 }

-static struct snd_soc_platform_driver sh7760_soc_platform = {
-	.ops		= &camelot_pcm_ops,
+static struct snd_soc_platform sh7760_soc_platform = {
+	.pcm_ops	= &camelot_pcm_ops,
 	.pcm_new	= camelot_pcm_new,
 	.pcm_free	= camelot_pcm_free,
 };
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f3ab918..2370063 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2959,7 +2959,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
 	val = val << shift;

 	ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
-	if (ret < 0)
+	if (ret != 0)
 		return ret;

 	if (snd_soc_volsw_is_stereo(mc)) {
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index 5e7aebe..9b76cc5 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -149,9 +149,9 @@ static void spear_pcm_free(struct snd_pcm *pcm)

 static u64 spear_pcm_dmamask = DMA_BIT_MASK(32);

-static int spear_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int spear_pcm_new(struct snd_card *card,
+		struct snd_soc_dai *dai, struct snd_pcm *pcm)
 {
-	struct snd_card *card = rtd->card->snd_card;
 	int ret;

 	if (!card->dev->dma_mask)
@@ -159,16 +159,16 @@ static int spear_pcm_new(struct snd_soc_pcm_runtime *rtd)
 	if (!card->dev->coherent_dma_mask)
 		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

-	if (rtd->cpu_dai->driver->playback.channels_min) {
-		ret = spear_pcm_preallocate_dma_buffer(rtd->pcm,
+	if (dai->driver->playback.channels_min) {
+		ret = spear_pcm_preallocate_dma_buffer(pcm,
 				SNDRV_PCM_STREAM_PLAYBACK,
 				spear_pcm_hardware.buffer_bytes_max);
 		if (ret)
 			return ret;
 	}

-	if (rtd->cpu_dai->driver->capture.channels_min) {
-		ret = spear_pcm_preallocate_dma_buffer(rtd->pcm,
+	if (dai->driver->capture.channels_min) {
+		ret = spear_pcm_preallocate_dma_buffer(pcm,
 				SNDRV_PCM_STREAM_CAPTURE,
 				spear_pcm_hardware.buffer_bytes_max);
 		if (ret)
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 40dd50a..e2ca12f 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -575,6 +575,7 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
 	urb->instance.pipe = in ? usb_rcvisocpipe(chip->dev, ep)
 			: usb_sndisocpipe(chip->dev, ep);
 	urb->instance.interval = 1;
+	urb->instance.transfer_flags = URB_ISO_ASAP;
 	urb->instance.complete = handler;
 	urb->instance.context = urb;
 	urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
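The USB audio hunks above and below re-add URB_ISO_ASAP, which asks the host controller driver to schedule an isochronous URB at the next available (micro)frame instead of honoring an explicitly chosen urb->start_frame. Typical isochronous URB setup looks roughly like this fragment (my_complete, my_ctx, and N_PACKETS are placeholders):

    urb->dev = usb_dev;
    urb->pipe = usb_sndisocpipe(usb_dev, ep);
    urb->interval = 1;
    urb->transfer_flags = URB_ISO_ASAP;  /* schedule ASAP, don't pick a frame */
    urb->number_of_packets = N_PACKETS;  /* placeholder packet count */
    urb->complete = my_complete;         /* placeholder completion handler */
    urb->context = my_ctx;
    err = usb_submit_urb(urb, GFP_ATOMIC);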
read_completed : write_completed; diff --git a/sound/usb/card.c b/sound/usb/card.c index a9d5779..ccf95cf 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -612,9 +612,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip) int err = -ENODEV; down_read(&chip->shutdown_rwsem); - if (chip->probing) - err = 0; - else if (!chip->shutdown) + if (!chip->shutdown && !chip->probing) err = usb_autopm_get_interface(chip->pm_intf); up_read(&chip->shutdown_rwsem); diff --git a/sound/usb/card.h b/sound/usb/card.h index d32ea41..8a751b4 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -116,7 +116,6 @@ struct snd_usb_substream { unsigned int altset_idx; /* USB data format: index of alternate setting */ unsigned int txfr_quirk:1; /* allow sub-frame alignment */ unsigned int fmt_type; /* USB audio format type (1-3) */ - unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */ unsigned int running: 1; /* running status */ diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 63cca3a..21049b8 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -677,7 +677,7 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep, if (!u->urb->transfer_buffer) goto out_of_memory; u->urb->pipe = ep->pipe; - u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; + u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; u->urb->interval = 1 << ep->datainterval; u->urb->context = u; u->urb->complete = snd_complete_urb; @@ -716,7 +716,8 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep, u->urb->transfer_dma = ep->sync_dma + i * 4; u->urb->transfer_buffer_length = 4; u->urb->pipe = ep->pipe; - u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; + u->urb->transfer_flags = URB_ISO_ASAP | + URB_NO_TRANSFER_DMA_MAP; u->urb->number_of_packets = 1; u->urb->interval = 1 << ep->syncinterval; u->urb->context = u; diff --git a/sound/usb/midi.c b/sound/usb/midi.c index e5fee18..34b9bb7 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -126,6 +126,7 @@ struct snd_usb_midi { struct snd_usb_midi_in_endpoint *in; } endpoints[MIDI_MAX_ENDPOINTS]; unsigned long input_triggered; + bool autopm_reference; unsigned int opened[2]; unsigned char disconnected; unsigned char input_running; @@ -1039,6 +1040,7 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir, { struct snd_usb_midi* umidi = substream->rmidi->private_data; struct snd_kcontrol *ctl; + int err; down_read(&umidi->disc_rwsem); if (umidi->disconnected) { @@ -1049,6 +1051,13 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir, mutex_lock(&umidi->mutex); if (open) { if (!umidi->opened[0] && !umidi->opened[1]) { + err = usb_autopm_get_interface(umidi->iface); + umidi->autopm_reference = err >= 0; + if (err < 0 && err != -EACCES) { + mutex_unlock(&umidi->mutex); + up_read(&umidi->disc_rwsem); + return -EIO; + } if (umidi->roland_load_ctl) { ctl = umidi->roland_load_ctl; ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; @@ -1071,6 +1080,8 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir, snd_ctl_notify(umidi->card, SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); } + if (umidi->autopm_reference) + usb_autopm_put_interface(umidi->iface); } } mutex_unlock(&umidi->mutex); @@ -2245,8 +2256,6 @@ int snd_usbmidi_create(struct snd_card *card, return err; } - usb_autopm_get_interface_no_resume(umidi->iface); - list_add_tail(&umidi->list, midi_list); return 0; } diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c index 
6ad617b..8b81cb5 100644 --- a/sound/usb/misc/ua101.c +++ b/sound/usb/misc/ua101.c @@ -1120,7 +1120,8 @@ static int alloc_stream_urbs(struct ua101 *ua, struct ua101_stream *stream, usb_init_urb(&urb->urb); urb->urb.dev = ua->dev; urb->urb.pipe = stream->usb_pipe; - urb->urb.transfer_flags = URB_NO_TRANSFER_DMA_MAP; + urb->urb.transfer_flags = URB_ISO_ASAP | + URB_NO_TRANSFER_DMA_MAP; urb->urb.transfer_buffer = addr; urb->urb.transfer_dma = dma; urb->urb.transfer_buffer_length = max_packet_size; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 64a564d..e90daf8 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -715,9 +715,8 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_ case UAC2_CLOCK_SELECTOR: { struct uac_selector_unit_descriptor *d = p1; /* call recursively to retrieve the channel info */ - err = check_input_term(state, d->baSourceID[0], term); - if (err < 0) - return err; + if (check_input_term(state, d->baSourceID[0], term) < 0) + return -ENODEV; term->type = d->bDescriptorSubtype << 16; /* virtual type */ term->id = id; term->name = uac_selector_unit_iSelector(d); @@ -726,8 +725,7 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_ case UAC1_PROCESSING_UNIT: case UAC1_EXTENSION_UNIT: /* UAC2_PROCESSING_UNIT_V2 */ - /* UAC2_EFFECT_UNIT */ - case UAC2_EXTENSION_UNIT_V2: { + /* UAC2_EFFECT_UNIT */ { struct uac_processing_unit_descriptor *d = p1; if (state->mixer->protocol == UAC_VERSION_2 && @@ -1357,9 +1355,8 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void return err; /* determine the input source type and name */ - err = check_input_term(state, hdr->bSourceID, &iterm); - if (err < 0) - return err; + if (check_input_term(state, hdr->bSourceID, &iterm) < 0) + return -EINVAL; master_bits = snd_usb_combine_bytes(bmaControls, csize); /* master configuration quirks */ @@ -2054,8 +2051,6 @@ static int parse_audio_unit(struct mixer_build *state, int unitid) return parse_audio_extension_unit(state, unitid, p1); else /* UAC_VERSION_2 */ return parse_audio_processing_unit(state, unitid, p1); - case UAC2_EXTENSION_UNIT_V2: - return parse_audio_extension_unit(state, unitid, p1); default: snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]); return -EINVAL; @@ -2122,7 +2117,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); - if (err < 0 && err != -EINVAL) + if (err < 0) return err; } else { /* UAC_VERSION_2 */ struct uac2_output_terminal_descriptor *desc = p; @@ -2134,12 +2129,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); - if (err < 0 && err != -EINVAL) + if (err < 0) return err; /* for UAC2, use the same approach to also add the clock selectors */ err = parse_audio_unit(&state, desc->bCSourceID); - if (err < 0 && err != -EINVAL) + if (err < 0) return err; } } diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 190f434..15520de 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol, else ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0, 
wIndex, + 0, cpu_to_le16(wIndex), &tmp, sizeof(tmp), 1000); up_read(&mixer->chip->shutdown_rwsem); @@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol, else ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - wValue, wIndex, + cpu_to_le16(wValue), cpu_to_le16(wIndex), NULL, 0, 1000); up_read(&mixer->chip->shutdown_rwsem); diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index bcc50ed..d82e378 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -1161,7 +1161,7 @@ static void retire_capture_urb(struct snd_usb_substream *subs, stride = runtime->frame_bits >> 3; for (i = 0; i < urb->number_of_packets; i++) { - cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset + subs->pkt_offset_adj; + cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (urb->iso_frame_desc[i].status && printk_ratelimit()) { snd_printdd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status); // continue; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 820580a..64d25a7 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -1750,7 +1750,7 @@ YAMAHA_DEVICE(0x7010, "UB99"), .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { /* .vendor_name = "Roland", */ /* .product_name = "A-PRO", */ - .ifnum = 0, + .ifnum = 1, .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = & (const struct snd_usb_midi_endpoint_info) { .out_cables = 0x0003, diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index f581c3e..2c97185 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev) { int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 1, 0, NULL, 0, 1000); + cpu_to_le16(1), 0, NULL, 0, 1000); if (ret < 0) return ret; @@ -533,7 +533,7 @@ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev) { struct usb_host_config *config = dev->actconfig; int err; - u8 bootresponse[0x12]; + u8 bootresponse[12]; int fwsize; int count; @@ -837,7 +837,6 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs, break; } snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id); - subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0; } void snd_usb_set_format_quirk(struct snd_usb_substream *subs, diff --git a/sound/usb/stream.c b/sound/usb/stream.c index cfc4d4e..ad181d5 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -94,7 +94,6 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, subs->dev = as->chip->dev; subs->txfr_quirk = as->chip->txfr_quirk; subs->speed = snd_usb_get_speed(subs->dev); - subs->pkt_offset_adj = 0; snd_usb_set_pcm_ops(as->pcm, stream); @@ -397,14 +396,6 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, if (!csep && altsd->bNumEndpoints >= 2) csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT); - /* - * If we can't locate the USB_DT_CS_ENDPOINT descriptor in the extra - * bytes after the first endpoint, go search the entire interface. - * Some devices have it directly *before* the standard endpoint. 
- */ - if (!csep) - csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT); - if (!csep || csep->bLength < 7 || csep->bDescriptorSubtype != UAC_EP_GENERAL) { snd_printk(KERN_WARNING "%d:%u:%d : no or invalid" diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c index bf618e1..1e7a47a 100644 --- a/sound/usb/usx2y/usb_stream.c +++ b/sound/usb/usx2y/usb_stream.c @@ -69,6 +69,7 @@ static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize, ++u, transfer += transfer_length) { struct urb *urb = urbs[u]; struct usb_iso_packet_descriptor *desc; + urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = transfer; urb->dev = dev; urb->pipe = pipe; diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c index b376532..520ef96 100644 --- a/sound/usb/usx2y/usbusx2yaudio.c +++ b/sound/usb/usx2y/usbusx2yaudio.c @@ -503,6 +503,7 @@ static int usX2Y_urbs_start(struct snd_usX2Y_substream *subs) if (0 == i) atomic_set(&subs->state, state_STARTING3); urb->dev = usX2Y->dev; + urb->transfer_flags = URB_ISO_ASAP; for (pack = 0; pack < nr_of_packs(); pack++) { urb->iso_frame_desc[pack].offset = subs->maxpacksize * pack; urb->iso_frame_desc[pack].length = subs->maxpacksize; diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c index f2a1acd..cc56007 100644 --- a/sound/usb/usx2y/usx2yhwdeppcm.c +++ b/sound/usb/usx2y/usx2yhwdeppcm.c @@ -443,6 +443,7 @@ static int usX2Y_usbpcm_urbs_start(struct snd_usX2Y_substream *subs) if (0 == u) atomic_set(&subs->state, state_STARTING3); urb->dev = usX2Y->dev; + urb->transfer_flags = URB_ISO_ASAP; for (pack = 0; pack < nr_of_packs(); pack++) { urb->iso_frame_desc[pack].offset = subs->maxpacksize * (pack + u * nr_of_packs()); urb->iso_frame_desc[pack].length = subs->maxpacksize; diff --git a/tools/perf/Makefile b/tools/perf/Makefile index fb1b1c4..8ab05e5 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -276,13 +276,13 @@ $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-event $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c: util/parse-events.y - $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_ + $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c: util/pmu.y - $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_ + $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 6416580..aa84130 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -463,15 +463,11 @@ int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, if (!perf_hpp__format[i].cond) continue; - /* - * If there's no field_sep, we still need - * to display initial ' '. 
- */ if (!sep || !first) { ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); advance_hpp(hpp, ret); - } else first = false; + } if (color && perf_hpp__format[i].color) ret = perf_hpp__format[i].color(hpp, he); diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 7bf890e..0f9914a 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -1,4 +1,5 @@ %pure-parser +%name-prefix "parse_events_" %parse-param {void *_data} %parse-param {void *scanner} %lex-param {void* scanner} diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index bfd7e85..ec89804 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y @@ -1,4 +1,5 @@ +%name-prefix "perf_pmu_" %parse-param {struct list_head *format} %parse-param {char *name} diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c index fe1e66b..8674b9e 100644 --- a/tools/usb/ffs-test.c +++ b/tools/usb/ffs-test.c @@ -38,7 +38,7 @@ #include #include -#include "../../include/uapi/linux/usb/functionfs.h" +#include "../../include/linux/usb/functionfs.h" /******************** Little Endian Handling ********************************/ diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 52058f0..cfb7e4d 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -73,12 +73,9 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; u64 redir_content; - if (redir_index < IOAPIC_NUM_PINS) - redir_content = - ioapic->redirtbl[redir_index].bits; - else - redir_content = ~0ULL; + ASSERT(redir_index < IOAPIC_NUM_PINS); + redir_content = ioapic->redirtbl[redir_index].bits; result = (ioapic->ioregsel & 0x1) ? (redir_content >> 32) & 0xffffffff : redir_content & 0xffffffff; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 10afa34..1cd693a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1476,38 +1476,21 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, } int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa, unsigned long len) + gpa_t gpa) { struct kvm_memslots *slots = kvm_memslots(kvm); int offset = offset_in_page(gpa); - gfn_t start_gfn = gpa >> PAGE_SHIFT; - gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; - gfn_t nr_pages_needed = end_gfn - start_gfn + 1; - gfn_t nr_pages_avail; + gfn_t gfn = gpa >> PAGE_SHIFT; ghc->gpa = gpa; ghc->generation = slots->generation; - ghc->len = len; - ghc->memslot = gfn_to_memslot(kvm, start_gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); - if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { + ghc->memslot = gfn_to_memslot(kvm, gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); + if (!kvm_is_error_hva(ghc->hva)) ghc->hva += offset; - } else { - /* - * If the requested region crosses two memslots, we still - * verify that the entire region is valid here. - */ - while (start_gfn <= end_gfn) { - ghc->memslot = gfn_to_memslot(kvm, start_gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, - &nr_pages_avail); - if (kvm_is_error_hva(ghc->hva)) - return -EFAULT; - start_gfn += nr_pages_avail; - } - /* Use the slow path for cross page reads and writes. 
*/ - ghc->memslot = NULL; - } + else + return -EFAULT; + return 0; } EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); @@ -1518,13 +1501,8 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, struct kvm_memslots *slots = kvm_memslots(kvm); int r; - BUG_ON(len > ghc->len); - if (slots->generation != ghc->generation) - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); - - if (unlikely(!ghc->memslot)) - return kvm_write_guest(kvm, ghc->gpa, data, len); + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; @@ -1544,13 +1522,8 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, struct kvm_memslots *slots = kvm_memslots(kvm); int r; - BUG_ON(len > ghc->len); - if (slots->generation != ghc->generation) - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); - - if (unlikely(!ghc->memslot)) - return kvm_read_guest(kvm, ghc->gpa, data, len); + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; -- cgit v0.10.2
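
The sound/usb hunks above (6fire/pcm.c, caiaq/audio.c, endpoint.c, misc/ua101.c, and the usx2y files) all restore explicit URB_ISO_ASAP initialization for isochronous URBs. For readers unfamiliar with that flag, here is a minimal sketch of the v3.8-style setup these hunks revert to. It is an illustration only, not code from this patch: example_init_iso_urb, example_complete, EXAMPLE_PACKETS, EXAMPLE_PKT_SIZE, and the capture-endpoint assumption are all made up for the example.

/*
 * Illustrative sketch only -- not part of the patch.  Shows a v3.8-era
 * isochronous capture URB setup with URB_ISO_ASAP set explicitly, so
 * the host controller schedules the first packet as soon as possible.
 */
#include <linux/usb.h>
#include <linux/slab.h>

#define EXAMPLE_PACKETS   8	/* packets per URB (assumed) */
#define EXAMPLE_PKT_SIZE  192	/* bytes per packet (assumed) */

static void example_complete(struct urb *urb)
{
	/*
	 * A real driver would check urb->status and copy the captured
	 * data out before resubmitting.  Re-setting the flag on every
	 * resubmit mirrors the caiaq/audio.c hunks above.
	 */
	urb->transfer_flags = URB_ISO_ASAP;
	usb_submit_urb(urb, GFP_ATOMIC);
}

static struct urb *example_init_iso_urb(struct usb_device *dev, int ep)
{
	struct urb *urb = usb_alloc_urb(EXAMPLE_PACKETS, GFP_KERNEL);
	int i;

	if (!urb)
		return NULL;
	urb->dev = dev;
	urb->pipe = usb_rcvisocpipe(dev, ep);
	urb->interval = 1;
	urb->transfer_flags = URB_ISO_ASAP;
	urb->number_of_packets = EXAMPLE_PACKETS;
	urb->complete = example_complete;
	urb->context = urb;
	urb->transfer_buffer = kmalloc(EXAMPLE_PACKETS * EXAMPLE_PKT_SIZE,
				       GFP_KERNEL);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	urb->transfer_buffer_length = EXAMPLE_PACKETS * EXAMPLE_PKT_SIZE;
	for (i = 0; i < EXAMPLE_PACKETS; i++) {
		urb->iso_frame_desc[i].offset = i * EXAMPLE_PKT_SIZE;
		urb->iso_frame_desc[i].length = EXAMPLE_PKT_SIZE;
	}
	return urb;
}

With URB_ISO_ASAP set, the host controller driver picks the first available (micro)frame itself; later kernels changed the default isochronous scheduling semantics and drivers dropped the flag, which appears to be why reverting to v3.8 keeps re-adding it throughout these files.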