From c65d2359a85da9a09768c75ea6b9ed6254a7df71 Mon Sep 17 00:00:00 2001 From: Karsten Merker Date: Sat, 24 Oct 2015 23:07:31 +0200 Subject: ARM: dts: sunxi: sun6i-a31s-primo81.dts: add touchscreen axis swapping property The MSI Primo81 has a display in portrait mode but a touchscreen in landscape mode. To have both of them use the same coordinate system, the touchscreen-swapped-x-y property has to be set for the touchscreen. Signed-off-by: Karsten Merker Signed-off-by: Maxime Ripard diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts index 2d4250b..68b479b 100644 --- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts +++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts @@ -83,6 +83,7 @@ reg = <0x5d>; interrupt-parent = <&pio>; interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */ + touchscreen-swapped-x-y; }; }; -- cgit v0.10.2 From 43675ffafd3c3373f82691f539df3dc7403877fe Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 4 Nov 2015 01:02:44 +0300 Subject: bus: sunxi-rsb: unlock on error in sunxi_rsb_read() Don't forget to unlock before returning an error code. Fixes: d787dcdb9c8f ('bus: sunxi-rsb: Add driver for Allwinner Reduced Serial Bus') Signed-off-by: Dan Carpenter Acked-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 846bc29..0cfcb39 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -342,13 +342,13 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, ret = _sunxi_rsb_run_xfer(rsb); if (ret) - goto out; + goto unlock; *buf = readl(rsb->regs + RSB_DATA); +unlock: mutex_unlock(&rsb->lock); -out: return ret; } -- cgit v0.10.2 From 2bc53b8046ce9a1543204b6c6da1ab95e4caac76 Mon Sep 17 00:00:00 2001 From: Ingo Tuchscherer Date: Fri, 27 Nov 2015 08:33:27 +0100 Subject: s390/zcrypt: Fix AP queue handling if queue is full When the AP queue depth of requests was reached additional requests have been ignored. These request are stuck in the request queue. The AP queue handling now push the next waiting request into the queue after fetching a previous serviced and finished reply. Signed-off-by: Ingo Tuchscherer Acked-by: Martin Schwidefsky Acked-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 61f7685..24ec282 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -599,8 +599,10 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev) status = ap_sm_recv(ap_dev); switch (status.response_code) { case AP_RESPONSE_NORMAL: - if (ap_dev->queue_count > 0) + if (ap_dev->queue_count > 0) { + ap_dev->state = AP_STATE_WORKING; return AP_WAIT_AGAIN; + } ap_dev->state = AP_STATE_IDLE; return AP_WAIT_NONE; case AP_RESPONSE_NO_PENDING_REPLY: -- cgit v0.10.2 From 84ebac4d04d25ac5c1b1dc3ae621fd465eb38f4e Mon Sep 17 00:00:00 2001 From: John Keeping Date: Wed, 9 Dec 2015 11:38:13 +0000 Subject: ASoC: es8328: Fix deemphasis values This is using completely the wrong mask and value when updating the register. Since the correct values are already defined in the header, switch to using a table with explicit constants rather than shifting the array index. 
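For readers who want to try the pattern outside the kernel, here is a stand-alone sketch of the table lookup the patch switches to: each supported rate is paired with an explicit register value instead of deriving the value from the array index. The register values mirror the ES8328_DACCONTROL6_DEEMPH_* definitions added to es8328.h below; the function and variable names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative user-space sketch; the values mirror the
 * ES8328_DACCONTROL6_DEEMPH_* constants (0..3 shifted left by 6). */
static const struct {
	int rate;
	unsigned int val;
} deemph_table[] = {
	{     0, 0 << 6 },	/* deemphasis off */
	{ 32000, 1 << 6 },
	{ 44100, 2 << 6 },
	{ 48000, 3 << 6 },
};

/* Return the register value for the supported rate closest to fs. */
static unsigned int deemph_val(int fs)
{
	size_t best = 0, i;

	for (i = 1; i < sizeof(deemph_table) / sizeof(deemph_table[0]); i++)
		if (abs(deemph_table[i].rate - fs) <
		    abs(deemph_table[best].rate - fs))
			best = i;
	return deemph_table[best].val;
}

int main(void)
{
	printf("44100 Hz -> 0x%02x\n", deemph_val(44100));	/* 0x80 */
	printf("48000 Hz -> 0x%02x\n", deemph_val(48000));	/* 0xc0 */
	return 0;
}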
Signed-off-by: John Keeping Signed-off-by: Mark Brown Cc: stable@vger.kernel.org diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index 969e337..c4c64e21 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -85,7 +85,15 @@ static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0); static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0); -static const int deemph_settings[] = { 0, 32000, 44100, 48000 }; +static const struct { + int rate; + unsigned int val; +} deemph_settings[] = { + { 0, ES8328_DACCONTROL6_DEEMPH_OFF }, + { 32000, ES8328_DACCONTROL6_DEEMPH_32k }, + { 44100, ES8328_DACCONTROL6_DEEMPH_44_1k }, + { 48000, ES8328_DACCONTROL6_DEEMPH_48k }, +}; static int es8328_set_deemph(struct snd_soc_codec *codec) { @@ -97,21 +105,22 @@ static int es8328_set_deemph(struct snd_soc_codec *codec) * rate. */ if (es8328->deemph) { - best = 1; - for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) { - if (abs(deemph_settings[i] - es8328->playback_fs) < - abs(deemph_settings[best] - es8328->playback_fs)) + best = 0; + for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) { + if (abs(deemph_settings[i].rate - es8328->playback_fs) < + abs(deemph_settings[best].rate - es8328->playback_fs)) best = i; } - val = best << 1; + val = deemph_settings[best].val; } else { - val = 0; + val = ES8328_DACCONTROL6_DEEMPH_OFF; } dev_dbg(codec->dev, "Set deemphasis %d\n", val); - return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val); + return snd_soc_update_bits(codec, ES8328_DACCONTROL6, + ES8328_DACCONTROL6_DEEMPH_MASK, val); } static int es8328_get_deemph(struct snd_kcontrol *kcontrol, diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h index cb36afe..156c748 100644 --- a/sound/soc/codecs/es8328.h +++ b/sound/soc/codecs/es8328.h @@ -153,6 +153,7 @@ int es8328_probe(struct device *dev, struct regmap *regmap); #define ES8328_DACCONTROL6_CLICKFREE (1 << 3) #define ES8328_DACCONTROL6_DAC_INVR (1 << 4) #define ES8328_DACCONTROL6_DAC_INVL (1 << 5) +#define ES8328_DACCONTROL6_DEEMPH_MASK (3 << 6) #define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6) #define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6) #define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6) -- cgit v0.10.2 From c20875a3e638e4a03e099b343ec798edd1af5cc6 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 12 Nov 2015 16:43:02 +1100 Subject: KVM: PPC: Book3S HV: Prohibit setting illegal transaction state in MSR Currently it is possible for userspace (e.g. QEMU) to set a value for the MSR for a guest VCPU which has both of the TS bits set, which is an illegal combination. The result of this is that when we execute a hrfid (hypervisor return from interrupt doubleword) instruction to enter the guest, the CPU will take a TM Bad Thing type of program interrupt (vector 0x700). Now, if PR KVM is configured in the kernel along with HV KVM, we actually handle this without crashing the host or giving hypervisor privilege to the guest; instead what happens is that we deliver a program interrupt to the guest, with SRR0 reflecting the address of the hrfid instruction and SRR1 containing the MSR value at that point. If PR KVM is not configured in the kernel, then we try to run the host's program interrupt handler with the MMU set to the guest context, which almost certainly causes a host crash. This closes the hole by making kvmppc_set_msr_hv() check for the illegal combination and force the TS field to a safe value (00, meaning non-transactional). 
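Since the TS field is only two bits wide, there are just four possible encodings, and only the both-bits-set one is rejected here. A small user-space sketch of the check follows; the bit position and names are illustrative placeholders, not the authoritative PowerPC MSR layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative placement of a 2-bit TS field; not the real MSR layout. */
#define TS_SHIFT	33
#define TS_MASK		(3ULL << TS_SHIFT)

/* Force the illegal "both bits set" state back to 00 (non-transactional),
 * leaving the other three encodings untouched. */
static uint64_t sanitize_msr(uint64_t msr)
{
	if ((msr & TS_MASK) == TS_MASK)
		msr &= ~TS_MASK;
	return msr;
}

int main(void)
{
	for (uint64_t ts = 0; ts < 4; ts++) {
		uint64_t msr = ts << TS_SHIFT;
		printf("TS=%llu -> TS=%llu\n", (unsigned long long)ts,
		       (unsigned long long)((sanitize_msr(msr) >> TS_SHIFT) & 3));
	}
	return 0;
}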
Cc: stable@vger.kernel.org # v3.9+ Signed-off-by: Paul Mackerras diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 54b45b7..a7352b5 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) { + /* + * Check for illegal transactional state bit combination + * and if we find it, force the TS field to a safe state. + */ + if ((msr & MSR_TS_MASK) == MSR_TS_MASK) + msr &= ~MSR_TS_MASK; vcpu->arch.shregs.msr = msr; kvmppc_end_cede(vcpu); } -- cgit v0.10.2 From e2a0c9fa80227be5ee017b5476638829dd41cb39 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 11 Dec 2015 13:06:24 +0200 Subject: ASoC: davinci-mcasp: Fix XDATA check in mcasp_start_tx The condition for checking for XDAT being cleared was not correct. Fixes: 36bcecd0a73eb ("ASoC: davinci-mcasp: Correct TX start sequence") Signed-off-by: Peter Ujfalusi Signed-off-by: Mark Brown Cc: stable@vger.kernel.org diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 4495a40..41235d3 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -223,8 +223,8 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp) /* wait for XDATA to be cleared */ cnt = 0; - while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & - ~XRDATA) && (cnt < 100000)) + while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) && + (cnt < 100000)) cnt++; /* Release TX state machine */ -- cgit v0.10.2 From 8b89fe1f6c430589122542f228a802d34995bebd Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 10 Dec 2015 18:37:32 +0100 Subject: kvm: x86: move tracepoints outside extended quiescent state Invoking tracepoints within kvm_guest_enter/kvm_guest_exit causes a lockdep splat. Reported-by: Borislav Petkov Signed-off-by: Paolo Bonzini diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 83a1c64..899c40f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu) struct kvm_run *kvm_run = vcpu->run; u32 exit_code = svm->vmcb->control.exit_code; + trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); + if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) vcpu->arch.cr0 = svm->vmcb->save.cr0; if (npt_enabled) @@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; - trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM); - if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_handle_nmi(&svm->vcpu); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index af823a3..6b56056 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) u32 exit_reason = vmx->exit_reason; u32 vectoring_info = vmx->idt_vectoring_info; + trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX); + /* * Flush logged GPAs PML buffer, this will make dirty_bitmap more * updated. 
Another good is, in kvm_vm_ioctl_get_dirty_log, before @@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx->loaded_vmcs->launched = 1; vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); - trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); /* * the KVM_REQ_EVENT optimization bit is only on for one entry, and if diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index eed3228..b84ba4b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6515,6 +6515,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (req_immediate_exit) smp_send_reschedule(vcpu->cpu); + trace_kvm_entry(vcpu->vcpu_id); + wait_lapic_expire(vcpu); __kvm_guest_enter(); if (unlikely(vcpu->arch.switch_db_regs)) { @@ -6527,8 +6529,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; } - trace_kvm_entry(vcpu->vcpu_id); - wait_lapic_expire(vcpu); kvm_x86_ops->run(vcpu); /* -- cgit v0.10.2 From fdec12c12ed4333afb49c9948c29fbd5fb52da97 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Thu, 10 Dec 2015 22:46:50 +0100 Subject: KVM: arm/arm64: vgic: Fix kvm_vgic_map_is_active's dist check External inputs to the vgic from time to time need to poke into the state of a virtual interrupt, the prime example is the architected timer code. Since the IRQ's active state can be represented in two places; the LR or the distributor, we first loop over the LRs but if not active in the LRs we just return if *any* IRQ is active on the VCPU in question. This is of course bogus, as we should check if the specific IRQ in quesiton is active on the distributor instead. Reported-by: Eric Auger Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Marc Zyngier diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 65461f8..7a2f449 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -1114,7 +1114,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map) return true; } - return dist_active_irq(vcpu); + return vgic_irq_is_active(vcpu, map->virt_irq); } /* -- cgit v0.10.2 From 352d52e2442f42539c76d8a13d795ccab7079b26 Mon Sep 17 00:00:00 2001 From: John Keeping Date: Fri, 20 Nov 2015 11:42:22 +0000 Subject: ASoC: es8328: Fix shifts for mixer switches These are all off by one; the playback and bypass switches are the top two bits of the registers, which are at shifts 7 and 6 not 8 and 7. 
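A quick arithmetic check makes the off-by-one obvious: in an 8-bit codec register the top two bits are masks 0x80 and 0x40 (shifts 7 and 6), while a shift of 8 produces 0x100, which lies outside the register altogether. Illustrative only:

#include <stdio.h>

int main(void)
{
	printf("1 << 7 = 0x%02x\n", 1 << 7);	/* 0x80, top bit of an 8-bit register */
	printf("1 << 6 = 0x%02x\n", 1 << 6);	/* 0x40, second-from-top bit */
	printf("1 << 8 = 0x%03x\n", 1 << 8);	/* 0x100, outside an 8-bit register */
	return 0;
}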
Signed-off-by: John Keeping Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index c4c64e21..afa6c5d 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -214,18 +214,18 @@ static const struct snd_kcontrol_new es8328_right_line_controls = /* Left Mixer */ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { - SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 8, 1, 0), - SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 7, 1, 0), - SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 8, 1, 0), - SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 7, 1, 0), + SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0), + SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0), + SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0), + SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0), }; /* Right Mixer */ static const struct snd_kcontrol_new es8328_right_mixer_controls[] = { - SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 8, 1, 0), - SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 7, 1, 0), - SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 8, 1, 0), - SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 7, 1, 0), + SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0), + SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0), + SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0), + SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0), }; static const char * const es8328_pga_sel[] = { -- cgit v0.10.2 From 1ea5998afe903384ddc16391d4c023cd4c867bea Mon Sep 17 00:00:00 2001 From: Mans Rullgard Date: Fri, 11 Dec 2015 11:27:08 +0000 Subject: ASoC: wm8974: set cache type for regmap Attempting to use this codec driver triggers a BUG() in regcache_sync() since no cache type is set. The register map of this device is fairly small and has few holes so a flat cache is suitable. Signed-off-by: Mans Rullgard Acked-by: Charles Keepax Signed-off-by: Mark Brown Cc: stable@vger.kernel.org diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c index 0a60677..4c29bd2 100644 --- a/sound/soc/codecs/wm8974.c +++ b/sound/soc/codecs/wm8974.c @@ -574,6 +574,7 @@ static const struct regmap_config wm8974_regmap = { .max_register = WM8974_MONOMIX, .reg_defaults = wm8974_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults), + .cache_type = REGCACHE_FLAT, }; static int wm8974_probe(struct snd_soc_codec *codec) -- cgit v0.10.2 From 5042f936c6810d0e7153cc9e1794c6998590a930 Mon Sep 17 00:00:00 2001 From: Sjoerd Simons Date: Fri, 11 Dec 2015 09:45:33 +0100 Subject: ASoC: rockchip: spdif: Set transmit data level to 16 samples Explicitly set the transmit data level on the transceiver to 16 samples rather then the default 0. This matches both the level set in the vendor kernel and the (seemingly very similar) i2s engine. This fixes audio glitches when playing back at 192k rate. 
At the same time, fix a trivial typo in the TDL mask definition Signed-off-by: Sjoerd Simons Signed-off-by: Mark Brown diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c index a38a302..bb09a07 100644 --- a/sound/soc/rockchip/rockchip_spdif.c +++ b/sound/soc/rockchip/rockchip_spdif.c @@ -152,8 +152,10 @@ static int rk_spdif_trigger(struct snd_pcm_substream *substream, case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR, - SPDIF_DMACR_TDE_ENABLE, - SPDIF_DMACR_TDE_ENABLE); + SPDIF_DMACR_TDE_ENABLE | + SPDIF_DMACR_TDL_MASK, + SPDIF_DMACR_TDE_ENABLE | + SPDIF_DMACR_TDL(16)); if (ret != 0) return ret; diff --git a/sound/soc/rockchip/rockchip_spdif.h b/sound/soc/rockchip/rockchip_spdif.h index 07f86a2..9c24dbc 100644 --- a/sound/soc/rockchip/rockchip_spdif.h +++ b/sound/soc/rockchip/rockchip_spdif.h @@ -42,7 +42,7 @@ #define SPDIF_DMACR_TDL_SHIFT 0 #define SPDIF_DMACR_TDL(x) ((x) << SPDIF_DMACR_TDL_SHIFT) -#define SPDIF_DMACR_TDL_MASK (0x1f << SDPIF_DMACR_TDL_SHIFT) +#define SPDIF_DMACR_TDL_MASK (0x1f << SPDIF_DMACR_TDL_SHIFT) /* * XFER -- cgit v0.10.2 From 81b1b9ca6d5ca5f3ce91c0095402def657cf5db3 Mon Sep 17 00:00:00 2001 From: Haozhong Zhang Date: Mon, 14 Dec 2015 23:13:38 +0800 Subject: KVM: VMX: Fix host initiated access to guest MSR_TSC_AUX The current handling of accesses to guest MSR_TSC_AUX returns error if vcpu does not support rdtscp, though those accesses are initiated by host. This can result in the reboot failure of some versions of QEMU. This patch fixes this issue by passing those host initiated accesses for further handling instead. Signed-off-by: Haozhong Zhang Signed-off-by: Paolo Bonzini diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 6b56056..44976a5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vcpu->arch.ia32_xss; break; case MSR_TSC_AUX: - if (!guest_cpuid_has_rdtscp(vcpu)) + if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) return 1; /* Otherwise falls through */ default: @@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) clear_atomic_switch_msr(vmx, MSR_IA32_XSS); break; case MSR_TSC_AUX: - if (!guest_cpuid_has_rdtscp(vcpu)) + if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated) return 1; /* Check reserved bit, higher 32 bits should be zero */ if ((data >> 32) != 0) -- cgit v0.10.2 From 13fdae1ae565f214f25753492783ce45cbf23532 Mon Sep 17 00:00:00 2001 From: Bai Ping Date: Wed, 9 Dec 2015 16:15:55 +0800 Subject: ARM: dts: imx: Fix the assigned-clock mismatch issue on imx6q/dl The 'assigned-clock-parents' and 'assigned-clock-rates' list should corresponding to the 'assigned-clocks' property clock list. 
Signed-off-by: Bai Ping Fixes: ed339363de1b ("ARM: dts: imx6qdl-sabreauto: Allow HDMI and LVDS to work simultaneously") Cc: Signed-off-by: Shawn Guo diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi index 8263fc1..d354d40 100644 --- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi @@ -113,14 +113,14 @@ &clks { assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>, <&clks IMX6QDL_PLL4_BYPASS>, - <&clks IMX6QDL_CLK_PLL4_POST_DIV>, <&clks IMX6QDL_CLK_LDB_DI0_SEL>, - <&clks IMX6QDL_CLK_LDB_DI1_SEL>; + <&clks IMX6QDL_CLK_LDB_DI1_SEL>, + <&clks IMX6QDL_CLK_PLL4_POST_DIV>; assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>, <&clks IMX6QDL_PLL4_BYPASS_SRC>, <&clks IMX6QDL_CLK_PLL3_USB_OTG>, <&clks IMX6QDL_CLK_PLL3_USB_OTG>; - assigned-clock-rates = <0>, <0>, <24576000>; + assigned-clock-rates = <0>, <0>, <0>, <0>, <24576000>; }; &ecspi1 { -- cgit v0.10.2 From 3a35e470bc6bc4ce34c19c410ebbe4e3bbf0bafe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20Ha=C5=82asa?= Date: Fri, 11 Dec 2015 14:22:04 +0100 Subject: ARM: dts: imx6: Fix Ethernet PHY mode on Ventana boards MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gateworks Ventana boards seem to need "RGMII-ID" (internal delay) PHY mode, instead of simple "RGMII", for their Marvell 88E1510 transceiver. Otherwise, the Ethernet MAC doesn't work with Marvell PHY driver (TX doesn't seem to work correctly). Tested on GW5400 rev. C. This bug affects ARM Fedora 23. Signed-off-by: Krzysztof Hałasa Acked-by: Tim Harvey Cc: Signed-off-by: Shawn Guo diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts index 58adf17..a51834e 100644 --- a/arch/arm/boot/dts/imx6q-gw5400-a.dts +++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts @@ -154,7 +154,7 @@ &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi index 7b31fdb..dc0cebf 100644 --- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi @@ -94,7 +94,7 @@ &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi index 1b66328..18cd411 100644 --- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi @@ -154,7 +154,7 @@ &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi index 7c51839..eea90f3 100644 --- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi @@ -155,7 +155,7 @@ &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi index 929e0b3..6c11a2a 100644 --- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi @@ -145,7 +145,7 @@ &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; - 
phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; status = "okay"; }; -- cgit v0.10.2 From c4aa1937b7f40adc93e2e0a901314a4bd8991174 Mon Sep 17 00:00:00 2001 From: Lijun Pan Date: Fri, 11 Dec 2015 13:55:02 -0600 Subject: fsl-ifc: add missing include on ARM64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Need to include sched.h to fix the following compilation error if FSL_IFC is enabled on ARM64 machine. In file included from include/linux/mmzone.h:9:0, from include/linux/gfp.h:5, from include/linux/kmod.h:22, from include/linux/module.h:13, from drivers/memory/fsl_ifc.c:22: drivers/memory/fsl_ifc.c: In function ‘check_nand_stat’: include/linux/wait.h:165:35: error: ‘TASK_NORMAL’ undeclared (first use in this function) #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) ^ drivers/memory/fsl_ifc.c:136:3: note: in expansion of macro ‘wake_up’ wake_up(&ctrl->nand_wait); ^ include/linux/wait.h:165:35: note: each undeclared identifier is reported only once for each function it appears in #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) ^ drivers/memory/fsl_ifc.c:136:3: note: in expansion of macro ‘wake_up’ wake_up(&ctrl->nand_wait); ^ Analysis is as follows: I put some instrumental code and get the following .h files inclusion sequence: In file included from ./arch/arm64/include/asm/compat.h:25:0, from ./arch/arm64/include/asm/stat.h:23, from include/linux/stat.h:5, from include/linux/module.h:10, from drivers/memory/fsl_ifc.c:23: include/linux/sched.h:113:1: error: expected ‘=’, ‘,’, ‘;’, ‘asm’ or ‘__attribute__’ before ‘struct’ struct sched_attr { ^ CONFIG_COMPAT=y is enabled while 39 and 48 bit VA is selected. When 42 bit VA is selected, it does not enable CONFIG_COMPAT=y In ./arch/arm64/include/asm/stat.h:23, it has "#ifdef CONFIG_COMPAT" "#include " "..." "#endif" Since ./arch/arm64/include/asm/stat.h does not include ./arch/arm64/include/asm/compat.h, then it will not include include/linux/sched.h Hence we have to manually add "#include " in drivers/memory/fsl_ifc.c Signed-off-by: Lijun Pan Signed-off-by: Arnd Bergmann diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index e87459f..acd1460 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include -- cgit v0.10.2 From dd39a26538e37f6c6131e829a4a510787e43c783 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 11 Dec 2015 12:09:03 +0000 Subject: scripts: recordmcount: break hardlinks recordmcount edits the file in-place, which can cause problems when using ccache in hardlink mode. Arrange for recordmcount to break a hardlinked object. 
Link: http://lkml.kernel.org/r/E1a7MVT-0000et-62@rmk-PC.arm.linux.org.uk Cc: stable@vger.kernel.org # 2.6.37+ Signed-off-by: Russell King Signed-off-by: Steven Rostedt diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 698768b..91705ef 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -211,6 +211,20 @@ static void *mmap_file(char const *fname) addr = umalloc(sb.st_size); uread(fd_map, addr, sb.st_size); } + if (sb.st_nlink != 1) { + /* file is hard-linked, break the hard link */ + close(fd_map); + if (unlink(fname) < 0) { + perror(fname); + fail_file(); + } + fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode); + if (fd_map < 0) { + perror(fname); + fail_file(); + } + uwrite(fd_map, addr, sb.st_size); + } return addr; } -- cgit v0.10.2 From be20aa00c67102aaa54599518c086a2338b19f4c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Sun, 29 Nov 2015 08:46:14 -0500 Subject: nfsd: don't hold ls_mutex across a layout recall We do need to serialize layout stateid morphing operations, but we currently hold the ls_mutex across a layout recall which is pretty ugly. It's also unnecessary -- once we've bumped the seqid and copied it, we don't need to serialize the rest of the CB_LAYOUTRECALL vs. anything else. Just drop the mutex once the copy is done. This was causing a "workqueue leaked lock or atomic" warning and an occasional deadlock. There's more work to be done here but this fixes the immediate regression. Fixes: cc8a55320b5f "nfsd: serialize layout stateid morphing operations" Cc: stable@vger.kernel.org Reported-by: Kinglong Mee Signed-off-by: Jeff Layton Signed-off-by: J. Bruce Fields diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 9ffef06..c9d6c71 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -616,6 +616,7 @@ nfsd4_cb_layout_prepare(struct nfsd4_callback *cb) mutex_lock(&ls->ls_mutex); nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid); + mutex_unlock(&ls->ls_mutex); } static int @@ -659,7 +660,6 @@ nfsd4_cb_layout_release(struct nfsd4_callback *cb) trace_layout_recall_release(&ls->ls_stid.sc_stateid); - mutex_unlock(&ls->ls_mutex); nfsd4_return_all_layouts(ls, &reaplist); nfsd4_free_layouts(&reaplist); nfs4_put_stid(&ls->ls_stid); -- cgit v0.10.2 From a50bd43935586420fb75f4558369eb08566fac5e Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 15 Dec 2015 16:06:10 -0500 Subject: ftrace/scripts: Have recordmcount copy the object file Russell King found that he had weird side effects when compiling the kernel with hard linked ccache. The reason was that recordmcount modified the kernel in place via mmap, and when a file gets modified twice by recordmcount, it will complain about it. To fix this issue, Russell wrote a patch that checked if the file was hard linked more than once and would unlink it if it was. Linus Torvalds was not happy with the fact that recordmcount does this in place modification. Instead of doing the unlink only if the file has two or more hard links, it does the unlink all the time. In otherwords, it always does a copy if it changed something. That is, it does the write out if a change was made. Cc: stable@vger.kernel.org # 2.6.37+ Signed-off-by: Steven Rostedt diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 91705ef..301d70b 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -48,12 +48,17 @@ static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. 
*/ -static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ static char gpfx; /* prefix for global symbol name (sometimes '_') */ static struct stat sb; /* Remember .st_size, etc. */ static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */ static const char *altmcount; /* alternate mcount symbol name */ static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */ +static void *file_map; /* pointer of the mapped file */ +static void *file_end; /* pointer to the end of the mapped file */ +static int file_updated; /* flag to state file was changed */ +static void *file_ptr; /* current file pointer location */ +static void *file_append; /* added to the end of the file */ +static size_t file_append_size; /* how much is added to end of file */ /* setjmp() return values */ enum { @@ -67,10 +72,14 @@ static void cleanup(void) { if (!mmap_failed) - munmap(ehdr_curr, sb.st_size); + munmap(file_map, sb.st_size); else - free(ehdr_curr); - close(fd_map); + free(file_map); + file_map = NULL; + free(file_append); + file_append = NULL; + file_append_size = 0; + file_updated = 0; } static void __attribute__((noreturn)) @@ -92,12 +101,22 @@ succeed_file(void) static off_t ulseek(int const fd, off_t const offset, int const whence) { - off_t const w = lseek(fd, offset, whence); - if (w == (off_t)-1) { - perror("lseek"); + switch (whence) { + case SEEK_SET: + file_ptr = file_map + offset; + break; + case SEEK_CUR: + file_ptr += offset; + break; + case SEEK_END: + file_ptr = file_map + (sb.st_size - offset); + break; + } + if (file_ptr < file_map) { + fprintf(stderr, "lseek: seek before file\n"); fail_file(); } - return w; + return file_ptr - file_map; } static size_t @@ -114,12 +133,38 @@ uread(int const fd, void *const buf, size_t const count) static size_t uwrite(int const fd, void const *const buf, size_t const count) { - size_t const n = write(fd, buf, count); - if (n != count) { - perror("write"); - fail_file(); + size_t cnt = count; + off_t idx = 0; + + file_updated = 1; + + if (file_ptr + count >= file_end) { + off_t aoffset = (file_ptr + count) - file_end; + + if (aoffset > file_append_size) { + file_append = realloc(file_append, aoffset); + file_append_size = aoffset; + } + if (!file_append) { + perror("write"); + fail_file(); + } + if (file_ptr < file_end) { + cnt = file_end - file_ptr; + } else { + cnt = 0; + idx = aoffset - count; + } } - return n; + + if (cnt) + memcpy(file_ptr, buf, cnt); + + if (cnt < count) + memcpy(file_append + idx, buf + cnt, count - cnt); + + file_ptr += count; + return count; } static void * @@ -192,9 +237,7 @@ static int make_nop_arm64(void *map, size_t const offset) */ static void *mmap_file(char const *fname) { - void *addr; - - fd_map = open(fname, O_RDWR); + fd_map = open(fname, O_RDONLY); if (fd_map < 0 || fstat(fd_map, &sb) < 0) { perror(fname); fail_file(); @@ -203,29 +246,58 @@ static void *mmap_file(char const *fname) fprintf(stderr, "not a regular file: %s\n", fname); fail_file(); } - addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, - fd_map, 0); + file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, + fd_map, 0); mmap_failed = 0; - if (addr == MAP_FAILED) { + if (file_map == MAP_FAILED) { mmap_failed = 1; - addr = umalloc(sb.st_size); - uread(fd_map, addr, sb.st_size); + file_map = umalloc(sb.st_size); + uread(fd_map, file_map, sb.st_size); } - if (sb.st_nlink != 1) { - /* file is hard-linked, break the hard link */ - close(fd_map); - if (unlink(fname) < 0) { - perror(fname); - 
fail_file(); - } - fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode); - if (fd_map < 0) { - perror(fname); + close(fd_map); + + file_end = file_map + sb.st_size; + + return file_map; +} + +static void write_file(const char *fname) +{ + char tmp_file[strlen(fname) + 4]; + size_t n; + + if (!file_updated) + return; + + sprintf(tmp_file, "%s.rc", fname); + + /* + * After reading the entire file into memory, delete it + * and write it back, to prevent weird side effects of modifying + * an object file in place. + */ + fd_map = open(tmp_file, O_WRONLY | O_TRUNC | O_CREAT, sb.st_mode); + if (fd_map < 0) { + perror(fname); + fail_file(); + } + n = write(fd_map, file_map, sb.st_size); + if (n != sb.st_size) { + perror("write"); + fail_file(); + } + if (file_append_size) { + n = write(fd_map, file_append, file_append_size); + if (n != file_append_size) { + perror("write"); fail_file(); } - uwrite(fd_map, addr, sb.st_size); } - return addr; + close(fd_map); + if (rename(tmp_file, fname) < 0) { + perror(fname); + fail_file(); + } } /* w8rev, w8nat, ...: Handle endianness. */ @@ -332,7 +404,6 @@ do_file(char const *const fname) Elf32_Ehdr *const ehdr = mmap_file(fname); unsigned int reltype = 0; - ehdr_curr = ehdr; w = w4nat; w2 = w2nat; w8 = w8nat; @@ -455,6 +526,7 @@ do_file(char const *const fname) } } /* end switch */ + write_file(fname); cleanup(); } @@ -507,11 +579,14 @@ main(int argc, char *argv[]) case SJ_SETJMP: /* normal sequence */ /* Avoid problems if early cleanup() */ fd_map = -1; - ehdr_curr = NULL; mmap_failed = 1; + file_map = NULL; + file_ptr = NULL; + file_updated = 0; do_file(file); break; case SJ_FAIL: /* error in do_file or below */ + sprintf("%s: failed\n", file); ++n_error; break; case SJ_SUCCEED: /* premature success */ -- cgit v0.10.2 From 97cb69dd800a471c3ee2467be3826badd9c12883 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Fri, 20 Nov 2015 15:44:20 +0530 Subject: UBI: fix return error code We are checking dfs_rootdir for error value or NULL. But in the conditional ternary operator we returned -ENODEV if dfs_rootdir contains an error value and returned PTR_ERR(dfs_rootdir) if dfs_rootdir is NULL. So in the case of dfs_rootdir being NULL we actually assigned 0 to err and returned it to the caller implying a success. Lets return -ENODEV when dfs_rootdir is NULL else return PTR_ERR(dfs_rootdir). Signed-off-by: Sudip Mukherjee Signed-off-by: Richard Weinberger diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index b077e43..c4cb15a 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -236,7 +236,7 @@ int ubi_debugfs_init(void) dfs_rootdir = debugfs_create_dir("ubi", NULL); if (IS_ERR_OR_NULL(dfs_rootdir)) { - int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); + int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV; pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n", err); -- cgit v0.10.2 From 2e69d4912f2fc9d4cd952311d58ceae1cd83057b Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Fri, 20 Nov 2015 14:10:54 -0800 Subject: UBI: fix use of "VID" vs. "EC" in header self-check Looks like a typo, using UBI_EC_HDR_SIZE_CRC (note the "EC") to compute the CRC for the VID header. This shouldn't cause any functional change, as both structures are 64 bytes. 
Verified with: BUILD_BUG_ON(UBI_VID_HDR_SIZE_CRC != UBI_EC_HDR_SIZE_CRC); Reported here: http://lists.infradead.org/pipermail/linux-mtd/2013-September/048570.html Reported by: Bill Pringlemeir Signed-off-by: Brian Norris Signed-off-by: Richard Weinberger diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 1fc23e4..10cf3b5 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -1299,7 +1299,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) goto exit; - crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); + crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); if (hdr_crc != crc) { ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x", -- cgit v0.10.2 From 1a31b20cd81d5cbc7ec6e24cb08066009a1ca32d Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Thu, 26 Nov 2015 21:23:48 +0100 Subject: mtd: ubi: fixup error correction in do_sync_erase() Since fastmap we gained do_sync_erase(). This function can return an error and its error handling isn't obvious. First the memory allocation for struct ubi_work can fail and as such struct ubi_wl_entry is leaked. However if the memory allocation succeeds then the tail function takes care of the struct ubi_wl_entry. A free here could result in a double free. To make the error handling simpler, I split the tail function into one piece which does the work and another which frees the struct ubi_work which is passed as argument. As result do_sync_erase() can keep the struct on stack and we get rid of one error source. Cc: Fixes: 8199b901a ("UBI: Add fastmap support to the WL sub-system") Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Richard Weinberger diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index eb4489f9..f73233f 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -603,6 +603,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, return 0; } +static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk); /** * do_sync_erase - run the erase worker synchronously. * @ubi: UBI device description object @@ -615,20 +616,16 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int vol_id, int lnum, int torture) { - struct ubi_work *wl_wrk; + struct ubi_work wl_wrk; dbg_wl("sync erase of PEB %i", e->pnum); - wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); - if (!wl_wrk) - return -ENOMEM; - - wl_wrk->e = e; - wl_wrk->vol_id = vol_id; - wl_wrk->lnum = lnum; - wl_wrk->torture = torture; + wl_wrk.e = e; + wl_wrk.vol_id = vol_id; + wl_wrk.lnum = lnum; + wl_wrk.torture = torture; - return erase_worker(ubi, wl_wrk, 0); + return __erase_worker(ubi, &wl_wrk); } /** @@ -1014,7 +1011,7 @@ out_unlock: } /** - * erase_worker - physical eraseblock erase worker function. + * __erase_worker - physical eraseblock erase worker function. * @ubi: UBI device description object * @wl_wrk: the work object * @shutdown: non-zero if the worker has to free memory and exit @@ -1025,8 +1022,7 @@ out_unlock: * needed. Returns zero in case of success and a negative error code in case of * failure. 
*/ -static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, - int shutdown) +static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) { struct ubi_wl_entry *e = wl_wrk->e; int pnum = e->pnum; @@ -1034,21 +1030,11 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, int lnum = wl_wrk->lnum; int err, available_consumed = 0; - if (shutdown) { - dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); - kfree(wl_wrk); - wl_entry_destroy(ubi, e); - return 0; - } - dbg_wl("erase PEB %d EC %d LEB %d:%d", pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); err = sync_erase(ubi, e, wl_wrk->torture); if (!err) { - /* Fine, we've erased it successfully */ - kfree(wl_wrk); - spin_lock(&ubi->wl_lock); wl_tree_add(e, &ubi->free); ubi->free_count++; @@ -1066,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, } ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err); - kfree(wl_wrk); if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || err == -EBUSY) { @@ -1150,6 +1135,25 @@ out_ro: return err; } +static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, + int shutdown) +{ + int ret; + + if (shutdown) { + struct ubi_wl_entry *e = wl_wrk->e; + + dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec); + kfree(wl_wrk); + wl_entry_destroy(ubi, e); + return 0; + } + + ret = __erase_worker(ubi, wl_wrk); + kfree(wl_wrk); + return ret; +} + /** * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. * @ubi: UBI device description object -- cgit v0.10.2 From 6b238de189f69dc77d660d4cce62eed15547f4c3 Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Thu, 26 Nov 2015 21:23:49 +0100 Subject: mtd: ubi: don't leak e if schedule_erase() fails If __erase_worker() fails to erase the EB and schedule_erase() fails as well to do anything about it then we go RO. But that is not a reason to leak the e argument here. Therefore clean up e. Cc: Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Richard Weinberger diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index f73233f..5606563 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1060,6 +1060,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) /* Re-schedule the LEB for erasure */ err1 = schedule_erase(ubi, e, vol_id, lnum, 0); if (err1) { + wl_entry_destroy(ubi, e); err = err1; goto out_ro; } -- cgit v0.10.2 From 91acbeb68ab10c0c0f65f30b5b7fddbde4c97dd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 14 Dec 2015 16:42:31 +0100 Subject: drm/amdgpu: fix user fence handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes a random corruption under memory pressure. We need to fence the BO for the user fence as well, otherwise it might be swapped out and the GPU could write the fence value to an undesired location. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5a5f04d..048cfe0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1264,7 +1264,8 @@ struct amdgpu_cs_parser { struct ww_acquire_ctx ticket; /* user fence */ - struct amdgpu_user_fence uf; + struct amdgpu_user_fence uf; + struct amdgpu_bo_list_entry uf_entry; }; struct amdgpu_job { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4f352ec..25a3e248 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, return 0; } +static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, + struct drm_amdgpu_cs_chunk_fence *fence_data) +{ + struct drm_gem_object *gobj; + uint32_t handle; + + handle = fence_data->handle; + gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, + fence_data->handle); + if (gobj == NULL) + return -EINVAL; + + p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); + p->uf.offset = fence_data->offset; + + if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { + drm_gem_object_unreference_unlocked(gobj); + return -EINVAL; + } + + p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); + p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; + p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + p->uf_entry.priority = 0; + p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; + p->uf_entry.tv.shared = true; + + drm_gem_object_unreference_unlocked(gobj); + return 0; +} + int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { union drm_amdgpu_cs *cs = data; @@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) case AMDGPU_CHUNK_ID_FENCE: size = sizeof(struct drm_amdgpu_cs_chunk_fence); - if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { - uint32_t handle; - struct drm_gem_object *gobj; - struct drm_amdgpu_cs_chunk_fence *fence_data; - - fence_data = (void *)p->chunks[i].kdata; - handle = fence_data->handle; - gobj = drm_gem_object_lookup(p->adev->ddev, - p->filp, handle); - if (gobj == NULL) { - ret = -EINVAL; - goto free_partial_kdata; - } - - p->uf.bo = gem_to_amdgpu_bo(gobj); - amdgpu_bo_ref(p->uf.bo); - drm_gem_object_unreference_unlocked(gobj); - p->uf.offset = fence_data->offset; - } else { + if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { ret = -EINVAL; goto free_partial_kdata; } + + ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); + if (ret) + goto free_partial_kdata; + break; case AMDGPU_CHUNK_ID_DEPENDENCIES: @@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, &p->validated); + if (p->uf.bo) + list_add(&p->uf_entry.tv.head, &p->validated); + if (need_mmap_lock) down_read(¤t->mm->mmap_sem); @@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo for (i = 0; i < parser->num_ibs; i++) amdgpu_ib_free(parser->adev, &parser->ibs[i]); kfree(parser->ibs); - if (parser->uf.bo) - amdgpu_bo_unref(&parser->uf.bo); + amdgpu_bo_unref(&parser->uf.bo); + amdgpu_bo_unref(&parser->uf_entry.robj); } static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, -- cgit v0.10.2 From 74a599f09bec7419b2490039f0fb33bc8581ef7c Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 3 Dec 2015 
17:24:00 +0100 Subject: virtio/s390: handle error values in irb The common I/O layer may pass an error value as the irb in the device's interrupt handler (for classic channel I/O). This won't happen in current virtio-ccw implementations, but it's better to be safe than sorry. Let's just return the error conveyed by the irb and clear any possible pending I/O indications. Signed-off-by: Cornelia Huck Reviewed-by: Guenther Hutzl Reviewed-by: Pierre Morel Signed-off-by: Michael S. Tsirkin diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index b2a1a81..1b83159 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -984,6 +984,36 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev, return vq; } +static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev, + __u32 activity) +{ + if (vcdev->curr_io & activity) { + switch (activity) { + case VIRTIO_CCW_DOING_READ_FEAT: + case VIRTIO_CCW_DOING_WRITE_FEAT: + case VIRTIO_CCW_DOING_READ_CONFIG: + case VIRTIO_CCW_DOING_WRITE_CONFIG: + case VIRTIO_CCW_DOING_WRITE_STATUS: + case VIRTIO_CCW_DOING_SET_VQ: + case VIRTIO_CCW_DOING_SET_IND: + case VIRTIO_CCW_DOING_SET_CONF_IND: + case VIRTIO_CCW_DOING_RESET: + case VIRTIO_CCW_DOING_READ_VQ_CONF: + case VIRTIO_CCW_DOING_SET_IND_ADAPTER: + case VIRTIO_CCW_DOING_SET_VIRTIO_REV: + vcdev->curr_io &= ~activity; + wake_up(&vcdev->wait_q); + break; + default: + /* don't know what to do... */ + dev_warn(&vcdev->cdev->dev, + "Suspicious activity '%08x'\n", activity); + WARN_ON(1); + break; + } + } +} + static void virtio_ccw_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) @@ -995,6 +1025,12 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, if (!vcdev) return; + if (IS_ERR(irb)) { + vcdev->err = PTR_ERR(irb); + virtio_ccw_check_activity(vcdev, activity); + /* Don't poke around indicators, something's wrong. */ + return; + } /* Check if it's a notification from the host. */ if ((intparm == 0) && (scsw_stctl(&irb->scsw) == @@ -1010,31 +1046,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, /* Map everything else to -EIO. */ vcdev->err = -EIO; } - if (vcdev->curr_io & activity) { - switch (activity) { - case VIRTIO_CCW_DOING_READ_FEAT: - case VIRTIO_CCW_DOING_WRITE_FEAT: - case VIRTIO_CCW_DOING_READ_CONFIG: - case VIRTIO_CCW_DOING_WRITE_CONFIG: - case VIRTIO_CCW_DOING_WRITE_STATUS: - case VIRTIO_CCW_DOING_SET_VQ: - case VIRTIO_CCW_DOING_SET_IND: - case VIRTIO_CCW_DOING_SET_CONF_IND: - case VIRTIO_CCW_DOING_RESET: - case VIRTIO_CCW_DOING_READ_VQ_CONF: - case VIRTIO_CCW_DOING_SET_IND_ADAPTER: - case VIRTIO_CCW_DOING_SET_VIRTIO_REV: - vcdev->curr_io &= ~activity; - wake_up(&vcdev->wait_q); - break; - default: - /* don't know what to do... */ - dev_warn(&cdev->dev, "Suspicious activity '%08x'\n", - activity); - WARN_ON(1); - break; - } - } + virtio_ccw_check_activity(vcdev, activity); for_each_set_bit(i, &vcdev->indicators, sizeof(vcdev->indicators) * BITS_PER_BYTE) { /* The bit clear must happen before the vring kick. */ -- cgit v0.10.2 From c803cc2dcd722e08020c1ba63bb5ceece4a19fdb Mon Sep 17 00:00:00 2001 From: Jean-Michel Hautbois Date: Thu, 17 Dec 2015 11:07:23 +0100 Subject: ASoC: sgtl5000: fix VAG power up timing When power up, a "pop" is heard on line-in and mic-in. An analysis of the PCM shows it lasts ~400ms and looks like a filter response. VAG power up should be delayed by 400ms as VAG power down is. 
Signed-off-by: Jean-Michel Hautbois Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index f540f82..08b4046 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -189,6 +189,7 @@ static int power_vag_event(struct snd_soc_dapm_widget *w, case SND_SOC_DAPM_POST_PMU: snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP); + msleep(400); break; case SND_SOC_DAPM_PRE_PMD: -- cgit v0.10.2 From 0b3e6fca4d1af4aa150d32506220f4241323a00c Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Mon, 14 Dec 2015 22:34:05 +0200 Subject: ARM: OMAP2+: am43xx: enable GENERIC_CLOCKEVENTS_BROADCAST System will misbehave in the following case: - AM43XX only build (UP); - CONFIG_CPU_IDLE=y - ARM TWD timer enabled and selected as clockevent device. In the above case, It's expected that broadcast timer will be used as backup timer when CPUIdle will put MPU in low power states where ARM TWD will stop and lose its context. But, the CONFIG_SMP might not be selected when kernel is built for AM43XX SoC only and, as result, GENERIC_CLOCKEVENTS_BROADCAST option will not be selected also. This will break CPUIdle and System will stuck in low power states. Hence, fix it by selecting GENERIC_CLOCKEVENTS_BROADCAST option for AM43XX SoCs always and add empty tick_broadcast() function implementation - no need to send any IPI on UP. After this change timer1 will be selected as broadcast timer the same way as for SMP, and CPUIdle will work properly. Signed-off-by: Grygorii Strashko Signed-off-by: Tony Lindgren diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 4b4371d..32a0086 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -65,6 +65,7 @@ config SOC_AM43XX select MACH_OMAP_GENERIC select MIGHT_HAVE_CACHE_L2X0 select HAVE_ARM_SCU + select GENERIC_CLOCKEVENTS_BROADCAST config SOC_DRA7XX bool "TI DRA7XX" diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index b18ebbe..f86692d 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -320,6 +320,12 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, return r; } +#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) +void tick_broadcast(const struct cpumask *mask) +{ +} +#endif + static void __init omap2_gp_clockevent_init(int gptimer_id, const char *fck_source, const char *property) -- cgit v0.10.2 From 54011103fb457ba8b23aed6699609c0ad65b05fb Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Mon, 14 Dec 2015 22:34:06 +0200 Subject: ARM: OMAP2+: AM43xx: select ARM TWD timer Make sure to tell the kernel that AM437x devices have ARM TWD timer. Signed-off-by: Felipe Balbi [grygorii.strashko@ti.com: drop ARM Global timer selection, because it's incompatible with PM (cpuidle/cpufreq). 
So, it's unsafe to enable it unconditionally] Signed-off-by: Grygorii Strashko Signed-off-by: Tony Lindgren diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 32a0086..0517f0c 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -66,6 +66,7 @@ config SOC_AM43XX select MIGHT_HAVE_CACHE_L2X0 select HAVE_ARM_SCU select GENERIC_CLOCKEVENTS_BROADCAST + select HAVE_ARM_TWD config SOC_DRA7XX bool "TI DRA7XX" -- cgit v0.10.2 From a814a29d7bbfdfe56fe1bb9641a185077066eb9f Mon Sep 17 00:00:00 2001 From: Martin Peres Date: Sun, 29 Nov 2015 16:10:18 +0200 Subject: drm/nouveau/bios/fan: hardcode the fan mode to linear MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an oversight that made use of the trip-point-based fan management on cards that never expose those. This led the fan to stay at fan_min. Fortunately, the emergency code would kick in when the temperature would reach 90°C. Reported-by: Tom Englund Tested-by: Tom Englund Signed-off-by: Martin Peres Tested-by: Daemon32 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=92126 Signed-off-by: Ben Skeggs Cc: stable@vger.kernel.org diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c index 43006db..80fed7e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c @@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan) fan->type = NVBIOS_THERM_FAN_UNK; } + fan->fan_mode = NVBIOS_THERM_FAN_LINEAR; fan->min_duty = nvbios_rd08(bios, data + 0x02); fan->max_duty = nvbios_rd08(bios, data + 0x03); -- cgit v0.10.2 From 3e6db33aaf1d42a30339f831ec4850570d6cc7a3 Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Fri, 18 Dec 2015 13:29:18 +0800 Subject: ALSA: hda - Set SKL+ hda controller power at freeze() and thaw() It takes three minutes to enter hibernation on some OEM SKL machines, and we see many spurious codec responses after the thaw() operation. This is because the HDA controller is still in the D0 state after the freeze() call, and pci_pm_freeze()/pci_pm_freeze_noirq() don't set D3 hot in the PCI bus driver. It seems the BIOS still accesses the HDA controller while the system is in the freeze state, so the HDA controller receives a codec response interrupt immediately after the thaw() call. Because of this unexpected interrupt, the HDA controller enters an abnormal state and slows down the system's entry into hibernation. In this patch, we put the HDA controller into the D3 hot state in azx_freeze_noirq() and back into the D0 state in azx_thaw_noirq(). 
V2: Only apply this fix to SKL+ Fix compile error when CONFIG_PM_SLEEP isn't defined [Yet another fix for CONFIG_PM_SLEEP ifdef and the additional comment by tiwai] Signed-off-by: Xiong Zhang Cc: Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index bff5c8b..3b36582 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -954,6 +954,36 @@ static int azx_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */ +#ifdef CONFIG_PM_SLEEP +/* put codec down to D3 at hibernation for Intel SKL+; + * otherwise BIOS may still access the codec and screw up the driver + */ +#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) +#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) +#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) +#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) + +static int azx_freeze_noirq(struct device *dev) +{ + struct pci_dev *pci = to_pci_dev(dev); + + if (IS_SKL_PLUS(pci)) + pci_set_power_state(pci, PCI_D3hot); + + return 0; +} + +static int azx_thaw_noirq(struct device *dev) +{ + struct pci_dev *pci = to_pci_dev(dev); + + if (IS_SKL_PLUS(pci)) + pci_set_power_state(pci, PCI_D0); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + #ifdef CONFIG_PM static int azx_runtime_suspend(struct device *dev) { @@ -1063,6 +1093,10 @@ static int azx_runtime_idle(struct device *dev) static const struct dev_pm_ops azx_pm = { SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) +#ifdef CONFIG_PM_SLEEP + .freeze_noirq = azx_freeze_noirq, + .thaw_noirq = azx_thaw_noirq, +#endif SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle) }; -- cgit v0.10.2 From 272fa59ccb4fc802af28b1d699c2463db6a71bf7 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Thu, 17 Dec 2015 19:06:02 +0100 Subject: s390/dis: Fix handling of format specifiers The print_insn() function returns strings like "lghi %r1,0". To escape the '%' character in sprintf() a second '%' is used. For example "lghi %%r1,0" is converted into "lghi %r1,0". After print_insn() the output string is passed to printk(). Because format specifiers like "%r" or "%f" are ignored by printk() this works by chance most of the time. But for instructions with control registers like "lctl %c6,%c6,780" this fails because printk() interprets "%c" as character format specifier. Fix this problem and escape the '%' characters twice. For example "lctl %%%%c6,%%%%c6,780" is then converted by sprintf() into "lctl %%c6,%%c6,780" and by printk() into "lctl %c6,%c6,780". 
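The two conversion stages are easy to reproduce in user space: the first snprintf() models the sprintf() in print_insn(), and a second pass that reuses the resulting buffer as a format string stands in for printk(). Minimal sketch, illustration only (a compiler will rightly warn about the non-literal format in the second pass; that warning is the very effect being demonstrated):

#include <stdio.h>

int main(void)
{
	char pass1[32], pass2[32];

	/* stage 1, as in print_insn(): "%%%%" comes out as "%%" */
	snprintf(pass1, sizeof(pass1), "lctl %%%%c%i,%%%%c%i,780", 6, 6);
	printf("after sprintf pass: %s\n", pass1);	/* lctl %%c6,%%c6,780 */

	/* stage 2, standing in for printk(): "%%" comes out as "%" */
	snprintf(pass2, sizeof(pass2), pass1);
	printf("after printk-style pass: %s\n", pass2);	/* lctl %c6,%c6,780 */

	return 0;
}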
Signed-off-by: Michael Holzheu Cc: stable@vger.kernel.org Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 8140d10..6e72961 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) } if (separator) ptr += sprintf(ptr, "%c", separator); + /* + * Use four '%' characters below because of the + * following two conversions: + * + * 1) sprintf: %%%%r -> %%r + * 2) printk : %%r -> %r + */ if (operand->flags & OPERAND_GPR) - ptr += sprintf(ptr, "%%r%i", value); + ptr += sprintf(ptr, "%%%%r%i", value); else if (operand->flags & OPERAND_FPR) - ptr += sprintf(ptr, "%%f%i", value); + ptr += sprintf(ptr, "%%%%f%i", value); else if (operand->flags & OPERAND_AR) - ptr += sprintf(ptr, "%%a%i", value); + ptr += sprintf(ptr, "%%%%a%i", value); else if (operand->flags & OPERAND_CR) - ptr += sprintf(ptr, "%%c%i", value); + ptr += sprintf(ptr, "%%%%c%i", value); else if (operand->flags & OPERAND_VR) - ptr += sprintf(ptr, "%%v%i", value); + ptr += sprintf(ptr, "%%%%v%i", value); else if (operand->flags & OPERAND_PCREL) ptr += sprintf(ptr, "%lx", (signed int) value + addr); -- cgit v0.10.2 From 3e3f8bd569558acefdfaae273d71f7a29b8c0b4f Mon Sep 17 00:00:00 2001 From: Zidan Wang Date: Fri, 18 Dec 2015 16:53:41 +0800 Subject: ASoC: fsl_sai: fix no frame clk in master mode After several open/close sai test with ctrl+c, there will be I/O error. The SAI can't work anymore, can't recover. There will be no frame clock. With adding the software reset in trigger stop, the issue can be fixed. This is a hardware bug/errata and reset is the only option. According to the reference manual, the software reset doesn't reset any control register but only internal hardware logics such as bit clock generator, status flags, and FIFO pointers. (Our purpose is just to reset the clock generator while the software reset is the only way to do that.) Since slave mode doesn't use the clock generator, only apply the reset procedure to the master mode. For asynchronous mode, TX will not be reset when RX is still running. In this case, i can't reproduce this issue. Signed-off-by: Zidan Wang Acked-by: Nicolin Chen Signed-off-by: Mark Brown diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index a4435f5..a31f0ba 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -504,6 +504,24 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, FSL_SAI_CSR_FR, FSL_SAI_CSR_FR); regmap_update_bits(sai->regmap, FSL_SAI_RCSR, FSL_SAI_CSR_FR, FSL_SAI_CSR_FR); + + /* + * For sai master mode, after several open/close sai, + * there will be no frame clock, and can't recover + * anymore. Add software reset to fix this issue. + * This is a hardware bug, and will be fix in the + * next sai version. + */ + if (!sai->is_slave_mode) { + /* Software Reset for both Tx and Rx */ + regmap_write(sai->regmap, + FSL_SAI_TCSR, FSL_SAI_CSR_SR); + regmap_write(sai->regmap, + FSL_SAI_RCSR, FSL_SAI_CSR_SR); + /* Clear SR bit to finish the reset */ + regmap_write(sai->regmap, FSL_SAI_TCSR, 0); + regmap_write(sai->regmap, FSL_SAI_RCSR, 0); + } } break; default: -- cgit v0.10.2 From 0b4d6972d7b052b23d33ff9bdbb52958664fbb26 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 18 Dec 2015 10:13:50 -0800 Subject: ARM: dts: Fix UART wakeirq for omap4 duovero parlor Looks like we're missing the wakeirq for the console uart for duovero parlor. 
Let's add that as without it console access just hangs with PM enabled. Cc: Arun Bharadwaj Cc: Ash Charles Cc: Florian Vaussard Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts index 1a78f01..b75f7b2 100644 --- a/arch/arm/boot/dts/omap4-duovero-parlor.dts +++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts @@ -189,3 +189,7 @@ }; }; +&uart3 { + interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH + &omap4_pmx_core OMAP4_UART3_RX>; +}; -- cgit v0.10.2 From c4e074074c142bb21b8c3283066d8e6c1fd2baba Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Fri, 27 Nov 2015 07:57:31 +0100 Subject: drm/exynos: atomic check only enabled crtc states Since atomic check is called also for disabled crtcs it should skip mode checking as it can be uninitialized. The patch fixes it. Signed-off-by: Andrzej Hajda Suggested-by: Daniel Vetter Tested-by: Javier Martinez Canillas Signed-off-by: Inki Dae diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index b3ba27f..e693571 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc, { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + if (!state->enable) + return 0; + if (exynos_crtc->ops->atomic_check) return exynos_crtc->ops->atomic_check(exynos_crtc, state); -- cgit v0.10.2 From 312045eef985b61d74c28047ecd8eca6719d9516 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 21 Dec 2015 11:01:21 +1100 Subject: md: remove check for MD_RECOVERY_NEEDED in action_store. md currently doesn't allow a 'sync_action' such as 'reshape' to be set while MD_RECOVERY_NEEDED is set. This is a problem, particularly since commit 738a273806ee as that can cause ->check_reshape() to call mddev_resume() which sets MD_RECOVERY_NEEDED. So by the time we come to start 'reshape' it is very likely that MD_RECOVERY_NEEDED is still set. Testing for this flag is not really needed and is in any case very racy as it can be set at any moment - asynchronously. Any race between setting a sync_action and setting MD_RECOVERY_NEEDED must already be handled properly in some locked code, probably md_check_recovery(), so remove the test here. The test on MD_RECOVERY_RUNNING is also racy in the 'reshape' case so we should test it again after getting mddev_lock().
As this fixes a race and a regression which can cause 'reshape' to fail, it is suitable for -stable kernels since 4.1 Reported-by: Xiao Ni Fixes: 738a273806ee ("md/raid5: fix allocation of 'scribble' array.") Cc: stable@vger.kernel.org (v4.1+) Signed-off-by: NeilBrown diff --git a/drivers/md/md.c b/drivers/md/md.c index dbedc58..61aacab 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4326,8 +4326,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) } mddev_unlock(mddev); } - } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || - test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) + } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; else if (cmd_match(page, "resync")) clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); @@ -4340,8 +4339,12 @@ action_store(struct mddev *mddev, const char *page, size_t len) return -EINVAL; err = mddev_lock(mddev); if (!err) { - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - err = mddev->pers->start_reshape(mddev); + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + err = -EBUSY; + else { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + err = mddev->pers->start_reshape(mddev); + } mddev_unlock(mddev); } if (err) -- cgit v0.10.2 From ce360db703c98bd51fffe6f5d04a9e2294950514 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 16 Dec 2015 20:32:23 -0800 Subject: ACPI / processor: Fix thermal cooling device regression The processor cooling device is no longer present for passive thermal control. Commit 239708a3af44 ("ACPI: Split out ACPI PSS from ACPI Processor driver") moved the processing to a new function acpi_pss_perf_init(), but missed "return 0" after successful creation. This causes the error handling functions to be called, which will delete the previously created processor cooling device. Fixes: 239708a3af44 (ACPI: Split out ACPI PSS from ACPI Processor driver) Signed-off-by: Srinivas Pandruvada Cc: 4.3+ # 4.3+ Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index f4e02ae..11154a3 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -200,7 +200,8 @@ static int acpi_pss_perf_init(struct acpi_processor *pr, goto err_remove_sysfs_thermal; } - sysfs_remove_link(&pr->cdev->device.kobj, "device"); + return 0; + err_remove_sysfs_thermal: sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); err_thermal_unregister: -- cgit v0.10.2 From 899cfd2bb0d1953d78d14d5e912e13b8bd92a58f Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Sat, 19 Dec 2015 13:43:34 +0530 Subject: ARC: mm: HIGHMEM: Fix section mismatch splat | WARNING: vmlinux.o(.text+0xd6c2): Section mismatch in reference from the function alloc_kmap_pgtable() to the function | .init.text:__alloc_bootmem_low() The function alloc_kmap_pgtable() references the function __init __alloc_bootmem_low(). This is often because alloc_kmap_pgtable lacks a __init annotation or the annotation of __alloc_bootmem_low is wrong. 
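For readers unfamiliar with the warning, here is a minimal, self-contained userspace sketch of the reference pattern modpost complains about (illustrative only; the section attribute merely mimics the kernel's __init placement, and nothing is actually freed in userspace):

    /* a stand-in for the kernel's __init marking */
    #define __init __attribute__((section(".init.text")))

    static int __init boot_time_setup(void)     /* ends up in .init.text */
    {
            return 42;
    }

    static int runtime_caller(void)             /* ends up in .text */
    {
            /* this cross-section call is the kind of reference modpost
             * flags, because in the kernel .init.text is discarded after
             * boot; the fix below makes caller and callee both __init
             */
            return boot_time_setup();
    }

    int main(void)
    {
            return runtime_caller();
    }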
Signed-off-by: Vineet Gupta diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c index 065ee6b..92dd92c 100644 --- a/arch/arc/mm/highmem.c +++ b/arch/arc/mm/highmem.c @@ -111,7 +111,7 @@ void __kunmap_atomic(void *kv) } EXPORT_SYMBOL(__kunmap_atomic); -noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr) +static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr) { pgd_t *pgd_k; pud_t *pud_k; @@ -127,7 +127,7 @@ noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr) return pte_k; } -void kmap_init(void) +void __init kmap_init(void) { /* Due to recursive include hell, we can't do this in processor.h */ BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE)); -- cgit v0.10.2 From 4b32e89af7a054ae3f84f388cb622aeeb8beec9d Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Fri, 18 Dec 2015 22:28:51 +0300 Subject: ARC: mm: fix building for MMU v2 ARC700 cores with MMU v2 don't have the IC_PTAG AUX register and so we only define ARC_REG_IC_PTAG for MMU versions >= 3. But current implementation of cache_line_loop_vX() routines assumes availability of all of them (v2, v3 and v4) simultaneously. And given undefined ARC_REG_IC_PTAG if CONFIG_MMU_VER=2 we're seeing a compilation problem: ---------------------------------->8------------------------------- CC arch/arc/mm/cache.o arch/arc/mm/cache.c: In function '__cache_line_loop_v3': arch/arc/mm/cache.c:270:13: error: 'ARC_REG_IC_PTAG' undeclared (first use in this function) aux_tag = ARC_REG_IC_PTAG; ^ arch/arc/mm/cache.c:270:13: note: each undeclared identifier is reported only once for each function it appears in scripts/Makefile.build:258: recipe for target 'arch/arc/mm/cache.o' failed ---------------------------------->8------------------------------- The simplest fix is to have ARC_REG_IC_PTAG defined regardless of the MMU version being used. We don't use it in cache_line_loop_v2() anyways so who cares. Signed-off-by: Alexey Brodkin Signed-off-by: Vineet Gupta diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index abf06e8..210ef3e 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -62,9 +62,7 @@ extern int ioc_exists; #define ARC_REG_IC_IVIC 0x10 #define ARC_REG_IC_CTRL 0x11 #define ARC_REG_IC_IVIL 0x19 -#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4) #define ARC_REG_IC_PTAG 0x1E -#endif #define ARC_REG_IC_PTAG_HI 0x1F /* Bit val in IC_CTRL */ -- cgit v0.10.2 From 07fd7d4bbcb9a7c741c7e67e715680d9e5835ff2 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 21 Dec 2015 12:38:01 +0530 Subject: ARC: Fix linking errors with CONFIG_MODULE + CONFIG_CC_OPTIMIZE_FOR_SIZE At -Os, ARC gcc generates millicode thunks for function prologue/epilogue, which are served by libgcc. Modules historically are NOT linked with libgcc to avoid code bloat, reducing runtime relocation fixups etc. I even once tried doing that but got lost in makefile intricacies. This means modules at -Os don't get the millicode thunks, causing build failures below: | MODPOST 5 modules | ERROR: "__ld_r13_to_r18" [crypto/sha256_generic.ko] undefined! | ERROR: "__ld_r13_to_r18_ret" [crypto/sha256_generic.ko] undefined! | ERROR: "__st_r13_to_r18" [crypto/sha256_generic.ko] undefined! | ERROR: "__ld_r13_to_r17_ret" [crypto/sha256_generic.ko] undefined! | ERROR: "__st_r13_to_r17" [crypto/sha256_generic.ko] undefined! | ERROR: "__ld_r13_to_r16_ret" [crypto/sha256_generic.ko] undefined! | ERROR: "__st_r13_to_r16" [crypto/sha256_generic.ko] undefined! |.... |....
Work around that by inhibiting millicode thunks for loadable modules. Fixes STAR 9000641864: ("Linux built with optimizations for size emits errors for modules") Reported-by: Anton Kolesov Cc: Michal Marek Cc: linux-kernel@vger.kernel.org Signed-off-by: Vineet Gupta diff --git a/arch/arc/Makefile b/arch/arc/Makefile index cf0cf34..aeb1902 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -81,7 +81,7 @@ endif LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) # Modules with short calls might break for calls into builtin-kernel -KBUILD_CFLAGS_MODULE += -mlong-calls +KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode # Finally dump eveything into kernel build system KBUILD_CFLAGS += $(cflags-y) -- cgit v0.10.2 From 2d64affc92d6281d2eca4faad326b981773bf040 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 21 Dec 2015 13:29:44 +0530 Subject: Revert "ARC: dw2 unwind: Ignore CIE version !=1 gracefully instead of bailing" Blindly ignoring CIE.version != 1 was a bad idea. It still leaves "desirability" when running perf with callgraphing where libgcc symbols might show in hotspot. More importantly, basic CIE.version == 3 support already exists in code: | | retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end); | Next commit will simply add continue-not-bail for CIE.version != 1. This reverts commit 323f41f9e7d0cb5b1d1586aded6682855f1e646d. diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index cf2828a..9f9ecc1 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -293,13 +293,13 @@ static void init_unwind_hdr(struct unwind_table *table, const u32 *cie = cie_for_fde(fde, table); signed ptrType; - if (cie == &not_fde) /* only process FDE here */ + if (cie == &not_fde) continue; if (cie == NULL || cie == &bad_cie) - continue; /* say FDE->CIE.version != 1 */ + return; ptrType = fde_pointer_type(cie); if (ptrType < 0) - continue; + return; ptr = (const u8 *)(fde + 2); if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, @@ -343,10 +343,6 @@ static void init_unwind_hdr(struct unwind_table *table, if (fde[1] == 0xffffffff) continue; /* this is a CIE */ - - if (*(u8 *)(cie + 2) != 1) - continue; /* FDE->CIE.version not supported */ - ptr = (const u8 *)(fde + 2); header->table[n].start = read_pointer(&ptr, (const u8 *)(fde + 1) + @@ -523,8 +519,7 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table) if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) || (*cie & (sizeof(*cie) - 1)) - || (cie[1] != 0xffffffff) - || ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */ + || (cie[1] != 0xffffffff)) return NULL; /* this is not a (valid) CIE */ return cie; } -- cgit v0.10.2 From 6d0d506012c93d3393b8d3d0cac62e46e541c5b6 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 21 Dec 2015 13:40:05 +0530 Subject: ARC: dw2 unwind: Don't bail for CIE.version != 1 The rudimentary CIE.version == 3 handling is already present in code (for return address register specification) Signed-off-by: Vineet Gupta diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 9f9ecc1..f34599a 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -600,9 +600,6 @@ static signed fde_pointer_type(const u32 *cie) const u8 *ptr = (const u8 *)(cie + 2); unsigned version = *ptr; - if (version != 1) - return -1; /* unsupported */ - if (*++ptr) { const char *aug; const u8 *end = (const u8 *)(cie + 1) + *cie; @@ -1014,9 +1011,7 @@ int arc_unwind(struct unwind_frame_info *frame) ptr = (const u8 *)(cie + 2); end = (const
u8 *)(cie + 1) + *cie; frame->call_frame = 1; - if ((state.version = *ptr) != 1) - cie = NULL; /* unsupported version */ - else if (*++ptr) { + if (*++ptr) { /* check if augmentation size is first (thus present) */ if (*ptr == 'z') { while (++ptr < end && *ptr) { -- cgit v0.10.2 From 6b538db7c6b091960c57b23feae5431fc82286e7 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 21 Dec 2015 13:52:01 +0530 Subject: ARC: dw2 unwind: Catch Dwarf SNAFUs early Instead of seeing empty stack traces, let the kernel fail early so dwarf issues can be fixed sooner Signed-off-by: Vineet Gupta diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index f34599a..5eb7076 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -296,10 +296,10 @@ static void init_unwind_hdr(struct unwind_table *table, if (cie == &not_fde) continue; if (cie == NULL || cie == &bad_cie) - return; + goto ret_err; ptrType = fde_pointer_type(cie); if (ptrType < 0) - return; + goto ret_err; ptr = (const u8 *)(fde + 2); if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, @@ -315,14 +315,14 @@ static void init_unwind_hdr(struct unwind_table *table, } if (tableSize || !n) - return; + goto ret_err; hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) + 2 * n * sizeof(unsigned long); header = alloc(hdrSize); if (!header) - return; + goto ret_err; header->version = 1; header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native; @@ -361,6 +361,10 @@ static void init_unwind_hdr(struct unwind_table *table, table->hdrsz = hdrSize; smp_wmb(); table->header = (const void *)header; + return; + +ret_err: + panic("Attention !!! Dwarf FDE parsing errors\n");; } #ifdef CONFIG_MODULES -- cgit v0.10.2 From 71a71fb5374a23be36a91981b5614590b9e722c3 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Mon, 21 Dec 2015 10:03:30 +0100 Subject: parisc: Fix syscall restarts On parisc syscalls which are interrupted by signals sometimes failed to restart and instead returned -ENOSYS which in the worst case led to userspace crashes. A similar problem existed on MIPS and was fixed by commit e967ef02 ("MIPS: Fix restart of indirect syscalls"). On parisc the current syscall restart code assumes that all syscall callers load the syscall number in the delay slot of the ble instruction. That's how it is e.g. done in the unistd.h header file: ble 0x100(%sr2, %r0) ldi #syscall_nr, %r20 Because of that assumption the current code never restored %r20 before returning to userspace. This assumption is at least not true for code which uses the glibc syscall() function, which instead uses this syntax: ble 0x100(%sr2, %r0) copy regX, %r20 where regX depends on how the compiler optimizes the code and register usage. This patch fixes this problem by adding code to analyze how the syscall number is loaded in the delay branch and - if needed - copy the syscall number to regX prior to returning to userspace for the syscall restart. Signed-off-by: Helge Deller Cc: stable@vger.kernel.org Cc: Mathieu Desnoyers diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index dc1ea79..2264f68 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -435,6 +435,55 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall) regs->gr[28]); } +/* + * Check how the syscall number gets loaded into %r20 within + * the delay branch in userspace and adjust as needed.
+ */ + +static void check_syscallno_in_delay_branch(struct pt_regs *regs) +{ + u32 opcode, source_reg; + u32 __user *uaddr; + int err; + + /* Usually we don't have to restore %r20 (the system call number) + * because it gets loaded in the delay slot of the branch external + * instruction via the ldi instruction. + * In some cases a register-to-register copy instruction might have + * been used instead, in which case we need to copy the syscall + * number into the source register before returning to userspace. + */ + + /* A syscall is just a branch, so all we have to do is fiddle the + * return pointer so that the ble instruction gets executed again. + */ + regs->gr[31] -= 8; /* delayed branching */ + + /* Get assembler opcode of code in delay branch */ + uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4); + err = get_user(opcode, uaddr); + if (err) + return; + + /* Check if delay branch uses "ldi int,%r20" */ + if ((opcode & 0xffff0000) == 0x34140000) + return; /* everything ok, just return */ + + /* Check if delay branch uses "nop" */ + if (opcode == INSN_NOP) + return; + + /* Check if delay branch uses "copy %rX,%r20" */ + if ((opcode & 0xffe0ffff) == 0x08000254) { + source_reg = (opcode >> 16) & 31; + regs->gr[source_reg] = regs->gr[20]; + return; + } + + pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n", + current->comm, task_pid_nr(current), opcode); +} + static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) { @@ -457,10 +506,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) } /* fallthrough */ case -ERESTARTNOINTR: - /* A syscall is just a branch, so all - * we have to do is fiddle the return pointer. - */ - regs->gr[31] -= 8; /* delayed branching */ + check_syscallno_in_delay_branch(regs); break; } } @@ -510,15 +556,9 @@ insert_restart_trampoline(struct pt_regs *regs) } case -ERESTARTNOHAND: case -ERESTARTSYS: - case -ERESTARTNOINTR: { - /* Hooray for delayed branching. We don't - * have to restore %r20 (the system call - * number) because it gets loaded in the delay - * slot of the branch external instruction. - */ - regs->gr[31] -= 8; + case -ERESTARTNOINTR: + check_syscallno_in_delay_branch(regs); return; - } default: break; } -- cgit v0.10.2 From 9f660a1c43890c2cdd1f423fd73654e7ca08fe56 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Tue, 22 Dec 2015 00:45:43 +0100 Subject: ALSA: hda/realtek - Fix silent headphone output on MacPro 4,1 (v2) Without this patch, internal speaker and line-out work, but front headphone output jack stays silent on the Mac Pro 4,1. This code path also gets executed on the MacPro 5,1 due to identical codec SSID, but i don't know if it has any positive or adverse effects there or not. (v2) Implement feedback from Takashi Iwai: Reuse alc889_fixup_mbp_vref and just add a new nid 0x19 for the MacPro 4,1. 
Signed-off-by: Mario Kleiner Cc: Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6c268da..fe96428 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1775,6 +1775,7 @@ enum { ALC889_FIXUP_MBA11_VREF, ALC889_FIXUP_MBA21_VREF, ALC889_FIXUP_MP11_VREF, + ALC889_FIXUP_MP41_VREF, ALC882_FIXUP_INV_DMIC, ALC882_FIXUP_NO_PRIMARY_HP, ALC887_FIXUP_ASUS_BASS, @@ -1863,7 +1864,7 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec, const struct hda_fixup *fix, int action) { struct alc_spec *spec = codec->spec; - static hda_nid_t nids[2] = { 0x14, 0x15 }; + static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 }; int i; if (action != HDA_FIXUP_ACT_INIT) @@ -2153,6 +2154,12 @@ static const struct hda_fixup alc882_fixups[] = { .chained = true, .chain_id = ALC885_FIXUP_MACPRO_GPIO, }, + [ALC889_FIXUP_MP41_VREF] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc889_fixup_mbp_vref, + .chained = true, + .chain_id = ALC885_FIXUP_MACPRO_GPIO, + }, [ALC882_FIXUP_INV_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, @@ -2235,7 +2242,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), - SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO), + SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF), SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), -- cgit v0.10.2 From 615cb24326bbe19834a1aba47677a6c80bdcfc01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 14 Dec 2015 13:16:48 +0200 Subject: drm/i915: Drop the broken cursor base==0 special casing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cursor code tries to treat base==0 to mean disabled. That fails when the cursor bo gets bound at ggtt offset 0, and the user is left looking at an invisible cursor. We lose the disabled->disabled optimization, but that seems like something better handled at a slightly higher level. 
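The pitfall being removed is a generic one: overloading a value (here, a GTT offset of 0) as a 'disabled' sentinel breaks as soon as that value becomes legitimate. A tiny hypothetical sketch of the interface shape before and after (not the driver code itself):

    #include <stdbool.h>
    #include <stdio.h>

    /* fragile: 0 doubles as "disabled", yet 0 is a valid offset */
    static void update_cursor_by_sentinel(unsigned int base)
    {
            if (base)
                    printf("program cursor at %#x\n", base);
            else
                    printf("disable cursor\n");  /* wrong if base is legitimately 0 */
    }

    /* robust: enablement is tracked separately from the address,
     * mirroring the explicit "on" flag threaded through by the patch
     */
    static void update_cursor(unsigned int base, bool on)
    {
            if (on)
                    printf("program cursor at %#x\n", base);
            else
                    printf("disable cursor\n");
    }

    int main(void)
    {
            update_cursor_by_sentinel(0);   /* silently turns the cursor off */
            update_cursor(0, true);         /* correct: enabled at offset 0 */
            return 0;
    }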
Cc: drm-intel-fixes@lists.freedesktop.org Cc: Takashi Iwai Cc: Jani Nikula Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1450091808-32607-3-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson (cherry picked from commit 663f3122d00c0b412d429f105dca129aa8f4f094) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 62211ab..1bdf995 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9910,14 +9910,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, return true; } -static void i845_update_cursor(struct drm_crtc *crtc, u32 base) +static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t cntl = 0, size = 0; - if (base) { + if (on) { unsigned int width = intel_crtc->base.cursor->state->crtc_w; unsigned int height = intel_crtc->base.cursor->state->crtc_h; unsigned int stride = roundup_pow_of_two(width) * 4; @@ -9972,16 +9972,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) } } -static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) +static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - uint32_t cntl; + uint32_t cntl = 0; - cntl = 0; - if (base) { + if (on) { cntl = MCURSOR_GAMMA_ENABLE; switch (intel_crtc->base.cursor->state->crtc_w) { case 64: @@ -10032,18 +10031,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, int y = cursor_state->crtc_y; u32 base = 0, pos = 0; - if (on) - base = intel_crtc->cursor_addr; + base = intel_crtc->cursor_addr; if (x >= intel_crtc->config->pipe_src_w) - base = 0; + on = false; if (y >= intel_crtc->config->pipe_src_h) - base = 0; + on = false; if (x < 0) { if (x + cursor_state->crtc_w <= 0) - base = 0; + on = false; pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; x = -x; @@ -10052,16 +10050,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, if (y < 0) { if (y + cursor_state->crtc_h <= 0) - base = 0; + on = false; pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; y = -y; } pos |= y << CURSOR_Y_SHIFT; - if (base == 0 && intel_crtc->cursor_base == 0) - return; - I915_WRITE(CURPOS(pipe), pos); /* ILK+ do this automagically */ @@ -10072,9 +10067,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, } if (IS_845G(dev) || IS_I865G(dev)) - i845_update_cursor(crtc, base); + i845_update_cursor(crtc, base, on); else - i9xx_update_cursor(crtc, base); + i9xx_update_cursor(crtc, base, on); } static bool cursor_size_ok(struct drm_device *dev, -- cgit v0.10.2 From 62d622c1f8d34bde3e3b9fd06f15c35d4028a8ff Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Nov 2015 14:16:39 +0000 Subject: drm/i915: Set the map-and-fenceable flag for preallocated objects As we mark the preallocated objects as bound, we should also flag them correctly as being map-and-fenceable (if appropriate!) so that later users do not get confused and try and rebind the pinned vma in order to get a map-and-fenceable binding. 
Signed-off-by: Chris Wilson Cc: "Goel, Akash" Cc: Daniel Vetter Cc: Jesse Barnes Cc: drm-intel-fixes@lists.freedesktop.org Link: http://patchwork.freedesktop.org/patch/msgid/1448029000-10616-1-git-send-email-chris@chris-wilson.co.uk Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter (cherry picked from commit d0710abbcd88b1ff17760e97d74a673e67b49ea1) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a01e515..037a650 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2839,6 +2839,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); +void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); int __must_check i915_vma_unbind(struct i915_vma *vma); /* * BEWARE: Do not use the function below unless you can _absolutely_ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 32e6aad..3163518 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4080,6 +4080,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) return false; } +void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj = vma->obj; + bool mappable, fenceable; + u32 fence_size, fence_alignment; + + fence_size = i915_gem_get_gtt_size(obj->base.dev, + obj->base.size, + obj->tiling_mode); + fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, + obj->base.size, + obj->tiling_mode, + true); + + fenceable = (vma->node.size == fence_size && + (vma->node.start & (fence_alignment - 1)) == 0); + + mappable = (vma->node.start + fence_size <= + to_i915(obj->base.dev)->gtt.mappable_end); + + obj->map_and_fenceable = mappable && fenceable; +} + static int i915_gem_object_do_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, @@ -4147,25 +4170,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && (bound ^ vma->bound) & GLOBAL_BIND) { - bool mappable, fenceable; - u32 fence_size, fence_alignment; - - fence_size = i915_gem_get_gtt_size(obj->base.dev, - obj->base.size, - obj->tiling_mode); - fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, - obj->base.size, - obj->tiling_mode, - true); - - fenceable = (vma->node.size == fence_size && - (vma->node.start & (fence_alignment - 1)) == 0); - - mappable = (vma->node.start + fence_size <= - dev_priv->gtt.mappable_end); - - obj->map_and_fenceable = mappable && fenceable; - + __i915_vma_set_map_and_fenceable(vma); WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 43f35d1..86c7500 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, return ret; } vma->bound |= GLOBAL_BIND; + __i915_vma_set_map_and_fenceable(vma); list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index cdacf3f..87e919a 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, } vma->bound |= GLOBAL_BIND; + __i915_vma_set_map_and_fenceable(vma); list_add_tail(&vma->mm_list, 
&ggtt->inactive_list); } -- cgit v0.10.2 From a59fac67d31235730378180774fdb46f5a270f1e Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Thu, 3 Dec 2015 11:37:36 -0800 Subject: drm/i915: Disable primary plane if we fail to reconstruct BIOS fb (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we fail to reconstruct the BIOS fb (e.g., because the FB is too large), we'll be left with plane state that indicates the primary plane is visible yet has a NULL fb. This mismatch causes problems later on (e.g., for the watermark code). Since we've failed to reconstruct the BIOS FB, the best solution is to just disable the primary plane and pretend the BIOS never had it enabled. v2: Add intel_pre_disable_primary() call (Maarten) Cc: Daniel Vetter Cc: Ville Syrjälä Cc: Maarten Lankhorst Cc: drm-intel-fixes@lists.freedesktop.org Signed-off-by: Matt Roper Reviewed-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/1449171462-30763-2-git-send-email-matthew.d.roper@intel.com (cherry picked from commit 200757f5d7c6f7f7032a0a07bbb8c02a840bbf7d) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1bdf995..13bc6d4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc); static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); static void intel_modeset_setup_hw_state(struct drm_device *dev); +static void intel_pre_disable_primary(struct drm_crtc *crtc); typedef struct { int min, max; @@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct drm_i915_gem_object *obj; struct drm_plane *primary = intel_crtc->base.primary; struct drm_plane_state *plane_state = primary->state; + struct drm_crtc_state *crtc_state = intel_crtc->base.state; + struct intel_plane *intel_plane = to_intel_plane(primary); struct drm_framebuffer *fb; if (!plane_config->fb) @@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, } } + /* + * We've failed to reconstruct the BIOS FB. Current display state + * indicates that the primary plane is visible, but has a NULL FB, + * which will lead to problems later if we don't fix it up. The + * simplest solution is to just disable the primary plane now and + * pretend the BIOS never had it enabled. + */ + to_intel_plane_state(plane_state)->visible = false; + crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); + intel_pre_disable_primary(&intel_crtc->base); + intel_plane->disable_plane(primary, &intel_crtc->base); + return; valid_fb: -- cgit v0.10.2 From 5dc62fdd8383afbd2faca6b6e6ea1052b45b0124 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 5 Aug 2015 16:41:37 +0100 Subject: MIPS: uaccess: Fix strlen_user with EVA The strlen_user() function calls __strlen_kernel_asm in both branches of the eva_kernel_access() conditional. For EVA it should be calling __strlen_user_eva for user accesses, otherwise it will load from the kernel address space instead of the user address space, and the access checking will likely be ineffective at preventing it due to EVA's overlapping user and kernel address spaces. 
This was found after extending the test_user_copy module to cover user string access functions, which gave the following error with EVA: test_user_copy: illegal strlen_user passed Fortunately the use of strlen_user() has been all but eradicated from the mainline kernel, so only out of tree modules could be affected. Fixes: e3a9b07a9caf ("MIPS: asm: uaccess: Add EVA support for str*_user operations") Signed-off-by: James Hogan Cc: Markos Chandras Cc: Paul Burton Cc: Leonid Yegoshin Cc: linux-mips@linux-mips.org Cc: # 3.15.x- Patchwork: https://patchwork.linux-mips.org/patch/10842/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 5305d69..3f959c0 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -1384,7 +1384,7 @@ static inline long strlen_user(const char __user *s) might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" - __MODULE_JAL(__strlen_kernel_asm) + __MODULE_JAL(__strlen_user_asm) "move\t%0, $2" : "=r" (res) : "r" (s) -- cgit v0.10.2 From e7571f7fd66c77a760338340adbe41d994fe93ac Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 Dec 2015 11:32:57 +0000 Subject: drm/i915: Break busywaiting for requests on pending signals The busywait in __i915_spin_request() does not respect pending signals and so may consume the entire timeslice for the task instead of returning to userspace to handle the signal. In the worst case this could cause a delay in signal processing of 20ms, which would be a noticeable jitter in cursor tracking. If a higher resolution signal was being used, for example to provide fairness of a server timeslices between clients, we could expect to detect some unfairness between clients (i.e. some windows not updating as fast as others). This issue was noticed when inspecting a report of poor interactivity resulting from excessively high __i915_spin_request usage. 
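As a loose userspace analogue of the fix (purely illustrative; the real code inspects kernel signal state and GPU seqnos), a bounded busy-wait that gives up when a signal has arrived, so the caller can handle it instead of spinning out the timeslice, might look like:

    #include <signal.h>
    #include <stdbool.h>
    #include <time.h>

    static volatile sig_atomic_t signalled;

    static void note_signal(int sig)
    {
            (void)sig;
            signalled = 1;
    }

    /* Spin on *done for at most timeout_us microseconds, bailing out
     * early if a signal was delivered (the kernel analogue is returning
     * -ERESTARTSYS and letting the normal wait path run).
     */
    static bool spin_wait(const volatile int *done, long timeout_us)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            while (!*done) {
                    if (signalled)
                            return false;   /* handle the signal instead */

                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if ((now.tv_sec - start.tv_sec) * 1000000L +
                        (now.tv_nsec - start.tv_nsec) / 1000L > timeout_us)
                            return false;   /* too slow: go to sleep instead */
            }
            return true;
    }

    int main(void)
    {
            volatile int done = 0;

            signal(SIGINT, note_signal);
            /* nothing sets done, so this returns after ~5ms, or sooner on Ctrl-C */
            return spin_wait(&done, 5000) ? 0 : 1;
    }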
Fixes regression from commit 2def4ad99befa25775dd2f714fdd4d92faec6e34 [v4.2] Author: Chris Wilson Date: Tue Apr 7 16:20:41 2015 +0100 drm/i915: Optimistically spin for the request completion v2: Try to assess the impact of the bug Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Cc: Jens Axboe Cc; "Rogozhkin, Dmitry V" Cc: Daniel Vetter Cc: Tvrtko Ursulin Cc: Eero Tamminen Cc: "Rantala, Valtteri" Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449833608-22125-2-git-send-email-chris@chris-wilson.co.uk (cherry picked from commit 91b0c352ace9afec1fb51590c7b8bd60e0eb9fbd) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3163518..06631e1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1146,7 +1146,7 @@ static bool missed_irq(struct drm_i915_private *dev_priv, return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); } -static int __i915_spin_request(struct drm_i915_gem_request *req) +static int __i915_spin_request(struct drm_i915_gem_request *req, int state) { unsigned long timeout; @@ -1158,6 +1158,9 @@ static int __i915_spin_request(struct drm_i915_gem_request *req) if (i915_gem_request_completed(req, true)) return 0; + if (signal_pending_state(state, current)) + break; + if (time_after_eq(jiffies, timeout)) break; @@ -1197,6 +1200,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, struct drm_i915_private *dev_priv = dev->dev_private; const bool irq_test_in_progress = ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); + int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; DEFINE_WAIT(wait); unsigned long timeout_expire; s64 before, now; @@ -1229,7 +1233,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, before = ktime_get_raw_ns(); /* Optimistic spin for the next jiffie before touching IRQs */ - ret = __i915_spin_request(req); + ret = __i915_spin_request(req, state); if (ret == 0) goto out; @@ -1241,8 +1245,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, for (;;) { struct timer_list timer; - prepare_to_wait(&ring->irq_queue, &wait, - interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + prepare_to_wait(&ring->irq_queue, &wait, state); /* We need to check whether any gpu reset happened in between * the caller grabbing the seqno and now ... */ @@ -1260,7 +1263,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, break; } - if (interruptible && signal_pending(current)) { + if (signal_pending_state(state, current)) { ret = -ERESTARTSYS; break; } -- cgit v0.10.2 From 6f06a2c45d8d714ea3b11a360b4a7191e52acaa4 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 5 Aug 2015 16:41:38 +0100 Subject: MIPS: uaccess: Take EVA into account in __copy_from_user() When EVA is in use, __copy_from_user() was unconditionally using the EVA instructions to read the user address space, however this can also be used for kernel access. If the address isn't a valid user address it will cause an address error or TLB exception, and if it is then user memory may be read instead of kernel memory. For example in the following stack trace from Linux v3.10 (changes since then will prevent this particular one still happening) kernel_sendmsg() set the user address limit to KERNEL_DS, and tcp_sendmsg() goes on to use __copy_from_user() with a kernel address in KSeg0. 
[<8002d434>] __copy_fromuser_common+0x10c/0x254 [<805710e0>] tcp_sendmsg+0x5f4/0xf00 [<804e8e3c>] sock_sendmsg+0x78/0xa0 [<804e8f28>] kernel_sendmsg+0x24/0x38 [<804ee0f8>] sock_no_sendpage+0x70/0x7c [<8017c820>] pipe_to_sendpage+0x80/0x98 [<8017c6b0>] splice_from_pipe_feed+0xa8/0x198 [<8017cc54>] __splice_from_pipe+0x4c/0x8c [<8017e844>] splice_from_pipe+0x58/0x78 [<8017e884>] generic_splice_sendpage+0x20/0x2c [<8017d690>] do_splice_from+0xb4/0x110 [<8017d710>] direct_splice_actor+0x24/0x30 [<8017d394>] splice_direct_to_actor+0xd8/0x208 [<8017d51c>] do_splice_direct+0x58/0x7c [<8014eaf4>] do_sendfile+0x1dc/0x39c [<8014f82c>] SyS_sendfile+0x90/0xf8 Add the eva_kernel_access() check in __copy_from_user() like the one in copy_from_user(). Signed-off-by: James Hogan Cc: Markos Chandras Cc: Paul Burton Cc: Leonid Yegoshin Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10843/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 3f959c0..5014e18 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -1122,9 +1122,15 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ - might_fault(); \ - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ + if (eva_kernel_access()) { \ + __cu_len = __invoke_copy_from_kernel(__cu_to, \ + __cu_from, \ + __cu_len); \ + } else { \ + might_fault(); \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ + __cu_len); \ + } \ __cu_len; \ }) -- cgit v0.10.2 From f87a780f07b22b6dc4642dbaf44af65112076cb8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 Dec 2015 11:32:58 +0000 Subject: drm/i915: Limit the busy wait on requests to 5us not 10ms! When waiting for high frequency requests, the finite amount of time required to set up the irq and wait upon it limits the response rate. By busywaiting on the request completion for a short while we can service the high frequency waits as quick as possible. However, if it is a slow request, we want to sleep as quickly as possible. The tradeoff between waiting and sleeping is roughly the time it takes to sleep on a request, on the order of a microsecond. Based on measurements of synchronous workloads from across big core and little atom, I have set the limit for busywaiting as 10 microseconds. In most of the synchronous cases, we can reduce the limit down to as little as 2 miscroseconds, but that leaves quite a few test cases regressing by factors of 3 and more. The code currently uses the jiffie clock, but that is far too coarse (on the order of 10 milliseconds) and results in poor interactivity as the CPU ends up being hogged by slow requests. To get microsecond resolution we need to use a high resolution timer. The cheapest of which is polling local_clock(), but that is only valid on the same CPU. If we switch CPUs because the task was preempted, we can also use that as an indicator that the system is too busy to waste cycles on spinning and we should sleep instead. __i915_spin_request was introduced in commit 2def4ad99befa25775dd2f714fdd4d92faec6e34 [v4.2] Author: Chris Wilson Date: Tue Apr 7 16:20:41 2015 +0100 drm/i915: Optimistically spin for the request completion v2: Drop full u64 for unsigned long - the timer is 32bit wraparound safe, so we can use native register sizes on smaller architectures. 
Mention the approximate microseconds units for elapsed time and add some extra comments describing the reason for busywaiting. v3: Raise the limit to 10us v4: Now 5us. Reported-by: Jens Axboe Link: https://lkml.org/lkml/2015/11/12/621 Reviewed-by: Tvrtko Ursulin Cc: "Rogozhkin, Dmitry V" Cc: Daniel Vetter Cc: Tvrtko Ursulin Cc: Eero Tamminen Cc: "Rantala, Valtteri" Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449833608-22125-3-git-send-email-chris@chris-wilson.co.uk (cherry picked from commit ca5b721e238226af1d767103ac852aeb8e4c0764) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 06631e1..8719fa2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1146,14 +1146,57 @@ static bool missed_irq(struct drm_i915_private *dev_priv, return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); } +static unsigned long local_clock_us(unsigned *cpu) +{ + unsigned long t; + + /* Cheaply and approximately convert from nanoseconds to microseconds. + * The result and subsequent calculations are also defined in the same + * approximate microseconds units. The principal source of timing + * error here is from the simple truncation. + * + * Note that local_clock() is only defined wrt to the current CPU; + * the comparisons are no longer valid if we switch CPUs. Instead of + * blocking preemption for the entire busywait, we can detect the CPU + * switch and use that as indicator of system load and a reason to + * stop busywaiting, see busywait_stop(). + */ + *cpu = get_cpu(); + t = local_clock() >> 10; + put_cpu(); + + return t; +} + +static bool busywait_stop(unsigned long timeout, unsigned cpu) +{ + unsigned this_cpu; + + if (time_after(local_clock_us(&this_cpu), timeout)) + return true; + + return this_cpu != cpu; +} + static int __i915_spin_request(struct drm_i915_gem_request *req, int state) { unsigned long timeout; + unsigned cpu; + + /* When waiting for high frequency requests, e.g. during synchronous + * rendering split between the CPU and GPU, the finite amount of time + * required to set up the irq and wait upon it limits the response + * rate. By busywaiting on the request completion for a short while we + * can service the high frequency waits as quick as possible. However, + * if it is a slow request, we want to sleep as quickly as possible. + * The tradeoff between waiting and sleeping is roughly the time it + * takes to sleep on a request, on the order of a microsecond. + */ if (i915_gem_request_get_ring(req)->irq_refcount) return -EBUSY; - timeout = jiffies + 1; + timeout = local_clock_us(&cpu) + 5; while (!need_resched()) { if (i915_gem_request_completed(req, true)) return 0; @@ -1161,7 +1204,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) if (signal_pending_state(state, current)) break; - if (time_after_eq(jiffies, timeout)) + if (busywait_stop(timeout, cpu)) break; cpu_relax_lowlatency(); -- cgit v0.10.2 From 0f0cd472062eca6f9fac8be0cd5585f9a2df1ab2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 Dec 2015 11:32:59 +0000 Subject: drm/i915: Only spin whilst waiting on the current request Limit busywaiting only to the request currently being processed by the GPU. If the request is not currently being processed by the GPU, there is a very low likelihood of it being completed within the 2 microsecond spin timeout and so we will just be wasting CPU cycles. 
v2: Check for logical inversion when rebasing - we were incorrectly checking for this request being active, and instead busywaiting for when the GPU was not yet processing the request of interest. v3: Try another colour for the seqno names. v4: Another colour for the function names. v5: Remove the forced coherency when checking for the active request. On reflection and plenty of recent experimentation, the issue is not a cache coherency problem - but an irq/seqno ordering problem (timing issue). Here, we do not need the w/a to force ordering of the read with an interrupt. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Cc: "Rogozhkin, Dmitry V" Cc: Daniel Vetter Cc: Tvrtko Ursulin Cc: Eero Tamminen Cc: "Rantala, Valtteri" Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449833608-22125-4-git-send-email-chris@chris-wilson.co.uk (cherry picked from commit 821485dc2ad665f136c57ee589bf7a8210160fe2) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 037a650..f4af19a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2193,8 +2193,17 @@ struct drm_i915_gem_request { struct drm_i915_private *i915; struct intel_engine_cs *ring; - /** GEM sequence number associated with this request. */ - uint32_t seqno; + /** GEM sequence number associated with the previous request, + * when the HWS breadcrumb is equal to this the GPU is processing + * this request. + */ + u32 previous_seqno; + + /** GEM sequence number associated with this request, + * when the HWS breadcrumb is equal or greater than this the GPU + * has finished processing this request. + */ + u32 seqno; /** Position in the ringbuffer of the start of the request */ u32 head; @@ -2911,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } +static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, + bool lazy_coherency) +{ + u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); + return i915_seqno_passed(seqno, req->previous_seqno); +} + static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, bool lazy_coherency) { - u32 seqno; - - BUG_ON(req == NULL); - - seqno = req->ring->get_seqno(req->ring, lazy_coherency); - + u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); return i915_seqno_passed(seqno, req->seqno); } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8719fa2..f56af0a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1193,9 +1193,13 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) * takes to sleep on a request, on the order of a microsecond. 
*/ - if (i915_gem_request_get_ring(req)->irq_refcount) + if (req->ring->irq_refcount) return -EBUSY; + /* Only spin if we know the GPU is processing this request */ + if (!i915_gem_request_started(req, true)) + return -EAGAIN; + timeout = local_clock_us(&cpu) + 5; while (!need_resched()) { if (i915_gem_request_completed(req, true)) @@ -1209,6 +1213,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) cpu_relax_lowlatency(); } + if (i915_gem_request_completed(req, false)) return 0; @@ -2600,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, request->batch_obj = obj; request->emitted_jiffies = jiffies; + request->previous_seqno = ring->last_submitted_seqno; ring->last_submitted_seqno = request->seqno; list_add_tail(&request->list, &ring->request_list); -- cgit v0.10.2 From ef8dd37af85a8f37ca3a29074647511e52c56181 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 18 Dec 2015 19:24:39 +0200 Subject: drm/i915: Workaround CHV pipe C cursor fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Turns out CHV pipe C was glued on somewhat poorly, and there's something wrong with the cursor. If the cursor straddles the left screen edge, and is then moved away from the edge or disabled, the pipe will often underrun. If enough underruns are triggered quickly enough the pipe will fall over and die (it just scans out a solid color and reports a constant underrun). We need to turn the disp2d power well off and on again to recover the pipe. None of that is very nice for the user, so let's just refuse to place the cursor in the compromised position. The ddx appears to fall back to swcursor when the ioctl returns an error, so theoretically there's no loss of functionality for the user (discounting swcursor bugs). I suppose most cursors images actually have the hotspot not exactly at 0,0 so under typical conditions the fallback will in fact kick in as soon as the cursor touches the left edge of the screen. Any atomic compositor should anyway be prepared to fall back to GPU composition when things don't work out, so there should be no problem with those. Other things that I tried to solve this include flipping all display related clock gating knobs I could find, increasing the minimum gtt alignment all the way up to 512k. I also tried to see if there are more specific screen coordinates that hit the bug, but the findings were somewhat inconclusive. Sometimes the failures happen almost across the whole left edge, sometimes more at the very top and around the bottom half. I wasn't able to find any real pattern to these variations, so it seems our only choice is to just refuse to straddle the left screen edge at all. 
Cc: stable@vger.kernel.org Cc: Jason Plum Testcase: igt/kms_chv_cursor_fail Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=92826 Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1450459479-16286-1-git-send-email-ville.syrjala@linux.intel.com Signed-off-by: Daniel Vetter (cherry picked from commit b29ec92c4f5e6d45d8bae8194e664427a01c6687) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 13bc6d4..69e1587 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13728,6 +13728,7 @@ intel_check_cursor_plane(struct drm_plane *plane, struct drm_crtc *crtc = crtc_state->base.crtc; struct drm_framebuffer *fb = state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); + enum pipe pipe = to_intel_plane(plane)->pipe; unsigned stride; int ret; @@ -13761,6 +13762,22 @@ intel_check_cursor_plane(struct drm_plane *plane, return -EINVAL; } + /* + * There's something wrong with the cursor on CHV pipe C. + * If it straddles the left edge of the screen then + * moving it away from the edge or disabling it often + * results in a pipe underrun, and often that can lead to + * dead pipe (constant underrun reported, and it scans + * out just a solid color). To recover from that, the + * display power well must be turned off and on again. + * Refuse the put the cursor into that compromised position. + */ + if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && + state->visible && state->base.crtc_x < 0) { + DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); + return -EINVAL; + } + return 0; } -- cgit v0.10.2 From d6a428fb583738ad685c91a684748cdee7b2a05f Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 5 Aug 2015 16:41:39 +0100 Subject: MIPS: uaccess: Take EVA into account in [__]clear_user __clear_user() (and clear_user() which uses it), always access the user mode address space, which results in EVA store instructions when EVA is enabled even if the current user address limit is KERNEL_DS. Fix this by adding a new symbol __bzero_kernel for the normal kernel address space bzero in EVA mode, and call that from __clear_user() if eva_kernel_access(). 
Signed-off-by: James Hogan Cc: Markos Chandras Cc: Paul Burton Cc: Leonid Yegoshin Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10844/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 5014e18..2e3b399 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -1235,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size) { __kernel_size_t res; - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + if (eva_kernel_access()) { + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, $0\n\t" + "move\t$6, %2\n\t" + __MODULE_JAL(__bzero_kernel) + "move\t%0, $6" + : "=r" (res) + : "r" (addr), "r" (size) + : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + } else { + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, $0\n\t" + "move\t$6, %2\n\t" + __MODULE_JAL(__bzero) + "move\t%0, $6" + : "=r" (res) + : "r" (addr), "r" (size) + : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + } return res; } diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 291af0b..e2b6ab7 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -17,6 +17,7 @@ #include #include +extern void *__bzero_kernel(void *__s, size_t __count); extern void *__bzero(void *__s, size_t __count); extern long __strncpy_from_kernel_nocheck_asm(char *__to, const char *__from, long __len); @@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva); EXPORT_SYMBOL(__copy_in_user_eva); EXPORT_SYMBOL(__copy_to_user_eva); EXPORT_SYMBOL(__copy_user_inatomic_eva); +EXPORT_SYMBOL(__bzero_kernel); #endif EXPORT_SYMBOL(__bzero); EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm); diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index b8e63fd..8f0019a 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -283,6 +283,8 @@ LEAF(memset) 1: #ifndef CONFIG_EVA FEXPORT(__bzero) +#else +FEXPORT(__bzero_kernel) #endif __BUILD_BZERO LEGACY_MODE -- cgit v0.10.2 From 57a2af6bbc7a4f1b145cc216c34476402836f0b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 14 Dec 2015 17:35:02 +0200 Subject: drm/i915: Kill intel_crtc->cursor_bo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The vma may have been rebound between the last time the cursor was enabled and now, so skipping the cursor gtt offset deduction is not safe unless we would also reset cursor_bo to NULL when disabling the cursor. Just thow cursor_bo to the bin instead since it's lost all other uses thanks to universal plane support. Chris pointed out that cursor updates are currently too slow via universal planes that micro optimizations like these wouldn't even help. 
v2: Add a note about futility of micro optimizations (Chris) Cc: drm-intel-fixes@lists.freedesktop.org References: http://lists.freedesktop.org/archives/intel-gfx/2015-December/082976.html Cc: Chris Wilson Cc: Takashi Iwai Cc: Jani Nikula Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1450107302-17171-1-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson (cherry picked from commit 1264859d648c4bdc9f0a098efbff90cbf462a075) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 69e1587..beb0374 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13801,9 +13801,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, crtc = crtc ? crtc : plane->crtc; intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->cursor_bo == obj) - goto update; - if (!obj) addr = 0; else if (!INTEL_INFO(dev)->cursor_needs_physical) @@ -13812,9 +13809,7 @@ intel_commit_cursor_plane(struct drm_plane *plane, addr = obj->phys_handle->busaddr; intel_crtc->cursor_addr = addr; - intel_crtc->cursor_bo = obj; -update: if (crtc->state->active) intel_crtc_update_cursor(crtc, state->visible); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f2a1142..0d00f07 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -550,7 +550,6 @@ struct intel_crtc { int adjusted_x; int adjusted_y; - struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; uint32_t cursor_cntl; uint32_t cursor_size; -- cgit v0.10.2 From 97f9010af05c15e0b7e6b4ef6ff8cb0ebb7e7715 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 11 Dec 2015 19:44:15 +0100 Subject: drm/i915: mdelay(10) considered harmful I missed this myself when reviewing commit 237ed86c693d8a8e4db476976aeb30df4deac74b Author: Sonika Jindal Date: Tue Sep 15 09:44:20 2015 +0530 drm/i915: Check live status before reading edid Long sleeps like this really shouldn't waste cpu cycles spinning. Cc: Sonika Jindal Cc: "Wang, Gary C" Link: http://patchwork.freedesktop.org/patch/msgid/1449859455-32609-1-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Sonika Jindal Signed-off-by: Daniel Vetter (cherry picked from commit 71a199bacb398ee54eeac001699257dda083a455) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 81cdd9f..12393df4 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1384,7 +1384,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) while (!live_status && --retry) { live_status = intel_digital_port_connected(dev_priv, hdmi_to_dig_port(intel_hdmi)); - mdelay(10); + msleep(10); } if (!live_status) -- cgit v0.10.2 From a98728e0bb978fbe9246c93ea89198de612c22e6 Mon Sep 17 00:00:00 2001 From: Gary Wang Date: Tue, 15 Dec 2015 12:40:30 +0800 Subject: drm/i915: Correct max delay for HDMI hotplug live status checking The total 30ms delay for HDMI hotplug detection has already been split into a resolution of 3 retries of 10ms each, for the worst cases. But it still suffered from waiting only 10ms at most in intel_hdmi_detect(). This patch corrects it by reading the hotplug status up to 4 times, spread across the 30ms delay.
v2: - straight up to loop execution for more clear in code readability - mdelay will replace with msleep by Daniel's new patch drm/i915: mdelay(10) considered harmful - suggest to re-evaluate try times for being compatible to old HDMI monitor Reviewed-by: Cooper Chiou Tested-by: Gary Wang Cc: Jani Nikula Cc: Daniel Vetter Cc: Gavin Hindman Cc: Sonika Jindal Cc: Shashank Sharma Signed-off-by: Gary Wang [danvet: fixup conflict with s/mdelay/msleep/ patch.] Cc: drm-intel-fixes@lists.freedesktop.org Signed-off-by: Daniel Vetter (cherry picked from commit 61fb3980dd396880ffba48523b1e27579868b82b) Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 12393df4..64086f2 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1374,17 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct drm_i915_private *dev_priv = to_i915(connector->dev); bool live_status = false; - unsigned int retry = 3; + unsigned int try; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - while (!live_status && --retry) { + for (try = 0; !live_status && try < 4; try++) { + if (try) + msleep(10); live_status = intel_digital_port_connected(dev_priv, hdmi_to_dig_port(intel_hdmi)); - msleep(10); } if (!live_status) -- cgit v0.10.2 From f3575e230cf8537951ebe3cc19974c5db2ff4f1c Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 5 Aug 2015 15:42:40 -0700 Subject: MIPS: CPS: drop .set mips64r2 directives Commit 977e043d5ea1 ("MIPS: kernel: cps-vec: Replace mips32r2 ISA level with mips64r2") leads to .set mips64r2 directives being present in 32 bit (ie. CONFIG_32BIT=y) kernels. This is incorrect & leads to MIPS64 instructions being emitted by the assembler when expanding pseudo-instructions. For example the "move" instruction can legitimately be expanded to a "daddu". This causes problems when the kernel is run on a MIPS32 CPU, as CONFIG_32BIT kernels of course often are... Fix this by dropping the .set directives entirely now that Kconfig should be ensuring that kernels including this code are built with a suitable -march= compiler flag. Signed-off-by: Paul Burton Cc: Markos Chandras Cc: James Hogan Cc: # 3.16+ Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10869/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 8fd5a27..ac81edd 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -257,7 +257,6 @@ LEAF(mips_cps_core_init) has_mt t0, 3f .set push - .set mips64r2 .set mt /* Only allow 1 TC per VPE to execute... */ @@ -376,7 +375,6 @@ LEAF(mips_cps_boot_vpes) nop .set push - .set mips64r2 .set mt 1: /* Enter VPE configuration state */ -- cgit v0.10.2 From 2a037f310bab89b7db5b86c7954cf69c229c87ca Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Tue, 8 Dec 2015 10:11:43 +0000 Subject: MIPS: VDSO: Fix build error Commit ebb5e78cc634 ("MIPS: Initial implementation of a VDSO") introduced a build error. For MIPS VDSO to be compiled it requires binutils version 2.25 or above but the check in the Makefile had inverted logic causing it to be compiled in if binutils is below 2.25. 
This fixes the following compilation error: CC arch/mips/vdso/gettimeofday.o /tmp/ccsExcUd.s: Assembler messages: /tmp/ccsExcUd.s:62: Error: can't resolve `_start' {*UND* section} - `L0' {.text section} /tmp/ccsExcUd.s:467: Error: can't resolve `_start' {*UND* section} - `L0' {.text section} make[2]: *** [arch/mips/vdso/gettimeofday.o] Error 1 make[1]: *** [arch/mips/vdso] Error 2 make: *** [arch/mips] Error 2 [ralf@linux-mips: Fixed Sergei's complaint on the formatting of the cited commit and generally reformatted the log message.] Signed-off-by: Qais Yousef Cc: alex@alex-smith.me.uk Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/11745/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index ef5f348..018f8c7 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -26,8 +26,8 @@ aflags-vdso := $(ccflags-vdso) \ # the comments on that file. # ifndef CONFIG_CPU_MIPSR6 - ifeq ($(call ld-ifversion, -gt, 22400000, y),) - $(warning MIPS VDSO requires binutils > 2.24) + ifeq ($(call ld-ifversion, -lt, 22500000, y),) + $(warning MIPS VDSO requires binutils >= 2.25) obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y)) ccflags-vdso += -DDISABLE_MIPS_VDSO endif -- cgit v0.10.2 From ec7b97208aaed46bd3fbe166d5909228c380a720 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 27 Nov 2015 19:17:01 +0100 Subject: MIPS: Fix build error due to unused variables. c861519fcf95b2d46cb4275903423b43ae150a40 ("MIPS: Fix delay loops which may be removed by GCC.") which made it upstream was an outdated version of the patch and is lacking some the removal of two variables that became unused thus resulting in further warnings and build breakage. The commit from ae878615d7cee5d7346946cf1ae1b60e427013c2 was correct however. Signed-off-by: Ralf Baechle diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index dbbeccc..a245cad 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -221,7 +221,6 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) static int rt288x_pci_probe(struct platform_device *pdev) { void __iomem *io_map_base; - int i; rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE); diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c index 78b2ef4..9d293b3 100644 --- a/arch/mips/pmcs-msp71xx/msp_setup.c +++ b/arch/mips/pmcs-msp71xx/msp_setup.c @@ -39,7 +39,6 @@ extern void msp_serial_setup(void); void msp7120_reset(void) { void *start, *end, *iptr; - register int i; /* Diasble all interrupts */ local_irq_disable(); diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c index db8f88b..6afa343 100644 --- a/arch/mips/sni/reset.c +++ b/arch/mips/sni/reset.c @@ -26,7 +26,7 @@ static inline void kb_wait(void) /* XXX This ends up at the ARC firmware prompt ... */ void sni_machine_restart(char *command) { - int i, j; + int i; /* This does a normal via the keyboard controller like a PC. We can do that easier ... */ -- cgit v0.10.2 From a7f2d7865720ff13d5b0f2a3bb1fd80dc3d7a73f Mon Sep 17 00:00:00 2001 From: Alexis Dambricourt Date: Mon, 14 Dec 2015 15:39:34 +0100 Subject: KVM: MTRR: fix fixed MTRR segment look up This fixes the slow-down of VM running with pci-passthrough, since some MTRR range changed from MTRR_TYPE_WRBACK to MTRR_TYPE_UNCACHABLE. Memory in the 0K-640K range was incorrectly treated as uncacheable. 
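The fix is a plain interval-membership test; as a stand-alone illustration (not the kernel's actual helper):

  /* A fixed MTRR segment covers the half-open range [start, end), so
   * membership is "start <= addr && addr < end".  With the first
   * comparison inverted, an address inside a segment only passes its
   * own segment's test when it equals the segment start and can match
   * a later segment instead, which is how low memory ended up with the
   * wrong memory type. */
  static int addr_in_fixed_seg(unsigned long long start,
                               unsigned long long end,
                               unsigned long long addr)
  {
      return start <= addr && addr < end;
  }
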
Fixes: f7bfb57b3e89ff89c0da9f93dedab89f68d6ca27 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=107561 Cc: qemu-stable@nongnu.org Signed-off-by: Alexis Dambricourt [Use correct BZ for "Fixes" annotation. - Paolo] Signed-off-by: Paolo Bonzini diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index 9e8bf13..adc54e1 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -267,7 +267,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr) for (seg = 0; seg < seg_num; seg++) { mtrr_seg = &fixed_seg_table[seg]; - if (mtrr_seg->start >= addr && addr < mtrr_seg->end) + if (mtrr_seg->start <= addr && addr < mtrr_seg->end) return seg; } -- cgit v0.10.2 From fa7c4ebd5ae0c22f9908436303106a9ffcf0cf42 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 14 Dec 2015 16:57:31 +0100 Subject: KVM: MTRR: observe maxphyaddr from guest CPUID, not host Conversion of MTRRs to ranges used the maxphyaddr from the boot CPU. This is wrong, because var_mtrr_range's mask variable then is discontiguous (like FF00FFFF000, where the first run of 0s corresponds to the bits between host and guest maxphyaddr). Instead always set up the masks to be full 64-bit values---we know that the reserved bits at the top are zero, and we can restore them when reading the MSR. This way var_mtrr_range gets a mask that just works. Fixes: a13842dc668b40daef4327294a6d3bdc8bd30276 Cc: qemu-stable@nongnu.org Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=107561 Signed-off-by: Paolo Bonzini diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index adc54e1..7747b6d 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -300,7 +300,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end) *start = range->base & PAGE_MASK; mask = range->mask & PAGE_MASK; - mask |= ~0ULL << boot_cpu_data.x86_phys_bits; /* This cannot overflow because writing to the reserved bits of * variable MTRRs causes a #GP. @@ -356,10 +355,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) if (var_mtrr_range_is_valid(cur)) list_del(&mtrr_state->var_ranges[index].node); + /* Extend the mask with all 1 bits to the left, since those + * bits must implicitly be 0. The bits are then cleared + * when reading them. + */ if (!is_mtrr_mask) cur->base = data; else - cur->mask = data; + cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu)); /* add it to the list if it's enabled. */ if (var_mtrr_range_is_valid(cur)) { @@ -426,6 +429,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; else *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; + + *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1; } return 0; -- cgit v0.10.2 From e24dea2afc6a0852983dc741072d8e96155e13f5 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 22 Dec 2015 15:20:00 +0100 Subject: KVM: MTRR: treat memory as writeback if MTRR is disabled in guest CPUID Virtual machines can be run with CPUID such that there are no MTRRs. In that case, the firmware will never enable MTRRs and it is obviously undesirable to run the guest entirely with UC memory. Check out guest CPUID, and use WB memory if MTRR do not exist. 
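The decision boils down to one CPUID feature test; a rough stand-alone sketch with the constants spelled out rather than taken from kernel headers:

  /* MTRR presence is advertised in CPUID.01H:EDX bit 12.  If the guest
   * never sees that bit, its firmware never programs MTRRs, so
   * defaulting to UC (type 0) would leave all guest memory uncached;
   * write-back (type 6) is the sensible fallback. */
  #define CPUID_1_EDX_MTRR   (1u << 12)
  #define MEM_TYPE_UC        0
  #define MEM_TYPE_WB        6

  static unsigned int mtrr_disabled_default(unsigned int guest_cpuid_1_edx)
  {
      return (guest_cpuid_1_edx & CPUID_1_EDX_MTRR) ? MEM_TYPE_UC
                                                    : MEM_TYPE_WB;
  }
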
Cc: qemu-stable@nongnu.org Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=107561 Signed-off-by: Paolo Bonzini diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 06332cb..3f5c48d 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) return best && (best->ecx & bit(X86_FEATURE_XSAVE)); } +static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 1, 0); + return best && (best->edx & bit(X86_FEATURE_MTRR)); +} + static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index 7747b6d..3f8c732 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state) return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; } -static u8 mtrr_disabled_type(void) +static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu) { /* * Intel SDM 11.11.2.2: all MTRRs are disabled when * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC * memory type is applied to all of physical memory. + * + * However, virtual machines can be run with CPUID such that + * there are no MTRRs. In that case, the firmware will never + * enable MTRRs and it is obviously undesirable to run the + * guest entirely with UC memory and we use WB. */ - return MTRR_TYPE_UNCACHABLE; + if (guest_cpuid_has_mtrr(vcpu)) + return MTRR_TYPE_UNCACHABLE; + else + return MTRR_TYPE_WRBACK; } /* @@ -675,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) } if (iter.mtrr_disabled) - return mtrr_disabled_type(); + return mtrr_disabled_type(vcpu); /* not contained in any MTRRs. */ if (type == -1) -- cgit v0.10.2 From 0185604c2d82c560dab2f2933a18f797e74ab5a8 Mon Sep 17 00:00:00 2001 From: Andrew Honig Date: Wed, 18 Nov 2015 14:50:23 -0800 Subject: KVM: x86: Reload pit counters for all channels when restoring state Currently if userspace restores the pit counters with a count of 0 on channels 1 or 2 and the guest attempts to read the count on those channels, then KVM will perform a mod of 0 and crash. This will ensure that 0 values are converted to 65536 as per the spec. This is CVE-2015-7513. 
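The spec detail behind the crash, as a small stand-alone illustration (the helper name is made up):

  /* Per the 8254 PIT specification, a programmed count of 0 means the
   * maximum count, i.e. 65536 in binary counting mode.  Any code that
   * later computes "elapsed % count" therefore has to normalize a zero
   * count first, or it divides by zero. */
  static unsigned int pit_effective_count(unsigned int programmed)
  {
      return programmed ? programmed : 0x10000;   /* 0 -> 65536 */
  }
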
Signed-off-by: Andy Honig Signed-off-by: Paolo Bonzini diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b84ba4b..7ffc224 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3572,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { + int i; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); - kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); + for (i = 0; i < 3; i++) + kvm_pit_load_count(kvm, i, ps->channels[i].count, 0); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } @@ -3593,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int start = 0; + int i; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; @@ -3602,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; - kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); + for (i = 0; i < 3; i++) + kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } -- cgit v0.10.2 From b5875222de2fb91339db79a753677ba4f68120d0 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Fri, 11 Dec 2015 13:14:28 -0700 Subject: NVMe: IO ending fixes on surprise removal This patch fixes a lost request discovered during IO + hot removal. The driver's pci removal deletes gendisks prior to shutting down the controller to allow dirty data to sync. Dirty data can not be synced on a surprise removal, though, and would potentially block indefinitely. The driver previously had marked the queue as dying in this scenario to prevent new requests from attempting, however it will still block for requests that already entered the queue. This patch fixes this by quiescing IO first, then aborting the requeued requests before deleting disks. Reported-by: Sujith Pandel Signed-off-by: Keith Busch Tested-by: Sujith Pandel Signed-off-by: Jens Axboe diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9e294ff..0c67b57 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2540,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns) { bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue); - if (kill) + if (kill) { blk_set_queue_dying(ns->queue); + + /* + * The controller was shutdown first if we got here through + * device removal. The shutdown may requeue outstanding + * requests. These need to be aborted immediately so + * del_gendisk doesn't block indefinitely for their completion. + */ + blk_mq_abort_requeue_list(ns->queue); + } if (ns->disk->flags & GENHD_FL_UP) del_gendisk(ns->disk); if (kill || !blk_queue_dying(ns->queue)) { @@ -2977,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev) { struct nvme_ns *ns, *next; + if (nvme_io_incapable(dev)) { + /* + * If the device is not capable of IO (surprise hot-removal, + * for example), we need to quiesce prior to deleting the + * namespaces. This will end outstanding requests and prevent + * attempts to sync dirty data. 
+ */ + nvme_dev_shutdown(dev); + } list_for_each_entry_safe(ns, next, &dev->namespaces, list) nvme_ns_remove(ns); } -- cgit v0.10.2 From 23688bf4f830a89866fd0ed3501e342a7360fe4f Mon Sep 17 00:00:00 2001 From: Junichi Nomura Date: Tue, 22 Dec 2015 10:23:44 -0700 Subject: block: ensure to split after potentially bouncing a bio blk_queue_bio() does split then bounce, which makes the segment counting based on pages before bouncing and could go wrong. Move the split to after bouncing, like we do for blk-mq, and the we fix the issue of having the bio count for segments be wrong. Fixes: 54efd50bfd87 ("block: make generic_make_request handle arbitrarily sized bios") Cc: stable@vger.kernel.org Tested-by: Artem S. Tashkinov Signed-off-by: Jens Axboe diff --git a/block/blk-core.c b/block/blk-core.c index 3636be4..c487b94 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1689,8 +1689,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) struct request *req; unsigned int request_count = 0; - blk_queue_split(q, &bio, q->bio_split); - /* * low level driver can indicate that it wants pages above a * certain limit bounced to low memory (ie for highmem, or even @@ -1698,6 +1696,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) */ blk_queue_bounce(q, &bio); + blk_queue_split(q, &bio, q->bio_split); + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { bio->bi_error = -EIO; bio_endio(bio); -- cgit v0.10.2 From e8271201462710dbbaa0448b768428606724ca90 Mon Sep 17 00:00:00 2001 From: Mike Krinkin Date: Tue, 15 Dec 2015 12:56:40 +0300 Subject: null_blk: fix use-after-free error blk_end_request_all may free request, so we need to save request_queue pointer before blk_end_request_all call. The problem was introduced in commit cf8ecc5a8455266f8d51 ("null_blk: guarantee device restart in all irq modes") and causes general protection fault with slab poisoning enabled. Fixes: cf8ecc5a8455266f8d51 ("null_blk: guarantee device restart in all irq modes") Signed-off-by: Mike Krinkin Reviewed-by: Ming Lei Signed-off-by: Jens Axboe diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8162475..a428e4e 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -219,6 +219,9 @@ static void end_cmd(struct nullb_cmd *cmd) { struct request_queue *q = NULL; + if (cmd->rq) + q = cmd->rq->q; + switch (queue_mode) { case NULL_Q_MQ: blk_mq_end_request(cmd->rq, 0); @@ -232,9 +235,6 @@ static void end_cmd(struct nullb_cmd *cmd) goto free_cmd; } - if (cmd->rq) - q = cmd->rq->q; - /* Restart queue if needed, as we are freeing a tag */ if (q && !q->mq_ops && blk_queue_stopped(q)) { unsigned long flags; -- cgit v0.10.2 From 427d6e4812cf16fa6defccdcdfae2d60bfeb43b2 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 16 Dec 2015 17:14:45 +0800 Subject: bus: sunxi-rsb: Fix primary PMIC mapping hardware address The primary PMICs use 0x3a3 as their hardware address, not 0x3e3. Signed-off-by: Chen-Yu Tsai Signed-off-by: Olof Johansson diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 0cfcb39..f951b35 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -527,7 +527,7 @@ static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb) */ static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = { - { 0x3e3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ + { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... 
*/ { 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */ }; -- cgit v0.10.2 From bccd240fc8ac2df7d7a957e29c6d8fd7da10f86f Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 16 Dec 2015 17:14:46 +0800 Subject: bus: sunxi-rsb: Fix peripheral IC mapping runtime address 0x4e is the runtime address normally associated with perihperal ICs. 0x45 is not a valid runtime address. Signed-off-by: Chen-Yu Tsai Signed-off-by: Olof Johansson diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index f951b35..25996e2 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -529,7 +529,7 @@ static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb) static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = { { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */ { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */ - { 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */ + { 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */ }; static u8 sunxi_rsb_get_rtaddr(u16 hwaddr) -- cgit v0.10.2 From de3793796f78e293cc0873744a75588c99ed2fdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= Date: Tue, 22 Dec 2015 21:44:01 +0100 Subject: um: Fix pointer cast MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a pointer cast typo introduced in v4.4-rc5 especially visible for the i386 subarchitecture where it results in a kernel crash. [ Also removed pointless cast as per Al Viro - Linus ] Fixes: 8090bfd2bb9a ("um: Fix fpstate handling") Signed-off-by: Mickaël Salaün Cc: Jeff Dike Acked-by: Richard Weinberger Signed-off-by: Linus Torvalds diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c index e5f854c..14fcd01 100644 --- a/arch/x86/um/signal.c +++ b/arch/x86/um/signal.c @@ -470,7 +470,7 @@ long sys_sigreturn(void) struct sigcontext __user *sc = &frame->sc; int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); - if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) || + if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) || copy_from_user(&set.sig[1], frame->extramask, sig_size)) goto segfault; -- cgit v0.10.2 From 80373d37bee562060ce4d3e5272b708e8db5d1fc Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Tue, 8 Dec 2015 10:26:49 +0000 Subject: ARM: tegra: Fix suspend hang on Tegra124 Chromebooks Enabling CPUFreq support for Tegra124 Chromebooks is causing the Tegra124 to hang when resuming from suspend. When CPUFreq is enabled, the CPU clock is changed from the PLLX clock to the DFLL clock during kernel boot. When resuming from suspend the CPU clock is temporarily changed back to the PLLX clock before switching back to the DFLL. If the DFLL is operating at a much lower frequency than the PLLX when we enter suspend, and so the CPU voltage rail is at a voltage too low for the CPUs to operate at the PLLX frequency, then the device will hang. Please note that the PLLX is used in the resume sequence to switch the CPU clock from the very slow 32K clock to a faster clock during early resume to speed up the resume sequence before the DFLL is resumed. Ideally, we should fix this by setting the suspend frequency so that it matches the PLLX frequency, however, that would be a bigger change. For now simply disable CPUFreq support for Tegra124 Chromebooks to avoid the hang when resuming from suspend. 
Fixes: 9a0baee960a7 ("ARM: tegra: Enable CPUFreq support for Tegra124 Chromebooks") Signed-off-by: Jon Hunter Signed-off-by: Olof Johansson diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi index 40c23a0..ec1aa64 100644 --- a/arch/arm/boot/dts/tegra124-nyan.dtsi +++ b/arch/arm/boot/dts/tegra124-nyan.dtsi @@ -399,7 +399,7 @@ /* CPU DFLL clock */ clock@0,70110000 { - status = "okay"; + status = "disabled"; vdd-cpu-supply = <&vdd_cpu>; nvidia,i2c-fs-rate = <400000>; }; -- cgit v0.10.2 From 9bcfd78ac0d19ef4c14b8b37eded26e5b3299e41 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Fri, 20 Nov 2015 15:54:06 -0800 Subject: sparc: Hook up userfaultfd system call After hooking up system call, userfaultfd selftest was successful for both 32 and 64 bit version of test. Signed-off-by: Mike Kravetz Signed-off-by: David S. Miller diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index efe9479..f31a124 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h @@ -417,8 +417,9 @@ #define __NR_bpf 349 #define __NR_execveat 350 #define __NR_membarrier 351 +#define __NR_userfaultfd 352 -#define NR_syscalls 352 +#define NR_syscalls 353 /* Bitmask values returned from kern_features system call. */ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index cc23b62..78e8029 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -87,4 +87,4 @@ sys_call_table: /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf -/*350*/ .long sys_execveat, sys_membarrier +/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index f229468..2549c2c 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -88,7 +88,7 @@ sys_call_table32: .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf -/*350*/ .word sys32_execveat, sys_membarrier +/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd #endif /* CONFIG_COMPAT */ @@ -168,4 +168,4 @@ sys_call_table: .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf -/*350*/ .word sys64_execveat, sys_membarrier +/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd -- cgit v0.10.2 From a7def561c2ad8572c5d016ac96949e5e9999965f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Dec 2015 11:16:44 +0000 Subject: cpufreq: scpi-cpufreq: signedness bug in scpi_get_dvfs_info() The "domain" variable needs to be signed for the error handling to work. Fixes: 8def31034d03 (cpufreq: arm_big_little: add SCPI interface driver) Signed-off-by: Dan Carpenter Acked-by: Viresh Kumar Acked-by: Sudeep Holla Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 2c3b16f..de5e89b 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -31,7 +31,7 @@ static struct scpi_ops *scpi_ops; static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev) { - u8 domain = topology_physical_package_id(cpu_dev->id); + int domain = topology_physical_package_id(cpu_dev->id); if (domain < 0) return ERR_PTR(-EINVAL); -- cgit v0.10.2 From 01fd3c2744540ae7554bf098a9615a8310c6fc13 Mon Sep 17 00:00:00 2001 From: Aya Mahfouz Date: Tue, 15 Dec 2015 01:37:57 +0200 Subject: tty: serial: constify sunhv_ops structs Constifies sunhv_ops structures in tty's serial driver since they are not modified after their initialization. Detected and found using Coccinelle. Suggested-by: Julia Lawall Signed-off-by: Aya Mahfouz Signed-off-by: David S. Miller diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index 0640318..f224e8a 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -168,17 +168,17 @@ struct sunhv_ops { int (*receive_chars)(struct uart_port *port); }; -static struct sunhv_ops bychar_ops = { +static const struct sunhv_ops bychar_ops = { .transmit_chars = transmit_chars_putchar, .receive_chars = receive_chars_getchar, }; -static struct sunhv_ops bywrite_ops = { +static const struct sunhv_ops bywrite_ops = { .transmit_chars = transmit_chars_write, .receive_chars = receive_chars_read, }; -static struct sunhv_ops *sunhv_ops = &bychar_ops; +static const struct sunhv_ops *sunhv_ops = &bychar_ops; static struct tty_port *receive_chars(struct uart_port *port) { -- cgit v0.10.2 From 82924e542f20e645bc7de86e2889fe3fb0858566 Mon Sep 17 00:00:00 2001 From: Khalid Aziz Date: Thu, 17 Dec 2015 10:33:50 -0700 Subject: sparc64: Add ADI capability to cpu capabilities Add ADI (Application Data Integrity) capability to cpu capabilities list. ADI capability allows virtual addresses to be encoded with a tag in bits 63-60. This tag serves as an access control key for the regions of virtual address with ADI enabled and a key set on them. Hypervisor encodes this capability as "adp" in "hwcap-list" property in machine description. Signed-off-by: Khalid Aziz Signed-off-by: David S. Miller diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index 370ca1e..9331083 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -95,6 +95,7 @@ * really available. So we simply advertise only "crypto" support. */ #define HWCAP_SPARC_CRYPTO 0x04000000 /* CRYPTO insns available */ +#define HWCAP_SPARC_ADI 0x08000000 /* ADI available */ #define CORE_DUMP_USE_REGSET diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index f7b2617..f3185e2 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -380,7 +380,8 @@ static const char *hwcaps[] = { */ "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2", "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau", - "ima", "cspare", "pause", "cbcond", + "ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */, + "adp", }; static const char *crypto_hwcaps[] = { @@ -396,7 +397,7 @@ void cpucap_info(struct seq_file *m) seq_puts(m, "cpucaps\t\t: "); for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { unsigned long bit = 1UL << i; - if (caps & bit) { + if (hwcaps[i] && (caps & bit)) { seq_printf(m, "%s%s", printed ? 
"," : "", hwcaps[i]); printed++; @@ -450,7 +451,7 @@ static void __init report_hwcaps(unsigned long caps) for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { unsigned long bit = 1UL << i; - if (caps & bit) + if (hwcaps[i] && (caps & bit)) report_one_hwcap(&printed, hwcaps[i]); } if (caps & HWCAP_SPARC_CRYPTO) @@ -485,7 +486,7 @@ static unsigned long __init mdesc_cpu_hwcap_list(void) for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { unsigned long bit = 1UL << i; - if (!strcmp(prop, hwcaps[i])) { + if (hwcaps[i] && !strcmp(prop, hwcaps[i])) { caps |= bit; break; } -- cgit v0.10.2 From 1ca04a4ce0d5131471c5a1fac76899dc2d9d3f36 Mon Sep 17 00:00:00 2001 From: Rob Gardner Date: Mon, 21 Dec 2015 21:48:03 -0700 Subject: sparc64: Don't set %pil in rtrap_nmi too early Commit 28a1f53 delays setting %pil to avoid potential hardirq stack overflow in the common rtrap_irq path. Setting %pil also needs to be delayed in the rtrap_nmi path for the same reason. Signed-off-by: Rob Gardner Signed-off-by: Dave Aldridge Signed-off-by: David S. Miller diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 39f0c66..d08bdaf 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -73,7 +73,13 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 andn %l1, %l4, %l1 srl %l4, 20, %l4 ba,pt %xcc, rtrap_no_irq_enable - wrpr %l4, %pil + nop + /* Do not actually set the %pil here. We will do that + * below after we clear PSTATE_IE in the %pstate register. + * If we re-enable interrupts here, we can recurse down + * the hardirq stack potentially endlessly, causing a + * stack overflow. + */ .align 64 .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall -- cgit v0.10.2 From 3f74306ac84cf7f2da2fdc87014fc455f5e67bad Mon Sep 17 00:00:00 2001 From: Rob Gardner Date: Tue, 22 Dec 2015 21:16:06 -0700 Subject: sparc64: Ensure perf can access user stacks When an interrupt (such as a perf counter interrupt) is delivered while executing in user space, the trap entry code puts ASI_AIUS in %asi so that copy_from_user() and copy_to_user() will access the correct memory. But if a perf counter interrupt is delivered while the cpu is already executing in kernel space, then the trap entry code will put ASI_P in %asi, and this will prevent copy_from_user() from reading any useful stack data in either of the perf_callchain_user_X functions, and thus no user callgraph data will be collected for this sample period. An additional problem is that a fault is guaranteed to occur, and though it will be silently covered up, it wastes time and could perturb state. In perf_callchain_user(), we ensure that %asi contains ASI_AIUS because we know for a fact that the subsequent calls to copy_from_user() are intended to read the user's stack. [ Use get_fs()/set_fs() -DaveM ] Signed-off-by: Rob Gardner Signed-off-by: Dave Aldridge Signed-off-by: David S. 
Miller diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 3091267..b1144d6 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1828,11 +1828,16 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { + mm_segment_t old_fs; + perf_callchain_store(entry, regs->tpc); if (!current->mm) return; + old_fs = get_fs(); + set_fs(USER_DS); + flushw_user(); pagefault_disable(); @@ -1843,4 +1848,6 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) perf_callchain_user_64(entry, regs); pagefault_enable(); + + set_fs(old_fs); } -- cgit v0.10.2 From 833526941f5945cf0b22a595bb8f3525b512f654 Mon Sep 17 00:00:00 2001 From: Rob Gardner Date: Tue, 22 Dec 2015 21:16:07 -0700 Subject: sparc64: Perf should save/restore fault info There have been several reports of random processes being killed with a bus error or segfault during userspace stack walking in perf. One of the root causes of this problem is an asynchronous modification to thread_info fault_address and fault_code, which stems from a perf counter interrupt arriving during kernel processing of a "benign" fault, such as a TSB miss. Since perf_callchain_user() invokes copy_from_user() to read user stacks, a fault is not only possible, but probable. Validity checks on the stack address merely cover up the problem and reduce its frequency. The solution here is to save and restore fault_address and fault_code in perf_callchain_user() so that the benign fault handler is not disturbed by a perf interrupt. Signed-off-by: Rob Gardner Signed-off-by: Dave Aldridge Signed-off-by: David S. Miller diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index b1144d6..6596f66 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1828,6 +1828,8 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { + u64 saved_fault_address = current_thread_info()->fault_address; + u8 saved_fault_code = get_thread_fault_code(); mm_segment_t old_fs; perf_callchain_store(entry, regs->tpc); @@ -1850,4 +1852,6 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) pagefault_enable(); set_fs(old_fs); + set_thread_fault_code(saved_fault_code); + current_thread_info()->fault_address = saved_fault_address; } -- cgit v0.10.2 From a7c5724b5c17775ca8ea2fd9906d8a7e37337cce Mon Sep 17 00:00:00 2001 From: Rob Gardner Date: Tue, 22 Dec 2015 23:24:49 -0700 Subject: sparc64: fix FP corruption in user copy functions Short story: Exception handlers used by some copy_to_user() and copy_from_user() functions do not diligently clean up floating point register usage, and this can result in a user process seeing invalid values in floating point registers. This sometimes makes the process fail. Long story: Several cpu-specific (NG4, NG2, U1, U3) memcpy functions use floating point registers and VIS alignaddr/faligndata to accelerate data copying when source and dest addresses don't align well. Linux uses a lazy scheme for saving floating point registers; It is not done upon entering the kernel since it's a very expensive operation. Rather, it is done only when needed. If the kernel ends up not using FP regs during the course of some trap or system call, then it can return to user space without saving or restoring them. 
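In rough C pseudocode, the contract described next looks like this (the flag value and layout are simplified, not the real sparc definitions):

  #define FPRS_FEF 0x4              /* illustrative FP-enable flag */

  struct fpu_state {
      unsigned long fprs;
      int user_regs_saved;
  };

  static void vis_entry(struct fpu_state *f)    /* ~ VISEntry */
  {
      if ((f->fprs & FPRS_FEF) && !f->user_regs_saved)
          f->user_regs_saved = 1;     /* save user FP regs here */
  }

  static void vis_exit(struct fpu_state *f)     /* ~ VISExit */
  {
      /* regs are clean; clearing FEF makes the next user-space FP
       * access trap so the state is restored lazily */
      f->fprs &= ~FPRS_FEF;
  }
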
The various memcpy functions begin their FP code with VISEntry (or a variation thereof), which saves the FP regs. They conclude their FP code with VISExit (or a variation) which essentially marks the FP regs "clean", ie, they contain no unsaved values. fprs.FPRS_FEF is turned off so that a lazy restore will be triggered when/if the user process accesses floating point regs again. The bug is that the user copy variants of memcpy, copy_from_user() and copy_to_user(), employ an exception handling mechanism to detect faults when accessing user space addresses, and when this handler is invoked, an immediate return from the function is forced, and VISExit is not executed, thus leaving the fprs register in an indeterminate state, but often with fprs.FPRS_FEF set and one or more dirty bits. This results in a return to user space with invalid values in the FP regs, and since fprs.FPRS_FEF is on, no lazy restore occurs. This bug affects copy_to_user() and copy_from_user() for NG4, NG2, U3, and U1. All are fixed by using a new exception handler for those loads and stores that are done during the time between VISEnter and VISExit. n.b. In NG4memcpy, the problematic code can be triggered by a copy size greater than 128 bytes and an unaligned source address. This bug is known to be the cause of random user process memory corruptions while perf is running with the callgraph option (ie, perf record -g). This occurs because perf uses copy_from_user() to read user stacks, and may fault when it follows a stack frame pointer off to an invalid page. Validation checks on the stack address just obscure the underlying problem. Signed-off-by: Rob Gardner Signed-off-by: Dave Aldridge Signed-off-by: David S. Miller diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 3d61fca..f2d30ca 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -946,6 +946,12 @@ ENTRY(__retl_one) mov 1, %o0 ENDPROC(__retl_one) +ENTRY(__retl_one_fp) + VISExitHalf + retl + mov 1, %o0 +ENDPROC(__retl_one_fp) + ENTRY(__ret_one_asi) wr %g0, ASI_AIUS, %asi ret @@ -958,6 +964,13 @@ ENTRY(__retl_one_asi) mov 1, %o0 ENDPROC(__retl_one_asi) +ENTRY(__retl_one_asi_fp) + wr %g0, ASI_AIUS, %asi + VISExitHalf + retl + mov 1, %o0 +ENDPROC(__retl_one_asi_fp) + ENTRY(__retl_o1) retl mov %o1, %o0 diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S index 119ccb9..d5242b8 100644 --- a/arch/sparc/lib/NG2copy_from_user.S +++ b/arch/sparc/lib/NG2copy_from_user.S @@ -11,6 +11,14 @@ .text; \ .align 4; +#define EX_LD_FP(x) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_one_asi_fp;\ + .text; \ + .align 4; + #ifndef ASI_AIUS #define ASI_AIUS 0x11 #endif diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S index 7fe1cce..4e962d9 100644 --- a/arch/sparc/lib/NG2copy_to_user.S +++ b/arch/sparc/lib/NG2copy_to_user.S @@ -11,6 +11,14 @@ .text; \ .align 4; +#define EX_ST_FP(x) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_one_asi_fp;\ + .text; \ + .align 4; + #ifndef ASI_AIUS #define ASI_AIUS 0x11 #endif diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S index 30eee6e..d5f585d 100644 --- a/arch/sparc/lib/NG2memcpy.S +++ b/arch/sparc/lib/NG2memcpy.S @@ -34,10 +34,16 @@ #ifndef EX_LD #define EX_LD(x) x #endif +#ifndef EX_LD_FP +#define EX_LD_FP(x) x +#endif #ifndef EX_ST #define EX_ST(x) x #endif +#ifndef EX_ST_FP +#define EX_ST_FP(x) x +#endif #ifndef EX_RETVAL #define EX_RETVAL(x) x @@ -134,40 
+140,40 @@ fsrc2 %x6, %f12; \ fsrc2 %x7, %f14; #define FREG_LOAD_1(base, x0) \ - EX_LD(LOAD(ldd, base + 0x00, %x0)) + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)) #define FREG_LOAD_2(base, x0, x1) \ - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD(LOAD(ldd, base + 0x08, %x1)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); #define FREG_LOAD_3(base, x0, x1, x2) \ - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD(LOAD(ldd, base + 0x10, %x2)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); #define FREG_LOAD_4(base, x0, x1, x2, x3) \ - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD(LOAD(ldd, base + 0x18, %x3)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD(LOAD(ldd, base + 0x20, %x4)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD(LOAD(ldd, base + 0x20, %x4)); \ - EX_LD(LOAD(ldd, base + 0x28, %x5)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ + EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ - EX_LD(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD(LOAD(ldd, base + 0x20, %x4)); \ - EX_LD(LOAD(ldd, base + 0x28, %x5)); \ - EX_LD(LOAD(ldd, base + 0x30, %x6)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ + EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \ + EX_LD_FP(LOAD(ldd, base + 0x30, %x6)); .register %g2,#scratch .register %g3,#scratch @@ -275,11 +281,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ nop /* fall through for 0 < low bits < 8 */ 110: sub %o4, 64, %g2 - EX_LD(LOAD_BLK(%g2, %f0)) -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) - EX_LD(LOAD_BLK(%o4, %f16)) + EX_LD_FP(LOAD_BLK(%g2, %f0)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f16)) FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -290,10 +296,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 120: sub %o4, 56, %g2 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) - EX_LD(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f16)) FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, 
f18) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -304,10 +310,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 130: sub %o4, 48, %g2 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) - EX_LD(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f16)) FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) FREG_MOVE_6(f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -318,10 +324,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 140: sub %o4, 40, %g2 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) - EX_LD(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f16)) FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) FREG_MOVE_5(f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -332,10 +338,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 150: sub %o4, 32, %g2 FREG_LOAD_4(%g2, f0, f2, f4, f6) -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) - EX_LD(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f16)) FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) FREG_MOVE_4(f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -346,10 +352,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 160: sub %o4, 24, %g2 FREG_LOAD_3(%g2, f0, f2, f4) -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) - EX_LD(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f16)) FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) FREG_MOVE_3(f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -360,10 +366,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 170: sub %o4, 16, %g2 FREG_LOAD_2(%g2, f0, f2) -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) - EX_LD(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f16)) FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) FREG_MOVE_2(f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -374,10 +380,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 180: sub %o4, 8, %g2 FREG_LOAD_1(%g2, f0) -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) - EX_LD(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f16)) FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) FREG_MOVE_1(f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -387,10 +393,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ nop 190: -1: EX_ST(STORE_INIT(%g0, %o4 + %g3)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) subcc %g1, 64, %g1 - EX_LD(LOAD_BLK(%o4, %f0)) - EX_ST(STORE_BLK(%f0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f0)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) add %o4, 64, %o4 bne,pt %xcc, 1b LOAD(prefetch, %o4 + 64, #one_read) diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S index fd9f903..2e8ee7a 100644 --- a/arch/sparc/lib/NG4copy_from_user.S +++ b/arch/sparc/lib/NG4copy_from_user.S @@ -11,6 +11,14 @@ .text; \ .align 4; +#define EX_LD_FP(x) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, 
__retl_one_asi_fp;\ + .text; \ + .align 4; + #ifndef ASI_AIUS #define ASI_AIUS 0x11 #endif diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S index 9744c454..be0bf45 100644 --- a/arch/sparc/lib/NG4copy_to_user.S +++ b/arch/sparc/lib/NG4copy_to_user.S @@ -11,6 +11,14 @@ .text; \ .align 4; +#define EX_ST_FP(x) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_one_asi_fp;\ + .text; \ + .align 4; + #ifndef ASI_AIUS #define ASI_AIUS 0x11 #endif diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 83aeeb1..8e13ee1 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -48,10 +48,16 @@ #ifndef EX_LD #define EX_LD(x) x #endif +#ifndef EX_LD_FP +#define EX_LD_FP(x) x +#endif #ifndef EX_ST #define EX_ST(x) x #endif +#ifndef EX_ST_FP +#define EX_ST_FP(x) x +#endif #ifndef EX_RETVAL #define EX_RETVAL(x) x @@ -210,17 +216,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %o4, %o2 alignaddr %o1, %g0, %g1 add %o1, %o4, %o1 - EX_LD(LOAD(ldd, %g1 + 0x00, %f0)) -1: EX_LD(LOAD(ldd, %g1 + 0x08, %f2)) + EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0)) +1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2)) subcc %o4, 0x40, %o4 - EX_LD(LOAD(ldd, %g1 + 0x10, %f4)) - EX_LD(LOAD(ldd, %g1 + 0x18, %f6)) - EX_LD(LOAD(ldd, %g1 + 0x20, %f8)) - EX_LD(LOAD(ldd, %g1 + 0x28, %f10)) - EX_LD(LOAD(ldd, %g1 + 0x30, %f12)) - EX_LD(LOAD(ldd, %g1 + 0x38, %f14)) + EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4)) + EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6)) + EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8)) + EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10)) + EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12)) + EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14)) faligndata %f0, %f2, %f16 - EX_LD(LOAD(ldd, %g1 + 0x40, %f0)) + EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0)) faligndata %f2, %f4, %f18 add %g1, 0x40, %g1 faligndata %f4, %f6, %f20 @@ -229,14 +235,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST(STORE(std, %f16, %o0 + 0x00)) - EX_ST(STORE(std, %f18, %o0 + 0x08)) - EX_ST(STORE(std, %f20, %o0 + 0x10)) - EX_ST(STORE(std, %f22, %o0 + 0x18)) - EX_ST(STORE(std, %f24, %o0 + 0x20)) - EX_ST(STORE(std, %f26, %o0 + 0x28)) - EX_ST(STORE(std, %f28, %o0 + 0x30)) - EX_ST(STORE(std, %f30, %o0 + 0x38)) + EX_ST_FP(STORE(std, %f16, %o0 + 0x00)) + EX_ST_FP(STORE(std, %f18, %o0 + 0x08)) + EX_ST_FP(STORE(std, %f20, %o0 + 0x10)) + EX_ST_FP(STORE(std, %f22, %o0 + 0x18)) + EX_ST_FP(STORE(std, %f24, %o0 + 0x20)) + EX_ST_FP(STORE(std, %f26, %o0 + 0x28)) + EX_ST_FP(STORE(std, %f28, %o0 + 0x30)) + EX_ST_FP(STORE(std, %f30, %o0 + 0x38)) add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S index a6ae2ea..ecc5692 100644 --- a/arch/sparc/lib/U1copy_from_user.S +++ b/arch/sparc/lib/U1copy_from_user.S @@ -11,6 +11,14 @@ .text; \ .align 4; +#define EX_LD_FP(x) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_one_fp;\ + .text; \ + .align 4; + #define FUNC_NAME ___copy_from_user #define LOAD(type,addr,dest) type##a [addr] %asi, dest #define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S index f4b970e..9eea392 100644 --- a/arch/sparc/lib/U1copy_to_user.S +++ b/arch/sparc/lib/U1copy_to_user.S @@ -11,6 +11,14 @@ .text; \ .align 4; +#define EX_ST_FP(x) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_one_fp;\ + .text; \ + .align 4; + 
#define FUNC_NAME ___copy_to_user #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S index b67142b..3e6209e 100644 --- a/arch/sparc/lib/U1memcpy.S +++ b/arch/sparc/lib/U1memcpy.S @@ -25,10 +25,16 @@ #ifndef EX_LD #define EX_LD(x) x #endif +#ifndef EX_LD_FP +#define EX_LD_FP(x) x +#endif #ifndef EX_ST #define EX_ST(x) x #endif +#ifndef EX_ST_FP +#define EX_ST_FP(x) x +#endif #ifndef EX_RETVAL #define EX_RETVAL(x) x @@ -73,8 +79,8 @@ faligndata %f8, %f9, %f62; #define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \ - EX_LD(LOAD_BLK(%src, %fdest)); \ - EX_ST(STORE_BLK(%fsrc, %dest)); \ + EX_LD_FP(LOAD_BLK(%src, %fdest)); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ add %src, 0x40, %src; \ subcc %len, 0x40, %len; \ be,pn %xcc, jmptgt; \ @@ -89,12 +95,12 @@ #define DO_SYNC membar #Sync; #define STORE_SYNC(dest, fsrc) \ - EX_ST(STORE_BLK(%fsrc, %dest)); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ add %dest, 0x40, %dest; \ DO_SYNC #define STORE_JUMP(dest, fsrc, target) \ - EX_ST(STORE_BLK(%fsrc, %dest)); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ add %dest, 0x40, %dest; \ ba,pt %xcc, target; \ nop; @@ -103,7 +109,7 @@ subcc %left, 8, %left;\ bl,pn %xcc, 95f; \ faligndata %f0, %f1, %f48; \ - EX_ST(STORE(std, %f48, %dest)); \ + EX_ST_FP(STORE(std, %f48, %dest)); \ add %dest, 8, %dest; #define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ @@ -160,8 +166,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ and %g2, 0x38, %g2 1: subcc %g1, 0x1, %g1 - EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) - EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) + EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) bgu,pt %XCC, 1b add %o1, 0x1, %o1 @@ -172,20 +178,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ be,pt %icc, 3f alignaddr %o1, %g0, %o1 - EX_LD(LOAD(ldd, %o1, %f4)) -1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6)) + EX_LD_FP(LOAD(ldd, %o1, %f4)) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f4, %f6, %f0 - EX_ST(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0)) be,pn %icc, 3f add %o0, 0x8, %o0 - EX_LD(LOAD(ldd, %o1 + 0x8, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f6, %f4, %f0 - EX_ST(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0)) bne,pt %icc, 1b add %o0, 0x8, %o0 @@ -208,13 +214,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ add %g1, %GLOBAL_SPARE, %g1 subcc %o2, %g3, %o2 - EX_LD(LOAD_BLK(%o1, %f0)) + EX_LD_FP(LOAD_BLK(%o1, %f0)) add %o1, 0x40, %o1 add %g1, %g3, %g1 - EX_LD(LOAD_BLK(%o1, %f16)) + EX_LD_FP(LOAD_BLK(%o1, %f16)) add %o1, 0x40, %o1 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE - EX_LD(LOAD_BLK(%o1, %f32)) + EX_LD_FP(LOAD_BLK(%o1, %f32)) add %o1, 0x40, %o1 /* There are 8 instances of the unrolled loop, @@ -426,28 +432,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 62: FINISH_VISCHUNK(o0, f44, f46, g3) 63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3) -93: EX_LD(LOAD(ldd, %o1, %f2)) +93: EX_LD_FP(LOAD(ldd, %o1, %f2)) add %o1, 8, %o1 subcc %g3, 8, %g3 faligndata %f0, %f2, %f8 - EX_ST(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0)) bl,pn %xcc, 95f add %o0, 8, %o0 - EX_LD(LOAD(ldd, %o1, %f0)) + EX_LD_FP(LOAD(ldd, %o1, %f0)) add %o1, 8, %o1 subcc %g3, 8, %g3 faligndata %f2, %f0, %f8 - EX_ST(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0)) bge,pt %xcc, 93b add %o0, 8, %o0 95: brz,pt %o2, 2f mov %g1, %o1 -1: EX_LD(LOAD(ldub, %o1, %o3)) +1: EX_LD_FP(LOAD(ldub, %o1, 
%o3)) add %o1, 1, %o1 subcc %o2, 1, %o2 - EX_ST(STORE(stb, %o3, %o0)) + EX_ST_FP(STORE(stb, %o3, %o0)) bne,pt %xcc, 1b add %o0, 1, %o0 diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S index b1acd13..88ad73d 100644 --- a/arch/sparc/lib/U3copy_from_user.S +++ b/arch/sparc/lib/U3copy_from_user.S @@ -11,6 +11,14 @@ .text; \ .align 4; +#define EX_LD_FP(x) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_one_fp;\ + .text; \ + .align 4; + #define FUNC_NAME U3copy_from_user #define LOAD(type,addr,dest) type##a [addr] %asi, dest #define EX_RETVAL(x) 0 diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S index ef1e493..845139d 100644 --- a/arch/sparc/lib/U3copy_to_user.S +++ b/arch/sparc/lib/U3copy_to_user.S @@ -11,6 +11,14 @@ .text; \ .align 4; +#define EX_ST_FP(x) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_one_fp;\ + .text; \ + .align 4; + #define FUNC_NAME U3copy_to_user #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 7cae9cc..491ee69 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S @@ -24,10 +24,16 @@ #ifndef EX_LD #define EX_LD(x) x #endif +#ifndef EX_LD_FP +#define EX_LD_FP(x) x +#endif #ifndef EX_ST #define EX_ST(x) x #endif +#ifndef EX_ST_FP +#define EX_ST_FP(x) x +#endif #ifndef EX_RETVAL #define EX_RETVAL(x) x @@ -120,8 +126,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ and %g2, 0x38, %g2 1: subcc %g1, 0x1, %g1 - EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) - EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) + EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) bgu,pt %XCC, 1b add %o1, 0x1, %o1 @@ -132,20 +138,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ be,pt %icc, 3f alignaddr %o1, %g0, %o1 - EX_LD(LOAD(ldd, %o1, %f4)) -1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6)) + EX_LD_FP(LOAD(ldd, %o1, %f4)) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f4, %f6, %f0 - EX_ST(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0)) be,pn %icc, 3f add %o0, 0x8, %o0 - EX_LD(LOAD(ldd, %o1 + 0x8, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f6, %f4, %f2 - EX_ST(STORE(std, %f2, %o0)) + EX_ST_FP(STORE(std, %f2, %o0)) bne,pt %icc, 1b add %o0, 0x8, %o0 @@ -155,25 +161,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ LOAD(prefetch, %o1 + 0x080, #one_read) LOAD(prefetch, %o1 + 0x0c0, #one_read) LOAD(prefetch, %o1 + 0x100, #one_read) - EX_LD(LOAD(ldd, %o1 + 0x000, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0)) LOAD(prefetch, %o1 + 0x140, #one_read) - EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) LOAD(prefetch, %o1 + 0x180, #one_read) - EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) LOAD(prefetch, %o1 + 0x1c0, #one_read) faligndata %f0, %f2, %f16 - EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) faligndata %f2, %f4, %f18 - EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) faligndata %f4, %f6, %f20 - EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) faligndata %f6, %f8, %f22 - EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) faligndata %f8, %f10, %f24 - EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) faligndata %f10, %f12, %f26 - EX_LD(LOAD(ldd, %o1 
+ 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE add %o1, 0x40, %o1 @@ -184,26 +190,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 64 1: - EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) faligndata %f12, %f14, %f28 - EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) faligndata %f14, %f0, %f30 - EX_ST(STORE_BLK(%f16, %o0)) - EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) + EX_ST_FP(STORE_BLK(%f16, %o0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) faligndata %f0, %f2, %f16 add %o0, 0x40, %o0 - EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) faligndata %f2, %f4, %f18 - EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) faligndata %f4, %f6, %f20 - EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) subcc %o3, 0x01, %o3 faligndata %f6, %f8, %f22 - EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) faligndata %f8, %f10, %f24 - EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) LOAD(prefetch, %o1 + 0x1c0, #one_read) faligndata %f10, %f12, %f26 bg,pt %XCC, 1b @@ -211,29 +217,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ /* Finally we copy the last full 64-byte block. */ 2: - EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) faligndata %f12, %f14, %f28 - EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) faligndata %f14, %f0, %f30 - EX_ST(STORE_BLK(%f16, %o0)) - EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) + EX_ST_FP(STORE_BLK(%f16, %o0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) faligndata %f0, %f2, %f16 - EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) faligndata %f2, %f4, %f18 - EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) faligndata %f4, %f6, %f20 - EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) faligndata %f6, %f8, %f22 - EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) faligndata %f8, %f10, %f24 cmp %g1, 0 be,pt %XCC, 1f add %o0, 0x40, %o0 - EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) 1: faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST(STORE_BLK(%f16, %o0)) + EX_ST_FP(STORE_BLK(%f16, %o0)) add %o0, 0x40, %o0 add %o1, 0x40, %o1 membar #Sync @@ -253,20 +259,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g2, %o2 be,a,pt %XCC, 1f - EX_LD(LOAD(ldd, %o1 + 0x00, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0)) -1: EX_LD(LOAD(ldd, %o1 + 0x08, %f2)) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2)) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f0, %f2, %f8 - EX_ST(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0)) be,pn %XCC, 2f add %o0, 0x8, %o0 - EX_LD(LOAD(ldd, %o1 + 0x08, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0)) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f2, %f0, %f8 - EX_ST(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0)) bne,pn %XCC, 1b add %o0, 0x8, %o0 -- cgit v0.10.2 From 079317a65d05ce52b69b7d47fe1fb419d40a4395 Mon Sep 17 00:00:00 2001 From: Vijay Kumar Date: Wed, 23 Dec 2015 10:55:33 -0800 Subject: tty/serial: Skip 'NULL' char after console break when sysrq enabled When sysrq is triggered from console, serial driver for SUN hypervisor console receives a console break and enables the sysrq. It expects a valid sysrq char following with break. 
Meanwhile, if the driver receives a 'NULL' (0x00) ASCII character, it disables sysrq and the sysrq handler is never invoked. This fix skips calling the uart sysrq handler when 'NULL' is received while sysrq is enabled.

Signed-off-by: Vijay Kumar Acked-by: Karl Volz Signed-off-by: David S. Miller

diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index f224e8a..ca0d380 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -148,8 +148,10 @@ static int receive_chars_read(struct uart_port *port)
 			uart_handle_dcd_change(port, 1);
 		}
 
-		for (i = 0; i < bytes_read; i++)
-			uart_handle_sysrq_char(port, con_read_page[i]);
+		if (port->sysrq != 0 && *con_read_page) {
+			for (i = 0; i < bytes_read; i++)
+				uart_handle_sysrq_char(port, con_read_page[i]);
+		}
 
 		if (port->state == NULL)
 			continue;
-- cgit v0.10.2

From 930c0f708e156613c37b97cf583c4c6b07d99cce Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Fri, 25 Dec 2015 12:09:30 -0500
Subject: MIPS: Fix bitrot in __get_user_unaligned()

Signed-off-by: Al Viro Signed-off-by: Ralf Baechle

diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 2e3b399..095ecaf 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -599,7 +599,7 @@ extern void __put_user_unknown(void);
  * On error, the variable @x is set to zero.
  */
 #define __get_user_unaligned(x,ptr) \
-	__get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
+	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
 
 /*
  * Yuck. We need two variants, one for 64bit operation and one
@@ -620,8 +620,8 @@ extern void __get_user_unaligned_unknown(void);
 do {									\
 	switch (size) {							\
 	case 1: __get_data_asm(val, "lb", ptr); break;			\
-	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
-	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
+	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
+	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
 	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
 	default: __get_user_unaligned_unknown(); break;		\
 	}								\
-- cgit v0.10.2

From 74bf8efb5fa6e958d2d7c7917b8bb672085ec0c6 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sun, 27 Dec 2015 18:17:37 -0800
Subject: Linux 4.4-rc7

diff --git a/Makefile b/Makefile
index 4e2b18d..1122433 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
-- cgit v0.10.2

From f7d7f59ab124748156ea551edf789994f05da342 Mon Sep 17 00:00:00 2001
From: Oliver Freyermuth
Date: Mon, 28 Dec 2015 18:37:38 +0100
Subject: USB: cp210x: add ID for ELV Marble Sound Board 1

Add the USB device ID for ELV Marble Sound Board 1.

Signed-off-by: Oliver Freyermuth Cc: stable Signed-off-by: Johan Hovold

diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7d4f51a..59b2126 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -160,6 +160,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
 	{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
-- cgit v0.10.2
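
The cp210x change above is typical of new-device enablement: the driver already speaks the chip's protocol, so only its usb_device_id table needs an extra vendor/product pair. As a rough, self-contained sketch of how such a table is declared and exported (the table name and the single entry shown here are illustrative, not the driver's full table):

    #include <linux/module.h>
    #include <linux/usb.h>

    /*
     * USB_DEVICE() fills in idVendor/idProduct plus the match flags that
     * tell the USB core to match on exactly those two fields.
     */
    static const struct usb_device_id example_id_table[] = {
            { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
            { }                             /* all-zero terminator */
    };

    /*
     * Export the IDs so userspace (udev/modprobe) can autoload the module
     * when a device with a matching VID:PID enumerates on the bus.
     */
    MODULE_DEVICE_TABLE(usb, example_id_table);

Because the USB core walks this table when matching drivers to newly enumerated devices, a one-line { USB_DEVICE(vid, pid) } addition, as in the patch above, is all an otherwise-compatible device needs in order to bind.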