80 files changed, 1462 insertions, 488 deletions
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 8
-SUBLEVEL = 8
+SUBLEVEL = 10
 EXTRAVERSION =
 NAME = Displaced Humerus Anterior
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index c2f14e8..91fe4f1 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -349,7 +349,7 @@
 			rx_descs = <64>;
 			mac_control = <0x20>;
 			slaves = <2>;
-			cpts_active_slave = <0>;
+			active_slave = <0>;
 			cpts_clock_mult = <0x80000000>;
 			cpts_clock_shift = <29>;
 			reg = <0x4a100000 0x800
@@ -385,5 +385,19 @@
 				mac-address = [ 00 00 00 00 00 00 ];
 			};
 		};
+
+		ocmcram: ocmcram@40300000 {
+			compatible = "ti,am3352-ocmcram";
+			reg = <0x40300000 0x10000>;
+			ti,hwmods = "ocmcram";
+			ti,no_idle_on_suspend;
+		};
+
+		wkup_m3: wkup_m3@44d00000 {
+			compatible = "ti,am3353-wkup-m3";
+			reg = <0x44d00000 0x4000	/* M3 UMEM */
+			       0x44d80000 0x2000>;	/* M3 DMEM */
+			ti,hwmods = "wkup_m3";
+		};
 	};
 };
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f9e8657..23fa6a2 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -261,7 +261,10 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
 	return armpmu->get_event_idx(hw_events, event) >= 0;
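For context on the validate_event() hunk above: an event can sit in PERF_EVENT_STATE_OFF and still need a counter slot, namely when it was created disabled with enable_on_exec set, so the kernel will switch it on at execve() time. A minimal userspace sketch of that scenario (hypothetical values, calling the perf_event_open(2) syscall directly):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

/* Sketch only: opens a cycle counter on `child` that starts in
 * PERF_EVENT_STATE_OFF but is armed to begin counting at exec time.
 * Such an event is exactly what the validate_event() change above
 * must still reserve a hardware counter for.
 */
static int open_exec_counter(pid_t child)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;		/* starts disabled (STATE_OFF) */
	attr.enable_on_exec = 1;	/* kernel enables it at execve() */

	return syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);
}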
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 0edce4b..5e3ca7a 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -265,6 +265,8 @@ int __init mx35_clocks_init()
 	clk_prepare_enable(clk[gpio3_gate]);
 	clk_prepare_enable(clk[iim_gate]);
 	clk_prepare_enable(clk[emi_gate]);
+	clk_prepare_enable(clk[max_gate]);
+	clk_prepare_enable(clk[iomuxc_gate]);
 
 	/*
 	 * SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dd3d591..48bc3c0 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
 	outer_cache.inv_range = feroceon_l2_inv_range;
 	outer_cache.clean_range = feroceon_l2_clean_range;
 	outer_cache.flush_range = feroceon_l2_flush_range;
+	outer_cache.inv_all = l2_inv_all;
 
 	enable_l2();
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b942..2556cf1 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm920_suspend_size
 .equ	cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm920_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7..344c8a5 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl	cpu_arm926_suspend_size
 .equ	cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_arm926_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ PID
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc..0b60dd3 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
 
 .globl	cpu_mohawk_suspend_size
 .equ	cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_mohawk_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da1..d92dfd0 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl	cpu_sa1100_suspend_size
 .equ	cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_sa1100_do_suspend)
 	stmfd	sp!, {r4 - r6, lr}
 	mrc	p15, 0, r4, c3, c0, 0	@ domain ID
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 09c5233..d222215 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl	cpu_v6_suspend_size
 .equ	cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v6_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d64..e8efd83 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl	cpu_xsc3_suspend_size
 .equ	cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xsc3_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 2551036..e766f88 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl	cpu_xscale_suspend_size
 .equ	cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_xscale_do_suspend)
 	stmfd	sp!, {r4 - r9, lr}
 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index dbaec94..21bff32 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -31,7 +31,7 @@
 #define PAGE_SHIFT	16
 #endif
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK	(~(PAGE_SIZE - 1))
+#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 #define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
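Why the PAGE_MASK change matters: PAGE_SIZE carries a UL suffix, so on a 32-bit kernel ~(PAGE_SIZE - 1) is a 32-bit unsigned value that zero-extends when combined with wider (e.g. 64-bit physical) addresses, wiping the upper bits. The plain-int form sign-extends instead. A minimal userspace sketch (hypothetical 64 KB page size) demonstrating the difference:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 16
/* like ~(PAGE_SIZE - 1) with a 32-bit unsigned long */
#define OLD_PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))
/* the new plain-int definition; negative, so it sign-extends */
#define NEW_PAGE_MASK (~((1 << PAGE_SHIFT) - 1))

int main(void)
{
	uint32_t old_mask = OLD_PAGE_MASK;	/* 0xffff0000 */
	int new_mask = NEW_PAGE_MASK;		/* -65536 */
	uint64_t addr = 0x12345678abcdULL;	/* address wider than 32 bits */

	/* zero-extends to 0x00000000ffff0000: upper address bits are lost */
	printf("old: %#llx\n", (unsigned long long)(addr & old_mask));
	/* sign-extends to 0xffffffffffff0000: upper address bits survive */
	printf("new: %#llx\n", (unsigned long long)(addr & new_mask));
	return 0;
}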
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ec120e6..a9ec6d0 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -634,7 +634,7 @@ resume_kernel:
 	/* Clear _TIF_EMULATE_STACK_STORE flag */
 	lis	r11,_TIF_EMULATE_STACK_STORE@h
 	addi	r5,r9,TI_FLAGS
-	ldarx	r4,0,r5
+0:	ldarx	r4,0,r5
 	andc	r4,r4,r11
 	stdcx.	r4,0,r5
 	bne-	0b
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 27cb321..379d96e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 #define ioremap_nocache(addr, size)	ioremap(addr, size)
 #define ioremap_wc			ioremap_nocache
 
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
 	return (void __iomem *) offset;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 098adbb..1532d7f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -56,6 +56,10 @@ extern unsigned long zero_page_mask;
 	 (((unsigned long)(vaddr)) & zero_page_mask))))
 #define __HAVE_COLOR_ZERO_PAGE
 
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
 #endif /* !__ASSEMBLY__ */
 
 /*
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 683978d..ff691d6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -421,8 +421,8 @@ struct kvm_vcpu_arch {
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
 	unsigned int hw_tsc_khz;
-	unsigned int time_offset;
-	struct page *time_page;
+	struct gfn_to_hva_cache pv_time;
+	bool pv_time_enabled;
 
 	/* set guest stopped flag in pvclock flags field */
 	bool pvclock_set_guest_stopped_request;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index b57a6ed..332e133 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -18,6 +18,7 @@
 #include <linux/rcupdate.h>
 #include <linux/kobject.h>
 #include <linux/uaccess.h>
+#include <linux/kthread.h>
 #include <linux/kdebug.h>
 #include <linux/kernel.h>
 #include <linux/percpu.h>
@@ -1345,6 +1346,63 @@ static void mce_do_trigger(struct work_struct *work)
 
 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
 
+static void __mce_notify_work(void)
+{
+	/* Not more than two messages every minute */
+	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
+	/* wake processes polling /dev/mcelog */
+	wake_up_interruptible(&mce_chrdev_wait);
+
+	/*
+	 * There is no risk of missing notifications because
+	 * work_pending is always cleared before the function is
+	 * executed.
+	 */
+	if (mce_helper[0] && !work_pending(&mce_trigger_work))
+		schedule_work(&mce_trigger_work);
+
+	if (__ratelimit(&ratelimit))
+		pr_info(HW_ERR "Machine check events logged\n");
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+struct task_struct *mce_notify_helper;
+
+static int mce_notify_helper_thread(void *unused)
+{
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		if (kthread_should_stop())
+			break;
+		__mce_notify_work();
+	}
+	return 0;
+}
+
+static int mce_notify_work_init(void)
+{
+	mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL,
+					"mce-notify");
+	if (!mce_notify_helper)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void mce_notify_work(void)
+{
+	wake_up_process(mce_notify_helper);
+}
+#else
+static void mce_notify_work(void)
+{
+	__mce_notify_work();
+}
+static inline int mce_notify_work_init(void) { return 0; }
+#endif
+
 /*
  * Notify the user(s) about new machine check events.
  * Can be called from interrupt context, but not from machine check/NMI
@@ -1352,24 +1410,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
  */
 int mce_notify_irq(void)
 {
-	/* Not more than two messages every minute */
-	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-
 	if (test_and_clear_bit(0, &mce_need_notify)) {
-		/* wake processes polling /dev/mcelog */
-		wake_up_interruptible(&mce_chrdev_wait);
-
-		/*
-		 * There is no risk of missing notifications because
-		 * work_pending is always cleared before the function is
-		 * executed.
-		 */
-		if (mce_helper[0] && !work_pending(&mce_trigger_work))
-			schedule_work(&mce_trigger_work);
-
-		if (__ratelimit(&ratelimit))
-			pr_info(HW_ERR "Machine check events logged\n");
-
+		mce_notify_work();
 		return 1;
 	}
 	return 0;
@@ -2431,6 +2473,8 @@ static __init int mcheck_init_device(void)
 
 	/* register character device /dev/mcelog */
 	misc_register(&mce_chrdev_device);
+	err = mce_notify_work_init();
+
 	return err;
 }
 device_initcall_sync(mcheck_init_device);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 69662ad..0632237 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -128,8 +128,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 };
 
 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
-	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
-	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
+	EVENT_EXTRA_END
+};
+
+static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
+	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 	EVENT_EXTRA_END
 };
 
@@ -2072,7 +2078,10 @@ __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		x86_pmu.extra_regs = intel_snb_extra_regs;
+		if (boot_cpu_data.x86_model == 45)
+			x86_pmu.extra_regs = intel_snbep_extra_regs;
+		else
+			x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -2098,7 +2107,10 @@ __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		x86_pmu.extra_regs = intel_snb_extra_regs;
+		if (boot_cpu_data.x86_model == 62)
+			x86_pmu.extra_regs = intel_snbep_extra_regs;
+		else
+			x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9392f52..a2f492c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1781,7 +1781,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-					 addr);
+					 addr, sizeof(u8));
 }
 
 void kvm_lapic_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4f0c678..3dd9625 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1408,10 +1408,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	unsigned long flags, this_tsc_khz;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct kvm_arch *ka = &v->kvm->arch;
-	void *shared_kaddr;
 	s64 kernel_ns, max_kernel_ns;
 	u64 tsc_timestamp, host_tsc;
-	struct pvclock_vcpu_time_info *guest_hv_clock;
+	struct pvclock_vcpu_time_info guest_hv_clock;
 	u8 pvclock_flags;
 	bool use_master_clock;
 
@@ -1465,7 +1464,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	local_irq_restore(flags);
 
-	if (!vcpu->time_page)
+	if (!vcpu->pv_time_enabled)
 		return 0;
 
 	/*
@@ -1527,12 +1526,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 */
 	vcpu->hv_clock.version += 2;
 
-	shared_kaddr = kmap_atomic(vcpu->time_page);
-
-	guest_hv_clock = shared_kaddr + vcpu->time_offset;
+	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+		&guest_hv_clock, sizeof(guest_hv_clock))))
+		return 0;
 
 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-	pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
 
 	if (vcpu->pvclock_set_guest_stopped_request) {
 		pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1545,12 +1544,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	vcpu->hv_clock.flags = pvclock_flags;
 
-	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-	       sizeof(vcpu->hv_clock));
-
-	kunmap_atomic(shared_kaddr);
-
-	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+				&vcpu->hv_clock,
+				sizeof(vcpu->hv_clock));
 	return 0;
 }
 
@@ -1829,7 +1825,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 		return 0;
 	}
 
-	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+					sizeof(u32)))
 		return 1;
 
 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1839,10 +1836,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.time_page) {
-		kvm_release_page_dirty(vcpu->arch.time_page);
-		vcpu->arch.time_page = NULL;
-	}
+	vcpu->arch.pv_time_enabled = false;
 }
 
 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1948,6 +1942,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_KVM_SYSTEM_TIME_NEW:
 	case MSR_KVM_SYSTEM_TIME: {
+		u64 gpa_offset;
 		kvmclock_reset(vcpu);
 
 		vcpu->arch.time = data;
@@ -1957,14 +1952,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & 1))
 			break;
 
-		/* ...but clean it before doing the actual write */
-		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
-
-		vcpu->arch.time_page =
-				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+		gpa_offset = data & ~(PAGE_MASK | 1);
 
-		if (is_error_page(vcpu->arch.time_page))
-			vcpu->arch.time_page = NULL;
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		     &vcpu->arch.pv_time, data & ~1ULL,
+		     sizeof(struct pvclock_vcpu_time_info)))
+			vcpu->arch.pv_time_enabled = false;
+		else
+			vcpu->arch.pv_time_enabled = true;
 
 		break;
 	}
@@ -1981,7 +1976,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-						data & KVM_STEAL_VALID_BITS))
+						data & KVM_STEAL_VALID_BITS,
+						sizeof(struct kvm_steal_time)))
 			return 1;
 
 		vcpu->arch.st.msr_val = data;
@@ -2966,7 +2962,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  */
 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->arch.time_page)
+	if (!vcpu->arch.pv_time_enabled)
 		return -EINVAL;
 	vcpu->arch.pvclock_set_guest_stopped_request = true;
 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6667,6 +6663,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		goto fail_free_wbinvd_dirty_mask;
 
 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+	vcpu->arch.pv_time_enabled = false;
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
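The pattern introduced by the KVM hunks above — registering a guest-physical range once with kvm_gfn_to_hva_cache_init(), then going through kvm_read_guest_cached()/kvm_write_guest_cached() instead of pinning a struct page — is shared by all three converted call sites (pv_eoi, async_pf, kvmclock). A condensed sketch of the pattern, using only the calls shown in the diff (the cache and payload names here are illustrative, not kernel symbols):

/* Sketch of the gfn_to_hva_cache usage pattern the patch converts
 * kvmclock to; "example_cache"/"example_data" are hypothetical.
 */
static struct gfn_to_hva_cache example_cache;
static struct pvclock_vcpu_time_info example_data;

static int example_setup(struct kvm *kvm, gpa_t gpa)
{
	/* Validate and cache the gpa->hva translation once, together
	 * with the length of every later access, so a cached access
	 * can never stray past the checked range. */
	return kvm_gfn_to_hva_cache_init(kvm, &example_cache, gpa,
					 sizeof(example_data));
}

static void example_update(struct kvm *kvm)
{
	/* No page pinning and no kmap_atomic(); dirty tracking is
	 * handled by the cached-write helper itself. */
	if (kvm_read_guest_cached(kvm, &example_cache, &example_data,
				  sizeof(example_data)))
		return;
	example_data.version += 2;
	kvm_write_guest_cached(kvm, &example_cache, &example_data,
			       sizeof(example_data));
}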
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index ef5356c..0262210 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
 	else if (len < ds)
 		msg->msg_flags |= MSG_TRUNC;
 
+	msg->msg_namelen = 0;
+
 	lock_sock(sk);
 	if (ctx->more) {
 		ctx->more = 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 6a6dfc0..a1c4f0a 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
 	long copied = 0;
 
 	lock_sock(sk);
+	msg->msg_namelen = 0;
 	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
 	     iovlen--, iov++) {
 		unsigned long seglen = iov->iov_len;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index fe6d4be..615d262 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
 	struct hpet_dev *devp;
 	unsigned long addr;
 
-	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
-		return -EINVAL;
-
 	devp = file->private_data;
 	addr = devp->hd_hpets->hp_hpet_phys;
 
 	if (addr & (PAGE_SIZE - 1))
 		return -ENOSYS;
 
-	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
-					PAGE_SIZE, vma->vm_page_prot)) {
-		printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
-			__func__);
-		return -EAGAIN;
-	}
-
-	return 0;
+	return vm_iomap_memory(vma, addr, PAGE_SIZE);
 #else
 	return -ENOSYS;
 #endif
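This hunk, and the mtdchar.c one further down, replace an open-coded io_remap_pfn_range() call plus its hand-rolled size/offset sanity checks with vm_iomap_memory(), which performs the bounds checking itself. A sketch of what a driver mmap handler reduces to with this helper (the device struct and its fields are hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical device exposing one page-aligned MMIO window. */
struct example_dev {
	phys_addr_t mmio_phys;	/* physical base of the window */
	unsigned long mmio_len;	/* window length in bytes */
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_dev *dev = file->private_data;

	/* Register windows usually want uncached access. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* vm_iomap_memory() verifies that the vma's offset and size
	 * fit inside [mmio_phys, mmio_phys + mmio_len) and then does
	 * the io_remap_pfn_range() itself -- no manual overflow or
	 * fit checks, and no chance of forgetting one. */
	return vm_iomap_memory(vma, dev->mmio_phys, dev->mmio_len);
}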
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ba8805a..eabd3dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -814,6 +814,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct intel_ring_buffer *ring;
 	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
+	u32 seqno;
 	u32 mask;
 	u32 flags;
 	int ret, mode, i;
@@ -1068,7 +1069,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+	seqno = intel_ring_get_seqno(ring);
+	trace_i915_gem_ring_dispatch(ring, seqno, flags);
+	i915_trace_irq_get(ring, seqno);
 
 	i915_gem_execbuffer_move_to_active(&objects, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a68..29217db 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -244,7 +244,6 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 			   __entry->ring = ring->id;
 			   __entry->seqno = seqno;
 			   __entry->flags = flags;
-			   i915_trace_irq_get(ring, seqno);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6af87cd..8b5e4ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -232,8 +232,10 @@ static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
 
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
 {
+#ifdef CONFIG_TRACEPOINTS
 	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
 		ring->trace_irq_seqno = seqno;
+#endif
 }
 
 /* DRI warts */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 75b1f89..fd86b37 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1001,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	const unsigned long do_discard = (bio->bi_rw
 					  & (REQ_DISCARD | REQ_SECURE));
+	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
 	struct raid1_plug_cb *plug = NULL;
@@ -1302,7 +1303,8 @@ read_again:
 				   conf->mirrors[i].rdev->data_offset);
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+		mbio->bi_rw =
+			WRITE | do_flush_fua | do_sync | do_discard | do_same;
 		mbio->bi_private = r1_bio;
 
 		atomic_inc(&r1_bio->remaining);
@@ -2819,6 +2821,9 @@ static int run(struct mddev *mddev)
 	if (IS_ERR(conf))
 		return PTR_ERR(conf);
 
+	if (mddev->queue)
+		blk_queue_max_write_same_sectors(mddev->queue,
+						 mddev->chunk_sectors);
 	rdev_for_each(rdev, mddev) {
 		if (!mddev->gendisk)
 			continue;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8d925dc..b3898d4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1106,6 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 	const unsigned long do_discard = (bio->bi_rw
 					  & (REQ_DISCARD | REQ_SECURE));
+	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
@@ -1461,7 +1462,8 @@ retry_write:
 							 rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+			mbio->bi_rw =
+				WRITE | do_sync | do_fua | do_discard | do_same;
 			mbio->bi_private = r10_bio;
 
 			atomic_inc(&r10_bio->remaining);
@@ -1503,7 +1505,8 @@ retry_write:
 							 r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
-			mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+			mbio->bi_rw =
+				WRITE | do_sync | do_fua | do_discard | do_same;
 			mbio->bi_private = r10_bio;
 
 			atomic_inc(&r10_bio->remaining);
@@ -3570,6 +3573,8 @@ static int run(struct mddev *mddev)
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
+		blk_queue_max_write_same_sectors(mddev->queue,
+						 mddev->chunk_sectors);
 		blk_queue_io_min(mddev->queue, chunk_size);
 		if (conf->geo.raid_disks % conf->geo.near_copies)
 			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 82c0616..6e3d6dc 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1159,45 +1159,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
 	struct map_info *map = mtd->priv;
-	resource_size_t start, off;
-	unsigned long len, vma_len;
 
 	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info.  It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
-		off = get_vm_offset(vma);
-		start = map->phys;
-		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
-		start &= PAGE_MASK;
-		vma_len = get_vm_size(vma);
-
-		/* Overflow in off+len? */
-		if (vma_len + off < off)
-			return -EINVAL;
-		/* Does it fit in the mapping? */
-		if (vma_len + off > len)
-			return -EINVAL;
-
-		off += start;
-		/* Did that overflow? */
-		if (off < start)
-			return -EINVAL;
-		if (set_vm_offset(vma, off) < 0)
-			return -EINVAL;
-		vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-
 #ifdef pgprot_noncached
-		if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
+		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
-		if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-				       vma->vm_end - vma->vm_start,
-				       vma->vm_page_prot))
-			return -EAGAIN;
-
-		return 0;
+		return vm_iomap_memory(vma, map->phys, map->size);
 	}
 	return -ENOSYS;
 #else
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 5eaf47b..42b6d69 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -922,6 +922,7 @@ static int mcp251x_open(struct net_device *net)
 	struct mcp251x_priv *priv = netdev_priv(net);
 	struct spi_device *spi = priv->spi;
 	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+	unsigned long flags;
 	int ret;
 
 	ret = open_candev(net);
@@ -938,9 +939,14 @@ static int mcp251x_open(struct net_device *net)
 	priv->tx_skb = NULL;
 	priv->tx_len = 0;
 
+	flags = IRQF_ONESHOT;
+	if (pdata->irq_flags)
+		flags |= pdata->irq_flags;
+	else
+		flags |= IRQF_TRIGGER_FALLING;
+
 	ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-		  pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
-		  DEVICE_NAME, priv);
+				   flags, DEVICE_NAME, priv);
 	if (ret) {
 		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
 		if (pdata->transceiver_enable)
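The mcp251x fix above matters because a request_threaded_irq() call with no primary handler must pass IRQF_ONESHOT, or the kernel rejects the request; for level-triggered lines the flag is also what keeps the interrupt masked until the thread has run. A minimal sketch of the pattern, with hypothetical handler and device names:

#include <linux/interrupt.h>

/* Hypothetical bottom half; runs in a kernel thread, so it may
 * sleep while talking to the device over SPI/I2C. */
static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	/* ... read and acknowledge the device's interrupt source ... */
	return IRQ_HANDLED;
}

static int example_request_irq(int irq, void *dev)
{
	/*
	 * No primary (hard-irq) handler is supplied, so IRQF_ONESHOT
	 * is mandatory: the line stays masked until the thread above
	 * finishes, instead of re-firing immediately.
	 */
	return request_threaded_irq(irq, NULL, example_irq_thread,
				    IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
				    "example-dev", dev);
}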
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 6433b81..8e0c4a0 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
 	struct net_device *dev;
 	struct sja1000_priv *priv;
 	struct resource res;
-	const u32 *prop;
-	int err, irq, res_size, prop_size;
+	u32 prop;
+	int err, irq, res_size;
 	void __iomem *base;
 
 	err = of_address_to_resource(np, 0, &res);
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
 	priv->read_reg = sja1000_ofp_read_reg;
 	priv->write_reg = sja1000_ofp_write_reg;
 
-	prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
-	if (prop && (prop_size == sizeof(u32)))
-		priv->can.clock.freq = *prop / 2;
+	err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
+	if (!err)
+		priv->can.clock.freq = prop / 2;
 	else
 		priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
 
-	prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
-	if (prop && (prop_size == sizeof(u32)))
-		priv->ocr |= *prop & OCR_MODE_MASK;
+	err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
+	if (!err)
+		priv->ocr |= prop & OCR_MODE_MASK;
 	else
 		priv->ocr |= OCR_MODE_NORMAL; /* default */
 
-	prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
-	if (prop && (prop_size == sizeof(u32)))
-		priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+	err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
+	if (!err)
+		priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
 	else
 		priv->ocr |= OCR_TX0_PULLDOWN; /* default */
 
-	prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
-	if (prop && (prop_size == sizeof(u32)) && *prop) {
-		u32 divider = priv->can.clock.freq * 2 / *prop;
+	err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
+	if (!err && prop) {
+		u32 divider = priv->can.clock.freq * 2 / prop;
 
 		if (divider > 1)
 			priv->cdr |= divider / 2 - 1;
@@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
 		priv->cdr |= CDR_CLK_OFF; /* default */
 	}
 
-	prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
-	if (!prop)
+	if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
 		priv->cdr |= CDR_CBP; /* default */
 
 	priv->irq_flags = IRQF_SHARED;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8a5253c..6917998 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -330,6 +330,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -9103,7 +9104,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	}
 
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
-		u32 grc_mode = tr32(GRC_MODE);
+		u32 grc_mode;
+
+		/* Fix transmit hangs */
+		val = tr32(TG3_CPMU_PADRNG_CTL);
+		val |= TG3_CPMU_PADRNG_CTL_RDIV2;
+		tw32(TG3_CPMU_PADRNG_CTL, val);
+
+		grc_mode = tr32(GRC_MODE);
 
 		/* Access the lower 1K of DL PCIE block registers. */
 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
@@ -9413,6 +9421,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tg3_flag(tp, PCI_EXPRESS))
 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+		tp->dma_limit = 0;
+		if (tp->dev->mtu <= ETH_DATA_LEN) {
+			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
+		}
+	}
+
 	if (tg3_flag(tp, HW_TSO_1) ||
 	    tg3_flag(tp, HW_TSO_2) ||
 	    tg3_flag(tp, HW_TSO_3))
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d330e81..6f9b74c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1159,6 +1159,8 @@
 #define  CPMU_MUTEX_GNT_DRIVER		 0x00001000
 #define TG3_CPMU_PHY_STRAP		0x00003664
 #define TG3_CPMU_PHY_STRAP_IS_SERDES	 0x00000020
+#define TG3_CPMU_PADRNG_CTL		0x00003668
+#define  TG3_CPMU_PADRNG_CTL_RDIV2	 0x00040000
 /* 0x3664 --> 0x36b0 unused */
 
 #define TG3_CPMU_EEE_MODE		0x000036b0
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 4426151..de71b1e 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -88,8 +88,8 @@ config TLAN
 	  Please email feedback to <torben.mathiasen@compaq.com>.
 
 config CPMAC
-	tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && AR7
+	tristate "TI AR7 CPMAC Ethernet support"
+	depends on AR7
 	select PHYLIB
 	---help---
 	  TI AR7 CPMAC Ethernet support
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4c31183..1cfde0c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -32,6 +32,7 @@
 #include <linux/of.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
+#include <linux/if_vlan.h>
 
 #include <linux/platform_data/cpsw.h>
 
@@ -118,6 +119,20 @@ do {								\
 #define TX_PRIORITY_MAPPING	0x33221100
 #define CPDMA_TX_PRIORITY_MAP	0x76543210
 
+#define CPSW_VLAN_AWARE		BIT(1)
+#define CPSW_ALE_VLAN_AWARE	1
+
+#define CPSW_FIFO_NORMAL_MODE		(0 << 15)
+#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 15)
+#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 15)
+
+#define CPSW_INTPACEEN		(0x3f << 16)
+#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
+#define CPSW_CMINTMAX_CNT	63
+#define CPSW_CMINTMIN_CNT	2
+#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
+#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)
+
 #define cpsw_enable_irq(priv)	\
 	do {			\
 		u32 i;		\
@@ -131,6 +146,10 @@ do {								\
 			disable_irq_nosync(priv->irqs_table[i]); \
 	} while (0);
 
+#define cpsw_slave_index(priv)				\
+		((priv->data.dual_emac) ? priv->emac_port :	\
+		priv->data.active_slave)
+
 static int debug_level;
 module_param(debug_level, int, 0);
 MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
@@ -152,6 +171,15 @@ struct cpsw_wr_regs {
 	u32	rx_en;
 	u32	tx_en;
 	u32	misc_en;
+	u32	mem_allign1[8];
+	u32	rx_thresh_stat;
+	u32	rx_stat;
+	u32	tx_stat;
+	u32	misc_stat;
+	u32	mem_allign2[8];
+	u32	rx_imax;
+	u32	tx_imax;
+
 };
 
 struct cpsw_ss_regs {
@@ -250,7 +278,7 @@ struct cpsw_ss_regs {
 struct cpsw_host_regs {
 	u32	max_blks;
 	u32	blk_cnt;
-	u32	flow_thresh;
+	u32	tx_in_ctl;
 	u32	port_vlan;
 	u32	tx_pri_map;
 	u32	cpdma_tx_pri_map;
@@ -277,6 +305,9 @@ struct cpsw_slave {
 	u32				mac_control;
 	struct cpsw_slave_data		*data;
 	struct phy_device		*phy;
+	struct net_device		*ndev;
+	u32				port_vlan;
+	u32				open_stat;
 };
 
 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -303,6 +334,8 @@ struct cpsw_priv {
 	struct cpsw_host_regs __iomem	*host_port_regs;
 	u32				msg_enable;
 	u32				version;
+	u32				coal_intvl;
+	u32				bus_freq_mhz;
 	struct net_device_stats		stats;
 	int				rx_packet_max;
 	int				host_port;
@@ -315,17 +348,69 @@ struct cpsw_priv {
 	/* snapshot of IRQ numbers */
 	u32 irqs_table[4];
 	u32 num_irqs;
-	struct cpts cpts;
+	bool irq_enabled;
+	struct cpts *cpts;
+	u32 emac_port;
 };
 
 #define napi_to_priv(napi)	container_of(napi, struct cpsw_priv, napi)
-#define for_each_slave(priv, func, arg...)			\
-	do {							\
-		int idx;					\
-		for (idx = 0; idx < (priv)->data.slaves; idx++)	\
-			(func)((priv)->slaves + idx, ##arg);	\
+#define for_each_slave(priv, func, arg...)				\
+	do {								\
+		struct cpsw_slave *slave;				\
+		int n;							\
+		if (priv->data.dual_emac)				\
+			(func)((priv)->slaves + priv->emac_port, ##arg);\
+		else							\
+			for (n = (priv)->data.slaves,			\
+					slave = (priv)->slaves;		\
+					n; n--)				\
+				(func)(slave++, ##arg);			\
 	} while (0)
+#define cpsw_get_slave_ndev(priv, __slave_no__)				\
+	(priv->slaves[__slave_no__].ndev)
+#define cpsw_get_slave_priv(priv, __slave_no__)				\
+	((priv->slaves[__slave_no__].ndev) ?				\
+		netdev_priv(priv->slaves[__slave_no__].ndev) : NULL)	\
+
+#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb)		\
+	do {								\
+		if (!priv->data.dual_emac)				\
+			break;						\
+		if (CPDMA_RX_SOURCE_PORT(status) == 1) {		\
+			ndev = cpsw_get_slave_ndev(priv, 0);		\
+			priv = netdev_priv(ndev);			\
+			skb->dev = ndev;				\
+		} else if (CPDMA_RX_SOURCE_PORT(status) == 2) {		\
+			ndev = cpsw_get_slave_ndev(priv, 1);		\
+			priv = netdev_priv(ndev);			\
+			skb->dev = ndev;				\
+		}							\
+	} while (0)
+#define cpsw_add_mcast(priv, addr)					\
+	do {								\
+		if (priv->data.dual_emac) {				\
+			struct cpsw_slave *slave = priv->slaves +	\
+						priv->emac_port;	\
+			int slave_port = cpsw_get_slave_port(priv,	\
+						slave->slave_num);	\
+			cpsw_ale_add_mcast(priv->ale, addr,		\
+				1 << slave_port | 1 << priv->host_port,	\
+				ALE_VLAN, slave->port_vlan, 0);		\
+		} else {						\
+			cpsw_ale_add_mcast(priv->ale, addr,		\
+				ALE_ALL_PORTS << priv->host_port,	\
+				0, 0, 0);				\
+		}							\
+	} while (0)
+
+static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+	if (priv->host_port == 0)
+		return slave_num + 1;
+	else
+		return slave_num;
+}
 
 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
@@ -344,8 +429,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 
 		/* program multicast address list into ALE register */
 		netdev_for_each_mc_addr(ha, ndev) {
-			cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
-				ALE_ALL_PORTS << priv->host_port, 0, 0);
+			cpsw_add_mcast(priv, (u8 *)ha->addr);
 		}
 	}
 }
@@ -374,9 +458,12 @@ void cpsw_tx_handler(void *token, int len, int status)
 	struct net_device	*ndev = skb->dev;
 	struct cpsw_priv	*priv = netdev_priv(ndev);
 
+	/* Check whether the queue is stopped due to stalled tx dma, if the
+	 * queue is stopped then start the queue as we have free desc for tx
+	 */
 	if (unlikely(netif_queue_stopped(ndev)))
 		netif_wake_queue(ndev);
-	cpts_tx_timestamp(&priv->cpts, skb);
+	cpts_tx_timestamp(priv->cpts, skb);
 	priv->stats.tx_packets++;
 	priv->stats.tx_bytes += len;
 	dev_kfree_skb_any(skb);
@@ -385,81 +472,105 @@ void cpsw_tx_handler(void *token, int len, int status)
 void cpsw_rx_handler(void *token, int len, int status)
 {
 	struct sk_buff		*skb = token;
+	struct sk_buff		*new_skb;
 	struct net_device	*ndev = skb->dev;
 	struct cpsw_priv	*priv = netdev_priv(ndev);
 	int			ret = 0;
 
-	/* free and bail if we are shutting down */
-	if (unlikely(!netif_running(ndev)) ||
-			unlikely(!netif_carrier_ok(ndev))) {
+	cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
+
+	if (unlikely(status < 0)) {
+		/* the interface is going down, skbs are purged */
 		dev_kfree_skb_any(skb);
 		return;
 	}
-	if (likely(status >= 0)) {
+
+	new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
+	if (new_skb) {
 		skb_put(skb, len);
-		cpts_rx_timestamp(&priv->cpts, skb);
+		cpts_rx_timestamp(priv->cpts, skb);
 		skb->protocol = eth_type_trans(skb, ndev);
-		netif_receive_skb(skb);
+		netif_rx(skb);
 		priv->stats.rx_bytes += len;
 		priv->stats.rx_packets++;
-		skb = NULL;
-	}
-
-	if (unlikely(!netif_running(ndev))) {
-		if (skb)
-			dev_kfree_skb_any(skb);
-		return;
+	} else {
+		priv->stats.rx_dropped++;
+		new_skb = skb;
 	}
 
-	if (likely(!skb)) {
-		skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
-		if (WARN_ON(!skb))
-			return;
-
-		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
-					skb_tailroom(skb), GFP_KERNEL);
-	}
-	WARN_ON(ret < 0);
+	ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
+			skb_tailroom(new_skb), 0);
+	if (WARN_ON(ret < 0))
+		dev_kfree_skb_any(new_skb);
 }
 
 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
 {
 	struct cpsw_priv *priv = dev_id;
+	unsigned long flags;
+	u32 rx, tx, rx_thresh;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
+	rx = __raw_readl(&priv->wr_regs->rx_stat);
+	tx = __raw_readl(&priv->wr_regs->tx_stat);
+	if (!rx_thresh && !rx && !tx) {
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return IRQ_NONE;
+	}
 
-	if (likely(netif_running(priv->ndev))) {
-		cpsw_intr_disable(priv);
+	cpsw_intr_disable(priv);
+	if (priv->irq_enabled == true) {
 		cpsw_disable_irq(priv);
+		priv->irq_enabled = false;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (netif_running(priv->ndev)) {
 		napi_schedule(&priv->napi);
+		return IRQ_HANDLED;
 	}
-	return IRQ_HANDLED;
-}
 
-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
-{
-	if (priv->host_port == 0)
-		return slave_num + 1;
-	else
-		return slave_num;
+	priv = cpsw_get_slave_priv(priv, 1);
+	if (!priv)
+		return IRQ_NONE;
+
+	if (netif_running(priv->ndev)) {
+		napi_schedule(&priv->napi);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
 }
 
 static int cpsw_poll(struct napi_struct *napi, int budget)
 {
 	struct cpsw_priv	*priv = napi_to_priv(napi);
 	int			num_tx, num_rx;
+	unsigned long		flags;
 
+	spin_lock_irqsave(&priv->lock, flags);
 	num_tx = cpdma_chan_process(priv->txch, 128);
-	num_rx = cpdma_chan_process(priv->rxch, budget);
-
-	if (num_rx || num_tx)
-		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
-			 num_rx, num_tx);
+	if (num_tx)
+		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
+	num_rx = cpdma_chan_process(priv->rxch, budget);
 	if (num_rx < budget) {
+		struct cpsw_priv *prim_cpsw;
+
 		napi_complete(napi);
 		cpsw_intr_enable(priv);
-		cpdma_ctlr_eoi(priv->dma);
-		cpsw_enable_irq(priv);
+		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+		prim_cpsw = cpsw_get_slave_priv(priv, 0);
+		if (prim_cpsw->irq_enabled == false) {
+			cpsw_enable_irq(priv);
+			prim_cpsw->irq_enabled = true;
+		}
 	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (num_rx || num_tx)
+		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
			 num_rx, num_tx);
 
 	return num_rx;
 }
@@ -548,6 +659,77 @@ static void cpsw_adjust_link(struct net_device *ndev)
 	}
 }
 
+static int cpsw_get_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *coal)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+
+	coal->rx_coalesce_usecs = priv->coal_intvl;
+	return 0;
+}
+
+static int cpsw_set_coalesce(struct net_device *ndev,
+				struct ethtool_coalesce *coal)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	u32 int_ctrl;
+	u32 num_interrupts = 0;
+	u32 prescale = 0;
+	u32 addnl_dvdr = 1;
+	u32 coal_intvl = 0;
+
+	if (!coal->rx_coalesce_usecs)
+		return -EINVAL;
+
+	coal_intvl = coal->rx_coalesce_usecs;
+
+	int_ctrl = readl(&priv->wr_regs->int_control);
+	prescale = priv->bus_freq_mhz * 4;
+
+	if (coal_intvl < CPSW_CMINTMIN_INTVL)
+		coal_intvl = CPSW_CMINTMIN_INTVL;
+
+	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
+		/* Interrupt pacer works with 4us Pulse, we can
+		 * throttle further by dilating the 4us pulse.
+		 */
+		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
+
+		if (addnl_dvdr > 1) {
+			prescale *= addnl_dvdr;
+			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
+				coal_intvl = (CPSW_CMINTMAX_INTVL
+						* addnl_dvdr);
+		} else {
+			addnl_dvdr = 1;
+			coal_intvl = CPSW_CMINTMAX_INTVL;
+		}
+	}
+
+	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+	writel(num_interrupts, &priv->wr_regs->rx_imax);
+	writel(num_interrupts, &priv->wr_regs->tx_imax);
+
+	int_ctrl |= CPSW_INTPACEEN;
+	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
+	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+	writel(int_ctrl, &priv->wr_regs->int_control);
+
+	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
+	if (priv->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			priv = netdev_priv(priv->slaves[i].ndev);
+			priv->coal_intvl = coal_intvl;
+		}
+	} else {
+		priv->coal_intvl = coal_intvl;
+	}
+
+	return 0;
+}
+
 static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
 {
 	static char *leader = "........................................";
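To make the pacing arithmetic in cpsw_set_coalesce() above concrete: the pacer's pulse counter runs off a prescaler of bus_freq_mhz * 4, and rx_imax/tx_imax hold the permitted interrupts per millisecond. A standalone sketch of the same computation with a worked example (assuming, hypothetically, a 125 MHz bus clock):

/* Mirrors the rx_imax computation above; register writes omitted. */
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)	 /* 500 us */
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1) /* 16 us */

static unsigned int example_rx_imax(unsigned int coal_usecs,
				    unsigned int bus_freq_mhz)
{
	unsigned int prescale = bus_freq_mhz * 4;	/* 125 MHz -> 500 */
	unsigned int addnl_dvdr = 1;

	if (coal_usecs < CPSW_CMINTMIN_INTVL)
		coal_usecs = CPSW_CMINTMIN_INTVL;
	if (coal_usecs > CPSW_CMINTMAX_INTVL) {
		/* dilate the 4us pulse: 0x7FF / 500 = 4, so intervals
		 * up to 4 * 500 = 2000 us become reachable */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_usecs > CPSW_CMINTMAX_INTVL * addnl_dvdr)
				coal_usecs = CPSW_CMINTMAX_INTVL * addnl_dvdr;
		} else {
			addnl_dvdr = 1;
			coal_usecs = CPSW_CMINTMAX_INTVL;
		}
	}
	/* e.g. coal_usecs = 250 -> (1000 * 1) / 250 = 4 irqs per ms */
	return (1000 * addnl_dvdr) / coal_usecs;
}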
@@ -559,6 +741,54 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
 			leader + strlen(name), val);
 }
 
+static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
+{
+	u32 i;
+	u32 usage_count = 0;
+
+	if (!priv->data.dual_emac)
+		return 0;
+
+	for (i = 0; i < priv->data.slaves; i++)
+		if (priv->slaves[i].open_stat)
+			usage_count++;
+
+	return usage_count;
+}
+
+static inline int cpsw_tx_packet_submit(struct net_device *ndev,
+			struct cpsw_priv *priv, struct sk_buff *skb)
+{
+	if (!priv->data.dual_emac)
+		return cpdma_chan_submit(priv->txch, skb, skb->data,
+				  skb->len, 0);
+
+	if (ndev == cpsw_get_slave_ndev(priv, 0))
+		return cpdma_chan_submit(priv->txch, skb, skb->data,
+				  skb->len, 1);
+	else
+		return cpdma_chan_submit(priv->txch, skb, skb->data,
+				  skb->len, 2);
+}
+
+static inline void cpsw_add_dual_emac_def_ale_entries(
+		struct cpsw_priv *priv, struct cpsw_slave *slave,
+		u32 slave_port)
+{
+	u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+
+	if (priv->version == CPSW_VERSION_1)
+		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
+	else
+		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
+	cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
+			  port_mask, port_mask, 0);
+	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+			   port_mask, ALE_VLAN, slave->port_vlan, 0);
+	cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+			   priv->host_port, ALE_VLAN, slave->port_vlan);
+}
+
 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 {
 	char name[32];
@@ -588,8 +818,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 
 	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
 
-	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
-			   1 << slave_port, 0, ALE_MCAST_FWD_2);
+	if (priv->data.dual_emac)
+		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
+	else
+		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
 	slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
@@ -604,14 +837,44 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	}
 }
 
+static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
+{
+	const int vlan = priv->data.default_vlan;
+	const int port = priv->host_port;
+	u32 reg;
+	int i;
+
+	reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+	       CPSW2_PORT_VLAN;
+
+	writel(vlan, &priv->host_port_regs->port_vlan);
+
+	for (i = 0; i < priv->data.slaves; i++)
+		slave_write(priv->slaves + i, vlan, reg);
+
+	cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
+			  ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
+			  (ALE_PORT_1 | ALE_PORT_2) << port);
+}
+
 static void cpsw_init_host_port(struct cpsw_priv *priv)
 {
+	u32 control_reg;
+	u32 fifo_mode;
+
 	/* soft reset the controller and initialize ale */
 	soft_reset("cpsw", &priv->regs->soft_reset);
 	cpsw_ale_start(priv->ale);
 
 	/* switch to vlan unaware mode */
-	cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
+	cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+			     CPSW_ALE_VLAN_AWARE);
+	control_reg = readl(&priv->regs->control);
+	control_reg |= CPSW_VLAN_AWARE;
+	writel(control_reg, &priv->regs->control);
+	fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+		     CPSW_FIFO_NORMAL_MODE;
+	writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
 
 	/* setup host port priority mapping */
 	__raw_writel(CPDMA_TX_PRIORITY_MAP,
@@ -621,18 +884,32 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
 	cpsw_ale_control_set(priv->ale, priv->host_port,
 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 
-	cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
-	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
-			   1 << priv->host_port, 0, ALE_MCAST_FWD_2);
+	if (!priv->data.dual_emac) {
+		cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+				   0, 0);
+		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+				   1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+	}
+}
+
+static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+	if (!slave->phy)
+		return;
+	phy_stop(slave->phy);
+	phy_disconnect(slave->phy);
+	slave->phy = NULL;
 }
 
 static int cpsw_ndo_open(struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_priv *prim_cpsw;
 	int i, ret;
 	u32 reg;
 
-	cpsw_intr_disable(priv);
+	if (!cpsw_common_res_usage_state(priv))
+		cpsw_intr_disable(priv);
 	netif_carrier_off(ndev);
 
 	pm_runtime_get_sync(&priv->pdev->dev);
@@ -644,53 +921,81 @@ static int cpsw_ndo_open(struct net_device *ndev)
 		 CPSW_RTL_VERSION(reg));
 
 	/* initialize host and slave ports */
-	cpsw_init_host_port(priv);
+	if (!cpsw_common_res_usage_state(priv))
+		cpsw_init_host_port(priv);
 	for_each_slave(priv, cpsw_slave_open, priv);
 
-	/* setup tx dma to fixed prio and zero offset */
-	cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
-	cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
-
-	/* disable priority elevation and enable statistics on all ports */
-	__raw_writel(0, &priv->regs->ptype);
-
-	/* enable statistics collection only on the host port */
-	__raw_writel(0x7, &priv->regs->stat_port_en);
+	/* Add default VLAN */
+	if (!priv->data.dual_emac)
+		cpsw_add_default_vlan(priv);
+
+	if (!cpsw_common_res_usage_state(priv)) {
+		/* setup tx dma to fixed prio and zero offset */
+		cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
+		cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+
+		/* disable priority elevation */
+		__raw_writel(0, &priv->regs->ptype);
+
+		/* enable statistics collection only on all ports */
+		__raw_writel(0x7, &priv->regs->stat_port_en);
+
+		if (WARN_ON(!priv->data.rx_descs))
+			priv->data.rx_descs = 128;
+
+		for (i = 0; i < priv->data.rx_descs; i++) {
+			struct sk_buff *skb;
+
+			ret = -ENOMEM;
+			skb = __netdev_alloc_skb_ip_align(priv->ndev,
+					priv->rx_packet_max, GFP_KERNEL);
+			if (!skb)
+				goto err_cleanup;
+			ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+					skb_tailroom(skb), 0);
+			if (ret < 0) {
+				kfree_skb(skb);
+				goto err_cleanup;
+			}
+		}
+		/* continue even if we didn't manage to submit all
+		 * receive descs
+		 */
+		cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
+	}
 
-	if (WARN_ON(!priv->data.rx_descs))
-		priv->data.rx_descs = 128;
+	/* Enable Interrupt pacing if configured */
+	if (priv->coal_intvl != 0) {
+		struct ethtool_coalesce coal;
 
-	for (i = 0; i < priv->data.rx_descs; i++) {
-		struct sk_buff *skb;
+		coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+		cpsw_set_coalesce(ndev, &coal);
+	}
 
-		ret = -ENOMEM;
-		skb = netdev_alloc_skb_ip_align(priv->ndev,
-						priv->rx_packet_max);
-		if (!skb)
-			break;
-		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
-					skb_tailroom(skb), GFP_KERNEL);
-		if (WARN_ON(ret < 0))
-			break;
+	prim_cpsw = cpsw_get_slave_priv(priv, 0);
+	if (prim_cpsw->irq_enabled == false) {
+		if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
+			prim_cpsw->irq_enabled = true;
+			cpsw_enable_irq(prim_cpsw);
+		}
 	}
-	/* continue even if we didn't manage to submit all receive descs */
-	cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
 
 	cpdma_ctlr_start(priv->dma);
 	cpsw_intr_enable(priv);
 	napi_enable(&priv->napi);
-	cpdma_ctlr_eoi(priv->dma);
+	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
+	if (priv->data.dual_emac)
+		priv->slaves[priv->emac_port].open_stat = true;
 	return 0;
-}
 
-static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
-{
-	if (!slave->phy)
-		return;
-	phy_stop(slave->phy);
-	phy_disconnect(slave->phy);
-	slave->phy = NULL;
+err_cleanup:
+	cpdma_ctlr_stop(priv->dma);
+	for_each_slave(priv, cpsw_slave_stop, priv);
+	pm_runtime_put_sync(&priv->pdev->dev);
+	netif_carrier_off(priv->ndev);
+	return ret;
 }
 
 static int cpsw_ndo_stop(struct net_device *ndev)
@@ -701,12 +1006,17 @@ static int cpsw_ndo_stop(struct net_device *ndev)
 	netif_stop_queue(priv->ndev);
 	napi_disable(&priv->napi);
 	netif_carrier_off(priv->ndev);
-	cpsw_intr_disable(priv);
-	cpdma_ctlr_int_ctrl(priv->dma, false);
-	cpdma_ctlr_stop(priv->dma);
-	cpsw_ale_stop(priv->ale);
+
+	if (cpsw_common_res_usage_state(priv) <= 1) {
+		cpsw_intr_disable(priv);
+		cpdma_ctlr_int_ctrl(priv->dma, false);
+		cpdma_ctlr_stop(priv->dma);
+		cpsw_ale_stop(priv->ale);
+	}
 	for_each_slave(priv, cpsw_slave_stop, priv);
 	pm_runtime_put_sync(&priv->pdev->dev);
+	if (priv->data.dual_emac)
+		priv->slaves[priv->emac_port].open_stat = false;
 	return 0;
 }
@@ -724,18 +1034,24 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+				priv->cpts->tx_enable)
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 
 	skb_tx_timestamp(skb);
 
-	ret = cpdma_chan_submit(priv->txch, skb, skb->data,
-				skb->len, GFP_KERNEL);
+	ret = cpsw_tx_packet_submit(ndev, priv, skb);
 	if (unlikely(ret != 0)) {
 		cpsw_err(priv, tx_err, "desc submit failed\n");
 		goto fail;
 	}
 
+	/* If there is no more tx desc left free then we need to
+	 * tell the kernel to stop sending us tx frames.
+	 */
+	if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
+		netif_stop_queue(ndev);
+
 	return NETDEV_TX_OK;
 fail:
 	priv->stats.tx_dropped++;
@@ -770,10 +1086,10 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
 
 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
 {
-	struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+	struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
 	u32 ts_en, seq_id;
 
-	if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+	if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
 		slave_write(slave, 0, CPSW1_TS_CTL);
 		return;
 	}
@@ -781,10 +1097,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
 	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
 	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
 
-	if (priv->cpts.tx_enable)
+	if (priv->cpts->tx_enable)
 		ts_en |= CPSW_V1_TS_TX_EN;
 
-	if (priv->cpts.rx_enable)
+	if (priv->cpts->rx_enable)
 		ts_en |= CPSW_V1_TS_RX_EN;
 
 	slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -793,16 +1109,21 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
 
 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 {
-	struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+	struct cpsw_slave *slave;
 	u32 ctrl, mtype;
 
+	if (priv->data.dual_emac)
+		slave = &priv->slaves[priv->emac_port];
+	else
+		slave = &priv->slaves[priv->data.active_slave];
+
 	ctrl = slave_read(slave, CPSW2_CONTROL);
 	ctrl &= ~CTRL_ALL_TS_MASK;
 
-	if (priv->cpts.tx_enable)
+	if (priv->cpts->tx_enable)
 		ctrl |= CTRL_TX_TS_BITS;
 
-	if (priv->cpts.rx_enable)
+	if (priv->cpts->rx_enable)
 		ctrl |= CTRL_RX_TS_BITS;
 
 	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -815,7 +1136,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 {
 	struct cpsw_priv *priv = netdev_priv(dev);
-	struct cpts *cpts = &priv->cpts;
+	struct cpts *cpts = priv->cpts;
 	struct hwtstamp_config cfg;
 
 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -879,14 +1200,26 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 
 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
+	struct cpsw_priv *priv = netdev_priv(dev);
+	struct mii_ioctl_data *data = if_mii(req);
+	int slave_no = cpsw_slave_index(priv);
+
 	if (!netif_running(dev))
 		return -EINVAL;
 
+	switch (cmd) {
 #ifdef CONFIG_TI_CPTS
-	if (cmd == SIOCSHWTSTAMP)
+	case SIOCSHWTSTAMP:
 		return cpsw_hwtstamp_ioctl(dev, req);
 #endif
-	return -ENOTSUPP;
+	case SIOCGMIIPHY:
+		data->phy_id = priv->slaves[slave_no].phy->addr;
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	return 0;
 }
 
 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -901,7 +1234,9 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
 	cpdma_chan_start(priv->txch);
 	cpdma_ctlr_int_ctrl(priv->dma, true);
 	cpsw_intr_enable(priv);
-	cpdma_ctlr_eoi(priv->dma);
+	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
 }
 
 static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
@@ -920,10 +1255,79 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
 	cpsw_interrupt(ndev->irq, priv);
 	cpdma_ctlr_int_ctrl(priv->dma, true);
 	cpsw_intr_enable(priv);
-	cpdma_ctlr_eoi(priv->dma);
+	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
 }
 #endif
 
+static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+				unsigned short vid)
+{
+	int ret;
+
+	ret = cpsw_ale_add_vlan(priv->ale, vid,
+				ALE_ALL_PORTS << priv->host_port,
+				0, ALE_ALL_PORTS << priv->host_port,
+				(ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+	if (ret != 0)
+		return ret;
+
+	ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+				 priv->host_port, ALE_VLAN, vid);
+	if (ret != 0)
+		goto clean_vid;
+
+	ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+				 ALE_ALL_PORTS << priv->host_port,
+				 ALE_VLAN, vid, 0);
+	if (ret != 0)
+		goto clean_vlan_ucast;
+	return 0;
+
+clean_vlan_ucast:
+	cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+			    priv->host_port, ALE_VLAN, vid);
+clean_vid:
+	cpsw_ale_del_vlan(priv->ale, vid, 0);
+	return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+		unsigned short vid)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+
+	if (vid == priv->data.default_vlan)
+		return 0;
+
+	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+	return cpsw_add_vlan_ale_entry(priv, vid);
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+		unsigned short vid)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	if (vid == priv->data.default_vlan)
+		return 0;
+
+	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+	if (ret != 0)
+		return ret;
+
+	ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+				 priv->host_port, ALE_VLAN, vid);
+	if (ret != 0)
+		return ret;
+
+	return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+				  0, ALE_VLAN, vid);
+}
+
 static const struct net_device_ops cpsw_netdev_ops = {
 	.ndo_open		= cpsw_ndo_open,
 	.ndo_stop		= cpsw_ndo_stop,
@@ -938,6 +1342,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= cpsw_ndo_poll_controller,
 #endif
+	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
 };
 
 static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -975,7 +1381,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
 		SOF_TIMESTAMPING_RX_SOFTWARE |
 		SOF_TIMESTAMPING_SOFTWARE |
 		SOF_TIMESTAMPING_RAW_HARDWARE;
-	info->phc_index = priv->cpts.phc_index;
+	info->phc_index = priv->cpts->phc_index;
 	info->tx_types =
 		(1 << HWTSTAMP_TX_OFF) |
 		(1 << HWTSTAMP_TX_ON);
@@ -994,12 +1400,39 @@ static int cpsw_get_ts_info(struct net_device *ndev,
 	return 0;
 }
 
+static int cpsw_get_settings(struct net_device *ndev,
+			     struct ethtool_cmd *ecmd)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	int slave_no = cpsw_slave_index(priv);
+
+	if (priv->slaves[slave_no].phy)
+		return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
+	else
+		return -EOPNOTSUPP;
+}
+
+static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	int slave_no = cpsw_slave_index(priv);
+
+	if (priv->slaves[slave_no].phy)
+		return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
+	else
+		return -EOPNOTSUPP;
+}
+
 static const struct ethtool_ops cpsw_ethtool_ops = {
 	.get_drvinfo	= cpsw_get_drvinfo,
 	.get_msglevel	= cpsw_get_msglevel,
 	.set_msglevel	= cpsw_set_msglevel,
 	.get_link	= ethtool_op_get_link,
 	.get_ts_info	= cpsw_get_ts_info,
+	.get_settings	= cpsw_get_settings,
+	.set_settings	= cpsw_set_settings,
+	.get_coalesce	= cpsw_get_coalesce,
+	.set_coalesce	= cpsw_set_coalesce,
 };
+ slave->port_vlan = data->dual_emac_res_vlan; } static int cpsw_probe_dt(struct cpsw_platform_data *data, @@ -1031,12 +1465,16 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->slaves = prop; - if (of_property_read_u32(node, "cpts_active_slave", &prop)) { - pr_err("Missing cpts_active_slave property in the DT.\n"); - ret = -EINVAL; - goto error_ret; + if (of_property_read_u32(node, "active_slave", &prop)) { + pr_err("Missing active_slave property in the DT.\n"); + if (of_property_read_u32(node, "cpts_active_slave", &prop)) { + ret = -EINVAL; + goto error_ret; + } else { + pr_err("Using old cpts_active_slave as fallback.\n"); + } } - data->cpts_active_slave = prop; + data->active_slave = prop; if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { pr_err("Missing cpts_clock_mult property in the DT.\n"); @@ -1052,12 +1490,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->cpts_clock_shift = prop; - data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * - data->slaves, GFP_KERNEL); - if (!data->slave_data) { - pr_err("Could not allocate slave memory.\n"); + data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data), + GFP_KERNEL); + if (!data->slave_data) return -EINVAL; - } if (of_property_read_u32(node, "cpdma_channels", &prop)) { pr_err("Missing cpdma_channels property in the DT.\n"); @@ -1094,6 +1530,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->mac_control = prop; + if (!of_property_read_u32(node, "dual_emac", &prop)) + data->dual_emac = prop; + /* * Populate all the child nodes here... */ @@ -1127,6 +1566,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (mac_addr) memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + if (data->dual_emac) { + if (of_property_read_u32(slave_node, "dual_emac_res_vlan", + &prop)) { + pr_err("Missing dual_emac_res_vlan in DT.\n"); + slave_data->dual_emac_res_vlan = i+1; + pr_err("Using %d as Reserved VLAN for %d slave\n", + slave_data->dual_emac_res_vlan, i); + } else { + slave_data->dual_emac_res_vlan = prop; + } + } + i++; } @@ -1137,9 +1588,84 @@ error_ret: return ret; } +static int cpsw_probe_dual_emac(struct platform_device *pdev, + struct cpsw_priv *priv) +{ + struct cpsw_platform_data *data = &priv->data; + struct net_device *ndev; + struct cpsw_priv *priv_sl2; + int ret = 0, i; + + ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + if (!ndev) { + pr_err("cpsw: error allocating net_device\n"); + return -ENOMEM; + } + + priv_sl2 = netdev_priv(ndev); + spin_lock_init(&priv_sl2->lock); + priv_sl2->data = *data; + priv_sl2->pdev = pdev; + priv_sl2->ndev = ndev; + priv_sl2->dev = &ndev->dev; + priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + priv_sl2->rx_packet_max = max(rx_packet_max, 128); + + if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { + memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, + ETH_ALEN); + pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); + } else { + random_ether_addr(priv_sl2->mac_addr); + pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); + } + memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); + + priv_sl2->slaves = priv->slaves; + priv_sl2->clk = priv->clk; + + priv_sl2->coal_intvl = 0; + priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; + + priv_sl2->cpsw_res = priv->cpsw_res; + priv_sl2->regs = priv->regs; + priv_sl2->host_port = priv->host_port; + priv_sl2->host_port_regs = priv->host_port_regs; + priv_sl2->wr_regs = priv->wr_regs; + priv_sl2->dma = priv->dma; + priv_sl2->txch 
= priv->txch; + priv_sl2->rxch = priv->rxch; + priv_sl2->ale = priv->ale; + priv_sl2->emac_port = 1; + priv->slaves[1].ndev = ndev; + priv_sl2->cpts = priv->cpts; + priv_sl2->version = priv->version; + + for (i = 0; i < priv->num_irqs; i++) { + priv_sl2->irqs_table[i] = priv->irqs_table[i]; + priv_sl2->num_irqs = priv->num_irqs; + } + ndev->features |= NETIF_F_HW_VLAN_FILTER; + + ndev->netdev_ops = &cpsw_netdev_ops; + SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); + netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); + + /* register the network device */ + SET_NETDEV_DEV(ndev, &pdev->dev); + ret = register_netdev(ndev); + if (ret) { + pr_err("cpsw: error registering net device\n"); + free_netdev(ndev); + ret = -ENODEV; + } + + return ret; +} + static int cpsw_probe(struct platform_device *pdev) { - struct cpsw_platform_data *data = pdev->dev.platform_data; + struct cpsw_platform_data *data; struct net_device *ndev; struct cpsw_priv *priv; struct cpdma_params dma_params; @@ -1163,6 +1689,11 @@ static int cpsw_probe(struct platform_device *pdev) priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); + priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); + if (!priv->cpts) { + pr_err("error allocating cpts\n"); + goto clean_ndev_ret; + } /* * This may be required here for child devices. @@ -1195,12 +1726,17 @@ static int cpsw_probe(struct platform_device *pdev) for (i = 0; i < data->slaves; i++) priv->slaves[i].slave_num = i; + priv->slaves[0].ndev = ndev; + priv->emac_port = 0; + priv->clk = clk_get(&pdev->dev, "fck"); if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "fck is not found\n"); ret = -ENODEV; goto clean_slave_ret; } + priv->coal_intvl = 0; + priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!priv->cpsw_res) { @@ -1249,7 +1785,7 @@ static int cpsw_probe(struct platform_device *pdev) switch (priv->version) { case CPSW_VERSION_1: priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; @@ -1260,7 +1796,7 @@ static int cpsw_probe(struct platform_device *pdev) break; case CPSW_VERSION_2: priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; @@ -1342,12 +1878,13 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } priv->irqs_table[k] = i; - priv->num_irqs = k; + priv->num_irqs = k + 1; } k++; } - ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */ + priv->irq_enabled = true; + ndev->features |= NETIF_F_HW_VLAN_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); @@ -1362,17 +1899,26 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_irq_ret; } - if (cpts_register(&pdev->dev, &priv->cpts, + if (cpts_register(&pdev->dev, priv->cpts, data->cpts_clock_mult, data->cpts_clock_shift)) dev_err(priv->dev, "error registering cpts device\n"); cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", priv->cpsw_res->start, ndev->irq); + if
(priv->data.dual_emac) { + ret = cpsw_probe_dual_emac(pdev, priv); + if (ret) { + cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); + goto clean_irq_ret; + } + } + return 0; clean_irq_ret: - free_irq(ndev->irq, priv); + for (i = 0; i < priv->num_irqs; i++) + free_irq(priv->irqs_table[i], priv); clean_ale_ret: cpsw_ale_destroy(priv->ale); clean_dma_ret: @@ -1395,7 +1941,8 @@ clean_slave_ret: pm_runtime_disable(&pdev->dev); kfree(priv->slaves); clean_ndev_ret: - free_netdev(ndev); + kfree(priv->data.slave_data); + free_netdev(priv->ndev); return ret; } @@ -1403,12 +1950,17 @@ static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); + int i; - pr_info("removing device"); platform_set_drvdata(pdev, NULL); + if (priv->data.dual_emac) + unregister_netdev(cpsw_get_slave_ndev(priv, 1)); + unregister_netdev(ndev); + + cpts_unregister(priv->cpts); + for (i = 0; i < priv->num_irqs; i++) + free_irq(priv->irqs_table[i], priv); - cpts_unregister(&priv->cpts); - free_irq(ndev->irq, priv); cpsw_ale_destroy(priv->ale); cpdma_chan_destroy(priv->txch); cpdma_chan_destroy(priv->rxch); @@ -1422,8 +1974,10 @@ static int cpsw_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); clk_put(priv->clk); kfree(priv->slaves); + kfree(priv->data.slave_data); + if (priv->data.dual_emac) + free_netdev(cpsw_get_slave_ndev(priv, 1)); free_netdev(ndev); - return 0; } @@ -1459,6 +2013,7 @@ static const struct of_device_id cpsw_of_mtable[] = { { .compatible = "ti,cpsw", }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, cpsw_of_mtable); static struct platform_driver cpsw_driver = { .driver = { diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 0e9ccc2..7fa60d6 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry) return idx; } -static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) +int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS]; int type, idx; @@ -160,6 +160,8 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) type = cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) continue; + if (cpsw_ale_get_vlan_id(ale_entry) != vid) + continue; cpsw_ale_get_addr(ale_entry, entry_addr); if (memcmp(entry_addr, addr, 6) == 0) return idx; @@ -167,6 +169,22 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr) return -ENOENT; } +int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + if (cpsw_ale_get_vlan_id(ale_entry) == vid) + return idx; + } + return -ENOENT; +} + static int cpsw_ale_match_free(struct cpsw_ale *ale) { u32 ale_entry[ALE_ENTRY_WORDS]; @@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask) return 0; } -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags) +static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, + int flags, u16 vid) +{ + if (flags & ALE_VLAN) { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR); + cpsw_ale_set_vlan_id(ale_entry, vid); + } else { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + } +} + 
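Note how cpsw_ale_match_addr() above now takes a VID and skips entries whose VLAN differs, so the same MAC address can coexist on several VLANs. A hedged toy model of that (MAC, VID) keyed lookup, using a plain array in place of the hardware ALE table words:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy model of the patched cpsw_ale_match_addr(): match on the
 * (MAC, VID) pair, not the MAC alone. The table is a plain array,
 * not ALE hardware entry words. */
struct toy_entry {
	uint8_t addr[6];
	uint16_t vid;
};

static int toy_match_addr(const struct toy_entry *tbl, int n,
			  const uint8_t *addr, uint16_t vid)
{
	for (int idx = 0; idx < n; idx++) {
		if (tbl[idx].vid != vid)
			continue;	/* same MAC on another VLAN: skip */
		if (memcmp(tbl[idx].addr, addr, 6) == 0)
			return idx;
	}
	return -1;	/* -ENOENT in the kernel version */
}

int main(void)
{
	const uint8_t mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
	const struct toy_entry tbl[] = {
		{ { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 }, 5 },
		{ { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 }, 100 },
	};

	printf("vid 100 -> idx %d\n", toy_match_addr(tbl, 2, mac, 100));
	return 0;
}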
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); + cpsw_ale_set_addr(ale_entry, addr); cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); cpsw_ale_set_port_num(ale_entry, port); - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) idx = cpsw_ale_match_free(ale); if (idx < 0) @@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags) return 0; } -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port) +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx < 0) return -ENOENT; @@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port) } int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int super, int mcast_state) + int flags, u16 vid, int mcast_state) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx, mask; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); if (idx >= 0) cpsw_ale_read(ale, idx, ale_entry); - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); + cpsw_ale_set_addr(ale_entry, addr); - cpsw_ale_set_super(ale_entry, super); + cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0); cpsw_ale_set_mcast_state(ale_entry, mcast_state); mask = cpsw_ale_get_port_mask(ale_entry); @@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, return 0; } -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask) +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; - idx = cpsw_ale_match_addr(ale, addr); + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ?
vid : 0); if (idx < 0) return -EINVAL; @@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask) return 0; } +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, + int reg_mcast, int unreg_mcast) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx >= 0) + cpsw_ale_read(ale, idx, ale_entry); + + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); + cpsw_ale_set_vlan_id(ale_entry, vid); + + cpsw_ale_set_vlan_untag_force(ale_entry, untag); + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_set_vlan_member_list(ale_entry, port); + + if (idx < 0) + idx = cpsw_ale_match_free(ale); + if (idx < 0) + idx = cpsw_ale_find_ageable(ale); + if (idx < 0) + return -ENOMEM; + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} + +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx < 0) + return -ENOENT; + + cpsw_ale_read(ale, idx, ale_entry); + + if (port_mask) + cpsw_ale_set_vlan_member_list(ale_entry, port_mask); + else + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} + struct ale_control_info { const char *name; int offset, port_offset; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 2bd09cb..30daa12 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -64,8 +64,14 @@ enum cpsw_ale_port_state { }; /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */ -#define ALE_SECURE 1 -#define ALE_BLOCKED 2 +#define ALE_SECURE BIT(0) +#define ALE_BLOCKED BIT(1) +#define ALE_SUPER BIT(2) +#define ALE_VLAN BIT(3) + +#define ALE_PORT_HOST BIT(0) +#define ALE_PORT_1 BIT(1) +#define ALE_PORT_2 BIT(2) #define ALE_MCAST_FWD 0 #define ALE_MCAST_BLOCK_LEARN_FWD 1 @@ -81,11 +87,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale); int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); -int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags); -int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port); +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, - int super, int mcast_state); -int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask); + int flags, u16 vid, int mcast_state); +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid); +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, + int reg_mcast, int unreg_mcast); +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 4995673..49dfd59 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -20,6 +20,7 @@ #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/io.h> +#include <linux/delay.h> #include 
"davinci_cpdma.h" @@ -60,6 +61,9 @@ #define CPDMA_DESC_EOQ BIT(28) #define CPDMA_DESC_TD_COMPLETE BIT(27) #define CPDMA_DESC_PASS_CRC BIT(26) +#define CPDMA_DESC_TO_PORT_EN BIT(20) +#define CPDMA_TO_PORT_SHIFT 16 +#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16)) #define CPDMA_TEARDOWN_VALUE 0xfffffffc @@ -105,13 +109,13 @@ struct cpdma_ctlr { }; struct cpdma_chan { + struct cpdma_desc __iomem *head, *tail; + void __iomem *hdp, *cp, *rxfree; enum cpdma_state state; struct cpdma_ctlr *ctlr; int chan_num; spinlock_t lock; - struct cpdma_desc __iomem *head, *tail; int count; - void __iomem *hdp, *cp, *rxfree; u32 mask; cpdma_handler_fn handler; enum dma_data_direction dir; @@ -132,6 +136,14 @@ struct cpdma_chan { #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) +#define cpdma_desc_to_port(chan, mode, directed) \ + do { \ + if (!is_rx_chan(chan) && ((directed == 1) || \ + (directed == 2))) \ + mode |= (CPDMA_DESC_TO_PORT_EN | \ + (directed << CPDMA_TO_PORT_SHIFT)); \ + } while (0) + /* * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci * emac) have dedicated on-chip memory for these descriptors. Some other @@ -217,17 +229,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) } static struct cpdma_desc __iomem * -cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc) +cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) { unsigned long flags; int index; + int desc_start; + int desc_end; struct cpdma_desc __iomem *desc = NULL; spin_lock_irqsave(&pool->lock, flags); - index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0, - num_desc, 0); - if (index < pool->num_desc) { + if (is_rx) { + desc_start = 0; + desc_end = pool->num_desc/2; + } else { + desc_start = pool->num_desc/2; + desc_end = pool->num_desc; + } + + index = bitmap_find_next_zero_area(pool->bitmap, + desc_end, desc_start, num_desc, 0); + if (index < desc_end) { bitmap_set(pool->bitmap, index, num_desc); desc = pool->iomap + pool->desc_size * index; pool->used_desc++; @@ -291,14 +313,16 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) } if (ctlr->params.has_soft_reset) { - unsigned long timeout = jiffies + HZ/10; + unsigned timeout = 10 * 100; dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); - while (time_before(jiffies, timeout)) { + while (timeout) { if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) break; + udelay(10); + timeout--; } - WARN_ON(!time_before(jiffies, timeout)); + WARN_ON(!timeout); } for (i = 0; i < ctlr->num_chan; i++) { @@ -439,10 +463,8 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) if (ctlr->state != CPDMA_STATE_IDLE) cpdma_ctlr_stop(ctlr); - for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { - if (ctlr->channels[i]) - cpdma_chan_destroy(ctlr->channels[i]); - } + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) + cpdma_chan_destroy(ctlr->channels[i]); cpdma_desc_pool_destroy(ctlr->pool); spin_unlock_irqrestore(&ctlr->lock, flags); @@ -473,11 +495,13 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl); -void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr) +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) { - dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0); + dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); } +EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, cpdma_handler_fn handler) @@ -652,7 
+676,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan, } int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, - int len, gfp_t gfp_mask) + int len, int directed) { struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; @@ -668,7 +692,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, goto unlock_ret; } - desc = cpdma_desc_alloc(ctlr->pool, 1); + desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); if (!desc) { chan->stats.desc_alloc_fail++; ret = -ENOMEM; @@ -682,6 +706,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, buffer = dma_map_single(ctlr->dev, data, len, chan->dir); mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; + cpdma_desc_to_port(chan, mode, directed); desc_write(desc, hw_next, 0); desc_write(desc, hw_buffer, buffer); @@ -704,6 +729,29 @@ unlock_ret: } EXPORT_SYMBOL_GPL(cpdma_chan_submit); +bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) +{ + unsigned long flags; + int index; + bool ret; + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc_pool *pool = ctlr->pool; + + spin_lock_irqsave(&pool->lock, flags); + + index = bitmap_find_next_zero_area(pool->bitmap, + pool->num_desc, pool->num_desc/2, 1, 0); + + if (index < pool->num_desc) + ret = true; + else + ret = false; + + spin_unlock_irqrestore(&pool->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); + static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc, int outlen, int status) @@ -728,6 +776,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc __iomem *desc; int status, outlen; + int cb_status = 0; struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t desc_dma; unsigned long flags; @@ -749,7 +798,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) status = -EBUSY; goto unlock_ret; } - status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE); + status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | + CPDMA_DESC_PORT_MASK); chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); chan_write(chan, cp, desc_dma); @@ -762,8 +812,12 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) } spin_unlock_irqrestore(&chan->lock, flags); + if (unlikely(status & CPDMA_DESC_TD_COMPLETE)) + cb_status = -ENOSYS; + else + cb_status = status; - __cpdma_chan_free(chan, desc, outlen, status); + __cpdma_chan_free(chan, desc, outlen, cb_status); return status; unlock_ret: @@ -822,7 +876,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) struct cpdma_desc_pool *pool = ctlr->pool; unsigned long flags; int ret; - unsigned long timeout; + unsigned timeout; spin_lock_irqsave(&chan->lock, flags); if (chan->state != CPDMA_STATE_ACTIVE) { @@ -837,14 +891,15 @@ int cpdma_chan_stop(struct cpdma_chan *chan) dma_reg_write(ctlr, chan->td, chan_linear(chan)); /* wait for teardown complete */ - timeout = jiffies + HZ/10; /* 100 msec */ - while (time_before(jiffies, timeout)) { + timeout = 100 * 100; /* 100 ms */ + while (timeout) { u32 cp = chan_read(chan, cp); if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE) break; - cpu_relax(); + udelay(10); + timeout--; } - WARN_ON(!time_before(jiffies, timeout)); + WARN_ON(!timeout); chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); /* handle completed packets */ @@ -984,3 +1039,6 @@ unlock_ret: spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } +EXPORT_SYMBOL_GPL(cpdma_control_set); + +MODULE_LICENSE("GPL"); diff --git 
a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index afa19a0..86dee48 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -24,6 +24,13 @@ #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) #define chan_linear(chan) __chan_linear((chan)->chan_num) +#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7) + +#define CPDMA_EOI_RX_THRESH 0x0 +#define CPDMA_EOI_RX 0x1 +#define CPDMA_EOI_TX 0x2 +#define CPDMA_EOI_MISC 0x3 + struct cpdma_params { struct device *dev; void __iomem *dmaregs; @@ -82,12 +89,13 @@ int cpdma_chan_dump(struct cpdma_chan *chan); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats); int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, - int len, gfp_t gfp_mask); + int len, int directed); int cpdma_chan_process(struct cpdma_chan *chan, int quota); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); -void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr); +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable); +bool cpdma_check_free_tx_desc(struct cpdma_chan *chan); enum cpdma_control { CPDMA_CMD_IDLE, /* write-only */ diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index f52d8be..5aa9e4d 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -120,7 +120,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DEF_TX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_CH (0) /* Default 0th channel */ #define EMAC_DEF_RX_NUM_DESC (128) -#define EMAC_DEF_TX_NUM_DESC (128) #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ @@ -342,7 +341,6 @@ struct emac_priv { u32 mac_hash2; u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; u32 rx_addr_type; - atomic_t cur_tx; const char *phy_id; #ifdef CONFIG_OF struct device_node *phy_node; @@ -1039,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status) recycle: ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); + skb_tailroom(skb), 0); WARN_ON(ret == -ENOMEM); if (unlikely(ret < 0)) @@ -1050,10 +1048,10 @@ static void emac_tx_handler(void *token, int len, int status) { struct sk_buff *skb = token; struct net_device *ndev = skb->dev; - struct emac_priv *priv = netdev_priv(ndev); - - atomic_dec(&priv->cur_tx); + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ if (unlikely(netif_queue_stopped(ndev))) netif_wake_queue(ndev); ndev->stats.tx_packets++; @@ -1094,14 +1092,17 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len, - GFP_KERNEL); + 0); if (unlikely(ret_code != 0)) { if (netif_msg_tx_err(priv) && net_ratelimit()) dev_err(emac_dev, "DaVinci EMAC: desc submit failed"); goto fail_tx; } - if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC) + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. 
+ */ + if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) netif_stop_queue(ndev); return NETDEV_TX_OK; @@ -1264,7 +1265,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) /* Store mac addr in priv and rx channel and set it in EMAC hw */ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); - ndev->addr_assign_type &= ~NET_ADDR_RANDOM; /* MAC address is configured only after the interface is enabled. */ if (netif_running(ndev)) { @@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget) * Polled functionality used by netconsole and others in non interrupt mode * */ -void emac_poll_controller(struct net_device *ndev) +static void emac_poll_controller(struct net_device *ndev) { struct emac_priv *priv = netdev_priv(ndev); @@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev) break; ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, - skb_tailroom(skb), GFP_KERNEL); + skb_tailroom(skb), 0); if (WARN_ON(ret < 0)) break; } @@ -1865,21 +1865,18 @@ static int davinci_emac_probe(struct platform_device *pdev) /* obtain emac clock from kernel */ - emac_clk = clk_get(&pdev->dev, NULL); + emac_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(emac_clk)) { dev_err(&pdev->dev, "failed to get EMAC clock\n"); return -EBUSY; } emac_bus_frequency = clk_get_rate(emac_clk); - clk_put(emac_clk); /* TODO: Probe PHY here if possible */ ndev = alloc_etherdev(sizeof(struct emac_priv)); - if (!ndev) { - rc = -ENOMEM; - goto no_ndev; - } + if (!ndev) + return -ENOMEM; platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); @@ -1893,7 +1890,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); rc = -ENODEV; - goto probe_quit; + goto no_pdata; } /* MAC addr and PHY mask , RMII enable info from platform_data */ @@ -1913,23 +1910,23 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!res) { dev_err(&pdev->dev,"error getting res\n"); rc = -ENOENT; - goto probe_quit; + goto no_pdata; } priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; size = resource_size(res); - if (!request_mem_region(res->start, size, ndev->name)) { + if (!devm_request_mem_region(&pdev->dev, res->start, + size, ndev->name)) { dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); rc = -ENXIO; - goto probe_quit; + goto no_pdata; } - priv->remap_addr = ioremap(res->start, size); + priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size); if (!priv->remap_addr) { dev_err(&pdev->dev, "unable to map IO\n"); rc = -ENOMEM; - release_mem_region(res->start, size); - goto probe_quit; + goto no_pdata; } priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; ndev->base_addr = (unsigned long)priv->remap_addr; @@ -1962,7 +1959,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!priv->dma) { dev_err(&pdev->dev, "error initializing DMA\n"); rc = -ENOMEM; - goto no_dma; + goto no_pdata; } priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), @@ -1971,14 +1968,14 @@ static int davinci_emac_probe(struct platform_device *pdev) emac_rx_handler); if (WARN_ON(!priv->txchan || !priv->rxchan)) { rc = -ENOMEM; - goto no_irq_res; + goto no_cpdma_chan; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "error getting irq res\n"); rc = -ENOENT; - goto no_irq_res; + goto no_cpdma_chan; } ndev->irq = res->start; @@ -2000,7 +1997,7 @@ static int davinci_emac_probe(struct 
platform_device *pdev) if (rc) { dev_err(&pdev->dev, "error in register_netdev\n"); rc = -ENODEV; - goto no_irq_res; + goto no_cpdma_chan; } @@ -2015,20 +2012,14 @@ static int davinci_emac_probe(struct platform_device *pdev) return 0; -no_irq_res: +no_cpdma_chan: if (priv->txchan) cpdma_chan_destroy(priv->txchan); if (priv->rxchan) cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); -no_dma: - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - iounmap(priv->remap_addr); - -probe_quit: +no_pdata: free_netdev(ndev); -no_ndev: return rc; } @@ -2041,14 +2032,12 @@ no_ndev: */ static int davinci_emac_remove(struct platform_device *pdev) { - struct resource *res; struct net_device *ndev = platform_get_drvdata(pdev); struct emac_priv *priv = netdev_priv(ndev); dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); platform_set_drvdata(pdev, NULL); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (priv->txchan) cpdma_chan_destroy(priv->txchan); @@ -2056,10 +2045,7 @@ static int davinci_emac_remove(struct platform_device *pdev) cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); - release_mem_region(res->start, resource_size(res)); - unregister_netdev(ndev); - iounmap(priv->remap_addr); free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index cca2550..12aec17 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -320,10 +320,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) int ret, addr; data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) { - dev_err(dev, "failed to alloc device data\n"); + if (!data) return -ENOMEM; - } data->bus = mdiobus_alloc(); if (!data->bus) { @@ -487,6 +485,7 @@ static const struct of_device_id davinci_mdio_of_mtable[] = { { .compatible = "ti,davinci_mdio", }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); static struct platform_driver davinci_mdio_driver = { .driver = { diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 2272538..60c400f 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -320,6 +320,7 @@ static void tlan_remove_one(struct pci_dev *pdev) free_netdev(dev); pci_set_drvdata(pdev, NULL); + cancel_work_sync(&priv->tlan_tqueue); } static void tlan_start(struct net_device *dev) @@ -1911,10 +1912,8 @@ static void tlan_reset_lists(struct net_device *dev) list->frame_size = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if (!skb) { - netdev_err(dev, "Out of memory for received data\n"); + if (!skb) break; - } list->buffer[0].address = pci_map_single(priv->pci_dev, skb->data, diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index 6e1915a..c00c13a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = { {0x00008258, 0x00000000}, {0x0000825c, 0x40000000}, {0x00008260, 0x00080922}, - {0x00008264, 0x9bc00010}, + {0x00008264, 0x9d400010}, {0x00008268, 0xffffffff}, {0x0000826c, 0x0000ffff}, {0x00008270, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 05d5ba6..0663653 
100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) * required version. */ if (priv->fw_version_major != MAJOR_VERSION_REQ || - priv->fw_version_minor != MINOR_VERSION_REQ) { + priv->fw_version_minor < MINOR_VERSION_REQ) { dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", MAJOR_VERSION_REQ, MINOR_VERSION_REQ); return -EINVAL; diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index e8486c1..b70f220 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: - /* FIXME */ + ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco, + avoid); break; #endif } diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index a43415a..bc75528 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc) return 0; } } + +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) +{ + u32 pmu_ctl = 0; + + switch (cc->dev->bus->chip_id) { + case 0x4322: + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070); + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a); + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854); + if (spuravoid == 1) + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828); + else + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828); + pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; + break; + case 43222: + /* TODO: BCM43222 requires updating PLLs too */ + return; + default: + ssb_printk(KERN_ERR PFX + "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", + cc->dev->bus->chip_id); + return; + } + + chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl); +} +EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate); diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index dc61c12..0a49456 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) { struct fb_info *info = file_fb_info(file); struct fb_ops *fb; - unsigned long off; + unsigned long mmio_pgoff; unsigned long start; u32 len; if (!info) return -ENODEV; - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) - return -EINVAL; - off = vma->vm_pgoff << PAGE_SHIFT; fb = info->fbops; if (!fb) return -ENODEV; @@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) return res; } - /* frame buffer memory */ + /* + * Ugh. This can be either the frame buffer mapping, or + * if pgoff points past it, the mmio mapping. 
+ */ start = info->fix.smem_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); - if (off >= len) { - /* memory mapped io */ - off -= len; - if (info->var.accel_flags) { - mutex_unlock(&info->mm_lock); - return -EINVAL; - } + len = info->fix.smem_len; + mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; + if (vma->vm_pgoff >= mmio_pgoff) { + vma->vm_pgoff -= mmio_pgoff; start = info->fix.mmio_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); + len = info->fix.mmio_len; } mutex_unlock(&info->mm_lock); - start &= PAGE_MASK; - if ((vma->vm_end - vma->vm_start + off) > len) - return -EINVAL; - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - fb_pgprotect(file, vma, off); - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return -EAGAIN; - return 0; + fb_pgprotect(file, vma, start); + + return vm_iomap_memory(vma, start, len); } static int diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 0c42cdb..5843a47 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1132,6 +1132,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, goto whole; if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE)) goto whole; + return 0; } /* Do not dump I/O mapped devices or special mappings */ diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 744a69b..8a00e2f 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -318,6 +318,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, unsigned long src_ptr; unsigned long dst_ptr; int overwrite_root = 0; + bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) overwrite_root = 1; @@ -327,6 +328,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, /* look for the key in the destination tree */ ret = btrfs_search_slot(NULL, root, key, path, 0, 0); + if (ret < 0) + return ret; + if (ret == 0) { char *src_copy; char *dst_copy; @@ -368,6 +372,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, return 0; } + /* + * We need to load the old nbytes into the inode so when we + * replay the extents we've logged we get the right nbytes. + */ + if (inode_item) { + struct btrfs_inode_item *item; + u64 nbytes; + + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_inode_item); + nbytes = btrfs_inode_nbytes(path->nodes[0], item); + item = btrfs_item_ptr(eb, slot, + struct btrfs_inode_item); + btrfs_set_inode_nbytes(eb, item, nbytes); + } + } else if (inode_item) { + struct btrfs_inode_item *item; + + /* + * New inode, set nbytes to 0 so that the nbytes comes out + * properly when we replay the extents. 
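The tree-log hunks here change how an inode's nbytes is rebuilt during replay: a brand-new inode starts at zero and each replayed extent adds its own contribution, instead of saving and restoring the pre-replay byte count. A small model of the accounting rule used by replay_one_extent() below, with made-up extent values: regular extents add num_bytes, holes and prealloc extents (disk_bytenr == 0) add nothing, and inline extents add ram_bytes.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the nbytes rule in replay_one_extent(); the values are
 * invented, only the classification logic mirrors the patch. */
struct extent {
	int inline_ext;
	uint64_t disk_bytenr, num_bytes, ram_bytes;
};

static uint64_t extent_nbytes(const struct extent *e)
{
	if (e->inline_ext)
		return e->ram_bytes;	/* inline data counts ram_bytes */
	return e->disk_bytenr ? e->num_bytes : 0; /* holes/prealloc count 0 */
}

int main(void)
{
	const struct extent replayed[] = {
		{ 0, 4096, 8192, 0 },	/* regular extent: adds 8192 */
		{ 0, 0, 4096, 0 },	/* hole: adds 0 */
		{ 1, 0, 0, 100 },	/* inline extent: adds 100 */
	};
	uint64_t nbytes = 0;	/* new inode: nbytes starts at zero */

	for (unsigned int i = 0; i < sizeof(replayed) / sizeof(replayed[0]); i++)
		nbytes += extent_nbytes(&replayed[i]);
	printf("replayed nbytes = %llu\n", (unsigned long long)nbytes);
	return 0;
}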
+ */ + item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); + btrfs_set_inode_nbytes(eb, item, 0); } insert: btrfs_release_path(path); @@ -488,7 +516,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, u64 mask = root->sectorsize - 1; u64 extent_end; u64 start = key->offset; - u64 saved_nbytes; + u64 nbytes = 0; struct btrfs_file_extent_item *item; struct inode *inode = NULL; unsigned long size; @@ -498,10 +526,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, found_type = btrfs_file_extent_type(eb, item); if (found_type == BTRFS_FILE_EXTENT_REG || - found_type == BTRFS_FILE_EXTENT_PREALLOC) - extent_end = start + btrfs_file_extent_num_bytes(eb, item); - else if (found_type == BTRFS_FILE_EXTENT_INLINE) { + found_type == BTRFS_FILE_EXTENT_PREALLOC) { + nbytes = btrfs_file_extent_num_bytes(eb, item); + extent_end = start + nbytes; + + /* + * We don't add to the inodes nbytes if we are prealloc or a + * hole. + */ + if (btrfs_file_extent_disk_bytenr(eb, item) == 0) + nbytes = 0; + } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size = btrfs_file_extent_inline_len(eb, item); + nbytes = btrfs_file_extent_ram_bytes(eb, item); extent_end = (start + size + mask) & ~mask; } else { ret = 0; @@ -550,7 +587,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, } btrfs_release_path(path); - saved_nbytes = inode_get_bytes(inode); /* drop any overlapping extents */ ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); BUG_ON(ret); @@ -637,7 +673,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, BUG_ON(ret); } - inode_set_bytes(inode, saved_nbytes); + inode_add_bytes(inode, nbytes); ret = btrfs_update_inode(trans, root, inode); out: if (inode) diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 5a0c0b6..c84696c 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -796,13 +796,13 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) _enter(""); - do { - spin_lock(&cookie->stores_lock); + spin_lock(&cookie->stores_lock); + while (1) { n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, - ARRAY_SIZE(results), - FSCACHE_COOKIE_PENDING_TAG); - if (n == 0) - break; + ARRAY_SIZE(results), + FSCACHE_COOKIE_PENDING_TAG); + if (n == 0) + break; for (i = n - 1; i >= 0; i--) { page = results[i]; radix_tree_delete(&cookie->stores, page->index); @@ -812,7 +812,8 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) for (i = n - 1; i >= 0; i--) page_cache_release(results[i]); - } while (1); + spin_lock(&cookie->stores_lock); + } spin_unlock(&cookie->stores_lock); _leave(""); diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index eba76ea..fc8ddc1 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode) struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; - u32 size = inode->i_size; + loff_t size = inode->i_size; res = pagecache_write_begin(NULL, mapping, size, 0, AOP_FLAG_UNINTERRUPTIBLE, diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 78bde32..ccee8cc 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) * way when do_mmap_pgoff unwinds (may be important on powerpc * and ia64). 
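Among the smaller fixes above, the hfsplus one is a classic narrowing bug: hfsplus_file_truncate() stored the 64-bit inode->i_size in a u32, so any size past 4 GiB wrapped before pagecache_write_begin() ever saw it. A tiny standalone illustration with an invented 5 GiB size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t i_size = 5368709120LL;		/* invented 5 GiB size; loff_t is 64-bit */
	uint32_t narrow = (uint32_t)i_size;	/* what the old 'u32 size' did */
	int64_t wide = i_size;			/* what the loff_t fix preserves */

	/* the narrow copy wraps modulo 2^32: 1 GiB instead of 5 GiB */
	printf("u32: %u, loff_t: %lld\n", narrow, (long long)wide);
	return 0;
}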
*/ - vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; vma->vm_ops = &hugetlb_vm_ops; if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) diff --git a/fs/proc/array.c b/fs/proc/array.c index 6a91e6f..be3c22f 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -143,6 +143,7 @@ static const char * const task_state_array[] = { "x (dead)", /* 64 */ "K (wakekill)", /* 128 */ "W (waking)", /* 256 */ + "P (parked)", /* 512 */ }; static inline const char *get_task_state(struct task_struct *tsk) diff --git a/include/linux/capability.h b/include/linux/capability.h index 98503b7..d9a4f7f 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -35,6 +35,7 @@ struct cpu_vfs_cap_data { #define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) +struct file; struct inode; struct dentry; struct user_namespace; @@ -211,6 +212,7 @@ extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool nsown_capable(int cap); extern bool inode_capable(const struct inode *inode, int cap); +extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 6acda9f..226d10b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -525,7 +525,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa); + gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fa7cc72..b0bcce0 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -71,6 +71,7 @@ struct gfn_to_hva_cache { u64 generation; gpa_t gpa; unsigned long hva; + unsigned long len; struct kvm_memory_slot *memslot; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index f499754..e3b3a15 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1655,6 +1655,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); + struct page *follow_page(struct vm_area_struct *, unsigned long address, unsigned int foll_flags); diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h index 24368a2..bb3cd58 100644 --- a/include/linux/platform_data/cpsw.h +++ b/include/linux/platform_data/cpsw.h @@ -21,6 +21,8 @@ struct cpsw_slave_data { char phy_id[MII_BUS_ID_SIZE]; int phy_if; u8 mac_addr[ETH_ALEN]; + u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ + }; struct cpsw_platform_data { @@ -28,13 +30,15 @@ struct cpsw_platform_data { u32 channels; /* number of cpdma channels (symmetric) */ u32 slaves; /* number of slave cpgmac ports */ struct cpsw_slave_data *slave_data; - u32 cpts_active_slave; /* time stamping slave */ + u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ 
u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */ u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ u32 ale_entries; /* ale table size */ u32 bd_ram_size; /*buffer descriptor ram size */ u32 rx_descs; /* Number of Rx Descriptios */ u32 mac_control; /* Mac control register */ + u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ + bool dual_emac; /* Enable Dual EMAC mode */ }; #endif /* __CPSW_H__ */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 9a569c9..a7f4212 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -85,7 +85,7 @@ do { \ #ifndef CONFIG_PREEMPT_RT_BASE # define preempt_enable_no_resched() sched_preempt_enable_no_resched() -# define preempt_check_resched_rt() do { } while (0) +# define preempt_check_resched_rt() barrier() #else # define preempt_enable_no_resched() preempt_enable() # define preempt_check_resched_rt() preempt_check_resched() @@ -149,7 +149,7 @@ do { \ #define preempt_disable_notrace() barrier() #define preempt_enable_no_resched_notrace() barrier() #define preempt_enable_notrace() barrier() -#define preempt_check_resched_rt() do { } while (0) +#define preempt_check_resched_rt() barrier() #endif /* CONFIG_PREEMPT_COUNT */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 913ec05..8c4837b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -165,9 +165,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) #define TASK_DEAD 64 #define TASK_WAKEKILL 128 #define TASK_WAKING 256 -#define TASK_STATE_MAX 512 +#define TASK_PARKED 512 +#define TASK_STATE_MAX 1024 -#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" +#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" extern char ___assert_task_state[1 - 2*!!( sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h index 1fe8fc0..9fd4319 100644 --- a/include/linux/spinlock_types_rt.h +++ b/include/linux/spinlock_types_rt.h @@ -5,6 +5,8 @@ #error "Do not include directly. 
Include spinlock_types.h instead" #endif +#include <linux/cache.h> + /* * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: */ diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h index 9e492be..6fcfe99 100644 --- a/include/linux/ssb/ssb_driver_chipcommon.h +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -219,6 +219,7 @@ #define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ #define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ #define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 +#define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400 #define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ #define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ #define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ @@ -667,5 +668,6 @@ enum ssb_pmu_ldo_volt_id { void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, enum ssb_pmu_ldo_volt_id id, u32 voltage); void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); +void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid); #endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/net/tcp.h b/include/net/tcp.h index aed42c7..4da2167 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1045,6 +1045,7 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) if (sysctl_tcp_low_latency || !tp->ucopy.task) return false; + skb_dst_force(skb); __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; if (tp->ucopy.memory > sk->sk_rcvbuf) { diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 5a8671e..e5586ca 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch, __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, { 16, "Z" }, { 32, "X" }, { 64, "x" }, - { 128, "W" }) : "R", + { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R", __entry->prev_state & TASK_STATE_MAX ? "+" : "", __entry->next_comm, __entry->next_pid, __entry->next_prio) ); diff --git a/kernel/capability.c b/kernel/capability.c index 493d972..f6c2ce5 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -393,6 +393,30 @@ bool ns_capable(struct user_namespace *ns, int cap) EXPORT_SYMBOL(ns_capable); /** + * file_ns_capable - Determine if the file's opener had a capability in effect + * @file: The file we want to check + * @ns: The usernamespace we want the capability in + * @cap: The capability to be tested for + * + * Return true if task that opened the file had a capability in effect + * when the file was opened. + * + * This does not set PF_SUPERPRIV because the caller may not + * actually be privileged. 
+ */ +bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap) +{ + if (WARN_ON_ONCE(!cap_valid(cap))) + return false; + + if (security_capable(file->f_cred, ns, cap) == 0) + return true; + + return false; +} +EXPORT_SYMBOL(file_ns_capable); + +/** * capable - Determine if the current task has a superior capability in effect * @cap: The capability to be tested for * diff --git a/kernel/events/core.c b/kernel/events/core.c index 02aad6d..45f7b3e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5328,7 +5328,7 @@ static void sw_perf_event_destroy(struct perf_event *event) static int perf_swevent_init(struct perf_event *event) { - int event_id = event->attr.config; + u64 event_id = event->attr.config; if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b8ba4e3..c2c58f9 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -62,6 +62,7 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = { + .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock), .clock_base = { { @@ -1853,8 +1854,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu) struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; - raw_spin_lock_init(&cpu_base->lock); - for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { cpu_base->clock_base[i].cpu_base = cpu_base; timerqueue_init_head(&cpu_base->clock_base[i].active); diff --git a/kernel/kthread.c b/kernel/kthread.c index 691dc2e..9eb7fed 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task) static void __kthread_parkme(struct kthread *self) { - __set_current_state(TASK_INTERRUPTIBLE); + __set_current_state(TASK_PARKED); while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) complete(&self->parked); schedule(); - __set_current_state(TASK_INTERRUPTIBLE); + __set_current_state(TASK_PARKED); } clear_bit(KTHREAD_IS_PARKED, &self->flags); __set_current_state(TASK_RUNNING); @@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), } EXPORT_SYMBOL(kthread_create_on_node); -static void __kthread_bind(struct task_struct *p, unsigned int cpu) +static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state) { + /* Must have done schedule() in kthread() before we set_task_cpu */ + if (!wait_task_inactive(p, state)) { + WARN_ON(1); + return; + } /* It's safe because the task is inactive. */ do_set_cpus_allowed(p, cpumask_of(cpu)); p->flags |= PF_THREAD_BOUND; @@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu) */ void kthread_bind(struct task_struct *p, unsigned int cpu) { - /* Must have done schedule() in kthread() before we set_task_cpu */ - if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { - WARN_ON(1); - return; - } - __kthread_bind(p, cpu); + __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(kthread_bind); @@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k) return NULL; } +static void __kthread_unpark(struct task_struct *k, struct kthread *kthread) +{ + clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + /* + * We clear the IS_PARKED bit here as we don't wait + * until the task has left the park code. So if we'd + * park before that happens we'd see the IS_PARKED bit + * which might be about to be cleared. 
+ */ + if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { + if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) + __kthread_bind(k, kthread->cpu, TASK_PARKED); + wake_up_state(k, TASK_PARKED); + } +} + /** * kthread_unpark - unpark a thread created by kthread_create(). * @k: thread created by kthread_create(). @@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k) { struct kthread *kthread = task_get_live_kthread(k); - if (kthread) { - clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); - /* - * We clear the IS_PARKED bit here as we don't wait - * until the task has left the park code. So if we'd - * park before that happens we'd see the IS_PARKED bit - * which might be about to be cleared. - */ - if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { - if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) - __kthread_bind(k, kthread->cpu); - wake_up_process(k); - } - } + if (kthread) + __kthread_unpark(k, kthread); put_task_struct(k); } @@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k) trace_sched_kthread_stop(k); if (kthread) { set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); - clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + __kthread_unpark(k, kthread); wake_up_process(k); wait_for_completion(&kthread->exited); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5d82809..1ec9d1f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1562,8 +1562,10 @@ static void try_to_wake_up_local(struct task_struct *p) { struct rq *rq = task_rq(p); - BUG_ON(rq != this_rq()); - BUG_ON(p == current); + if (WARN_ON_ONCE(rq != this_rq()) || + WARN_ON_ONCE(p == current)) + return; + lockdep_assert_held(&rq->lock); if (!raw_spin_trylock(&p->pi_lock)) { @@ -2883,7 +2885,7 @@ static noinline void __schedule_bug(struct task_struct *prev) print_modules(); if (irqs_disabled()) print_irqtrace_events(prev); -#ifdef DEBUG_PREEMPT +#ifdef CONFIG_DEBUG_PREEMPT if (in_atomic_preempt_off()) { pr_err("Preemption disabled at:"); print_ip_sym(current->preempt_disable_ip); @@ -5324,7 +5326,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep) } static int min_load_idx = 0; -static int max_load_idx = CPU_LOAD_IDX_MAX; +static int max_load_idx = CPU_LOAD_IDX_MAX-1; static void set_table_entry(struct ctl_table *entry, @@ -7410,7 +7412,7 @@ void __might_sleep(const char *file, int line, int preempt_offset) debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); -#ifdef DEBUG_PREEMPT +#ifdef CONFIG_DEBUG_PREEMPT if (!preempt_count_equals(preempt_offset)) { pr_err("Preemption disabled at:"); print_ip_sym(current->preempt_disable_ip); diff --git a/kernel/signal.c b/kernel/signal.c index 750810a..03b6e8f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2980,7 +2980,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) static int do_tkill(pid_t tgid, pid_t pid, int sig) { - struct siginfo info; + struct siginfo info = {}; info.si_signo = sig; info.si_errno = 0; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index f45e128..f359dc7 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -25,7 +25,8 @@ static struct kmem_cache *user_ns_cachep __read_mostly; -static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, +static bool new_idmap_permitted(const struct file *file, + struct user_namespace *ns, int cap_setid, struct uid_gid_map *map); static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns) @@ -575,10 +576,10 @@ static ssize_t map_write(struct 
file *file, const char __user *buf, if (map->nr_extents != 0) goto out; - /* Require the appropriate privilege CAP_SETUID or CAP_SETGID - * over the user namespace in order to set the id mapping. + /* + * Adjusting namespace settings requires capabilities on the target. */ - if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) + if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN)) goto out; /* Get a buffer */ @@ -666,7 +667,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, ret = -EPERM; /* Validate the user is allowed to use user id's mapped to. */ - if (!new_idmap_permitted(ns, cap_setid, &new_map)) + if (!new_idmap_permitted(file, ns, cap_setid, &new_map)) goto out; /* Map the lower ids from the parent user namespace to the @@ -753,7 +754,8 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t &ns->projid_map, &ns->parent->projid_map); } -static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, +static bool new_idmap_permitted(const struct file *file, + struct user_namespace *ns, int cap_setid, struct uid_gid_map *new_map) { /* Allow mapping to your own filesystem ids */ @@ -761,12 +763,12 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, u32 id = new_map->extent[0].lower_first; if (cap_setid == CAP_SETUID) { kuid_t uid = make_kuid(ns->parent, id); - if (uid_eq(uid, current_fsuid())) + if (uid_eq(uid, file->f_cred->fsuid)) return true; } else if (cap_setid == CAP_SETGID) { kgid_t gid = make_kgid(ns->parent, id); - if (gid_eq(gid, current_fsgid())) + if (gid_eq(gid, file->f_cred->fsgid)) return true; } } @@ -777,8 +779,10 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, /* Allow the specified ids if we have the appropriate capability * (CAP_SETUID or CAP_SETGID) over the parent user namespace. + * And the opener of the id file also had the appropriate capability. */ - if (ns_capable(ns->parent, cap_setid)) + if (ns_capable(ns->parent, cap_setid) && + file_ns_capable(file, ns->parent, cap_setid)) return true; return false; diff --git a/localversion-rt b/localversion-rt index c3054d0..8fc605d 100644 --- a/localversion-rt +++ b/localversion-rt @@ -1 +1 @@ --rt2 +-rt6 diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d7cec92..88eb939 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2965,7 +2965,17 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, break; } - if (absent || + /* + * We need to call hugetlb_fault for both hugepages under migration + * (in which case hugetlb_fault waits for the migration) and + * hwpoisoned hugepages (in which case we need to prevent the + * caller from accessing them). To do this we use is_swap_pte here + * instead of is_hugetlb_entry_migration and + * is_hugetlb_entry_hwpoisoned: it covers both cases, and we + * cannot follow pages correctly through any kind of swap entry + * anyway. + */ + if (absent || is_swap_pte(huge_ptep_get(pte)) || ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { int ret; diff --git a/mm/memory.c b/mm/memory.c index 2f6e5c7..23b82ee 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2358,6 +2358,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(remap_pfn_range); +/** + * vm_iomap_memory - remap memory to userspace + * @vma: user vma to map to + * @start: start of area + * @len: size of area + * + * This is a simplified io_remap_pfn_range() for common driver use.
The + * driver just needs to give us the physical memory range to be mapped, and + * we'll figure out the rest from the vma information. + * + * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set + * up write-combining or similar. + */ +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) +{ + unsigned long vm_len, pfn, pages; + + /* Check that the physical memory area passed in looks valid */ + if (start + len < start) + return -EINVAL; + /* + * You *really* shouldn't map things that aren't page-aligned, + * but we've historically allowed it because IO memory might + * just have smaller alignment. + */ + len += start & ~PAGE_MASK; + pfn = start >> PAGE_SHIFT; + pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; + if (pfn + pages < pfn) + return -EINVAL; + + /* We start the mapping 'vm_pgoff' pages into the area */ + if (vma->vm_pgoff > pages) + return -EINVAL; + pfn += vma->vm_pgoff; + pages -= vma->vm_pgoff; + + /* Can we fit all of the mapping? */ + vm_len = vma->vm_end - vma->vm_start; + if (vm_len >> PAGE_SHIFT > pages) + return -EINVAL; + + /* Ok, let it rip */ + return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_iomap_memory); + static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c @@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); -static DEFINE_LOCAL_IRQ_LOCK(swap_lock); +static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); /* * This path almost never happens for VM activity - pages are normally @@ -407,13 +407,13 @@ static void activate_page_drain(int cpu) void activate_page(struct page *page) { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - struct pagevec *pvec = &get_locked_var(swap_lock, + struct pagevec *pvec = &get_locked_var(swapvec_lock, activate_page_pvecs); page_cache_get(page); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, __activate_page, NULL); - put_locked_var(swap_lock, activate_page_pvecs); + put_locked_var(swapvec_lock, activate_page_pvecs); } } @@ -461,13 +461,13 @@ EXPORT_SYMBOL(mark_page_accessed); */ void __lru_cache_add(struct page *page, enum lru_list lru) { - struct pagevec *pvec = &get_locked_var(swap_lock, lru_add_pvecs)[lru]; + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru]; page_cache_get(page); if (!pagevec_space(pvec)) __pagevec_lru_add(pvec, lru); pagevec_add(pvec, page); - put_locked_var(swap_lock, lru_add_pvecs); + put_locked_var(swapvec_lock, lru_add_pvecs); } EXPORT_SYMBOL(__lru_cache_add); @@ -632,19 +632,19 @@ void deactivate_page(struct page *page) return; if (likely(get_page_unless_zero(page))) { - struct pagevec *pvec = &get_locked_var(swap_lock, + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_deactivate_pvecs); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); - put_locked_var(swap_lock, lru_deactivate_pvecs); + put_locked_var(swapvec_lock, lru_deactivate_pvecs); } } void lru_add_drain(void) { - lru_add_drain_cpu(local_lock_cpu(swap_lock)); - local_unlock_cpu(swap_lock); + lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); + local_unlock_cpu(swapvec_lock); } static void lru_add_drain_per_cpu(struct work_struct *dummy) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index e14e676..a1a7997 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c
@@ -3723,8 +3723,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, /* prep auth_data so we don't go into idle on disassoc */ ifmgd->auth_data = auth_data; - if (ifmgd->associated) - ieee80211_set_disassoc(sdata, 0, 0, false, NULL); + if (ifmgd->associated) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_UNSPECIFIED, + false, frame_buf); + + __cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); + } sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@ -3783,8 +3791,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, mutex_lock(&ifmgd->mtx); - if (ifmgd->associated) - ieee80211_set_disassoc(sdata, 0, 0, false, NULL); + if (ifmgd->associated) { + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, + WLAN_REASON_UNSPECIFIED, + false, frame_buf); + + __cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); + } if (ifmgd->auth_data && !ifmgd->auth_data->done) { err = -EBUSY; diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 09b4286..f4aaf5a 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -3222,18 +3222,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap); int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area) { - long size; - unsigned long offset; + struct snd_pcm_runtime *runtime = substream->runtime; area->vm_page_prot = pgprot_noncached(area->vm_page_prot); - area->vm_flags |= VM_IO; - size = area->vm_end - area->vm_start; - offset = area->vm_pgoff << PAGE_SHIFT; - if (io_remap_pfn_range(area, area->vm_start, - (substream->runtime->dma_addr + offset) >> PAGE_SHIFT, - size, area->vm_page_prot)) - return -EAGAIN; - return 0; + return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes); } EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index cfb7e4d..52058f0 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -73,9 +73,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; u64 redir_content; - ASSERT(redir_index < IOAPIC_NUM_PINS); + if (redir_index < IOAPIC_NUM_PINS) + redir_content = + ioapic->redirtbl[redir_index].bits; + else + redir_content = ~0ULL; - redir_content = ioapic->redirtbl[redir_index].bits; result = (ioapic->ioregsel & 0x1) ?
(redir_content >> 32) & 0xffffffff : redir_content & 0xffffffff; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index bc9152d..60ac292 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1476,21 +1476,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, } int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa) + gpa_t gpa, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); int offset = offset_in_page(gpa); - gfn_t gfn = gpa >> PAGE_SHIFT; + gfn_t start_gfn = gpa >> PAGE_SHIFT; + gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; + gfn_t nr_pages_needed = end_gfn - start_gfn + 1; + gfn_t nr_pages_avail; ghc->gpa = gpa; ghc->generation = slots->generation; - ghc->memslot = gfn_to_memslot(kvm, gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); - if (!kvm_is_error_hva(ghc->hva)) + ghc->len = len; + ghc->memslot = gfn_to_memslot(kvm, start_gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); + if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { ghc->hva += offset; - else - return -EFAULT; - + } else { + /* + * If the requested region crosses two memslots, we still + * verify that the entire region is valid here. + */ + while (start_gfn <= end_gfn) { + ghc->memslot = gfn_to_memslot(kvm, start_gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, + &nr_pages_avail); + if (kvm_is_error_hva(ghc->hva)) + return -EFAULT; + start_gfn += nr_pages_avail; + } + /* Use the slow path for cross page reads and writes. */ + ghc->memslot = NULL; + } return 0; } EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); @@ -1501,8 +1518,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, struct kvm_memslots *slots = kvm_memslots(kvm); int r; + BUG_ON(len > ghc->len); + if (slots->generation != ghc->generation) - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); + + if (unlikely(!ghc->memslot)) + return kvm_write_guest(kvm, ghc->gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT; @@ -1522,8 +1544,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, struct kvm_memslots *slots = kvm_memslots(kvm); int r; + BUG_ON(len > ghc->len); + if (slots->generation != ghc->generation) - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); + + if (unlikely(!ghc->memslot)) + return kvm_read_guest(kvm, ghc->gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT;
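
The snd_pcm_lib_mmap_iomem() conversion above is exactly the pattern the new mm/memory.c helper is meant for. As a minimal sketch of the same pattern in an arbitrary driver: everything named mydev_* below is hypothetical and not from this patch series; only vm_iomap_memory() and pgprot_noncached() are the interfaces shown in the patches.

struct mydev {				/* hypothetical driver state */
	phys_addr_t phys_base;		/* physical base of the device window */
	unsigned long phys_len;		/* length of the window in bytes */
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *mydev = file->private_data;

	/* Device windows are normally mapped uncached, as in the PCM hunk. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/*
	 * vm_iomap_memory() validates vm_pgoff and the vma size against
	 * the physical range and calls io_remap_pfn_range() itself, so
	 * the size/offset bookkeeping deleted from pcm_native.c above no
	 * longer has to be open-coded in each driver.
	 */
	return vm_iomap_memory(vma, mydev->phys_base, mydev->phys_len);
}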
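
Likewise for the kvm_main.c change: the gfn_to_hva cache now records its length at init time, so cached accesses can be bounds-checked and a region crossing a memslot boundary falls back to the uncached slow path. A caller-side sketch, where struct my_shared_rec and publish_rec() are hypothetical and only the two kvm_* calls are the functions changed above:

struct my_shared_rec {			/* hypothetical guest ABI */
	u32 seq;
	u64 payload;
};

static int publish_rec(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
		       gpa_t gpa, struct my_shared_rec *rec)
{
	/* Register the whole region once; its length is kept in ghc->len. */
	int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(*rec));

	if (r)
		return r;

	/*
	 * Later cached accesses must stay within the registered length
	 * (BUG_ON(len > ghc->len)); if the region straddled a memslot,
	 * ghc->memslot was left NULL above and this transparently takes
	 * the kvm_write_guest() slow path.
	 */
	return kvm_write_guest_cached(kvm, ghc, rec, sizeof(*rec));
}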