From a363dc5d23ec6af6bf1196c04725e40106240d93 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 19 Nov 2013 16:42:47 +0100 Subject: lockdep: Correctly annotate hardirq context in irq_exit() There was a reported deadlock on -rt which lockdep didn't report. It turns out that in irq_exit() we tell lockdep that the hardirq context ends and then do all kinds of locking afterwards. To fix it, move trace_hardirq_exit() to the very end of irq_exit(), this ensures all locking in tick_irq_exit() and rcu_irq_exit() are properly recorded as happening from hardirq context. This however leads to the 'fun' little problem of running softirqs while in hardirq context. To cure this make the softirq code a little more complex (in the CONFIG_TRACE_IRQFLAGS case). Due to stack swizzling arch dependent trickery we cannot pass an argument to __do_softirq() to tell it if it was done from hardirq context or not; so use a side-band argument. When we do __do_softirq() from hardirq context, 'atomically' flip to softirq context and back, so that no locking goes without being in either hard- or soft-irq context. I didn't find any new problems in mainline using this patch, but it did show the -rt problem. Reported-by: Sebastian Andrzej Siewior Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-dgwc5cdksbn0jk09vbmcc9sa@git.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/softirq.c b/kernel/softirq.c index d7d498d..52f6c54 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -209,14 +209,52 @@ EXPORT_SYMBOL(local_bh_enable_ip); #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) #define MAX_SOFTIRQ_RESTART 10 +#ifdef CONFIG_TRACE_IRQFLAGS +/* + * Convoluted means of passing __do_softirq() a message through the various + * architecture execute_on_stack() bits. + * + * When we run softirqs from irq_exit() and thus on the hardirq stack we need + * to keep the lockdep irq context tracking as tight as possible in order to + * not miss-qualify lock contexts and miss possible deadlocks. 
+ */ +static DEFINE_PER_CPU(int, softirq_from_hardirq); + +static inline void lockdep_softirq_from_hardirq(void) +{ + this_cpu_write(softirq_from_hardirq, 1); +} + +static inline void lockdep_softirq_start(void) +{ + if (this_cpu_read(softirq_from_hardirq)) + trace_hardirq_exit(); + lockdep_softirq_enter(); +} + +static inline void lockdep_softirq_end(void) +{ + lockdep_softirq_exit(); + if (this_cpu_read(softirq_from_hardirq)) { + this_cpu_write(softirq_from_hardirq, 0); + trace_hardirq_enter(); + } +} + +#else +static inline void lockdep_softirq_from_hardirq(void) { } +static inline void lockdep_softirq_start(void) { } +static inline void lockdep_softirq_end(void) { } +#endif + asmlinkage void __do_softirq(void) { - struct softirq_action *h; - __u32 pending; unsigned long end = jiffies + MAX_SOFTIRQ_TIME; - int cpu; unsigned long old_flags = current->flags; int max_restart = MAX_SOFTIRQ_RESTART; + struct softirq_action *h; + __u32 pending; + int cpu; /* * Mask out PF_MEMALLOC s current task context is borrowed for the @@ -229,7 +267,7 @@ asmlinkage void __do_softirq(void) account_irq_enter_time(current); __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET); - lockdep_softirq_enter(); + lockdep_softirq_start(); cpu = smp_processor_id(); restart: @@ -276,8 +314,7 @@ restart: wakeup_softirqd(); } - lockdep_softirq_exit(); - + lockdep_softirq_end(); account_irq_exit_time(current); __local_bh_enable(SOFTIRQ_OFFSET); tsk_restore_flags(current, old_flags, PF_MEMALLOC); @@ -329,6 +366,7 @@ void irq_enter(void) static inline void invoke_softirq(void) { if (!force_irqthreads) { + lockdep_softirq_from_hardirq(); /* * We can safely execute softirq on the current stack if * it is the irq stack, because it should be near empty @@ -368,13 +406,13 @@ void irq_exit(void) #endif account_irq_exit_time(current); - trace_hardirq_exit(); sub_preempt_count(HARDIRQ_OFFSET); if (!in_interrupt() && local_softirq_pending()) invoke_softirq(); tick_irq_exit(); rcu_irq_exit(); + trace_hardirq_exit(); /* must be last! */ } /* -- cgit v0.10.2 From d507c3d471579aaf7b359a955e1bf5d616fd268d Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Thu, 12 Dec 2013 10:15:59 +0100 Subject: net: make neigh_priv_len in struct net_device 16bit instead of 8bit neigh_priv_len is defined as u8. With all debug enabled, struct ipoib_neigh is 200 bytes. The largest part is sk_buff_head with 96 bytes, of which the spinlock accounts for 72 bytes. The size value still fits in this u8, leaving some room for more. On -RT, struct ipoib_neigh puts on weight and grows to 392 bytes. The main reason is sk_buff_head with 288 bytes, of which the spinlock alone is 192 bytes. This no longer fits into neigh_priv_len and gcc complains. This patch changes neigh_priv_len from being 8bit to 16bit. Since the following element (dev_id) is 16bit, followed by a spinlock which is aligned, the struct remains at a total size of 3200 (allmodconfig) / 2048 (with as much debug off as possible) bytes on x86-64. On x86-32 the struct is 1856 (allmodconfig) / 1216 (with as much debug off as possible) bytes long. The numbers were obtained with and without the patch to prove that this change does not increase the size of the struct. Cc: Steven Rostedt Cc: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S.
Miller diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9f2a0cb..e9ff3e5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1214,7 +1214,7 @@ struct net_device { unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ - unsigned char neigh_priv_len; + unsigned short neigh_priv_len; unsigned short dev_id; /* Used to differentiate devices * that share the same link * layer address -- cgit v0.10.2 From af8161f6ad494f440905a3660147c9086f789e4d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 8 Nov 2013 09:03:10 -0800 Subject: rcu: Don't activate RCU core on NO_HZ_FULL CPUs Whenever a CPU receives a scheduling-clock interrupt, RCU checks to see if the RCU core needs anything from this CPU. If so, RCU raises RCU_SOFTIRQ to carry out any needed processing. This approach has worked well historically, but it is undesirable on NO_HZ_FULL CPUs. Such CPUs are expected to spend almost all of their time in userspace, so that scheduling-clock interrupts can be disabled while there is only one runnable task on the CPU in question. Unfortunately, raising any softirq has the potential to wake up ksoftirqd, which would provide the second runnable task on that CPU, preventing disabling of scheduling-clock interrupts. What is needed instead is for RCU to leave NO_HZ_FULL CPUs alone, relying on the grace-period kthreads' quiescent-state forcing to do any needed RCU work on behalf of those CPUs. This commit therefore refrains from raising RCU_SOFTIRQ on any NO_HZ_FULL CPUs during any grace periods that have been in effect for less than one second. The one-second limit handles the case where an inappropriate workload is running on a NO_HZ_FULL CPU that features lots of scheduling-clock interrupts, but no idle or userspace time. Reported-by: Mike Galbraith Signed-off-by: Paul E. McKenney Tested-by: Mike Galbraith Tested-by: Frederic Weisbecker Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 32618b3..ee4de3f 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -2658,6 +2658,10 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) /* Check for CPU stalls, if enabled. */ check_cpu_stall(rsp, rdp); + /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */ + if (rcu_nohz_full_cpu(rsp)) + return 0; + /* Is the RCU core waiting for a quiescent state from this CPU? */ if (rcu_scheduler_fully_active && rdp->qs_pending && !rdp->passed_quiesce) { diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 52be957..7d71c06 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -564,6 +564,7 @@ static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle, unsigned long maxj); static void rcu_bind_gp_kthread(void); static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp); +static bool rcu_nohz_full_cpu(struct rcu_state *rsp); #endif /* #ifndef RCU_TREE_NONCORE */ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 511e6b4..22232f8 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -2803,3 +2803,23 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp) } #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ + +/* + * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the + * grace-period kthread will do force_quiescent_state() processing? 
+ * The idea is to avoid waking up RCU core processing on such a + * CPU unless the grace period has extended for too long. + * + * This code relies on the fact that all NO_HZ_FULL CPUs are also + * CONFIG_RCU_NOCB_CPUs. + */ +static bool rcu_nohz_full_cpu(struct rcu_state *rsp) +{ +#ifdef CONFIG_NO_HZ_FULL + if (tick_nohz_full_cpu(smp_processor_id()) && + (!rcu_gp_in_progress(rsp) || + ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ))) + return 1; +#endif /* #ifdef CONFIG_NO_HZ_FULL */ + return 0; +} -- cgit v0.10.2 From 21bb4fa00e5adac2bd9d79b91ee3b412c09d9a82 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 23 Jul 2011 11:04:08 +0200 Subject: early-printk-consolidate.patch Signed-off-by: Thomas Gleixner diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 1434526..0884ccd 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -309,6 +309,7 @@ void __init setup_arch(char **cmdline_p) boot_flags_init(*cmdline_p); + early_console = &prom_early_console; register_console(&prom_early_console); printk("ARCH: "); diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 3fdb455..4306d44 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -555,6 +555,12 @@ static void __init init_sparc64_elf_hwcap(void) pause_patch(); } +static inline void register_prom_console(void) +{ + early_console = &prom_early_console; + register_console(&prom_early_console); +} + void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ @@ -566,7 +572,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_EARLYFB if (btext_find_display()) #endif - register_console(&prom_early_console); + register_prom_console(); if (tlb_type == hypervisor) printk("ARCH: SUN4V\n"); -- cgit v0.10.2 From 9d0a22775c0f7ab8a95d431993a61ccc16393578 Mon Sep 17 00:00:00 2001 From: Kirill Tkhai Date: Fri, 30 Aug 2013 21:16:08 +0400 Subject: sparc: provide EARLY_PRINTK for SPARC sparc does not have CONFIG_EARLY_PRINTK option. So early-printk-consolidate.patch breaks compilation: arch/sparc/built-in.o: In function `setup_arch': (.init.text+0x15e4): undefined reference to `early_console' arch/sparc/built-in.o: In function `setup_arch': (.init.text+0x15ec): undefined reference to `early_console' The below addition fixes that. 
Signed-off-by: Kirill Tkhai Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 78c4fdb..49d5f09 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -523,6 +523,10 @@ menu "Executable file formats" source "fs/Kconfig.binfmt" +config EARLY_PRINTK + bool + default y + config COMPAT bool depends on SPARC64 -- cgit v0.10.2 From f535e3ce3b36bc29ea2444e77491b9e2be121010 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Fri, 13 Dec 2013 09:44:41 +0530 Subject: sparc64: use generic rwsem spinlocks rt Signed-off-by: Allen Pais Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 49d5f09..71e5099 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -177,12 +177,10 @@ config NR_CPUS source kernel/Kconfig.hz config RWSEM_GENERIC_SPINLOCK - bool - default y if SPARC32 + def_bool PREEMPT_RT_FULL config RWSEM_XCHGADD_ALGORITHM - bool - default y if SPARC64 + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL config GENERIC_HWEIGHT bool -- cgit v0.10.2 From e07515fe8eb6d376afaf58c93f002d0b3814cb52 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Fri, 13 Dec 2013 09:44:42 +0530 Subject: sparc64: convert spinlock_t to raw_spinlock_t in mmu_context_t Issue debugged by Thomas Gleixner Signed-off-by: Allen Pais Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 71e5099..da51da9 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -26,6 +26,7 @@ config SPARC select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG select HAVE_ARCH_JUMP_LABEL + select IRQ_FORCED_THREADING select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION select USE_GENERIC_SMP_HELPERS if SMP diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h index 76092c4..e945ddb 100644 --- a/arch/sparc/include/asm/mmu_64.h +++ b/arch/sparc/include/asm/mmu_64.h @@ -90,7 +90,7 @@ struct tsb_config { #endif typedef struct { - spinlock_t lock; + raw_spinlock_t lock; unsigned long sparc64_ctx_val; unsigned long huge_pte_count; struct page *pgtable_page; diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index 3d528f0..3a85624 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -77,7 +77,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str if (unlikely(mm == &init_mm)) return; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); ctx_valid = CTX_VALID(mm->context); if (!ctx_valid) get_new_mmu_context(mm); @@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); } - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); } #define deactivate_mm(tsk,mm) do { } while (0) @@ -136,7 +136,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm unsigned long flags; int cpu; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); if (!CTX_VALID(mm->context)) get_new_mmu_context(mm); cpu = smp_processor_id(); @@ -146,7 +146,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm load_secondary_context(mm); __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); tsb_context_switch(mm); - spin_unlock_irqrestore(&mm->context.lock, flags); + 
raw_spin_unlock_irqrestore(&mm->context.lock, flags); } #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index e142545..8c68424 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -976,12 +976,12 @@ void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *reg if (unlikely(!mm || (mm == &init_mm))) return; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); if (unlikely(!CTX_VALID(mm->context))) get_new_mmu_context(mm); - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); load_secondary_context(mm); __flush_tlb_mm(CTX_HWBITS(mm->context), diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index ed82eda..e9ddd0c 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -350,7 +350,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * mm = vma->vm_mm; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) @@ -361,7 +361,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, address, pte_val(pte)); - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_dcache_page(struct page *page) diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 2cc3bce..d84d4ea 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -73,7 +73,7 @@ void flush_tsb_user(struct tlb_batch *tb) struct mm_struct *mm = tb->mm; unsigned long nentries, base, flags; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; @@ -90,14 +90,14 @@ void flush_tsb_user(struct tlb_batch *tb) __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries); } #endif - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) { unsigned long nentries, base, flags; - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; @@ -114,7 +114,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); } #endif - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); } #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K @@ -392,7 +392,7 @@ retry_tsb_alloc: * the lock and ask all other cpus running this address space * to run tsb_context_switch() to see the new TSB table. 
*/ - spin_lock_irqsave(&mm->context.lock, flags); + raw_spin_lock_irqsave(&mm->context.lock, flags); old_tsb = mm->context.tsb_block[tsb_index].tsb; old_cache_index = @@ -407,7 +407,7 @@ retry_tsb_alloc: */ if (unlikely(old_tsb && (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) { - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); kmem_cache_free(tsb_caches[new_cache_index], new_tsb); return; @@ -433,7 +433,7 @@ retry_tsb_alloc: mm->context.tsb_block[tsb_index].tsb = new_tsb; setup_tsb_params(mm, tsb_index, new_size); - spin_unlock_irqrestore(&mm->context.lock, flags); + raw_spin_unlock_irqrestore(&mm->context.lock, flags); /* If old_tsb is NULL, we're being invoked for the first time * from init_new_context(). @@ -459,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) #endif unsigned int i; - spin_lock_init(&mm->context.lock); + raw_spin_lock_init(&mm->context.lock); mm->context.sparc64_ctx_val = 0UL; -- cgit v0.10.2 From 591a09fe39e9974c062333714fc802c6d6d8984b Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Fri, 13 Dec 2013 09:44:43 +0530 Subject: sparc64: convert ctx_alloc_lock raw_spinlock_t Signed-off-by: Allen Pais Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index 3a85624..44e393b 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -13,7 +13,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } -extern spinlock_t ctx_alloc_lock; +extern raw_spinlock_t ctx_alloc_lock; extern unsigned long tlb_context_cache; extern unsigned long mmu_context_bmap[]; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index e9ddd0c..ec995b0 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -661,7 +661,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end) EXPORT_SYMBOL(__flush_dcache_range); /* get_new_mmu_context() uses "cache + 1". 
*/ -DEFINE_SPINLOCK(ctx_alloc_lock); +DEFINE_RAW_SPINLOCK(ctx_alloc_lock); unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; #define MAX_CTX_NR (1UL << CTX_NR_BITS) #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) @@ -683,7 +683,7 @@ void get_new_mmu_context(struct mm_struct *mm) unsigned long orig_pgsz_bits; int new_version; - spin_lock(&ctx_alloc_lock); + raw_spin_lock(&ctx_alloc_lock); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); @@ -719,7 +719,7 @@ void get_new_mmu_context(struct mm_struct *mm) out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; - spin_unlock(&ctx_alloc_lock); + raw_spin_unlock(&ctx_alloc_lock); if (unlikely(new_version)) smp_new_mmu_context_version(); @@ -2721,7 +2721,7 @@ void hugetlb_setup(struct pt_regs *regs) if (tlb_type == cheetah_plus) { unsigned long ctx; - spin_lock(&ctx_alloc_lock); + raw_spin_lock(&ctx_alloc_lock); ctx = mm->context.sparc64_ctx_val; ctx &= ~CTX_PGSZ_MASK; ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; @@ -2742,7 +2742,7 @@ void hugetlb_setup(struct pt_regs *regs) mm->context.sparc64_ctx_val = ctx; on_each_cpu(context_reload, mm, 0); } - spin_unlock(&ctx_alloc_lock); + raw_spin_unlock(&ctx_alloc_lock); } } #endif diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index d84d4ea..9eb10b4 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -523,12 +523,12 @@ void destroy_context(struct mm_struct *mm) free_hot_cold_page(page, 0); } - spin_lock_irqsave(&ctx_alloc_lock, flags); + raw_spin_lock_irqsave(&ctx_alloc_lock, flags); if (CTX_VALID(mm->context)) { unsigned long nr = CTX_NRBITS(mm->context); mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); } - spin_unlock_irqrestore(&ctx_alloc_lock, flags); + raw_spin_unlock_irqrestore(&ctx_alloc_lock, flags); } -- cgit v0.10.2 From ddfb14341d4b843ee42d26dc3d9b102216f8caa5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Mar 2013 14:44:30 +0100 Subject: kernel/SRCU: provide a static initializer There are macros providing a static initializer for three out of the four possible notifier types, namely: ATOMIC_NOTIFIER_HEAD() BLOCKING_NOTIFIER_HEAD() RAW_NOTIFIER_HEAD() This patch provides a static initializer for the fourth type (the SRCU notifier head) to make the set complete. Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/notifier.h b/include/linux/notifier.h index d14a4c3..2e4414a 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -6,7 +6,7 @@ * * Alan Cox */ - + #ifndef _LINUX_NOTIFIER_H #define _LINUX_NOTIFIER_H #include @@ -42,9 +42,7 @@ * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. * As compensation, srcu_notifier_chain_unregister() is rather expensive. * SRCU notifier chains should be used when the chain will be called very - * often but notifier_blocks will seldom be removed. Also, SRCU notifier - * chains are slightly more difficult to use because they require special - * runtime initialization. + * often but notifier_blocks will seldom be removed.
*/ typedef int (*notifier_fn_t)(struct notifier_block *nb, @@ -88,7 +86,7 @@ struct srcu_notifier_head { (name)->head = NULL; \ } while (0) -/* srcu_notifier_heads must be initialized and cleaned up dynamically */ +/* srcu_notifier_heads must be cleaned up dynamically */ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #define srcu_cleanup_notifier_head(name) \ cleanup_srcu_struct(&(name)->srcu); @@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); .head = NULL } #define RAW_NOTIFIER_INIT(name) { \ .head = NULL } -/* srcu_notifier_heads cannot be initialized statically */ + +#define SRCU_NOTIFIER_INIT(name, pcpu) \ + { \ + .mutex = __MUTEX_INITIALIZER(name.mutex), \ + .head = NULL, \ + .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ + } #define ATOMIC_NOTIFIER_HEAD(name) \ struct atomic_notifier_head name = \ @@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); struct raw_notifier_head name = \ RAW_NOTIFIER_INIT(name) +#define _SRCU_NOTIFIER_HEAD(name, mod) \ + static DEFINE_PER_CPU(struct srcu_struct_array, \ + name##_head_srcu_array); \ + mod struct srcu_notifier_head name = \ + SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) + +#define SRCU_NOTIFIER_HEAD(name) \ + _SRCU_NOTIFIER_HEAD(name, ) + +#define SRCU_NOTIFIER_HEAD_STATIC(name) \ + _SRCU_NOTIFIER_HEAD(name, static) + #ifdef __KERNEL__ extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, @@ -182,12 +198,12 @@ static inline int notifier_to_errno(int ret) /* * Declared notifiers so far. I can imagine quite a few more chains - * over time (eg laptop power reset chains, reboot chain (to clean + * over time (eg laptop power reset chains, reboot chain (to clean * device units up), device [un]mount chain, module load/unload chain, - * low memory chain, screenblank chain (for plug in modular screenblankers) + * low memory chain, screenblank chain (for plug in modular screenblankers) * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... */ - + /* CPU notfiers are defined in include/linux/cpu.h. 
*/ /* netdevice notifiers are defined in include/linux/netdevice.h */ diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c114614..d5e50dd 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp); void process_srcu(struct work_struct *work); -#define __SRCU_STRUCT_INIT(name) \ +#define __SRCU_STRUCT_INIT(name, pcpu_name) \ { \ .completed = -300, \ - .per_cpu_ref = &name##_srcu_array, \ + .per_cpu_ref = &pcpu_name, \ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ .running = false, \ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ @@ -104,11 +104,12 @@ void process_srcu(struct work_struct *work); */ #define DEFINE_SRCU(name) \ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ - struct srcu_struct name = __SRCU_STRUCT_INIT(name); + struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array); #define DEFINE_STATIC_SRCU(name) \ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ - static struct srcu_struct name = __SRCU_STRUCT_INIT(name); + static struct srcu_struct name = __SRCU_STRUCT_INIT(\ + name, name##_srcu_array); /** * call_srcu() - Queue a callback for invocation after an SRCU grace period -- cgit v0.10.2 From e9b1adc14817ddaf9bd9800ae530ea5fd80e8262 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 4 Oct 2013 17:37:09 +0200 Subject: Kind of revert "powerpc: 52xx: provide a default in mpc52xx_irqhost_map()" This more or less reverts commit 6391f697d4892a6f233501beea553e13f7745a23. Instead of adding an unneeded 'default', mark the variable to prevent the false positive 'uninitialized var'. The other change (fixing the printout) needs revert, too. We want to know WHICH critical irq failed, not which level it had. Signed-off-by: Wolfram Sang Cc: Sebastian Andrzej Siewior Cc: Anatolij Gustschin Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index b69221b..2898b73 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, { int l1irq; int l2irq; - struct irq_chip *irqchip; + struct irq_chip *uninitialized_var(irqchip); void *hndlr; int type; u32 reg; @@ -373,9 +373,8 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break; case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break; case MPC52xx_IRQ_L1_CRIT: - default: pr_warn("%s: Critical IRQ #%d is unsupported! 
Nopping it.\n", - __func__, l1irq); + __func__, l2irq); irq_set_chip(virq, &no_irq_chip); return 0; } -- cgit v0.10.2 From cb495faf34a150608f1c3c25fc79b99a7a18be15 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Jan 2013 23:03:29 +0100 Subject: sched: Init idle->on_rq in init_idle() Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a494ace..723962a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4206,6 +4206,7 @@ void init_idle(struct task_struct *idle, int cpu) rcu_read_unlock(); rq->curr = rq->idle = idle; + idle->on_rq = 1; #if defined(CONFIG_SMP) idle->on_cpu = 1; #endif -- cgit v0.10.2 From 151b2e78dcb7543e7ee3e6fd7907abb60105d0f7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Jan 2013 23:34:08 +0100 Subject: sched: Check for idle task in might_sleep() Idle is not allowed to call sleeping functions ever! Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 723962a..767b3b8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6586,7 +6586,8 @@ void __might_sleep(const char *file, int line, int preempt_offset) static unsigned long prev_jiffy; /* ratelimiting */ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ - if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || + if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && + !is_idle_task(current)) || system_state != SYSTEM_RUNNING || oops_in_progress) return; if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) -- cgit v0.10.2 From 9b5a31118969f454d2a2d1c52fbb976f2fb58b12 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 5 Oct 2012 08:56:15 +0100 Subject: sched: Better debug output for might sleep might sleep can tell us where interrupts have been disabled, but we have no idea what disabled preemption. Add some debug infrastructure. 
Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index b1e963e..d63aba2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1321,6 +1321,9 @@ struct task_struct { struct mutex perf_event_mutex; struct list_head perf_event_list; #endif +#ifdef CONFIG_DEBUG_PREEMPT + unsigned long preempt_disable_ip; +#endif #ifdef CONFIG_NUMA struct mempolicy *mempolicy; /* Protected by alloc_lock */ short il_next; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 767b3b8..abfc473 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2232,8 +2232,13 @@ void __kprobes add_preempt_count(int val) DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK - 10); #endif - if (preempt_count() == val) - trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); + if (preempt_count() == val) { + unsigned long ip = get_parent_ip(CALLER_ADDR1); +#ifdef CONFIG_DEBUG_PREEMPT + current->preempt_disable_ip = ip; +#endif + trace_preempt_off(CALLER_ADDR0, ip); + } } EXPORT_SYMBOL(add_preempt_count); @@ -2276,6 +2281,13 @@ static noinline void __schedule_bug(struct task_struct *prev) print_modules(); if (irqs_disabled()) print_irqtrace_events(prev); +#ifdef CONFIG_DEBUG_PREEMPT + if (in_atomic_preempt_off()) { + pr_err("Preemption disabled at:"); + print_ip_sym(current->preempt_disable_ip); + pr_cont("\n"); + } +#endif dump_stack(); add_taint(TAINT_WARN, LOCKDEP_STILL_OK); } @@ -6605,6 +6617,13 @@ void __might_sleep(const char *file, int line, int preempt_offset) debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); +#ifdef CONFIG_DEBUG_PREEMPT + if (!preempt_count_equals(preempt_offset)) { + pr_err("Preemption disabled at:"); + print_ip_sym(current->preempt_disable_ip); + pr_cont("\n"); + } +#endif dump_stack(); } EXPORT_SYMBOL(__might_sleep); -- cgit v0.10.2 From d2ac5441781621dd313eb9a09dc78291d0f40bcc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 20 Dec 2012 14:58:00 +0100 Subject: sched: Adjust sched_reset_on_fork when nothing else changes If the policy and priority remain unchanged a possible modification of sched_reset_on_fork gets lost in the early exit path. Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Cc: stable-rt@vger.kernel.org diff --git a/kernel/sched/core.c b/kernel/sched/core.c index abfc473..418936c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3391,10 +3391,13 @@ recheck: } /* - * If not changing anything there's no need to proceed further: + * If not changing anything there's no need to proceed + * further, but store a possible modification of + * reset_on_fork. */ if (unlikely(policy == p->policy && (!rt_policy(policy) || param->sched_priority == p->rt_priority))) { + p->sched_reset_on_fork = reset_on_fork; task_rq_unlock(rq, p, &flags); return 0; } -- cgit v0.10.2 From efb2fb86cd10f4248e1570fa70bb0c4d40b4bc09 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 4 Dec 2012 08:56:41 +0100 Subject: sched: Queue RT tasks to head when prio drops The following scenario does not work correctly: Runqueue of CPUx contains two runnable and pinned tasks: T1: SCHED_FIFO, prio 80 T2: SCHED_FIFO, prio 80 T1 is on the cpu and executes the following syscalls (classic priority ceiling scenario): sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 90); ... sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 80); ... Now T1 gets preempted by T3 (SCHED_FIFO, prio 95). After T3 goes back to sleep the scheduler picks T2. Surprise! 
The same happens w/o actual preemption when T1 is forced into the scheduler due to a sporadic NEED_RESCHED event. The scheduler invokes pick_next_task() which returns T2. So T1 gets preempted and scheduled out. This happens because sched_setscheduler() dequeues T1 from the prio 90 list and then enqueues it on the tail of the prio 80 list behind T2. This violates the POSIX spec and surprises user space which relies on the guarantee that SCHED_FIFO tasks are not scheduled out unless they give the CPU up voluntarily or are preempted by a higher priority task. In the latter case the preempted task must get back on the CPU after the preempting task schedules out again. We fixed a similar issue already in commit 60db48c (sched: Queue a deboosted task to the head of the RT prio queue). The same treatment is necessary for sched_setscheduler(). So enqueue to head of the prio bucket list if the priority of the task is lowered. It might be possible that existing user space relies on the current behaviour, but it can be considered highly unlikely due to the corner case nature of the application scenario. Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Cc: stable-rt@vger.kernel.org diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 418936c..4b67912 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3438,8 +3438,13 @@ recheck: if (running) p->sched_class->set_curr_task(rq); - if (on_rq) - enqueue_task(rq, p, 0); + if (on_rq) { + /* + * We enqueue to tail when the priority of a task is + * increased (user space view). + */ + enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); + } check_class_changed(rq, p, prev_class, oldprio); task_rq_unlock(rq, p, &flags); -- cgit v0.10.2 From 7287941301f62f31b2731d306c9f2d1d3745f8e8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 20 Dec 2012 15:13:49 +0100 Subject: sched: Consider pi boosting in setscheduler If a PI boosted task policy/priority is modified by a setscheduler() call we unconditionally dequeue and requeue the task if it is on the runqueue even if the new priority is lower than the current effective boosted priority. This can result in undesired reordering of the priority bucket list. If the new priority is less or equal than the current effective we just store the new parameters in the task struct and leave the scheduler class and the runqueue untouched. This is handled when the task deboosts itself. Only if the new priority is higher than the effective boosted priority we apply the change immediately. 
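In other words, the resulting decision in __sched_setscheduler() boils down to roughly the following (illustrative sketch only; all names are taken from the hunks below):

        newprio = MAX_RT_PRIO - 1 - param->sched_priority;
        if (rt_mutex_check_prio(p, newprio)) {
                /* New priority is lower or equal (user space view) than the
                 * boosted one: only store the parameters, the requeue happens
                 * when the task deboosts itself. */
                __setscheduler_params(p, policy, param->sched_priority);
                task_rq_unlock(rq, p, &flags);
                return 0;
        }
        /* New priority is higher than the boosted one: apply it immediately. */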
Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Cc: stable-rt@vger.kernel.org diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index 440434d..4d54d6c 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -35,6 +35,7 @@ static inline int rt_task(struct task_struct *p) #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern int rt_mutex_check_prio(struct task_struct *task, int newprio); extern void rt_mutex_adjust_pi(struct task_struct *p); static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { @@ -45,6 +46,10 @@ static inline int rt_mutex_getprio(struct task_struct *p) { return p->normal_prio; } +static inline int rt_mutex_check_prio(struct task_struct *task, int newprio) +{ + return 0; +} # define rt_mutex_adjust_pi(p) do { } while (0) static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 0dd6aec..2656896 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -107,6 +107,18 @@ int rt_mutex_getprio(struct task_struct *task) } /* + * Called by sched_setscheduler() to check whether the priority change + * is overruled by a possible priority boosting. + */ +int rt_mutex_check_prio(struct task_struct *task, int newprio) +{ + if (!task_has_pi_waiters(task)) + return 0; + + return task_top_pi_waiter(task)->pi_list_entry.prio <= newprio; +} + +/* * Adjust the priority of a task, after its pi_waiters got modified. * * This can be both boosting and unboosting. task->pi_lock must be held. diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4b67912..56df3da 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3030,7 +3030,8 @@ EXPORT_SYMBOL(sleep_on_timeout); * This function changes the 'effective' priority of a task. It does * not touch ->normal_prio like __setscheduler(). * - * Used by the rt_mutex code to implement priority inheritance logic. + * Used by the rt_mutex code to implement priority inheritance + * logic. Call site only calls if the priority of the task changed. */ void rt_mutex_setprio(struct task_struct *p, int prio) { @@ -3261,20 +3262,25 @@ static struct task_struct *find_process_by_pid(pid_t pid) return pid ? find_task_by_vpid(pid) : current; } -/* Actually do priority change: must hold rq lock. */ -static void -__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) +static void __setscheduler_params(struct task_struct *p, int policy, int prio) { p->policy = policy; p->rt_priority = prio; p->normal_prio = normal_prio(p); + set_load_weight(p); +} + +/* Actually do priority change: must hold rq lock. 
*/ +static void +__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) +{ + __setscheduler_params(p, policy, prio); /* we are holding p->pi_lock already */ p->prio = rt_mutex_getprio(p); if (rt_prio(p->prio)) p->sched_class = &rt_sched_class; else p->sched_class = &fair_sched_class; - set_load_weight(p); } /* @@ -3296,6 +3302,7 @@ static bool check_same_owner(struct task_struct *p) static int __sched_setscheduler(struct task_struct *p, int policy, const struct sched_param *param, bool user) { + int newprio = MAX_RT_PRIO - 1 - param->sched_priority; int retval, oldprio, oldpolicy = -1, on_rq, running; unsigned long flags; const struct sched_class *prev_class; @@ -3423,6 +3430,25 @@ recheck: task_rq_unlock(rq, p, &flags); goto recheck; } + + p->sched_reset_on_fork = reset_on_fork; + oldprio = p->prio; + + /* + * Special case for priority boosted tasks. + * + * If the new priority is lower or equal (user space view) + * than the current (boosted) priority, we just store the new + * normal parameters and do not touch the scheduler class and + * the runqueue. This will be done when the task deboost + * itself. + */ + if (rt_mutex_check_prio(p, newprio)) { + __setscheduler_params(p, policy, param->sched_priority); + task_rq_unlock(rq, p, &flags); + return 0; + } + on_rq = p->on_rq; running = task_current(rq, p); if (on_rq) @@ -3430,9 +3456,6 @@ recheck: if (running) p->sched_class->put_prev_task(rq, p); - p->sched_reset_on_fork = reset_on_fork; - - oldprio = p->prio; prev_class = p->sched_class; __setscheduler(rq, p, policy, param->sched_priority); @@ -3445,7 +3468,6 @@ recheck: */ enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); } - check_class_changed(rq, p, prev_class, oldprio); task_rq_unlock(rq, p, &flags); -- cgit v0.10.2 From cdd19ee3769eea3ce080e383fc490dc14d305c8e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jun 2011 19:47:02 +0200 Subject: block: Shorten interrupt disabled regions Moving the blk_sched_flush_plug() call out of the interrupt/preempt disabled region in the scheduler allows us to replace local_irq_save/restore(flags) by local_irq_disable/enable() in blk_flush_plug(). Now, instead of doing this, we disable interrupts explicitly when we lock the request_queue and reenable them when we drop the lock. That allows interrupts to be handled when the plug list contains requests for more than one queue. Aside from that, this change makes the scope of the irq disabled region more obvious. The current code confused the hell out of me when looking at: local_irq_save(flags); spin_lock(q->queue_lock); ... queue_unplugged(q...); scsi_request_fn(); spin_unlock(q->queue_lock); spin_lock(shost->host_lock); spin_unlock_irq(shost->host_lock); -------------------^^^ ???? spin_lock_irq(q->queue_lock); spin_unlock(q->lock); local_irq_restore(flags); Also add a comment to __blk_run_queue() documenting that q->request_fn() can drop q->queue_lock and reenable interrupts, but must return with q->queue_lock held and interrupts disabled.
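With the change applied, the interrupt disabled region simply follows the queue lock, i.e. the flow becomes roughly (illustrative sketch of the result, mirroring the excerpt above; the exact code is in the hunks below):

        spin_lock_irq(q->queue_lock);
        ...
        queue_unplugged(q, depth, from_schedule);
                __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);

so interrupts are briefly enabled again whenever blk_flush_plug_list() moves on to the next queue.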
Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Tejun Heo Cc: Jens Axboe Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de diff --git a/block/blk-core.c b/block/blk-core.c index fce4b93..4c7ee30 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2925,7 +2925,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth, blk_run_queue_async(q); else __blk_run_queue(q); - spin_unlock(q->queue_lock); + spin_unlock_irq(q->queue_lock); } static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) @@ -2973,7 +2973,6 @@ EXPORT_SYMBOL(blk_check_plugged); void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; - unsigned long flags; struct request *rq; LIST_HEAD(list); unsigned int depth; @@ -2991,11 +2990,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) q = NULL; depth = 0; - /* - * Save and disable interrupts here, to avoid doing it for every - * queue lock we have to take. - */ - local_irq_save(flags); while (!list_empty(&list)) { rq = list_entry_rq(list.next); list_del_init(&rq->queuelist); @@ -3008,7 +3002,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) queue_unplugged(q, depth, from_schedule); q = rq->q; depth = 0; - spin_lock(q->queue_lock); + spin_lock_irq(q->queue_lock); } /* @@ -3035,8 +3029,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) */ if (q) queue_unplugged(q, depth, from_schedule); - - local_irq_restore(flags); } void blk_finish_plug(struct blk_plug *plug) -- cgit v0.10.2 From 7d646711764bb602b8c4b65bd336a83ff5fd3596 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 14 Feb 2013 22:36:59 +0100 Subject: timekeeping-split-jiffies-lock.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index a6a5bf5..23d7203 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -73,7 +73,8 @@ static struct clocksource clocksource_jiffies = { .shift = JIFFIES_SHIFT, }; -__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); +__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); +__cacheline_aligned_in_smp seqcount_t jiffies_seq; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void) @@ -82,9 +83,9 @@ u64 get_jiffies_64(void) u64 ret; do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); ret = jiffies_64; - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); return ret; } EXPORT_SYMBOL(get_jiffies_64); diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 64522ec..1b80eb0 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -63,13 +63,15 @@ int tick_is_oneshot_available(void) static void tick_periodic(int cpu) { if (tick_do_timer_cpu == cpu) { - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); /* Keep track of the next tick event */ tick_next_period = ktime_add(tick_next_period, tick_period); do_timer(1); - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); } update_process_times(user_mode(get_irq_regs())); @@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) ktime_t next; do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); next = tick_next_period; - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); 
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index bc906ca..7e5e7f8 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -4,7 +4,8 @@ #include #include -extern seqlock_t jiffies_lock; +extern raw_spinlock_t jiffies_lock; +extern seqcount_t jiffies_seq; #define CS_NAME_LEN 32 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index ea20f7d..f12acf8 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now) return; /* Reevalute with jiffies_lock held */ - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); delta = ktime_sub(now, last_jiffies_update); if (delta.tv64 >= tick_period.tv64) { @@ -85,7 +86,8 @@ static void tick_do_update_jiffies64(ktime_t now) /* Keep the tick_next_period variable up to date */ tick_next_period = ktime_add(last_jiffies_update, tick_period); } - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); } /* @@ -95,12 +97,14 @@ static ktime_t tick_init_jiffy_update(void) { ktime_t period; - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); /* Did we start the jiffies update yet ? */ if (last_jiffies_update.tv64 == 0) last_jiffies_update = tick_next_period; period = last_jiffies_update; - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); return period; } @@ -538,11 +542,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, /* Read jiffies and the time when jiffies were updated last */ do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); last_update = last_jiffies_update; last_jiffies = jiffies; time_delta = timekeeping_max_deferment(); - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || arch_needs_cpu(cpu) || irq_work_needs_cpu()) { diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index bfca770..d3150a7 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1754,7 +1754,9 @@ EXPORT_SYMBOL(hardpps); */ void xtime_update(unsigned long ticks) { - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); do_timer(ticks); - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); } -- cgit v0.10.2 From 77f0376e155bf56e977e36faf5f755c3dce9354f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 23 Jul 2013 15:45:51 +0200 Subject: vtime-split-lock-and-seqcount.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 5cd0f09..b59240b 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -145,7 +145,8 @@ extern struct task_group root_task_group; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN # define INIT_VTIME(tsk) \ - .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ + .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ + .vtime_seq = SEQCNT_ZERO, \ .vtime_snap = 0, \ .vtime_snap_whence = VTIME_SYS, #else diff --git a/include/linux/sched.h b/include/linux/sched.h index d63aba2..465e66d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1159,7 +1159,8 @@ struct task_struct { struct cputime prev_cputime; #endif #ifdef 
CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqlock_t vtime_seqlock; + raw_spinlock_t vtime_lock; + seqcount_t vtime_seq; unsigned long long vtime_snap; enum { VTIME_SLEEPING = 0, diff --git a/kernel/fork.c b/kernel/fork.c index 458953c..2bc18dd 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1242,7 +1242,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->prev_cputime.utime = p->prev_cputime.stime = 0; #endif #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqlock_init(&p->vtime_seqlock); + raw_spin_lock_init(&p->vtime_lock); + seqcount_init(&p->vtime_seq); p->vtime_snap = 0; p->vtime_snap_whence = VTIME_SLEEPING; #endif diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 9994791..1681f49 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -655,37 +655,45 @@ static void __vtime_account_system(struct task_struct *tsk) void vtime_account_system(struct task_struct *tsk) { - write_seqlock(&tsk->vtime_seqlock); + raw_spin_lock(&tsk->vtime_lock); + write_seqcount_begin(&tsk->vtime_seq); __vtime_account_system(tsk); - write_sequnlock(&tsk->vtime_seqlock); + write_seqcount_end(&tsk->vtime_seq); + raw_spin_unlock(&tsk->vtime_lock); } void vtime_gen_account_irq_exit(struct task_struct *tsk) { - write_seqlock(&tsk->vtime_seqlock); + raw_spin_lock(&tsk->vtime_lock); + write_seqcount_begin(&tsk->vtime_seq); __vtime_account_system(tsk); if (context_tracking_in_user()) tsk->vtime_snap_whence = VTIME_USER; - write_sequnlock(&tsk->vtime_seqlock); + write_seqcount_end(&tsk->vtime_seq); + raw_spin_unlock(&tsk->vtime_lock); } void vtime_account_user(struct task_struct *tsk) { cputime_t delta_cpu; - write_seqlock(&tsk->vtime_seqlock); + raw_spin_lock(&tsk->vtime_lock); + write_seqcount_begin(&tsk->vtime_seq); delta_cpu = get_vtime_delta(tsk); tsk->vtime_snap_whence = VTIME_SYS; account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); - write_sequnlock(&tsk->vtime_seqlock); + write_seqcount_end(&tsk->vtime_seq); + raw_spin_unlock(&tsk->vtime_lock); } void vtime_user_enter(struct task_struct *tsk) { - write_seqlock(&tsk->vtime_seqlock); + raw_spin_lock(&tsk->vtime_lock); + write_seqcount_begin(&tsk->vtime_seq); __vtime_account_system(tsk); tsk->vtime_snap_whence = VTIME_USER; - write_sequnlock(&tsk->vtime_seqlock); + write_seqcount_end(&tsk->vtime_seq); + raw_spin_unlock(&tsk->vtime_lock); } void vtime_guest_enter(struct task_struct *tsk) @@ -697,19 +705,23 @@ void vtime_guest_enter(struct task_struct *tsk) * synchronization against the reader (task_gtime()) * that can thus safely catch up with a tickless delta. 
*/ - write_seqlock(&tsk->vtime_seqlock); + raw_spin_lock(&tsk->vtime_lock); + write_seqcount_begin(&tsk->vtime_seq); __vtime_account_system(tsk); current->flags |= PF_VCPU; - write_sequnlock(&tsk->vtime_seqlock); + write_seqcount_end(&tsk->vtime_seq); + raw_spin_unlock(&tsk->vtime_lock); } EXPORT_SYMBOL_GPL(vtime_guest_enter); void vtime_guest_exit(struct task_struct *tsk) { - write_seqlock(&tsk->vtime_seqlock); + raw_spin_lock(&tsk->vtime_lock); + write_seqcount_begin(&tsk->vtime_seq); __vtime_account_system(tsk); current->flags &= ~PF_VCPU; - write_sequnlock(&tsk->vtime_seqlock); + write_seqcount_end(&tsk->vtime_seq); + raw_spin_unlock(&tsk->vtime_lock); } EXPORT_SYMBOL_GPL(vtime_guest_exit); @@ -722,24 +734,30 @@ void vtime_account_idle(struct task_struct *tsk) void arch_vtime_task_switch(struct task_struct *prev) { - write_seqlock(&prev->vtime_seqlock); + raw_spin_lock(&prev->vtime_lock); + write_seqcount_begin(&prev->vtime_seq); prev->vtime_snap_whence = VTIME_SLEEPING; - write_sequnlock(&prev->vtime_seqlock); + write_seqcount_end(&prev->vtime_seq); + raw_spin_unlock(&prev->vtime_lock); - write_seqlock(¤t->vtime_seqlock); + raw_spin_lock(¤t->vtime_lock); + write_seqcount_begin(¤t->vtime_seq); current->vtime_snap_whence = VTIME_SYS; current->vtime_snap = sched_clock_cpu(smp_processor_id()); - write_sequnlock(¤t->vtime_seqlock); + write_seqcount_end(¤t->vtime_seq); + raw_spin_unlock(¤t->vtime_lock); } void vtime_init_idle(struct task_struct *t, int cpu) { unsigned long flags; - write_seqlock_irqsave(&t->vtime_seqlock, flags); + raw_spin_lock_irqsave(&t->vtime_lock, flags); + write_seqcount_begin(&t->vtime_seq); t->vtime_snap_whence = VTIME_SYS; t->vtime_snap = sched_clock_cpu(cpu); - write_sequnlock_irqrestore(&t->vtime_seqlock, flags); + write_seqcount_end(&t->vtime_seq); + raw_spin_unlock_irqrestore(&t->vtime_lock, flags); } cputime_t task_gtime(struct task_struct *t) @@ -748,13 +766,13 @@ cputime_t task_gtime(struct task_struct *t) cputime_t gtime; do { - seq = read_seqbegin(&t->vtime_seqlock); + seq = read_seqcount_begin(&t->vtime_seq); gtime = t->gtime; if (t->flags & PF_VCPU) gtime += vtime_delta(t); - } while (read_seqretry(&t->vtime_seqlock, seq)); + } while (read_seqcount_retry(&t->vtime_seq, seq)); return gtime; } @@ -777,7 +795,7 @@ fetch_task_cputime(struct task_struct *t, *udelta = 0; *sdelta = 0; - seq = read_seqbegin(&t->vtime_seqlock); + seq = read_seqcount_begin(&t->vtime_seq); if (u_dst) *u_dst = *u_src; @@ -801,7 +819,7 @@ fetch_task_cputime(struct task_struct *t, if (t->vtime_snap_whence == VTIME_SYS) *sdelta = delta; } - } while (read_seqretry(&t->vtime_seqlock, seq)); + } while (read_seqcount_retry(&t->vtime_seq, seq)); } -- cgit v0.10.2 From 5e4496397a80d602544777cc64f0c755b0846e7a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 21:32:10 +0200 Subject: mips-enable-interrupts-in-signal.patch Signed-off-by: Thomas Gleixner diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 2f285ab..a4ae7ad 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -573,6 +573,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { local_irq_enable(); + preempt_check_resched(); user_exit(); -- cgit v0.10.2 From e5ce7a77922dff56cbe9d8f6d68eed3bf3958137 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 29 Sep 2011 12:24:30 -0500 Subject: tracing: Account for preempt off in preempt_schedule() The preempt_schedule() uses the preempt_disable_notrace() version because it can cause 
infinite recursion by the function tracer as the function tracer uses preempt_enable_notrace() which may call back into the preempt_schedule() code as the NEED_RESCHED is still set and the PREEMPT_ACTIVE has not been set yet. See commit: d1f74e20b5b064a130cd0743a256c2d3cfe84010 that made this change. The preemptoff and preemptirqsoff latency tracers require the first and last preempt count modifiers to enable tracing. But this skips the checks. Since we can not convert them back to the non notrace version, we can use the idle() hooks for the latency tracers here. That is, the start/stop_critical_timings() works well to manually start and stop the latency tracer for preempt off timings. Signed-off-by: Steven Rostedt Signed-off-by: Clark Williams Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 56df3da..011fd46 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2533,7 +2533,16 @@ asmlinkage void __sched notrace preempt_schedule(void) do { add_preempt_count_notrace(PREEMPT_ACTIVE); + /* + * The add/subtract must not be traced by the function + * tracer. But we still want to account for the + * preempt off latency tracer. Since the _notrace versions + * of add/subtract skip the accounting for latency tracer + * we must force it manually. + */ + start_critical_timings(); __schedule(); + stop_critical_timings(); sub_preempt_count_notrace(PREEMPT_ACTIVE); /* -- cgit v0.10.2 From 70b99bd28bdb7efba6f927adad7cc03c52161c94 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 21 Sep 2011 19:57:12 +0200 Subject: signal-revert-ptrace-preempt-magic.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/signal.c b/kernel/signal.c index ded28b9..8ed7287 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1908,15 +1908,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); - /* - * Don't want to allow preemption here, because - * sys_ptrace() needs this task to be inactive. - * - * XXX: implement read_unlock_no_resched(). - */ - preempt_disable(); read_unlock(&tasklist_lock); - preempt_enable_no_resched(); freezable_schedule(); } else { /* -- cgit v0.10.2 From 9141a662b864ace2e16c3530279e24291b4ed788 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 18:21:04 +0200 Subject: ptrace: fix ptrace vs tasklist_lock race As explained by Alexander Fyodorov : |read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel, |and it can remove __TASK_TRACED from task->state (by moving it to |task->saved_state). If parent does wait() on child followed by a sys_ptrace |call, the following race can happen: | |- child sets __TASK_TRACED in ptrace_stop() |- parent does wait() which eventually calls wait_task_stopped() and returns | child's pid |- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves | __TASK_TRACED flag to saved_state |- parent calls sys_ptrace, which calls ptrace_check_attach() and wait_task_inactive() The patch is based on his initial patch where an additional check is added in case the __TASK_TRACED moved to ->saved_state. The pi_lock is taken in case the caller is interrupted between looking into ->state and ->saved_state. 
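The RT specific part of the state test then looks roughly like this (sketch only; the real helpers added to sched.h are in the hunks below):

        raw_spin_lock_irq(&task->pi_lock);
        traced = (task->state & __TASK_TRACED) ||
                 (task->saved_state & __TASK_TRACED);
        raw_spin_unlock_irq(&task->pi_lock);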
Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/sched.h b/include/linux/sched.h index 465e66d..230094d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -165,11 +165,8 @@ extern char ___assert_task_state[1 - 2*!!( TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ __TASK_TRACED) -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) #define task_is_dead(task) ((task)->exit_state != 0) -#define task_is_stopped_or_traced(task) \ - ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) #define task_contributes_to_load(task) \ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0) @@ -2414,6 +2411,51 @@ static inline int need_resched(void) return unlikely(test_thread_flag(TIF_NEED_RESCHED)); } +static inline bool __task_is_stopped_or_traced(struct task_struct *task) +{ + if (task->state & (__TASK_STOPPED | __TASK_TRACED)) + return true; +#ifdef CONFIG_PREEMPT_RT_FULL + if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) + return true; +#endif + return false; +} + +static inline bool task_is_stopped_or_traced(struct task_struct *task) +{ + bool traced_stopped; + +#ifdef CONFIG_PREEMPT_RT_FULL + unsigned long flags; + + raw_spin_lock_irqsave(&task->pi_lock, flags); + traced_stopped = __task_is_stopped_or_traced(task); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); +#else + traced_stopped = __task_is_stopped_or_traced(task); +#endif + return traced_stopped; +} + +static inline bool task_is_traced(struct task_struct *task) +{ + bool traced = false; + + if (task->state & __TASK_TRACED) + return true; +#ifdef CONFIG_PREEMPT_RT_FULL + /* in case the task is sleeping on tasklist_lock */ + raw_spin_lock_irq(&task->pi_lock); + if (task->state & __TASK_TRACED) + traced = true; + else if (task->saved_state & __TASK_TRACED) + traced = true; + raw_spin_unlock_irq(&task->pi_lock); +#endif + return traced; +} + /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1f4bcb3..fddaf65 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -135,7 +135,12 @@ static bool ptrace_freeze_traced(struct task_struct *task) spin_lock_irq(&task->sighand->siglock); if (task_is_traced(task) && !__fatal_signal_pending(task)) { - task->state = __TASK_TRACED; + raw_spin_lock_irq(&task->pi_lock); + if (task->state & __TASK_TRACED) + task->state = __TASK_TRACED; + else + task->saved_state = __TASK_TRACED; + raw_spin_unlock_irq(&task->pi_lock); ret = true; } spin_unlock_irq(&task->sighand->siglock); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 011fd46..d995623 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1024,6 +1024,18 @@ struct migration_arg { static int migration_cpu_stop(void *data); +static bool check_task_state(struct task_struct *p, long match_state) +{ + bool match = false; + + raw_spin_lock_irq(&p->pi_lock); + if (p->state == match_state || p->saved_state == match_state) + match = true; + raw_spin_unlock_irq(&p->pi_lock); + + return match; +} + /* * wait_task_inactive - wait for a thread to unschedule. * @@ -1068,7 +1080,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! 
*/ while (task_running(rq, p)) { - if (match_state && unlikely(p->state != match_state)) + if (match_state && !check_task_state(p, match_state)) return 0; cpu_relax(); } @@ -1083,7 +1095,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) running = task_running(rq, p); on_rq = p->on_rq; ncsw = 0; - if (!match_state || p->state == match_state) + if (!match_state || p->state == match_state + || p->saved_state == match_state) ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &flags); @@ -1579,7 +1592,7 @@ out: */ int wake_up_process(struct task_struct *p) { - WARN_ON(task_is_stopped_or_traced(p)); + WARN_ON(__task_is_stopped_or_traced(p)); return try_to_wake_up(p, TASK_NORMAL, 0); } EXPORT_SYMBOL(wake_up_process); -- cgit v0.10.2 From be33dfd19e00630213001a98056f376fb5145807 Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Mon, 19 Sep 2011 14:51:14 -0700 Subject: preempt-rt: Convert arm boot_lock to raw The arm boot_lock is used by the secondary processor startup code. The locking task is the idle thread, which has idle->sched_class == &idle_sched_class. idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the lock, the attempt to wake it when the lock becomes available will fail: try_to_wake_up() ... activate_task() enqueue_task() p->sched_class->enqueue_task(rq, p, flags) Fix by converting boot_lock to a raw spin lock. Signed-off-by: Frank Rowand Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com Signed-off-by: Thomas Gleixner diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 58b43e6..f56f767 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -73,7 +73,7 @@ static void __iomem *scu_base_addr(void) return (void __iomem *)(S5P_VA_SCU); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void exynos_secondary_init(unsigned int cpu) { @@ -86,8 +86,8 @@ static void exynos_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -99,7 +99,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -128,7 +128,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) if (timeout == 0) { printk(KERN_ERR "cpu1 power enable failed"); - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return -ETIMEDOUT; } } @@ -167,7 +167,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c index 3f06edc..fc09a04 100644 --- a/arch/arm/mach-msm/platsmp.c +++ b/arch/arm/mach-msm/platsmp.c @@ -30,7 +30,7 @@ extern void msm_secondary_startup(void); -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static inline int get_core_count(void) { @@ -50,8 +50,8 @@ static void msm_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. 
*/ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static void prepare_cold_cpu(unsigned int cpu) @@ -88,7 +88,7 @@ static int msm_boot_secondary(unsigned int cpu, struct task_struct *idle) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -122,7 +122,7 @@ static int msm_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 8912110..5969da3 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -44,7 +44,7 @@ u16 pm44xx_errata; /* SCU base address */ static void __iomem *scu_base; -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); void __iomem *omap4_get_scu_base(void) { @@ -68,8 +68,8 @@ static void omap4_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -83,7 +83,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * Update the AuxCoreBoot0 with boot state for secondary core. @@ -160,7 +160,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) * Now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return 0; } diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c index 3dbcb1a..42837dc4 100644 --- a/arch/arm/mach-prima2/platsmp.c +++ b/arch/arm/mach-prima2/platsmp.c @@ -23,7 +23,7 @@ static void __iomem *scu_base; static void __iomem *rsc_base; -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static struct map_desc scu_io_desc __initdata = { .length = SZ_4K, @@ -56,8 +56,8 @@ static void sirfsoc_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static struct of_device_id rsc_ids[] = { @@ -95,7 +95,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) /* make sure write buffer is drained */ mb(); - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -128,7 +128,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? 
-ENOSYS : 0; } diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c index 5c4a198..33dc270 100644 --- a/arch/arm/mach-spear/platsmp.c +++ b/arch/arm/mach-spear/platsmp.c @@ -20,7 +20,7 @@ #include #include "generic.h" -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void __iomem *scu_base = IOMEM(VA_SCU_BASE); @@ -36,8 +36,8 @@ static void spear13xx_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -48,7 +48,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -75,7 +75,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c index dce50d9..c05b764 100644 --- a/arch/arm/mach-sti/platsmp.c +++ b/arch/arm/mach-sti/platsmp.c @@ -35,7 +35,7 @@ static void write_pen_release(int val) outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); void sti_secondary_init(unsigned int cpu) { @@ -50,8 +50,8 @@ void sti_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -62,7 +62,7 @@ int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -93,7 +93,7 @@ int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 1f296e7..eeb5916 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c @@ -52,7 +52,7 @@ static void __iomem *scu_base_addr(void) return NULL; } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void ux500_secondary_init(unsigned int cpu) { @@ -65,8 +65,8 @@ static void ux500_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. 
*/ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -77,7 +77,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -98,7 +98,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c index 39895d8..b2e0858 100644 --- a/arch/arm/plat-versatile/platsmp.c +++ b/arch/arm/plat-versatile/platsmp.c @@ -31,7 +31,7 @@ static void write_pen_release(int val) outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); void versatile_secondary_init(unsigned int cpu) { @@ -44,8 +44,8 @@ void versatile_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -56,7 +56,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * This is really belt and braces; we hold unintended secondary @@ -86,7 +86,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } -- cgit v0.10.2 From b0101260ebd73bf052ad4870417f363fc3e0dd70 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:29:20 -0500 Subject: posix-timers: Prevent broadcast signals Posix timers should not send broadcast signals and kernel only signals. Prevent it. Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 424c2d4..68af2dc 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -497,6 +497,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) static struct pid *good_sigevent(sigevent_t * event) { struct task_struct *rtn = current->group_leader; + int sig = event->sigev_signo; if ((event->sigev_notify & SIGEV_THREAD_ID ) && (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || @@ -505,7 +506,8 @@ static struct pid *good_sigevent(sigevent_t * event) return NULL; if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && - ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) + (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || + sig_kernel_coredump(sig))) return NULL; return task_pid(rtn); -- cgit v0.10.2 From 4fc142158128b576d5d598de0ee31b61c1a93ae0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:56 -0500 Subject: signals: Allow rt tasks to cache one sigqueue struct To avoid allocation allow rt tasks to cache one sigqueue struct in task struct. 
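In rough outline (a condensed sketch, not the literal hunks; the real helpers in the diff below are get_task_cache() and put_task_cache()): task_struct gains a single-slot ->sigqueue_cache which is claimed and returned with cmpxchg(), and the allocation fast path tries that slot before falling back to the slab:

	/* Sketch: try the per-task cache first, then the slab */
	q = get_task_cache(t);
	if (!q)
		q = kmem_cache_alloc(sigqueue_cachep, flags);

On free, an rt task parks the sigqueue back in its cache instead of returning it to the slab, so the next signal delivery to that task does not need to allocate.
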
Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index 230094d..4d58a49 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1204,6 +1204,7 @@ struct task_struct { /* signal handlers */ struct signal_struct *signal; struct sighand_struct *sighand; + struct sigqueue *sigqueue_cache; sigset_t blocked, real_blocked; sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ diff --git a/include/linux/signal.h b/include/linux/signal.h index 2ac423b..1414eb2 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -226,6 +226,7 @@ static inline void init_sigpending(struct sigpending *sig) } extern void flush_sigqueue(struct sigpending *queue); +extern void flush_task_sigqueue(struct task_struct *tsk); /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/kernel/exit.c b/kernel/exit.c index a949819..7493b32 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -145,7 +145,7 @@ static void __exit_signal(struct task_struct *tsk) * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. */ - flush_sigqueue(&tsk->pending); + flush_task_sigqueue(tsk); tsk->sighand = NULL; spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c index 2bc18dd..c0174b8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1235,6 +1235,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); + p->sigqueue_cache = NULL; p->utime = p->stime = p->gtime = 0; p->utimescaled = p->stimescaled = 0; diff --git a/kernel/signal.c b/kernel/signal.c index 8ed7287..ed06b65 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -349,13 +350,45 @@ static bool task_participate_group_stop(struct task_struct *task) return false; } +#ifdef __HAVE_ARCH_CMPXCHG +static inline struct sigqueue *get_task_cache(struct task_struct *t) +{ + struct sigqueue *q = t->sigqueue_cache; + + if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) + return NULL; + return q; +} + +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) +{ + if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) + return 0; + return 1; +} + +#else + +static inline struct sigqueue *get_task_cache(struct task_struct *t) +{ + return NULL; +} + +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) +{ + return 1; +} + +#endif + /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an * appropriate lock must be held to stop the target task from exiting */ static struct sigqueue * -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) +__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, + int override_rlimit, int fromslab) { struct sigqueue *q = NULL; struct user_struct *user; @@ -372,7 +405,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi if (override_rlimit || atomic_read(&user->sigpending) <= task_rlimit(t, RLIMIT_SIGPENDING)) { - q = kmem_cache_alloc(sigqueue_cachep, flags); + if (!fromslab) + q = get_task_cache(t); + if (!q) + q = kmem_cache_alloc(sigqueue_cachep, flags); } else { print_dropped_signal(sig); } @@ -389,6 +425,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi return 
q; } +static struct sigqueue * +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, + int override_rlimit) +{ + return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); +} + static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) @@ -398,6 +441,21 @@ static void __sigqueue_free(struct sigqueue *q) kmem_cache_free(sigqueue_cachep, q); } +static void sigqueue_free_current(struct sigqueue *q) +{ + struct user_struct *up; + + if (q->flags & SIGQUEUE_PREALLOC) + return; + + up = q->user; + if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { + atomic_dec(&up->sigpending); + free_uid(up); + } else + __sigqueue_free(q); +} + void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; @@ -411,6 +469,21 @@ void flush_sigqueue(struct sigpending *queue) } /* + * Called from __exit_signal. Flush tsk->pending and + * tsk->sigqueue_cache + */ +void flush_task_sigqueue(struct task_struct *tsk) +{ + struct sigqueue *q; + + flush_sigqueue(&tsk->pending); + + q = get_task_cache(tsk); + if (q) + kmem_cache_free(sigqueue_cachep, q); +} + +/* * Flush all pending signals for a task. */ void __flush_signals(struct task_struct *t) @@ -562,7 +635,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); - __sigqueue_free(first); + sigqueue_free_current(first); } else { /* * Ok, it wasn't in the queue. This must be @@ -608,6 +681,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { int signr; + WARN_ON_ONCE(tsk != current); + /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ @@ -1547,7 +1622,8 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { - struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); + /* Preallocated sigqueue objects always from the slabcache ! */ + struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); if (q) q->flags |= SIGQUEUE_PREALLOC; -- cgit v0.10.2 From 546472740ca0a79528dacaba0a2307e5cc70000e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 9 Apr 2014 19:18:38 -0500 Subject: signal/x86: Delay calling signals in atomic On x86_64 we must disable preemption before we enable interrupts for stack faults, int3 and debugging, because the current task is using a per CPU debug stack defined by the IST. If we schedule out, another task can come in and use the same stack and cause the stack to be corrupted and crash the kernel on return. When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and one of these is the spin lock used in signal handling. Some of the debug code (int3) causes do_trap() to send a signal. This function calls a spin lock that has been converted to a mutex and has the possibility to sleep. If this happens, the above issues with the corrupted stack is possible. Instead of calling the signal right away, for PREEMPT_RT and x86_64, the signal information is stored on the stacks task_struct and TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume code will send the signal when preemption is enabled. [ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT_FULL to ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. 
] Cc: stable-rt@vger.kernel.org Signed-off-by: Oleg Nesterov Signed-off-by: Steven Rostedt Signed-off-by: Thomas Gleixner diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 35e67a4..6ec0792 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -23,6 +23,19 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; +/* + * Because some traps use the IST stack, we must keep preemption + * disabled while calling do_trap(), but do_trap() may call + * force_sig_info() which will grab the signal spin_locks for the + * task, which in PREEMPT_RT_FULL are mutexes. By defining + * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set + * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the + * trap. + */ +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) +#define ARCH_RT_DELAYS_SIGNAL_SEND +#endif + #ifndef CONFIG_COMPAT typedef sigset_t compat_sigset_t; #endif diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 9e5de68..ecfe089 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -739,6 +739,14 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) mce_notify_process(); #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND + if (unlikely(current->forced_info.si_signo)) { + struct task_struct *t = current; + force_sig_info(t->forced_info.si_signo, &t->forced_info, t); + t->forced_info.si_signo = 0; + } +#endif + if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); diff --git a/include/linux/sched.h b/include/linux/sched.h index 4d58a49..9e254d8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1209,6 +1209,10 @@ struct task_struct { sigset_t blocked, real_blocked; sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ struct sigpending pending; +#ifdef CONFIG_PREEMPT_RT_FULL + /* TODO: move me into ->restart_block ? */ + struct siginfo forced_info; +#endif unsigned long sas_ss_sp; size_t sas_ss_size; diff --git a/kernel/signal.c b/kernel/signal.c index ed06b65..e644f50 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1305,8 +1305,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. */ -int -force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +static int +do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) { unsigned long int flags; int ret, blocked, ignored; @@ -1331,6 +1331,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) return ret; } +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +{ +/* + * On some archs, PREEMPT_RT has to delay sending a signal from a trap + * since it can not enable preemption, and the signal code's spin_locks + * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will + * send the signal on exit of the trap. 
+ */ +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND + if (in_atomic()) { + if (WARN_ON_ONCE(t != current)) + return 0; + if (WARN_ON_ONCE(t->forced_info.si_signo)) + return 0; + + if (is_si_special(info)) { + WARN_ON_ONCE(info != SEND_SIG_PRIV); + t->forced_info.si_signo = sig; + t->forced_info.si_errno = 0; + t->forced_info.si_code = SI_KERNEL; + t->forced_info.si_pid = 0; + t->forced_info.si_uid = 0; + } else { + t->forced_info = *info; + } + + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); + return 0; + } +#endif + return do_force_sig_info(sig, info, t); +} + /* * Nuke all other threads in the group. */ -- cgit v0.10.2 From fc3f526992ba9e07ef2da7ba2388c372f3f99647 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:30 -0500 Subject: drivers: random: Reduce preempt disabled region No need to keep preemption disabled across the whole function. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/char/random.c b/drivers/char/random.c index 7a744d3..02172d2 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -673,9 +673,12 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) preempt_disable(); /* if over the trickle threshold, use only 1 in 4096 samples */ if (input_pool.entropy_count > trickle_thresh && - ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) - goto out; + ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) { + preempt_enable(); + return; + } + preempt_enable(); sample.jiffies = jiffies; sample.cycles = random_get_entropy(); sample.num = num; @@ -716,8 +719,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) credit_entropy_bits(&input_pool, min_t(int, fls(delta>>1), 11)); } -out: - preempt_enable(); } void add_input_randomness(unsigned int type, unsigned int code, -- cgit v0.10.2 From 2100732a1d388da6b884c745a21a1ff24c12ba96 Mon Sep 17 00:00:00 2001 From: Benedikt Spranger Date: Sat, 6 Mar 2010 17:47:10 +0100 Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Setup and remove the interrupt handler in clock event mode selection. This avoids calling the (shared) interrupt handler when the device is not used. Signed-off-by: Benedikt Spranger Signed-off-by: Thomas Gleixner [bigeasy: redo the patch with NR_IRQS_LEGACY which is probably required since commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6. Patch based on what Sami Pietikäinen suggested]. 
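In outline (an illustrative sketch only; the two AT91 timer variants patched below differ slightly in which modes install and remove the handler): the clock event mode switch attaches the shared handler when the device becomes periodic and drops it again when the device is shut down or unused, e.g. for the PIT case:

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* attach the (shared) handler only while the clockevent is in use */
		setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
		break;
	default:
		break;
	}
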
Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index f607deb..35f7b26 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -134,6 +134,7 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev) break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: + remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); case CLOCK_EVT_MODE_RESUME: irqmask = 0; break; diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c index bb39232..1c4c487 100644 --- a/arch/arm/mach-at91/at91sam926x_time.c +++ b/arch/arm/mach-at91/at91sam926x_time.c @@ -77,7 +77,7 @@ static struct clocksource pit_clk = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; - +static struct irqaction at91sam926x_pit_irq; /* * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) */ @@ -86,6 +86,8 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: + /* Set up irq handler */ + setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); /* update clocksource counter */ pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR)); pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN @@ -98,6 +100,7 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) case CLOCK_EVT_MODE_UNUSED: /* disable irq, leaving the clocksource active */ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); + remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); break; case CLOCK_EVT_MODE_RESUME: break; -- cgit v0.10.2 From 57ed8d0a9c804b2ab7998a70db3a291c5bcc174e Mon Sep 17 00:00:00 2001 From: Benedikt Spranger Date: Mon, 8 Mar 2010 18:57:04 +0100 Subject: clocksource: TCLIB: Allow higher clock rates for clock events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As default the TCLIB uses the 32KiHz base clock rate for clock events. Add a compile time selection to allow higher clock resulution. (fixed up by Sami Pietikäinen ) Signed-off-by: Benedikt Spranger Signed-off-by: Thomas Gleixner diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 8a61872..a00dfaf 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -23,8 +23,7 @@ * this 32 bit free-running counter. the second channel is not used. * * - The third channel may be used to provide a 16-bit clockevent - * source, used in either periodic or oneshot mode. This runs - * at 32 KiHZ, and can handle delays of up to two seconds. + * source, used in either periodic or oneshot mode. * * A boot clocksource and clockevent source are also currently needed, * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so @@ -74,6 +73,7 @@ static struct clocksource clksrc = { struct tc_clkevt_device { struct clock_event_device clkevt; struct clk *clk; + u32 freq; void __iomem *regs; }; @@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt) return container_of(clkevt, struct tc_clkevt_device, clkevt); } -/* For now, we always use the 32K clock ... this optimizes for NO_HZ, - * because using one of the divided clocks would usually mean the - * tick rate can never be less than several dozen Hz (vs 0.5 Hz). - * - * A divided clock could be good for high resolution timers, since - * 30.5 usec resolution can seem "low". 
- */ static u32 timer_clock; static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) @@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) case CLOCK_EVT_MODE_PERIODIC: clk_enable(tcd->clk); - /* slow clock, count up to RC, then irq and restart */ + /* count up to RC, then irq and restart */ __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); - __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); + __raw_writel((tcd->freq + HZ/2)/HZ, + tcaddr + ATMEL_TC_REG(2, RC)); /* Enable clock and interrupts on RC compare */ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); @@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) case CLOCK_EVT_MODE_ONESHOT: clk_enable(tcd->clk); - /* slow clock, count up to RC, then irq and stop */ + /* count up to RC, then irq and stop */ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); @@ -157,8 +151,12 @@ static struct tc_clkevt_device clkevt = { .name = "tc_clkevt", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK /* Should be lower than at91rm9200's system timer */ .rating = 125, +#else + .rating = 200, +#endif .set_next_event = tc_next_event, .set_mode = tc_mode, }, @@ -184,8 +182,9 @@ static struct irqaction tc_irqaction = { .handler = ch2_irq, }; -static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) +static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) { + unsigned divisor = atmel_tc_divisors[divisor_idx]; struct clk *t2_clk = tc->clk[2]; int irq = tc->irq[2]; @@ -193,11 +192,15 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) clkevt.clk = t2_clk; tc_irqaction.dev_id = &clkevt; - timer_clock = clk32k_divisor_idx; + timer_clock = divisor_idx; + if (!divisor) + clkevt.freq = 32768; + else + clkevt.freq = clk_get_rate(t2_clk) / divisor; clkevt.clkevt.cpumask = cpumask_of(0); - clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); + clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); setup_irq(irq, &tc_irqaction); } @@ -322,8 +325,11 @@ static int __init tcb_clksrc_init(void) clocksource_register_hz(&clksrc, divided_rate); /* channel 2: periodic and oneshot timer support */ +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK setup_clkevents(tc, clk32k_divisor_idx); - +#else + setup_clkevents(tc, best_divisor_idx); +#endif return 0; } arch_initcall(tcb_clksrc_init); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 8dacd4c..bc68085 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -78,8 +78,7 @@ config ATMEL_TCB_CLKSRC are combined to make a single 32-bit timer. When GENERIC_CLOCKEVENTS is defined, the third timer channel - may be used as a clock event device supporting oneshot mode - (delays of up to two seconds) based on the 32 KiHz clock. + may be used as a clock event device supporting oneshot mode. config ATMEL_TCB_CLKSRC_BLOCK int @@ -93,6 +92,15 @@ config ATMEL_TCB_CLKSRC_BLOCK TC can be used for other purposes, such as PWM generation and interval timing. +config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + bool "TC Block use 32 KiHz clock" + depends on ATMEL_TCB_CLKSRC + default y + help + Select this to use 32 KiHz base clock rate as TC block clock + source for clock events. 
+ + config DUMMY_IRQ tristate "Dummy IRQ handler" default n -- cgit v0.10.2 From b189a901e2d12a6dfbb5a3ec1af0279d4fcdb747 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:18 -0500 Subject: drivers/net: tulip_remove_one needs to call pci_disable_device() Otherwise the device is not completely shut down. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 4e8cfa2..7565b99 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev) pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); + pci_disable_device(pdev); pci_set_drvdata (pdev, NULL); /* pci_power_off (pdev, -1); */ -- cgit v0.10.2 From 6ebefa18828dae29742463c1ed3f59b16bbfdc1d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:24 -0500 Subject: drivers/net: Use disable_irq_nosync() in 8139too Use disable_irq_nosync() instead of disable_irq() as this might be called in atomic context with netpoll. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 3ccedeb..fefe07b 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2213,7 +2213,7 @@ static void rtl8139_poll_controller(struct net_device *dev) struct rtl8139_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; - disable_irq(irq); + disable_irq_nosync(irq); rtl8139_interrupt(irq, dev); enable_irq(irq); } -- cgit v0.10.2 From 60f0b2748306e45a2dd3ea6a42d0136439793664 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:37 -0500 Subject: mm: Prepare decoupling the page fault disabling logic Add a pagefault_disabled variable to task_struct to allow decoupling the pagefault-disabled logic from the preempt count. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index 9e254d8..5f0c31c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1249,6 +1249,7 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif + int pagefault_disabled; #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 5ca0951..9414a1b 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -6,37 +6,10 @@ /* * These routines enable/disable the pagefault handler in that - * it will not take any locks and go straight to the fixup table. - * - * They have great resemblance to the preempt_disable/enable calls - * and in fact they are identical; this is because currently there is - * no other way to make the pagefault handlers do this. So we do - * disable preemption but we don't necessarily care about that. + * it will not take any MM locks and go straight to the fixup table. */ -static inline void pagefault_disable(void) -{ - inc_preempt_count(); - /* - * make sure to have issued the store before a pagefault - * can hit. - */ - barrier(); -} - -static inline void pagefault_enable(void) -{ - /* - * make sure to issue those last loads/stores before enabling - * the pagefault handler again. - */ - barrier(); - dec_preempt_count(); - /* - * make sure we do.. 
- */ - barrier(); - preempt_check_resched(); -} +extern void pagefault_disable(void); +extern void pagefault_enable(void); #ifndef ARCH_HAS_NOCACHE_UACCESS diff --git a/kernel/fork.c b/kernel/fork.c index c0174b8..a171a7f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1297,6 +1297,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->hardirq_context = 0; p->softirq_context = 0; #endif + p->pagefault_disabled = 0; #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; diff --git a/mm/memory.c b/mm/memory.c index 22e67a2..7ad7e6d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3742,6 +3742,35 @@ unlock: return 0; } +void pagefault_disable(void) +{ + inc_preempt_count(); + current->pagefault_disabled++; + /* + * make sure to have issued the store before a pagefault + * can hit. + */ + barrier(); +} +EXPORT_SYMBOL(pagefault_disable); + +void pagefault_enable(void) +{ + /* + * make sure to issue those last loads/stores before enabling + * the pagefault handler again. + */ + barrier(); + current->pagefault_disabled--; + dec_preempt_count(); + /* + * make sure we do.. + */ + barrier(); + preempt_check_resched(); +} +EXPORT_SYMBOL(pagefault_enable); + /* * By the time we get here, we already hold the mm semaphore */ -- cgit v0.10.2 From e2d5d7daf459acde7c718ba4f30bbb00a66c5c1d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 17 Mar 2011 11:32:28 +0100 Subject: mm: Fixup all fault handlers to check current->pagefault_disable Necessary for decoupling pagefault disable from preempt count. Signed-off-by: Thomas Gleixner diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 98838a0..4d6b120 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -107,7 +107,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* If we're in an interrupt context, or have no user context, we must not take the fault. */ - if (!mm || in_atomic()) + if (!mm || in_atomic() || current->pagefault_disabled) goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index eb8830a..9e92a6c 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -277,7 +277,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; if (user_mode(regs)) diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 0eca933..541d108 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) * If we're in an interrupt or have no user context, we must * not take the fault... */ - if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) + if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) || + current->pagefault_disabled) goto no_context; local_irq_enable(); diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 1790f22..69fe7c7 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -113,7 +113,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs, * user context, we must not take the fault. 
*/ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; if (user_mode(regs)) diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index 9a66372..de6a23e 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c @@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; if (user_mode(__frame)) diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 7225dad..aae5478 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re /* * If we're in an interrupt or have no user context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; #ifdef CONFIG_VIRTUAL_MEM_MAP diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index e9c6a80..d351a65 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, * If we're in an interrupt or have no user context or are running in an * atomic region then we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled goto bad_area_nosemaphore; if (error_code & ACE_USERMODE) diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index eb1d61f..55607e3 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; if (user_mode(regs)) diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index fa4cf52..21d96de 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) is_write = 0; - if (unlikely(in_atomic() || !mm)) { + if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { if (kernel_mode(regs)) goto bad_area_nosemaphore; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index becc42b..5494999 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -89,7 +89,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto bad_area_nosemaphore; if (user_mode(regs)) diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 3516cbd..4886c2d 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 0293588..68b56fe 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -177,7 +177,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, int fault; unsigned int flags; - if (in_atomic()) + if (in_atomic() || current->pagefault_disabled) goto no_context; tsk = current; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 51ab9e7..41230cd 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, if (!arch_irq_disabled_regs(regs)) local_irq_enable(); - if (in_atomic() || mm == NULL) { + if (in_atomic() || mm == NULL || current->pagefault_disabled) { if (!user_mode(regs)) { rc = SIGSEGV; goto bail; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fc66792..a4742e9 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -296,7 +296,8 @@ static inline int do_exception(struct pt_regs *regs, int access) * user context. */ fault = VM_FAULT_BADCONTEXT; - if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) + if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || + tsk->pagefault_disabled)) goto out; address = trans_exc_code & __FAIL_ADDR_MASK; @@ -442,7 +443,8 @@ void __kprobes do_asce_exception(struct pt_regs *regs) clear_tsk_thread_flag(current, TIF_PER_TRAP); trans_exc_code = regs->int_parm_long; - if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) + if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || + current->pagefault_disabled())) goto no_context; down_read(&mm->mmap_sem); diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 52238983..98384f7 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c @@ -73,7 +73,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto bad_area_nosemaphore; if (user_mode(regs)) diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 541dc61..a2aa4ff 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -438,7 +438,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ - if (unlikely(in_atomic() || !mm)) { + if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { bad_area_nosemaphore(regs, error_code, address); return; } diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 59dbd46..7d51c05 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -199,7 +199,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 2ebec26..1747cde 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) + if (in_atomic() || !mm || current->pagefault_enabled) goto intr_or_no_mm; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 6c05712..d28fe5a 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -357,7 +357,7 @@ static int handle_page_fault(struct pt_regs *regs, * If we're in an interrupt, have no user context or are running in an * atomic region then we must not take the fault. */ - if (in_atomic() || !mm) { + if (in_atomic() || !mm || current->pagefault_disabled) { vma = NULL; /* happy compiler */ goto bad_area_nosemaphore; } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 5c3aef7..9e9ad49 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -38,7 +38,7 @@ int handle_page_fault(unsigned long address, unsigned long ip, * If the fault was during atomic operation, don't take the fault, just * fail. */ - if (in_atomic()) + if (in_atomic() || current->pagefault_disabled) goto out_nosemaphore; if (is_user) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 5b90bbcad9..f250a04 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1098,7 +1098,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ - if (unlikely(in_atomic() || !mm)) { + if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { bad_area_nosemaphore(regs, error_code, address); return; } diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 70fa7bc..904c479 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) /* If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm) { + if (in_atomic() || !mm || current->pagefault_disabled) { bad_page_fault(regs, address, SIGSEGV); return; } -- cgit v0.10.2 From 10e9aed2338e85748f227947969df7dd614a9301 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Aug 2011 15:31:31 +0200 Subject: mm: pagefault_disabled() Wrap the test for pagefault_disabled() into a helper, this allows us to remove the need for current->pagefault_disabled on !-rt kernels. Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 4d6b120..ee01270 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -107,7 +107,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* If we're in an interrupt context, or have no user context, we must not take the fault. 
*/ - if (!mm || in_atomic() || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 9e92a6c..b40d4ba 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -277,7 +277,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto no_context; if (user_mode(regs)) diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 541d108..25920d2 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -81,8 +81,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) * If we're in an interrupt or have no user context, we must * not take the fault... */ - if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) || - current->pagefault_disabled) + if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled()) goto no_context; local_irq_enable(); diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 69fe7c7..281e859 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -113,7 +113,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs, * user context, we must not take the fault. */ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto no_context; if (user_mode(regs)) diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index de6a23e..8d9fc16 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c @@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto no_context; if (user_mode(__frame)) diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index aae5478..164db10 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re /* * If we're in an interrupt or have no user context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto no_context; #ifdef CONFIG_VIRTUAL_MEM_MAP diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index d351a65..ccb6797 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, * If we're in an interrupt or have no user context or are running in an * atomic region then we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled + if (!mm || pagefault_disabled()) goto bad_area_nosemaphore; if (error_code & ACE_USERMODE) diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 55607e3..76bee37 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto no_context; if (user_mode(regs)) diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 21d96de..13d6b07 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) is_write = 0; - if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + if (unlikely(!mm || pagefault_disabled())) { if (kernel_mode(regs)) goto bad_area_nosemaphore; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 5494999..6a492c0 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -89,7 +89,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto bad_area_nosemaphore; if (user_mode(regs)) diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 4886c2d..8bd4425 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto no_context; if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 68b56fe..f16f57b 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -177,7 +177,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, int fault; unsigned int flags; - if (in_atomic() || current->pagefault_disabled) + if (pagefault_disabled()) goto no_context; tsk = current; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 41230cd..45aa26e 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, if (!arch_irq_disabled_regs(regs)) local_irq_enable(); - if (in_atomic() || mm == NULL || current->pagefault_disabled) { + if (in_atomic() || mm == NULL || pagefault_disabled()) { if (!user_mode(regs)) { rc = SIGSEGV; goto bail; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index a4742e9..6b0efce 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -296,8 +296,8 @@ static inline int do_exception(struct pt_regs *regs, int access) * user context. 
*/ fault = VM_FAULT_BADCONTEXT; - if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || - tsk->pagefault_disabled)) + if (unlikely(!user_space_fault(trans_exc_code) || + !mm || pagefault_disabled())) goto out; address = trans_exc_code & __FAIL_ADDR_MASK; @@ -443,8 +443,8 @@ void __kprobes do_asce_exception(struct pt_regs *regs) clear_tsk_thread_flag(current, TIF_PER_TRAP); trans_exc_code = regs->int_parm_long; - if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || - current->pagefault_disabled())) + if (unlikely(!user_space_fault(trans_exc_code) || !mm || + pagefault_disabled())) goto no_context; down_read(&mm->mmap_sem); diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 98384f7..35d339b 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c @@ -73,7 +73,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto bad_area_nosemaphore; if (user_mode(regs)) diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index a2aa4ff..6589138 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -438,7 +438,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ - if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + if (unlikely(!mm || pagefault_disabled())) { bad_area_nosemaphore(regs, error_code, address); return; } diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 7d51c05..2eaca28 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -199,7 +199,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled) + if (!mm || pagefault_disabled()) goto no_context; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 1747cde..a1d35e2 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) * If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_enabled) + if (!mm || pagefault_disabled()) goto intr_or_no_mm; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index d28fe5a..40f30ac 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -357,7 +357,7 @@ static int handle_page_fault(struct pt_regs *regs, * If we're in an interrupt, have no user context or are running in an * atomic region then we must not take the fault. */ - if (in_atomic() || !mm || current->pagefault_disabled) { + if (!mm || pagefault_disabled()) { vma = NULL; /* happy compiler */ goto bad_area_nosemaphore; } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 9e9ad49..100a278 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -38,7 +38,7 @@ int handle_page_fault(unsigned long address, unsigned long ip, * If the fault was during atomic operation, don't take the fault, just * fail. 
*/ - if (in_atomic() || current->pagefault_disabled) + if (pagefault_disabled()) goto out_nosemaphore; if (is_user) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index f250a04..b5c8e37 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1098,7 +1098,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ - if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + if (unlikely(!mm || pagefault_disabled())) { bad_area_nosemaphore(regs, error_code, address); return; } diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 904c479..ff5c9c7 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) /* If we're in an interrupt or have no user * context, we must not take the fault.. */ - if (in_atomic() || !mm || current->pagefault_disabled) { + if (!mm || pagefault_disabled()) { bad_page_fault(regs, address, SIGSEGV); return; } diff --git a/include/linux/sched.h b/include/linux/sched.h index 5f0c31c..85556ee 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -52,6 +52,7 @@ struct sched_param { #include #include #include +#include #include @@ -1249,7 +1250,9 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif +#ifdef CONFIG_PREEMPT_RT_FULL int pagefault_disabled; +#endif #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; @@ -1435,6 +1438,17 @@ static inline void set_numabalancing_state(bool enabled) } #endif +#ifdef CONFIG_PREEMPT_RT_FULL +static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } +#else +static inline bool cur_pf_disabled(void) { return false; } +#endif + +static inline bool pagefault_disabled(void) +{ + return in_atomic() || cur_pf_disabled(); +} + static inline struct pid *task_pid(struct task_struct *task) { return task->pids[PIDTYPE_PID].pid; diff --git a/kernel/fork.c b/kernel/fork.c index a171a7f..afe158e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1297,7 +1297,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->hardirq_context = 0; p->softirq_context = 0; #endif +#ifdef CONFIG_PREEMPT_RT_FULL p->pagefault_disabled = 0; +#endif #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; -- cgit v0.10.2 From 83c263774996f1c363f49a5dc211bbc98b18157d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Aug 2011 17:16:58 +0200 Subject: mm: raw_pagefault_disable Adding migrate_disable() to pagefault_disable() to preserve the per-cpu thing for kmap_atomic might not have been the best of choices. But short of adding preempt_disable/migrate_disable foo all over the kmap code it still seems the best way. It does however yield the below borkage as well as wreck !-rt builds since !-rt does rely on pagefault_disable() not preempting. So fix all that up by adding raw_pagefault_disable(). [] warn_slowpath_common+0x85/0x9d [] warn_slowpath_fmt+0x46/0x48 [] ? _raw_spin_lock+0x6c/0x73 [] ? watchdog_overflow_callback+0x9b/0xd0 [] watchdog_overflow_callback+0xb7/0xd0 [] __perf_event_overflow+0x11c/0x1fe [] ? perf_event_update_userpage+0x149/0x151 [] ? perf_event_task_disable+0x7c/0x7c [] perf_event_overflow+0x14/0x16 [] x86_pmu_handle_irq+0xcb/0x108 [] perf_event_nmi_handler+0x46/0x91 [] notifier_call_chain+0x79/0xa6 [] __atomic_notifier_call_chain+0x66/0x98 [] ? 
notifier_call_chain+0xa6/0xa6 [] atomic_notifier_call_chain+0x14/0x16 [] notify_die+0x2e/0x30 [] do_nmi+0x7e/0x22b [] nmi+0x1a/0x2c [] ? sub_preempt_count+0x4b/0xaa <> [] delay_tsc+0xac/0xd1 [] __delay+0xf/0x11 [] do_raw_spin_lock+0xd2/0x13c [] _raw_spin_lock_irqsave+0x6b/0x85 [] ? task_rq_lock+0x35/0x8d [] task_rq_lock+0x35/0x8d [] migrate_disable+0x65/0x12c [] pagefault_disable+0xe/0x1f [] dump_trace+0x21f/0x2e2 [] show_trace_log_lvl+0x54/0x5d [] show_trace+0x15/0x17 [] dump_stack+0x77/0x80 [] spin_bug+0x9c/0xa3 [] ? task_rq_lock+0x50/0x8d [] do_raw_spin_lock+0x47/0x13c [] _raw_spin_lock+0x60/0x73 [] ? task_rq_lock+0x50/0x8d [] task_rq_lock+0x50/0x8d [] migrate_disable+0x65/0x12c [] pagefault_disable+0xe/0x1f [] dump_trace+0x21f/0x2e2 [] save_stack_trace+0x2f/0x4c [] save_trace+0x3f/0xaf [] mark_lock+0x228/0x530 [] __lock_acquire+0x662/0x1812 [] ? native_sched_clock+0x37/0x6d [] ? trace_hardirqs_off_caller+0x1f/0x99 [] ? sched_rt_period_timer+0xbd/0x218 [] lock_acquire+0x145/0x18a [] ? sched_rt_period_timer+0xbd/0x218 [] _raw_spin_lock+0x40/0x73 [] ? sched_rt_period_timer+0xbd/0x218 [] sched_rt_period_timer+0xbd/0x218 [] __run_hrtimer+0x1e4/0x347 [] ? can_migrate_task.clone.82+0x14a/0x14a [] hrtimer_interrupt+0xee/0x1d6 [] ? add_preempt_count+0xae/0xb2 [] smp_apic_timer_interrupt+0x85/0x98 [] apic_timer_interrupt+0x13/0x20 Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 9414a1b..44b3751 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -8,8 +8,34 @@ * These routines enable/disable the pagefault handler in that * it will not take any MM locks and go straight to the fixup table. */ +static inline void raw_pagefault_disable(void) +{ + inc_preempt_count(); + barrier(); +} + +static inline void raw_pagefault_enable(void) +{ + barrier(); + dec_preempt_count(); + barrier(); + preempt_check_resched(); +} + +#ifndef CONFIG_PREEMPT_RT_FULL +static inline void pagefault_disable(void) +{ + raw_pagefault_disable(); +} + +static inline void pagefault_enable(void) +{ + raw_pagefault_enable(); +} +#else extern void pagefault_disable(void); extern void pagefault_enable(void); +#endif #ifndef ARCH_HAS_NOCACHE_UACCESS @@ -50,9 +76,9 @@ static inline unsigned long __copy_from_user_nocache(void *to, mm_segment_t old_fs = get_fs(); \ \ set_fs(KERNEL_DS); \ - pagefault_disable(); \ + raw_pagefault_disable(); \ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ - pagefault_enable(); \ + raw_pagefault_enable(); \ set_fs(old_fs); \ ret; \ }) diff --git a/mm/memory.c b/mm/memory.c index 7ad7e6d..efe0d86 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3742,6 +3742,7 @@ unlock: return 0; } +#ifdef CONFIG_PREEMPT_RT_FULL void pagefault_disable(void) { inc_preempt_count(); @@ -3770,6 +3771,7 @@ void pagefault_enable(void) preempt_check_resched(); } EXPORT_SYMBOL(pagefault_enable); +#endif /* * By the time we get here, we already hold the mm semaphore -- cgit v0.10.2 From 42a23ca0e54f39d71e84539203189430aa1f1726 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 17 Jun 2011 18:56:24 +0200 Subject: filemap-fix-up.patch Cc: Peter Zijlstra Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/n/tip-m6yuzd6ul717hlnl2gj6p3ou@git.kernel.org diff --git a/mm/filemap.c b/mm/filemap.c index ae4846f..3d2d39a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1976,7 +1976,7 @@ size_t iov_iter_copy_from_user_atomic(struct page 
*page, char *kaddr; size_t copied; - BUG_ON(!in_atomic()); + BUG_ON(!pagefault_disabled()); kaddr = kmap_atomic(page); if (likely(i->nr_segs == 1)) { int left; -- cgit v0.10.2 From 875e6cc6106b93960edc0241748315a3fcde6b66 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 25 Jul 2009 22:06:27 +0200 Subject: mm: Remove preempt count from pagefault disable/enable Now that all users are cleaned up, we can remove the preemption count. Signed-off-by: Thomas Gleixner diff --git a/mm/memory.c b/mm/memory.c index efe0d86..f342221 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3745,7 +3745,6 @@ unlock: #ifdef CONFIG_PREEMPT_RT_FULL void pagefault_disable(void) { - inc_preempt_count(); current->pagefault_disabled++; /* * make sure to have issued the store before a pagefault @@ -3763,12 +3762,6 @@ void pagefault_enable(void) */ barrier(); current->pagefault_disabled--; - dec_preempt_count(); - /* - * make sure we do.. - */ - barrier(); - preempt_check_resched(); } EXPORT_SYMBOL(pagefault_enable); #endif -- cgit v0.10.2 From 9761c77c68f2aa54d23a1e76c73667af330e8379 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 15 Jul 2010 10:29:00 +0200 Subject: suspend: Prevent might sleep splats timekeeping suspend/resume calls read_persistent_clock() which takes rtc_lock. That results in might sleep warnings because at that point we run with interrupts disabled. We cannot convert rtc_lock to a raw spinlock as that would trigger other might sleep warnings. As a temporary workaround we disable the might sleep warnings by setting system_state to SYSTEM_SUSPEND before calling sysdev_suspend() and restoring it to SYSTEM_RUNNING after sysdev_resume(). Needs to be revisited. Signed-off-by: Thomas Gleixner diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 672ddc4..c34e608 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -412,6 +412,7 @@ extern enum system_states { SYSTEM_HALT, SYSTEM_POWER_OFF, SYSTEM_RESTART, + SYSTEM_SUSPEND, } system_state; #define TAINT_PROPRIETARY_MODULE 0 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 0121dab..d26958b 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -275,6 +275,8 @@ static int create_image(int platform_mode) local_irq_disable(); + system_state = SYSTEM_SUSPEND; + error = syscore_suspend(); if (error) { printk(KERN_ERR "PM: Some system devices failed to power down, " @@ -302,6 +304,7 @@ static int create_image(int platform_mode) syscore_resume(); Enable_irqs: + system_state = SYSTEM_RUNNING; local_irq_enable(); Enable_cpus: @@ -427,6 +430,7 @@ static int resume_target_kernel(bool platform_mode) goto Enable_cpus; local_irq_disable(); + system_state = SYSTEM_SUSPEND; error = syscore_suspend(); if (error) @@ -460,6 +464,7 @@ static int resume_target_kernel(bool platform_mode) syscore_resume(); Enable_irqs: + system_state = SYSTEM_RUNNING; local_irq_enable(); Enable_cpus: @@ -542,6 +547,7 @@ int hibernation_platform_enter(void) goto Platform_finish; local_irq_disable(); + system_state = SYSTEM_SUSPEND; syscore_suspend(); if (pm_wakeup_pending()) { error = -EAGAIN; @@ -554,6 +560,7 @@ int hibernation_platform_enter(void) Power_up: syscore_resume(); + system_state = SYSTEM_RUNNING; local_irq_enable(); enable_nonboot_cpus(); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 62ee437..e6703bb 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -218,6 +218,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled()); + system_state = SYSTEM_SUSPEND; + error = syscore_suspend(); if (!error) { *wakeup = pm_wakeup_pending(); @@ -228,6 +230,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) syscore_resume(); } + system_state = SYSTEM_RUNNING; + arch_suspend_enable_irqs(); BUG_ON(irqs_disabled()); -- cgit v0.10.2 From 854e577fc4e1746cb3ed7158cbd76bdc8739c73b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 21 Jun 2011 11:22:36 +0200 Subject: list-add-list-last-entry.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/list.h b/include/linux/list.h index f4d8a2f..885943e 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -373,6 +373,17 @@ static inline void list_splice_tail_init(struct list_head *list, (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) /** + * list_last_entry - get the last element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_last_entry(ptr, type, member) \ + list_entry((ptr)->prev, type, member) + +/** * list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. -- cgit v0.10.2 From d92740e1f41209a45bfa7fdaae1444f42b0f4091 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 21 Jun 2011 11:24:35 +0200 Subject: mm-page-alloc-use-list-last-entry.patch Signed-off-by: Thomas Gleixner diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 06f8479..2bcb648 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -678,7 +678,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, do { int mt; /* migratetype of the to-be-freed page */ - page = list_entry(list->prev, struct page, lru); + page = list_last_entry(list, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); mt = get_freepage_migratetype(page); -- cgit v0.10.2 From ae82594ce0f27e8b20c74f79a140d4f9b2094cd1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 15 Jul 2011 21:24:27 +0200 Subject: rwsem-inlcude-fix.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/pid.h b/include/linux/pid.h index 23705a5..2cc64b7 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -2,6 +2,7 @@ #define _LINUX_PID_H #include +#include enum pid_type { -- cgit v0.10.2 From 46f1329b1ac176ff892efde06b7295150ac7c9c7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 14 Nov 2011 10:52:34 +0100 Subject: sysctl-include-fix.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 14a8ff2..b15655f 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -25,6 +25,7 @@ #include #include #include +#include #include /* For the /proc/sys support */ -- cgit v0.10.2 From 53f872b981b8bb687b16770754d178712a1c4e89 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Jun 2011 10:59:58 +0200 Subject: net-flip-lock-dep-thingy.patch ======================================================= [ INFO: possible circular locking dependency detected ] 3.0.0-rc3+ #26 ------------------------------------------------------- ip/1104 is trying to acquire lock: (local_softirq_lock){+.+...}, at: [] __local_lock+0x25/0x68 but task is already holding lock: (sk_lock-AF_INET){+.+...}, at: [] lock_sock+0x10/0x12 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (sk_lock-AF_INET){+.+...}: [] lock_acquire+0x103/0x12e [] lock_sock_nested+0x82/0x92 [] lock_sock+0x10/0x12 [] tcp_close+0x1b/0x355 [] inet_release+0xc3/0xcd [] sock_release+0x1f/0x74 [] sock_close+0x27/0x2b [] fput+0x11d/0x1e3 [] filp_close+0x70/0x7b [] sys_close+0xf8/0x13d [] system_call_fastpath+0x16/0x1b -> #0 (local_softirq_lock){+.+...}: [] __lock_acquire+0xacc/0xdc8 [] lock_acquire+0x103/0x12e [] _raw_spin_lock+0x3b/0x4a [] __local_lock+0x25/0x68 [] local_bh_disable+0x36/0x3b [] _raw_write_lock_bh+0x16/0x4f [] tcp_close+0x159/0x355 [] inet_release+0xc3/0xcd [] sock_release+0x1f/0x74 [] sock_close+0x27/0x2b [] fput+0x11d/0x1e3 [] filp_close+0x70/0x7b [] sys_close+0xf8/0x13d [] system_call_fastpath+0x16/0x1b other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(sk_lock-AF_INET); lock(local_softirq_lock); lock(sk_lock-AF_INET); lock(local_softirq_lock); *** DEADLOCK *** 1 lock held by ip/1104: #0: (sk_lock-AF_INET){+.+...}, at: [] lock_sock+0x10/0x12 stack backtrace: Pid: 1104, comm: ip Not tainted 3.0.0-rc3+ #26 Call Trace: [] print_circular_bug+0x1f8/0x209 [] __lock_acquire+0xacc/0xdc8 [] ? __local_lock+0x25/0x68 [] lock_acquire+0x103/0x12e [] ? __local_lock+0x25/0x68 [] ? get_parent_ip+0x11/0x41 [] _raw_spin_lock+0x3b/0x4a [] ? __local_lock+0x25/0x68 [] ? get_parent_ip+0x28/0x41 [] __local_lock+0x25/0x68 [] local_bh_disable+0x36/0x3b [] ? lock_sock+0x10/0x12 [] _raw_write_lock_bh+0x16/0x4f [] tcp_close+0x159/0x355 [] inet_release+0xc3/0xcd [] sock_release+0x1f/0x74 [] sock_close+0x27/0x2b [] fput+0x11d/0x1e3 [] filp_close+0x70/0x7b [] sys_close+0xf8/0x13d [] system_call_fastpath+0x16/0x1b Signed-off-by: Thomas Gleixner diff --git a/net/core/sock.c b/net/core/sock.c index 831a0d0..e57770c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2339,12 +2339,11 @@ void lock_sock_nested(struct sock *sk, int subclass) if (sk->sk_lock.owned) __lock_sock(sk); sk->sk_lock.owned = 1; - spin_unlock(&sk->sk_lock.slock); + spin_unlock_bh(&sk->sk_lock.slock); /* * The sk_lock has mutex_lock() semantics here: */ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); - local_bh_enable(); } EXPORT_SYMBOL(lock_sock_nested); -- cgit v0.10.2 From ecddaceecbbe4bf4d081e8ee25b11a60a9fd6c59 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 5 Mar 2014 00:49:47 +0100 Subject: net: sched: dev_deactivate_many(): use msleep(1) instead of yield() to wait for outstanding qdisc_run calls On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50 (by default). If a high priority userspace process tries to shut down a busy network interface it might spin in a yield loop waiting for the device to become idle. With the interrupt thread having a lower priority than the looping process it might never be scheduled and so result in a deadlock on UP systems. 
With Magic SysRq the following backtrace can be produced: > test_app R running 0 174 168 0x00000000 > [] (__schedule+0x220/0x3fc) from [] (preempt_schedule_irq+0x48/0x80) > [] (preempt_schedule_irq+0x48/0x80) from [] (svc_preempt+0x8/0x20) > [] (svc_preempt+0x8/0x20) from [] (local_bh_enable+0x18/0x88) > [] (local_bh_enable+0x18/0x88) from [] (dev_deactivate_many+0x220/0x264) > [] (dev_deactivate_many+0x220/0x264) from [] (__dev_close_many+0x64/0xd4) > [] (__dev_close_many+0x64/0xd4) from [] (__dev_close+0x28/0x3c) > [] (__dev_close+0x28/0x3c) from [] (__dev_change_flags+0x88/0x130) > [] (__dev_change_flags+0x88/0x130) from [] (dev_change_flags+0x10/0x48) > [] (dev_change_flags+0x10/0x48) from [] (do_setlink+0x370/0x7ec) > [] (do_setlink+0x370/0x7ec) from [] (rtnl_newlink+0x2b4/0x450) > [] (rtnl_newlink+0x2b4/0x450) from [] (rtnetlink_rcv_msg+0x158/0x1f4) > [] (rtnetlink_rcv_msg+0x158/0x1f4) from [] (netlink_rcv_skb+0xac/0xc0) > [] (netlink_rcv_skb+0xac/0xc0) from [] (rtnetlink_rcv+0x18/0x24) > [] (rtnetlink_rcv+0x18/0x24) from [] (netlink_unicast+0x13c/0x198) > [] (netlink_unicast+0x13c/0x198) from [] (netlink_sendmsg+0x264/0x2e0) > [] (netlink_sendmsg+0x264/0x2e0) from [] (sock_sendmsg+0x78/0x98) > [] (sock_sendmsg+0x78/0x98) from [] (___sys_sendmsg.part.25+0x268/0x278) > [] (___sys_sendmsg.part.25+0x268/0x278) from [] (__sys_sendmsg+0x48/0x78) > [] (__sys_sendmsg+0x48/0x78) from [] (ret_fast_syscall+0x0/0x2c) This patch works around the problem by replacing yield() by msleep(1), giving the interrupt thread time to finish, similar to other changes contained in the rt patch set. Using wait_for_completion() instead would probably be a better solution. Cc: stable-rt@vger.kernel.org Signed-off-by: Marc Kleine-Budde Signed-off-by: Sebastian Andrzej Siewior diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a74e278..1dcebc9 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -850,7 +850,7 @@ void dev_deactivate_many(struct list_head *head) /* Wait for outstanding qdisc_run calls. 
*/ list_for_each_entry(dev, head, unreg_list) while (some_qdisc_is_busy(dev)) - yield(); + msleep(1); } void dev_deactivate(struct net_device *dev) -- cgit v0.10.2 From 464a53236299da8574baaa31e965dceba1f45c2b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Jun 2011 15:44:15 +0200 Subject: softirq-thread-do-softirq.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 5e865b5..1f6367a 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -374,6 +374,7 @@ struct softirq_action asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); +static inline void thread_do_softirq(void) { do_softirq(); } extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); diff --git a/net/core/dev.c b/net/core/dev.c index b327975..f9a892b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3270,7 +3270,7 @@ int netif_rx_ni(struct sk_buff *skb) preempt_disable(); err = netif_rx(skb); if (local_softirq_pending()) - do_softirq(); + thread_do_softirq(); preempt_enable(); return err; -- cgit v0.10.2 From f8fd59c989224650313ad7569dde4edb6ad3129a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Jun 2011 15:46:49 +0200 Subject: softirq-split-out-code.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/softirq.c b/kernel/softirq.c index 52f6c54..c647956 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -77,6 +77,34 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } +static void handle_pending_softirqs(u32 pending, int cpu) +{ + struct softirq_action *h = softirq_vec; + unsigned int prev_count = preempt_count(); + + local_irq_enable(); + for (; pending; h++, pending >>= 1) { + unsigned int vec_nr = h - softirq_vec; + + if (!(pending & 1)) + continue; + + kstat_incr_softirqs_this_cpu(vec_nr); + trace_softirq_entry(vec_nr); + h->action(h); + trace_softirq_exit(vec_nr); + if (unlikely(prev_count != preempt_count())) { + pr_err( +"huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n", + vec_nr, softirq_to_name[vec_nr], h->action, + prev_count, (unsigned int) preempt_count()); + preempt_count() = prev_count; + } + rcu_bh_qs(cpu); + } + local_irq_disable(); +} + /* * preempt_count and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving @@ -252,7 +280,6 @@ asmlinkage void __do_softirq(void) unsigned long end = jiffies + MAX_SOFTIRQ_TIME; unsigned long old_flags = current->flags; int max_restart = MAX_SOFTIRQ_RESTART; - struct softirq_action *h; __u32 pending; int cpu; @@ -274,36 +301,7 @@ restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); - local_irq_enable(); - - h = softirq_vec; - - do { - if (pending & 1) { - unsigned int vec_nr = h - softirq_vec; - int prev_count = preempt_count(); - - kstat_incr_softirqs_this_cpu(vec_nr); - - trace_softirq_entry(vec_nr); - h->action(h); - trace_softirq_exit(vec_nr); - if (unlikely(prev_count != preempt_count())) { - printk(KERN_ERR "huh, entered softirq %u %s %p" - "with preempt_count %08x," - " exited with %08x?\n", vec_nr, - softirq_to_name[vec_nr], h->action, - prev_count, preempt_count()); - preempt_count() = prev_count; - } - - rcu_bh_qs(cpu); - } - h++; - pending >>= 1; - } while (pending); - - local_irq_disable(); + handle_pending_softirqs(pending, cpu); pending = local_softirq_pending(); if (pending) { -- cgit v0.10.2 From 
8a04f7be7702e31db4eb061f4186476e3625ac65 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:27 -0500 Subject: x86: Do not unmask io_apic when interrupt is in progress With threaded interrupts we might see an interrupt in progress on migration. Do not unmask it when this is the case. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e63a5bd..5816e6a 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2396,7 +2396,8 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) { /* If we are moving the irq we need to mask it */ - if (unlikely(irqd_is_setaffinity_pending(data))) { + if (unlikely(irqd_is_setaffinity_pending(data) && + !irqd_irq_inprogress(data))) { mask_ioapic(cfg); return true; } -- cgit v0.10.2 From 47dc7448154ce49de9f8dc663208b94ba4729218 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 9 Apr 2014 19:18:44 -0500 Subject: x86: Do not disable preemption in int3 on 32bit Preemption must be disabled before enabling interrupts in do_trap on x86_64 because the stack in use for int3 and debug is a per CPU stack set by the IST. But 32bit does not have an IST and the stack still belongs to the current task and there is no problem in scheduling out the task. Keep preemption enabled on X86_32 when enabling interrupts for do_trap(). The name of the function is changed from preempt_conditional_sti/cli() to conditional_sti/cli_ist(), to annotate that this function is used when the stack is on the IST. Cc: stable-rt@vger.kernel.org Signed-off-by: Steven Rostedt Signed-off-by: Thomas Gleixner diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 8c8093b..6663bb5 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -86,9 +86,21 @@ static inline void conditional_sti(struct pt_regs *regs) local_irq_enable(); } -static inline void preempt_conditional_sti(struct pt_regs *regs) +static inline void conditional_sti_ist(struct pt_regs *regs) { +#ifdef CONFIG_X86_64 + /* + * X86_64 uses a per CPU stack on the IST for certain traps + * like int3. The task can not be preempted when using one + * of these stacks, thus preemption must be disabled, otherwise + * the stack can be corrupted if the task is scheduled out, + * and another task comes in and uses this stack. + * + * On x86_32 the task keeps its own stack and it is OK if the + * task schedules out.
+ */ inc_preempt_count(); +#endif if (regs->flags & X86_EFLAGS_IF) local_irq_enable(); } @@ -99,11 +111,13 @@ static inline void conditional_cli(struct pt_regs *regs) local_irq_disable(); } -static inline void preempt_conditional_cli(struct pt_regs *regs) +static inline void conditional_cli_ist(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_disable(); +#ifdef CONFIG_X86_64 dec_preempt_count(); +#endif } static int __kprobes @@ -236,9 +250,9 @@ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) prev_state = exception_enter(); if (notify_die(DIE_TRAP, "stack segment", regs, error_code, X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { - preempt_conditional_sti(regs); + conditional_sti_ist(regs); do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); - preempt_conditional_cli(regs); + conditional_cli_ist(regs); } exception_exit(prev_state); } @@ -347,9 +361,9 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co * as we may switch to the interrupt stack. */ debug_stack_usage_inc(); - preempt_conditional_sti(regs); + conditional_sti_ist(regs); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); - preempt_conditional_cli(regs); + conditional_cli_ist(regs); debug_stack_usage_dec(); exit: exception_exit(prev_state); @@ -455,12 +469,12 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_inc(); /* It's safe to allow irq's after DR6 has been saved */ - preempt_conditional_sti(regs); + conditional_sti_ist(regs); if (regs->flags & X86_VM_MASK) { handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, X86_TRAP_DB); - preempt_conditional_cli(regs); + conditional_cli_ist(regs); debug_stack_usage_dec(); goto exit; } @@ -480,7 +494,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) si_code = get_si_code(tsk->thread.debugreg6); if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) send_sigtrap(tsk, regs, error_code, si_code); - preempt_conditional_cli(regs); + conditional_cli_ist(regs); debug_stack_usage_dec(); exit: -- cgit v0.10.2 From 9e2dba555698b6334ebef35d6803d6e2eab65810 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Dec 2011 00:07:16 +0100 Subject: pci: Use __wake_up_all_locked pci_unblock_user_cfg_access() The waitqueue is protected by the pci_lock, so we can just avoid to lock the waitqueue lock itself. That prevents the might_sleep()/scheduling while atomic problem on RT Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 0857ca9..8bb624e 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -465,7 +465,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev) WARN_ON(!dev->block_cfg_access); dev->block_cfg_access = 0; - wake_up_all(&pci_cfg_wait); + wake_up_all_locked(&pci_cfg_wait); raw_spin_unlock_irqrestore(&pci_lock, flags); } EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); -- cgit v0.10.2 From ca629a5e1742e029d594ce89a3962bd4d1ce39e4 Mon Sep 17 00:00:00 2001 From: Carsten Emde Date: Tue, 19 Jul 2011 14:03:41 +0100 Subject: latency-hist.patch This patch provides a recording mechanism to store data of potential sources of system latencies. The recordings separately determine the latency caused by a delayed timer expiration, by a delayed wakeup of the related user space program and by the sum of both. The histograms can be enabled and reset individually. The data are accessible via the debug filesystem. 
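As a quick, illustrative usage sketch (assuming debugfs is mounted under /sys/kernel/debug; file names as introduced by the documentation below):

cd /sys/kernel/debug/tracing/latency_hist
echo 1 > enable/wakeup        # start recording wakeup latencies
# ... run the workload of interest for a while ...
grep -v " 0$" wakeup/CPU0     # show only buckets that actually contain samples
echo 1 > wakeup/reset         # clear the per-CPU wakeup histogram again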
For details please consult Documentation/trace/histograms.txt. Signed-off-by: Carsten Emde Signed-off-by: Thomas Gleixner diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt new file mode 100644 index 0000000..6f2aeab --- /dev/null +++ b/Documentation/trace/histograms.txt @@ -0,0 +1,186 @@ + Using the Linux Kernel Latency Histograms + + +This document gives a short explanation how to enable, configure and use +latency histograms. Latency histograms are primarily relevant in the +context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT) +and are used in the quality management of the Linux real-time +capabilities. + + +* Purpose of latency histograms + +A latency histogram continuously accumulates the frequencies of latency +data. There are two types of histograms +- potential sources of latencies +- effective latencies + + +* Potential sources of latencies + +Potential sources of latencies are code segments where interrupts, +preemption or both are disabled (aka critical sections). To create +histograms of potential sources of latency, the kernel stores the time +stamp at the start of a critical section, determines the time elapsed +when the end of the section is reached, and increments the frequency +counter of that latency value - irrespective of whether any concurrently +running process is affected by latency or not. +- Configuration items (in the Kernel hacking/Tracers submenu) + CONFIG_INTERRUPT_OFF_LATENCY + CONFIG_PREEMPT_OFF_LATENCY + + +* Effective latencies + +Effective latencies are actually occuring during wakeup of a process. To +determine effective latencies, the kernel stores the time stamp when a +process is scheduled to be woken up, and determines the duration of the +wakeup time shortly before control is passed over to this process. Note +that the apparent latency in user space may be somewhat longer, since the +process may be interrupted after control is passed over to it but before +the execution in user space takes place. Simply measuring the interval +between enqueuing and wakeup may also not appropriate in cases when a +process is scheduled as a result of a timer expiration. The timer may have +missed its deadline, e.g. due to disabled interrupts, but this latency +would not be registered. Therefore, the offsets of missed timers are +recorded in a separate histogram. If both wakeup latency and missed timer +offsets are configured and enabled, a third histogram may be enabled that +records the overall latency as a sum of the timer latency, if any, and the +wakeup latency. This histogram is called "timerandwakeup". +- Configuration items (in the Kernel hacking/Tracers submenu) + CONFIG_WAKEUP_LATENCY + CONFIG_MISSED_TIMER_OFSETS + + +* Usage + +The interface to the administration of the latency histograms is located +in the debugfs file system. To mount it, either enter + +mount -t sysfs nodev /sys +mount -t debugfs nodev /sys/kernel/debug + +from shell command line level, or add + +nodev /sys sysfs defaults 0 0 +nodev /sys/kernel/debug debugfs defaults 0 0 + +to the file /etc/fstab. All latency histogram related files are then +available in the directory /sys/kernel/debug/tracing/latency_hist. A +particular histogram type is enabled by writing non-zero to the related +variable in the /sys/kernel/debug/tracing/latency_hist/enable directory. +Select "preemptirqsoff" for the histograms of potential sources of +latencies and "wakeup" for histograms of effective latencies etc. 
The +histogram data - one per CPU - are available in the files + +/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx +/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx +/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx +/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx +/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx +/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx +/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx + +The histograms are reset by writing non-zero to the file "reset" in a +particular latency directory. To reset all latency data, use + +#!/bin/sh + +TRACINGDIR=/sys/kernel/debug/tracing +HISTDIR=$TRACINGDIR/latency_hist + +if test -d $HISTDIR +then + cd $HISTDIR + for i in `find . | grep /reset$` + do + echo 1 >$i + done +fi + + +* Data format + +Latency data are stored with a resolution of one microsecond. The +maximum latency is 10,240 microseconds. The data are only valid, if the +overflow register is empty. Every output line contains the latency in +microseconds in the first row and the number of samples in the second +row. To display only lines with a positive latency count, use, for +example, + +grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0 + +#Minimum latency: 0 microseconds. +#Average latency: 0 microseconds. +#Maximum latency: 25 microseconds. +#Total samples: 3104770694 +#There are 0 samples greater or equal than 10240 microseconds +#usecs samples + 0 2984486876 + 1 49843506 + 2 58219047 + 3 5348126 + 4 2187960 + 5 3388262 + 6 959289 + 7 208294 + 8 40420 + 9 4485 + 10 14918 + 11 18340 + 12 25052 + 13 19455 + 14 5602 + 15 969 + 16 47 + 17 18 + 18 14 + 19 1 + 20 3 + 21 2 + 22 5 + 23 2 + 25 1 + + +* Wakeup latency of a selected process + +To only collect wakeup latency data of a particular process, write the +PID of the requested process to + +/sys/kernel/debug/tracing/latency_hist/wakeup/pid + +PIDs are not considered, if this variable is set to 0. + + +* Details of the process with the highest wakeup latency so far + +Selected data of the process that suffered from the highest wakeup +latency that occurred in a particular CPU are available in the file + +/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx. + +In addition, other relevant system data at the time when the +latency occurred are given. + +The format of the data is (all in one line): + () \ +<- + +The value of is only relevant in the combined timer +and wakeup latency recording. In the wakeup recording, it is +always 0, in the missed_timer_offsets recording, it is the same +as . + +When retrospectively searching for the origin of a latency and +tracing was not enabled, it may be helpful to know the name and +some basic data of the task that (finally) was switching to the +late real-tlme task. In addition to the victim's data, also the +data of the possible culprit are therefore displayed after the +"<-" symbol. + +Finally, the timestamp of the time when the latency occurred +in . after the most recent system boot +is provided. + +These data are also reset when the wakeup histogram is reset. 
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index d19a5c2..f00fcfc 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -111,6 +111,9 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + ktime_t praecox; +#endif #ifdef CONFIG_TIMER_STATS int start_pid; void *start_site; diff --git a/include/linux/sched.h b/include/linux/sched.h index 85556ee..705fbb0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1398,6 +1398,12 @@ struct task_struct { unsigned long trace; /* bitmask and counter of trace recursion */ unsigned long trace_recursion; +#ifdef CONFIG_WAKEUP_LATENCY_HIST + u64 preempt_timestamp_hist; +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + long timer_offset; +#endif +#endif #endif /* CONFIG_TRACING */ #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ struct memcg_batch_info { diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h new file mode 100644 index 0000000..6122e42 --- /dev/null +++ b/include/trace/events/hist.h @@ -0,0 +1,72 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hist + +#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HIST_H + +#include "latency_hist.h" +#include + +#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) +#define trace_preemptirqsoff_hist(a, b) +#else +TRACE_EVENT(preemptirqsoff_hist, + + TP_PROTO(int reason, int starthist), + + TP_ARGS(reason, starthist), + + TP_STRUCT__entry( + __field(int, reason) + __field(int, starthist) + ), + + TP_fast_assign( + __entry->reason = reason; + __entry->starthist = starthist; + ), + + TP_printk("reason=%s starthist=%s", getaction(__entry->reason), + __entry->starthist ? "start" : "stop") +); +#endif + +#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST +#define trace_hrtimer_interrupt(a, b, c, d) +#else +TRACE_EVENT(hrtimer_interrupt, + + TP_PROTO(int cpu, long long offset, struct task_struct *curr, + struct task_struct *task), + + TP_ARGS(cpu, offset, curr, task), + + TP_STRUCT__entry( + __field(int, cpu) + __field(long long, offset) + __array(char, ccomm, TASK_COMM_LEN) + __field(int, cprio) + __array(char, tcomm, TASK_COMM_LEN) + __field(int, tprio) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->offset = offset; + memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); + __entry->cprio = curr->prio; + memcpy(__entry->tcomm, task != NULL ? task->comm : "", + task != NULL ? TASK_COMM_LEN : 7); + __entry->tprio = task != NULL ? 
task->prio : -1; + ), + + TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", + __entry->cpu, __entry->offset, __entry->ccomm, + __entry->cprio, __entry->tcomm, __entry->tprio) +); +#endif + +#endif /* _TRACE_HIST_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h new file mode 100644 index 0000000..d3f2fbd --- /dev/null +++ b/include/trace/events/latency_hist.h @@ -0,0 +1,29 @@ +#ifndef _LATENCY_HIST_H +#define _LATENCY_HIST_H + +enum hist_action { + IRQS_ON, + PREEMPT_ON, + TRACE_STOP, + IRQS_OFF, + PREEMPT_OFF, + TRACE_START, +}; + +static char *actions[] = { + "IRQS_ON", + "PREEMPT_ON", + "TRACE_STOP", + "IRQS_OFF", + "PREEMPT_OFF", + "TRACE_START", +}; + +static inline char *getaction(int action) +{ + if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) + return actions[action]; + return "unknown"; +} + +#endif /* _LATENCY_HIST_H */ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 383319b..7c4ee68 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -52,6 +52,7 @@ #include #include +#include /* * The timer bases: @@ -997,6 +998,17 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, #endif } +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + { + ktime_t now = new_base->get_time(); + + if (ktime_to_ns(tim) < ktime_to_ns(now)) + timer->praecox = now; + else + timer->praecox = ktime_set(0, 0); + } +#endif + hrtimer_set_expires_range_ns(timer, tim, delta_ns); timer_stats_hrtimer_set_start_info(timer); @@ -1275,6 +1287,8 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) #ifdef CONFIG_HIGH_RES_TIMERS +static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); + /* * High resolution timer interrupt * Called with interrupts disabled @@ -1318,6 +1332,15 @@ retry: timer = container_of(node, struct hrtimer, node); + trace_hrtimer_interrupt(raw_smp_processor_id(), + ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ? + timer->praecox : hrtimer_get_expires(timer), + basenow)), + current, + timer->function == hrtimer_wakeup ? + container_of(timer, struct hrtimer_sleeper, + timer)->task : NULL); + /* * The immediate goal for using the softexpires is * minimizing wakeups, not running timers at the diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 015f85a..bbe95b9 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -192,6 +192,24 @@ config IRQSOFF_TRACER enabled. This option and the preempt-off timing option can be used together or separately.) +config INTERRUPT_OFF_HIST + bool "Interrupts-off Latency Histogram" + depends on IRQSOFF_TRACER + help + This option generates continuously updated histograms (one per cpu) + of the duration of time periods with interrupts disabled. The + histograms are disabled by default. To enable them, write a non-zero + number to + + /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff + + If PREEMPT_OFF_HIST is also selected, additional histograms (one + per cpu) are generated that accumulate the duration of time periods + when both interrupts and preemption are disabled. The histogram data + will be located in the debug file system at + + /sys/kernel/debug/tracing/latency_hist/irqsoff + config PREEMPT_TRACER bool "Preemption-off Latency Tracer" default n @@ -216,6 +234,24 @@ config PREEMPT_TRACER enabled. This option and the irqs-off timing option can be used together or separately.) 
+config PREEMPT_OFF_HIST + bool "Preemption-off Latency Histogram" + depends on PREEMPT_TRACER + help + This option generates continuously updated histograms (one per cpu) + of the duration of time periods with preemption disabled. The + histograms are disabled by default. To enable them, write a non-zero + number to + + /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff + + If INTERRUPT_OFF_HIST is also selected, additional histograms (one + per cpu) are generated that accumulate the duration of time periods + when both interrupts and preemption are disabled. The histogram data + will be located in the debug file system at + + /sys/kernel/debug/tracing/latency_hist/preemptoff + config SCHED_TRACER bool "Scheduling Latency Tracer" select GENERIC_TRACER @@ -226,6 +262,74 @@ config SCHED_TRACER This tracer tracks the latency of the highest priority task to be scheduled in, starting from the point it has woken up. +config WAKEUP_LATENCY_HIST + bool "Scheduling Latency Histogram" + depends on SCHED_TRACER + help + This option generates continuously updated histograms (one per cpu) + of the scheduling latency of the highest priority task. + The histograms are disabled by default. To enable them, write a + non-zero number to + + /sys/kernel/debug/tracing/latency_hist/enable/wakeup + + Two different algorithms are used, one to determine the latency of + processes that exclusively use the highest priority of the system and + another one to determine the latency of processes that share the + highest system priority with other processes. The former is used to + improve hardware and system software, the latter to optimize the + priority design of a given system. The histogram data will be + located in the debug file system at + + /sys/kernel/debug/tracing/latency_hist/wakeup + + and + + /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio + + If both Scheduling Latency Histogram and Missed Timer Offsets + Histogram are selected, additional histogram data will be collected + that contain, in addition to the wakeup latency, the timer latency, in + case the wakeup was triggered by an expired timer. These histograms + are available in the + + /sys/kernel/debug/tracing/latency_hist/timerandwakeup + + directory. They reflect the apparent interrupt and scheduling latency + and are best suitable to determine the worst-case latency of a given + system. To enable these histograms, write a non-zero number to + + /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup + +config MISSED_TIMER_OFFSETS_HIST + depends on HIGH_RES_TIMERS + select GENERIC_TRACER + bool "Missed Timer Offsets Histogram" + help + Generate a histogram of missed timer offsets in microseconds. The + histograms are disabled by default. To enable them, write a non-zero + number to + + /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets + + The histogram data will be located in the debug file system at + + /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets + + If both Scheduling Latency Histogram and Missed Timer Offsets + Histogram are selected, additional histogram data will be collected + that contain, in addition to the wakeup latency, the timer latency, in + case the wakeup was triggered by an expired timer. These histograms + are available in the + + /sys/kernel/debug/tracing/latency_hist/timerandwakeup + + directory. They reflect the apparent interrupt and scheduling latency + and are best suitable to determine the worst-case latency of a given + system. 
To enable these histograms, write a non-zero number to + + /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup + config ENABLE_DEFAULT_TRACERS bool "Trace process context switches and events" depends on !GENERIC_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index d7e2068..f5e0243 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -34,6 +34,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o +obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o +obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o +obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o +obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c new file mode 100644 index 0000000..66a69eb --- /dev/null +++ b/kernel/trace/latency_hist.c @@ -0,0 +1,1178 @@ +/* + * kernel/trace/latency_hist.c + * + * Add support for histograms of preemption-off latency and + * interrupt-off latency and wakeup latency, it depends on + * Real-Time Preemption Support. + * + * Copyright (C) 2005 MontaVista Software, Inc. + * Yi Yang + * + * Converted to work with the new latency tracer. + * Copyright (C) 2008 Red Hat, Inc. + * Steven Rostedt + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "trace.h" +#include + +#define NSECS_PER_USECS 1000L + +#define CREATE_TRACE_POINTS +#include + +enum { + IRQSOFF_LATENCY = 0, + PREEMPTOFF_LATENCY, + PREEMPTIRQSOFF_LATENCY, + WAKEUP_LATENCY, + WAKEUP_LATENCY_SHAREDPRIO, + MISSED_TIMER_OFFSETS, + TIMERANDWAKEUP_LATENCY, + MAX_LATENCY_TYPE, +}; + +#define MAX_ENTRY_NUM 10240 + +struct hist_data { + atomic_t hist_mode; /* 0 log, 1 don't log */ + long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ + long min_lat; + long max_lat; + unsigned long long below_hist_bound_samples; + unsigned long long above_hist_bound_samples; + long long accumulate_lat; + unsigned long long total_samples; + unsigned long long hist_array[MAX_ENTRY_NUM]; +}; + +struct enable_data { + int latency_type; + int enabled; +}; + +static char *latency_hist_dir_root = "latency_hist"; + +#ifdef CONFIG_INTERRUPT_OFF_HIST +static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); +static char *irqsoff_hist_dir = "irqsoff"; +static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); +static DEFINE_PER_CPU(int, hist_irqsoff_counting); +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST +static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); +static char *preemptoff_hist_dir = "preemptoff"; +static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); +static DEFINE_PER_CPU(int, hist_preemptoff_counting); +#endif + +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) +static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); +static char *preemptirqsoff_hist_dir = "preemptirqsoff"; +static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); +static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); +#endif + +#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) +static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); +static struct enable_data preemptirqsoff_enabled_data = { + .latency_type = PREEMPTIRQSOFF_LATENCY, + .enabled = 
0, +}; +#endif + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +struct maxlatproc_data { + char comm[FIELD_SIZEOF(struct task_struct, comm)]; + char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; + int pid; + int current_pid; + int prio; + int current_prio; + long latency; + long timeroffset; + cycle_t timestamp; +}; +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); +static char *wakeup_latency_hist_dir = "wakeup"; +static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; +static notrace void probe_wakeup_latency_hist_start(void *v, + struct task_struct *p, int success); +static notrace void probe_wakeup_latency_hist_stop(void *v, + struct task_struct *prev, struct task_struct *next); +static notrace void probe_sched_migrate_task(void *, + struct task_struct *task, int cpu); +static struct enable_data wakeup_latency_enabled_data = { + .latency_type = WAKEUP_LATENCY, + .enabled = 0, +}; +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); +static DEFINE_PER_CPU(struct task_struct *, wakeup_task); +static DEFINE_PER_CPU(int, wakeup_sharedprio); +static unsigned long wakeup_pid; +#endif + +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST +static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); +static char *missed_timer_offsets_dir = "missed_timer_offsets"; +static notrace void probe_hrtimer_interrupt(void *v, int cpu, + long long offset, struct task_struct *curr, struct task_struct *task); +static struct enable_data missed_timer_offsets_enabled_data = { + .latency_type = MISSED_TIMER_OFFSETS, + .enabled = 0, +}; +static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); +static unsigned long missed_timer_offsets_pid; +#endif + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); +static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; +static struct enable_data timerandwakeup_enabled_data = { + .latency_type = TIMERANDWAKEUP_LATENCY, + .enabled = 0, +}; +static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); +#endif + +void notrace latency_hist(int latency_type, int cpu, long latency, + long timeroffset, cycle_t stop, + struct task_struct *p) +{ + struct hist_data *my_hist; +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + struct maxlatproc_data *mp = NULL; +#endif + + if (!cpu_possible(cpu) || latency_type < 0 || + latency_type >= MAX_LATENCY_TYPE) + return; + + switch (latency_type) { +#ifdef CONFIG_INTERRUPT_OFF_HIST + case IRQSOFF_LATENCY: + my_hist = &per_cpu(irqsoff_hist, cpu); + break; +#endif +#ifdef CONFIG_PREEMPT_OFF_HIST + case PREEMPTOFF_LATENCY: + my_hist = &per_cpu(preemptoff_hist, cpu); + break; +#endif +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) + case PREEMPTIRQSOFF_LATENCY: + my_hist = &per_cpu(preemptirqsoff_hist, cpu); + break; +#endif +#ifdef CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + my_hist = &per_cpu(wakeup_latency_hist, cpu); + mp = &per_cpu(wakeup_maxlatproc, cpu); + break; + case WAKEUP_LATENCY_SHAREDPRIO: + my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); + mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); + break; +#endif +#ifdef 
CONFIG_MISSED_TIMER_OFFSETS_HIST + case MISSED_TIMER_OFFSETS: + my_hist = &per_cpu(missed_timer_offsets, cpu); + mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); + break; +#endif +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + case TIMERANDWAKEUP_LATENCY: + my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); + mp = &per_cpu(timerandwakeup_maxlatproc, cpu); + break; +#endif + + default: + return; + } + + latency += my_hist->offset; + + if (atomic_read(&my_hist->hist_mode) == 0) + return; + + if (latency < 0 || latency >= MAX_ENTRY_NUM) { + if (latency < 0) + my_hist->below_hist_bound_samples++; + else + my_hist->above_hist_bound_samples++; + } else + my_hist->hist_array[latency]++; + + if (unlikely(latency > my_hist->max_lat || + my_hist->min_lat == LONG_MAX)) { +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + if (latency_type == WAKEUP_LATENCY || + latency_type == WAKEUP_LATENCY_SHAREDPRIO || + latency_type == MISSED_TIMER_OFFSETS || + latency_type == TIMERANDWAKEUP_LATENCY) { + strncpy(mp->comm, p->comm, sizeof(mp->comm)); + strncpy(mp->current_comm, current->comm, + sizeof(mp->current_comm)); + mp->pid = task_pid_nr(p); + mp->current_pid = task_pid_nr(current); + mp->prio = p->prio; + mp->current_prio = current->prio; + mp->latency = latency; + mp->timeroffset = timeroffset; + mp->timestamp = stop; + } +#endif + my_hist->max_lat = latency; + } + if (unlikely(latency < my_hist->min_lat)) + my_hist->min_lat = latency; + my_hist->total_samples++; + my_hist->accumulate_lat += latency; +} + +static void *l_start(struct seq_file *m, loff_t *pos) +{ + loff_t *index_ptr = NULL; + loff_t index = *pos; + struct hist_data *my_hist = m->private; + + if (index == 0) { + char minstr[32], avgstr[32], maxstr[32]; + + atomic_dec(&my_hist->hist_mode); + + if (likely(my_hist->total_samples)) { + long avg = (long) div64_s64(my_hist->accumulate_lat, + my_hist->total_samples); + snprintf(minstr, sizeof(minstr), "%ld", + my_hist->min_lat - my_hist->offset); + snprintf(avgstr, sizeof(avgstr), "%ld", + avg - my_hist->offset); + snprintf(maxstr, sizeof(maxstr), "%ld", + my_hist->max_lat - my_hist->offset); + } else { + strcpy(minstr, ""); + strcpy(avgstr, minstr); + strcpy(maxstr, minstr); + } + + seq_printf(m, "#Minimum latency: %s microseconds\n" + "#Average latency: %s microseconds\n" + "#Maximum latency: %s microseconds\n" + "#Total samples: %llu\n" + "#There are %llu samples lower than %ld" + " microseconds.\n" + "#There are %llu samples greater or equal" + " than %ld microseconds.\n" + "#usecs\t%16s\n", + minstr, avgstr, maxstr, + my_hist->total_samples, + my_hist->below_hist_bound_samples, + -my_hist->offset, + my_hist->above_hist_bound_samples, + MAX_ENTRY_NUM - my_hist->offset, + "samples"); + } + if (index < MAX_ENTRY_NUM) { + index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); + if (index_ptr) + *index_ptr = index; + } + + return index_ptr; +} + +static void *l_next(struct seq_file *m, void *p, loff_t *pos) +{ + loff_t *index_ptr = p; + struct hist_data *my_hist = m->private; + + if (++*pos >= MAX_ENTRY_NUM) { + atomic_inc(&my_hist->hist_mode); + return NULL; + } + *index_ptr = *pos; + return index_ptr; +} + +static void l_stop(struct seq_file *m, void *p) +{ + kfree(p); +} + +static int l_show(struct seq_file *m, void *p) +{ + int index = *(loff_t *) p; + struct hist_data *my_hist = m->private; + + seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, + my_hist->hist_array[index]); + return 0; +} + +static 
const struct seq_operations latency_hist_seq_op = { + .start = l_start, + .next = l_next, + .stop = l_stop, + .show = l_show +}; + +static int latency_hist_open(struct inode *inode, struct file *file) +{ + int ret; + + ret = seq_open(file, &latency_hist_seq_op); + if (!ret) { + struct seq_file *seq = file->private_data; + seq->private = inode->i_private; + } + return ret; +} + +static const struct file_operations latency_hist_fops = { + .open = latency_hist_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static void clear_maxlatprocdata(struct maxlatproc_data *mp) +{ + mp->comm[0] = mp->current_comm[0] = '\0'; + mp->prio = mp->current_prio = mp->pid = mp->current_pid = + mp->latency = mp->timeroffset = -1; + mp->timestamp = 0; +} +#endif + +static void hist_reset(struct hist_data *hist) +{ + atomic_dec(&hist->hist_mode); + + memset(hist->hist_array, 0, sizeof(hist->hist_array)); + hist->below_hist_bound_samples = 0ULL; + hist->above_hist_bound_samples = 0ULL; + hist->min_lat = LONG_MAX; + hist->max_lat = LONG_MIN; + hist->total_samples = 0ULL; + hist->accumulate_lat = 0LL; + + atomic_inc(&hist->hist_mode); +} + +static ssize_t +latency_hist_reset(struct file *file, const char __user *a, + size_t size, loff_t *off) +{ + int cpu; + struct hist_data *hist = NULL; +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + struct maxlatproc_data *mp = NULL; +#endif + off_t latency_type = (off_t) file->private_data; + + for_each_online_cpu(cpu) { + + switch (latency_type) { +#ifdef CONFIG_PREEMPT_OFF_HIST + case PREEMPTOFF_LATENCY: + hist = &per_cpu(preemptoff_hist, cpu); + break; +#endif +#ifdef CONFIG_INTERRUPT_OFF_HIST + case IRQSOFF_LATENCY: + hist = &per_cpu(irqsoff_hist, cpu); + break; +#endif +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + case PREEMPTIRQSOFF_LATENCY: + hist = &per_cpu(preemptirqsoff_hist, cpu); + break; +#endif +#ifdef CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + hist = &per_cpu(wakeup_latency_hist, cpu); + mp = &per_cpu(wakeup_maxlatproc, cpu); + break; + case WAKEUP_LATENCY_SHAREDPRIO: + hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); + mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); + break; +#endif +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + case MISSED_TIMER_OFFSETS: + hist = &per_cpu(missed_timer_offsets, cpu); + mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); + break; +#endif +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + case TIMERANDWAKEUP_LATENCY: + hist = &per_cpu(timerandwakeup_latency_hist, cpu); + mp = &per_cpu(timerandwakeup_maxlatproc, cpu); + break; +#endif + } + + hist_reset(hist); +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + if (latency_type == WAKEUP_LATENCY || + latency_type == WAKEUP_LATENCY_SHAREDPRIO || + latency_type == MISSED_TIMER_OFFSETS || + latency_type == TIMERANDWAKEUP_LATENCY) + clear_maxlatprocdata(mp); +#endif + } + + return size; +} + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static ssize_t +show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + int r; + unsigned long *this_pid = file->private_data; + + r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t do_pid(struct file 
*file, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[64]; + unsigned long pid; + unsigned long *this_pid = file->private_data; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = '\0'; + + if (kstrtoul(buf, 10, &pid)) + return -EINVAL; + + *this_pid = pid; + + return cnt; +} +#endif + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static ssize_t +show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + int r; + struct maxlatproc_data *mp = file->private_data; + int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); + unsigned long long t; + unsigned long usecs, secs; + char *buf; + + if (mp->pid == -1 || mp->current_pid == -1) { + buf = "(none)\n"; + return simple_read_from_buffer(ubuf, cnt, ppos, buf, + strlen(buf)); + } + + buf = kmalloc(strmaxlen, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + t = ns2usecs(mp->timestamp); + usecs = do_div(t, USEC_PER_SEC); + secs = (unsigned long) t; + r = snprintf(buf, strmaxlen, + "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, + MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, + mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, + secs, usecs); + r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); + kfree(buf); + return r; +} +#endif + +static ssize_t +show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + struct enable_data *ed = file->private_data; + int r; + + r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t +do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + long enable; + struct enable_data *ed = file->private_data; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = 0; + + if (kstrtoul(buf, 10, &enable)) + return -EINVAL; + + if ((enable && ed->enabled) || (!enable && !ed->enabled)) + return cnt; + + if (enable) { + int ret; + + switch (ed->latency_type) { +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) + case PREEMPTIRQSOFF_LATENCY: + ret = register_trace_preemptirqsoff_hist( + probe_preemptirqsoff_hist, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_preemptirqsoff_hist " + "to trace_preemptirqsoff_hist\n"); + return ret; + } + break; +#endif +#ifdef CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + ret = register_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_wakeup_latency_hist_start " + "to trace_sched_wakeup\n"); + return ret; + } + ret = register_trace_sched_wakeup_new( + probe_wakeup_latency_hist_start, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_wakeup_latency_hist_start " + "to trace_sched_wakeup_new\n"); + unregister_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + return ret; + } + ret = register_trace_sched_switch( + probe_wakeup_latency_hist_stop, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_wakeup_latency_hist_stop " + "to trace_sched_switch\n"); + unregister_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_wakeup_new( + probe_wakeup_latency_hist_start, NULL); + return ret; + } + ret = register_trace_sched_migrate_task( + probe_sched_migrate_task, 
NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_sched_migrate_task " + "to trace_sched_migrate_task\n"); + unregister_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_wakeup_new( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_switch( + probe_wakeup_latency_hist_stop, NULL); + return ret; + } + break; +#endif +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + case MISSED_TIMER_OFFSETS: + ret = register_trace_hrtimer_interrupt( + probe_hrtimer_interrupt, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_hrtimer_interrupt " + "to trace_hrtimer_interrupt\n"); + return ret; + } + break; +#endif +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + case TIMERANDWAKEUP_LATENCY: + if (!wakeup_latency_enabled_data.enabled || + !missed_timer_offsets_enabled_data.enabled) + return -EINVAL; + break; +#endif + default: + break; + } + } else { + switch (ed->latency_type) { +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) + case PREEMPTIRQSOFF_LATENCY: + { + int cpu; + + unregister_trace_preemptirqsoff_hist( + probe_preemptirqsoff_hist, NULL); + for_each_online_cpu(cpu) { +#ifdef CONFIG_INTERRUPT_OFF_HIST + per_cpu(hist_irqsoff_counting, + cpu) = 0; +#endif +#ifdef CONFIG_PREEMPT_OFF_HIST + per_cpu(hist_preemptoff_counting, + cpu) = 0; +#endif +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + per_cpu(hist_preemptirqsoff_counting, + cpu) = 0; +#endif + } + } + break; +#endif +#ifdef CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + { + int cpu; + + unregister_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_wakeup_new( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_switch( + probe_wakeup_latency_hist_stop, NULL); + unregister_trace_sched_migrate_task( + probe_sched_migrate_task, NULL); + + for_each_online_cpu(cpu) { + per_cpu(wakeup_task, cpu) = NULL; + per_cpu(wakeup_sharedprio, cpu) = 0; + } + } +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + timerandwakeup_enabled_data.enabled = 0; +#endif + break; +#endif +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + case MISSED_TIMER_OFFSETS: + unregister_trace_hrtimer_interrupt( + probe_hrtimer_interrupt, NULL); +#ifdef CONFIG_WAKEUP_LATENCY_HIST + timerandwakeup_enabled_data.enabled = 0; +#endif + break; +#endif + default: + break; + } + } + ed->enabled = enable; + return cnt; +} + +static const struct file_operations latency_hist_reset_fops = { + .open = tracing_open_generic, + .write = latency_hist_reset, +}; + +static const struct file_operations enable_fops = { + .open = tracing_open_generic, + .read = show_enable, + .write = do_enable, +}; + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static const struct file_operations pid_fops = { + .open = tracing_open_generic, + .read = show_pid, + .write = do_pid, +}; + +static const struct file_operations maxlatproc_fops = { + .open = tracing_open_generic, + .read = show_maxlatproc, +}; +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) +static notrace void probe_preemptirqsoff_hist(void *v, int reason, + int starthist) +{ + int cpu = raw_smp_processor_id(); + int time_set = 0; + + if (starthist) { + cycle_t uninitialized_var(start); + + if (!preempt_count() && !irqs_disabled()) + return; + +#ifdef CONFIG_INTERRUPT_OFF_HIST + if ((reason == IRQS_OFF || reason == TRACE_START) && + 
!per_cpu(hist_irqsoff_counting, cpu)) { + per_cpu(hist_irqsoff_counting, cpu) = 1; + start = ftrace_now(cpu); + time_set++; + per_cpu(hist_irqsoff_start, cpu) = start; + } +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST + if ((reason == PREEMPT_OFF || reason == TRACE_START) && + !per_cpu(hist_preemptoff_counting, cpu)) { + per_cpu(hist_preemptoff_counting, cpu) = 1; + if (!(time_set++)) + start = ftrace_now(cpu); + per_cpu(hist_preemptoff_start, cpu) = start; + } +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + if (per_cpu(hist_irqsoff_counting, cpu) && + per_cpu(hist_preemptoff_counting, cpu) && + !per_cpu(hist_preemptirqsoff_counting, cpu)) { + per_cpu(hist_preemptirqsoff_counting, cpu) = 1; + if (!time_set) + start = ftrace_now(cpu); + per_cpu(hist_preemptirqsoff_start, cpu) = start; + } +#endif + } else { + cycle_t uninitialized_var(stop); + +#ifdef CONFIG_INTERRUPT_OFF_HIST + if ((reason == IRQS_ON || reason == TRACE_STOP) && + per_cpu(hist_irqsoff_counting, cpu)) { + cycle_t start = per_cpu(hist_irqsoff_start, cpu); + + stop = ftrace_now(cpu); + time_set++; + if (start) { + long latency = ((long) (stop - start)) / + NSECS_PER_USECS; + + latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, + stop, NULL); + } + per_cpu(hist_irqsoff_counting, cpu) = 0; + } +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST + if ((reason == PREEMPT_ON || reason == TRACE_STOP) && + per_cpu(hist_preemptoff_counting, cpu)) { + cycle_t start = per_cpu(hist_preemptoff_start, cpu); + + if (!(time_set++)) + stop = ftrace_now(cpu); + if (start) { + long latency = ((long) (stop - start)) / + NSECS_PER_USECS; + + latency_hist(PREEMPTOFF_LATENCY, cpu, latency, + 0, stop, NULL); + } + per_cpu(hist_preemptoff_counting, cpu) = 0; + } +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + if ((!per_cpu(hist_irqsoff_counting, cpu) || + !per_cpu(hist_preemptoff_counting, cpu)) && + per_cpu(hist_preemptirqsoff_counting, cpu)) { + cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); + + if (!time_set) + stop = ftrace_now(cpu); + if (start) { + long latency = ((long) (stop - start)) / + NSECS_PER_USECS; + + latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, + latency, 0, stop, NULL); + } + per_cpu(hist_preemptirqsoff_counting, cpu) = 0; + } +#endif + } +} +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST +static DEFINE_RAW_SPINLOCK(wakeup_lock); +static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, + int cpu) +{ + int old_cpu = task_cpu(task); + + if (cpu != old_cpu) { + unsigned long flags; + struct task_struct *cpu_wakeup_task; + + raw_spin_lock_irqsave(&wakeup_lock, flags); + + cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); + if (task == cpu_wakeup_task) { + put_task_struct(cpu_wakeup_task); + per_cpu(wakeup_task, old_cpu) = NULL; + cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; + get_task_struct(cpu_wakeup_task); + } + + raw_spin_unlock_irqrestore(&wakeup_lock, flags); + } +} + +static notrace void probe_wakeup_latency_hist_start(void *v, + struct task_struct *p, int success) +{ + unsigned long flags; + struct task_struct *curr = current; + int cpu = task_cpu(p); + struct task_struct *cpu_wakeup_task; + + raw_spin_lock_irqsave(&wakeup_lock, flags); + + cpu_wakeup_task = per_cpu(wakeup_task, cpu); + + if (wakeup_pid) { + if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || + p->prio == curr->prio) + per_cpu(wakeup_sharedprio, cpu) = 1; + if (likely(wakeup_pid != task_pid_nr(p))) + goto out; + } else { + if (likely(!rt_task(p)) || + 
(cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || + p->prio > curr->prio) + goto out; + if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || + p->prio == curr->prio) + per_cpu(wakeup_sharedprio, cpu) = 1; + } + + if (cpu_wakeup_task) + put_task_struct(cpu_wakeup_task); + cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; + get_task_struct(cpu_wakeup_task); + cpu_wakeup_task->preempt_timestamp_hist = + ftrace_now(raw_smp_processor_id()); +out: + raw_spin_unlock_irqrestore(&wakeup_lock, flags); +} + +static notrace void probe_wakeup_latency_hist_stop(void *v, + struct task_struct *prev, struct task_struct *next) +{ + unsigned long flags; + int cpu = task_cpu(next); + long latency; + cycle_t stop; + struct task_struct *cpu_wakeup_task; + + raw_spin_lock_irqsave(&wakeup_lock, flags); + + cpu_wakeup_task = per_cpu(wakeup_task, cpu); + + if (cpu_wakeup_task == NULL) + goto out; + + /* Already running? */ + if (unlikely(current == cpu_wakeup_task)) + goto out_reset; + + if (next != cpu_wakeup_task) { + if (next->prio < cpu_wakeup_task->prio) + goto out_reset; + + if (next->prio == cpu_wakeup_task->prio) + per_cpu(wakeup_sharedprio, cpu) = 1; + + goto out; + } + + if (current->prio == cpu_wakeup_task->prio) + per_cpu(wakeup_sharedprio, cpu) = 1; + + /* + * The task we are waiting for is about to be switched to. + * Calculate latency and store it in histogram. + */ + stop = ftrace_now(raw_smp_processor_id()); + + latency = ((long) (stop - next->preempt_timestamp_hist)) / + NSECS_PER_USECS; + + if (per_cpu(wakeup_sharedprio, cpu)) { + latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, + next); + per_cpu(wakeup_sharedprio, cpu) = 0; + } else { + latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + if (timerandwakeup_enabled_data.enabled) { + latency_hist(TIMERANDWAKEUP_LATENCY, cpu, + next->timer_offset + latency, next->timer_offset, + stop, next); + } +#endif + } + +out_reset: +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + next->timer_offset = 0; +#endif + put_task_struct(cpu_wakeup_task); + per_cpu(wakeup_task, cpu) = NULL; +out: + raw_spin_unlock_irqrestore(&wakeup_lock, flags); +} +#endif + +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST +static notrace void probe_hrtimer_interrupt(void *v, int cpu, + long long latency_ns, struct task_struct *curr, + struct task_struct *task) +{ + if (latency_ns <= 0 && task != NULL && rt_task(task) && + (task->prio < curr->prio || + (task->prio == curr->prio && + !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { + long latency; + cycle_t now; + + if (missed_timer_offsets_pid) { + if (likely(missed_timer_offsets_pid != + task_pid_nr(task))) + return; + } + + now = ftrace_now(cpu); + latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); + latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, + task); +#ifdef CONFIG_WAKEUP_LATENCY_HIST + task->timer_offset = latency; +#endif + } +} +#endif + +static __init int latency_hist_init(void) +{ + struct dentry *latency_hist_root = NULL; + struct dentry *dentry; +#ifdef CONFIG_WAKEUP_LATENCY_HIST + struct dentry *dentry_sharedprio; +#endif + struct dentry *entry; + struct dentry *enable_root; + int i = 0; + struct hist_data *my_hist; + char name[64]; + char *cpufmt = "CPU%d"; +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + char *cpufmt_maxlatproc = "max_latency-CPU%d"; + struct maxlatproc_data *mp = NULL; +#endif + + dentry = tracing_init_dentry(); + latency_hist_root = 
debugfs_create_dir(latency_hist_dir_root, dentry); + enable_root = debugfs_create_dir("enable", latency_hist_root); + +#ifdef CONFIG_INTERRUPT_OFF_HIST + dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(irqsoff_hist, i), &latency_hist_fops); + my_hist = &per_cpu(irqsoff_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + } + entry = debugfs_create_file("reset", 0644, dentry, + (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST + dentry = debugfs_create_dir(preemptoff_hist_dir, + latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(preemptoff_hist, i), &latency_hist_fops); + my_hist = &per_cpu(preemptoff_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + } + entry = debugfs_create_file("reset", 0644, dentry, + (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + dentry = debugfs_create_dir(preemptirqsoff_hist_dir, + latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); + my_hist = &per_cpu(preemptirqsoff_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + } + entry = debugfs_create_file("reset", 0644, dentry, + (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) + entry = debugfs_create_file("preemptirqsoff", 0644, + enable_root, (void *)&preemptirqsoff_enabled_data, + &enable_fops); +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST + dentry = debugfs_create_dir(wakeup_latency_hist_dir, + latency_hist_root); + dentry_sharedprio = debugfs_create_dir( + wakeup_latency_hist_dir_sharedprio, dentry); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(wakeup_latency_hist, i), + &latency_hist_fops); + my_hist = &per_cpu(wakeup_latency_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + + entry = debugfs_create_file(name, 0444, dentry_sharedprio, + &per_cpu(wakeup_latency_hist_sharedprio, i), + &latency_hist_fops); + my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + + sprintf(name, cpufmt_maxlatproc, i); + + mp = &per_cpu(wakeup_maxlatproc, i); + entry = debugfs_create_file(name, 0444, dentry, mp, + &maxlatproc_fops); + clear_maxlatprocdata(mp); + + mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); + entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, + &maxlatproc_fops); + clear_maxlatprocdata(mp); + } + entry = debugfs_create_file("pid", 0644, dentry, + (void *)&wakeup_pid, &pid_fops); + entry = debugfs_create_file("reset", 0644, dentry, + (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); + entry = debugfs_create_file("reset", 0644, dentry_sharedprio, + (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); + entry = debugfs_create_file("wakeup", 0644, + enable_root, (void *)&wakeup_latency_enabled_data, + &enable_fops); +#endif + +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + dentry = debugfs_create_dir(missed_timer_offsets_dir, + latency_hist_root); + 
for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(missed_timer_offsets, i), &latency_hist_fops); + my_hist = &per_cpu(missed_timer_offsets, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + + sprintf(name, cpufmt_maxlatproc, i); + mp = &per_cpu(missed_timer_offsets_maxlatproc, i); + entry = debugfs_create_file(name, 0444, dentry, mp, + &maxlatproc_fops); + clear_maxlatprocdata(mp); + } + entry = debugfs_create_file("pid", 0644, dentry, + (void *)&missed_timer_offsets_pid, &pid_fops); + entry = debugfs_create_file("reset", 0644, dentry, + (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); + entry = debugfs_create_file("missed_timer_offsets", 0644, + enable_root, (void *)&missed_timer_offsets_enabled_data, + &enable_fops); +#endif + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, + latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(timerandwakeup_latency_hist, i), + &latency_hist_fops); + my_hist = &per_cpu(timerandwakeup_latency_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + + sprintf(name, cpufmt_maxlatproc, i); + mp = &per_cpu(timerandwakeup_maxlatproc, i); + entry = debugfs_create_file(name, 0444, dentry, mp, + &maxlatproc_fops); + clear_maxlatprocdata(mp); + } + entry = debugfs_create_file("reset", 0644, dentry, + (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); + entry = debugfs_create_file("timerandwakeup", 0644, + enable_root, (void *)&timerandwakeup_enabled_data, + &enable_fops); +#endif + return 0; +} + +device_initcall(latency_hist_init); diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2aefbee..2f4eb37 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -17,6 +17,7 @@ #include #include "trace.h" +#include static struct trace_array *irqsoff_trace __read_mostly; static int tracer_enabled __read_mostly; @@ -439,11 +440,13 @@ void start_critical_timings(void) { if (preempt_trace() || irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + trace_preemptirqsoff_hist(TRACE_START, 1); } EXPORT_SYMBOL_GPL(start_critical_timings); void stop_critical_timings(void) { + trace_preemptirqsoff_hist(TRACE_STOP, 0); if (preempt_trace() || irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } @@ -453,6 +456,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings); #ifdef CONFIG_PROVE_LOCKING void time_hardirqs_on(unsigned long a0, unsigned long a1) { + trace_preemptirqsoff_hist(IRQS_ON, 0); if (!preempt_trace() && irq_trace()) stop_critical_timing(a0, a1); } @@ -461,6 +465,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1) { if (!preempt_trace() && irq_trace()) start_critical_timing(a0, a1); + trace_preemptirqsoff_hist(IRQS_OFF, 1); } #else /* !CONFIG_PROVE_LOCKING */ @@ -486,6 +491,7 @@ inline void print_irqtrace_events(struct task_struct *curr) */ void trace_hardirqs_on(void) { + trace_preemptirqsoff_hist(IRQS_ON, 0); if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } @@ -495,11 +501,13 @@ void trace_hardirqs_off(void) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + trace_preemptirqsoff_hist(IRQS_OFF, 1); } EXPORT_SYMBOL(trace_hardirqs_off); void trace_hardirqs_on_caller(unsigned long caller_addr) { + 
trace_preemptirqsoff_hist(IRQS_ON, 0); if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, caller_addr); } @@ -509,6 +517,7 @@ void trace_hardirqs_off_caller(unsigned long caller_addr) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, caller_addr); + trace_preemptirqsoff_hist(IRQS_OFF, 1); } EXPORT_SYMBOL(trace_hardirqs_off_caller); @@ -518,12 +527,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller); #ifdef CONFIG_PREEMPT_TRACER void trace_preempt_on(unsigned long a0, unsigned long a1) { + trace_preemptirqsoff_hist(PREEMPT_ON, 0); if (preempt_trace() && !irq_trace()) stop_critical_timing(a0, a1); } void trace_preempt_off(unsigned long a0, unsigned long a1) { + trace_preemptirqsoff_hist(PREEMPT_ON, 1); if (preempt_trace() && !irq_trace()) start_critical_timing(a0, a1); } -- cgit v0.10.2 From 216f1708a359ee9d70f3b49114c8266474c14b0c Mon Sep 17 00:00:00 2001 From: Carsten Emde Date: Tue, 19 Jul 2011 13:53:12 +0100 Subject: hwlatdetect.patch Jon Masters developed this wonderful SMI detector. For details please consult Documentation/hwlat_detector.txt. It could be ported to Linux 3.0 RT without any major change. Signed-off-by: Carsten Emde diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt new file mode 100644 index 0000000..cb61516 --- /dev/null +++ b/Documentation/hwlat_detector.txt @@ -0,0 +1,64 @@ +Introduction: +------------- + +The module hwlat_detector is a special purpose kernel module that is used to +detect large system latencies induced by the behavior of certain underlying +hardware or firmware, independent of Linux itself. The code was developed +originally to detect SMIs (System Management Interrupts) on x86 systems, +however there is nothing x86 specific about this patchset. It was +originally written for use by the "RT" patch since the Real Time +kernel is highly latency sensitive. + +SMIs are usually not serviced by the Linux kernel, which typically does not +even know that they are occuring. SMIs are instead are set up by BIOS code +and are serviced by BIOS code, usually for "critical" events such as +management of thermal sensors and fans. Sometimes though, SMIs are used for +other tasks and those tasks can spend an inordinate amount of time in the +handler (sometimes measured in milliseconds). Obviously this is a problem if +you are trying to keep event service latencies down in the microsecond range. + +The hardware latency detector works by hogging all of the cpus for configurable +amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter +for some period, then looking for gaps in the TSC data. Any gap indicates a +time when the polling was interrupted and since the machine is stopped and +interrupts turned off the only thing that could do that would be an SMI. + +Note that the SMI detector should *NEVER* be used in a production environment. +It is intended to be run manually to determine if the hardware platform has a +problem with long system firmware service routines. + +Usage: +------ + +Loading the module hwlat_detector passing the parameter "enabled=1" (or by +setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only +step required to start the hwlat_detector. It is possible to redefine the +threshold in microseconds (us) above which latency spikes will be taken +into account (parameter "threshold="). 
+ +Example: + + # modprobe hwlat_detector enabled=1 threshold=100 + +After the module is loaded, it creates a directory named "hwlat_detector" under +the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary +to have debugfs mounted, which might be on /sys/debug on your system. + +The /debug/hwlat_detector interface contains the following files: + +count - number of latency spikes observed since last reset +enable - a global enable/disable toggle (0/1), resets count +max - maximum hardware latency actually observed (usecs) +sample - a pipe from which to read current raw sample data + in the format + (can be opened O_NONBLOCK for a single sample) +threshold - minimum latency value to be considered (usecs) +width - time period to sample with CPUs held (usecs) + must be less than the total window size (enforced) +window - total period of sampling, width being inside (usecs) + +By default we will set width to 500,000 and window to 1,000,000, meaning that +we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we +observe any latencies that exceed the threshold (initially 100 usecs), +then we write to a global sample ring buffer of 8K samples, which is +consumed by reading from the "sample" (pipe) debugfs file interface. diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index bc68085..8d29ba8 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -130,6 +130,35 @@ config IBM_ASM for information on the specific driver level and support statement for your IBM server. +config HWLAT_DETECTOR + tristate "Testing module to detect hardware-induced latencies" + depends on DEBUG_FS + depends on RING_BUFFER + default m + ---help--- + A simple hardware latency detector. Use this module to detect + large latencies introduced by the behavior of the underlying + system firmware external to Linux. We do this using periodic + use of stop_machine to grab all available CPUs and measure + for unexplainable gaps in the CPU timestamp counter(s). By + default, the module is not enabled until the "enable" file + within the "hwlat_detector" debugfs directory is toggled. + + This module is often used to detect SMI (System Management + Interrupts) on x86 systems, though is not x86 specific. To + this end, we default to using a sample window of 1 second, + during which we will sample for 0.5 seconds. If an SMI or + similar event occurs during that time, it is recorded + into an 8K samples global ring buffer until retreived. + + WARNING: This software should never be enabled (it can be built + but should not be turned on after it is loaded) in a production + environment where high latencies are a concern since the + sampling mechanism actually introduces latencies for + regular tasks while the CPU(s) are being held. + + If unsure, say N + config PHANTOM tristate "Sensable PHANToM (PCI)" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index c235d5b..6bb08da40 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -53,3 +53,4 @@ obj-$(CONFIG_INTEL_MEI) += mei/ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o obj-$(CONFIG_SRAM) += sram.o +obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c new file mode 100644 index 0000000..6864f3c --- /dev/null +++ b/drivers/misc/hwlat_detector.c @@ -0,0 +1,1212 @@ +/* + * hwlat_detector.c - A simple Hardware Latency detector. 
+ * + * Use this module to detect large system latencies induced by the behavior of + * certain underlying system hardware or firmware, independent of Linux itself. + * The code was developed originally to detect the presence of SMIs on Intel + * and AMD systems, although there is no dependency upon x86 herein. + * + * The classical example usage of this module is in detecting the presence of + * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a + * somewhat special form of hardware interrupt spawned from earlier CPU debug + * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge + * LPC (or other device) to generate a special interrupt under certain + * circumstances, for example, upon expiration of a special SMI timer device, + * due to certain external thermal readings, on certain I/O address accesses, + * and other situations. An SMI hits a special CPU pin, triggers a special + * SMI mode (complete with special memory map), and the OS is unaware. + * + * Although certain hardware-inducing latencies are necessary (for example, + * a modern system often requires an SMI handler for correct thermal control + * and remote management) they can wreak havoc upon any OS-level performance + * guarantees toward low-latency, especially when the OS is not even made + * aware of the presence of these interrupts. For this reason, we need a + * somewhat brute force mechanism to detect these interrupts. In this case, + * we do it by hogging all of the CPU(s) for configurable timer intervals, + * sampling the built-in CPU timer, looking for discontiguous readings. + * + * WARNING: This implementation necessarily introduces latencies. Therefore, + * you should NEVER use this module in a production environment + * requiring any kind of low-latency performance guarantee(s). + * + * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. + * + * Includes useful feedback from Clark Williams + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ +#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ +#define U64STR_SIZE 22 /* 20 digits max */ + +#define VERSION "1.0.0" +#define BANNER "hwlat_detector: " +#define DRVNAME "hwlat_detector" +#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ +#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ +#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ + +/* Module metadata */ + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jon Masters "); +MODULE_DESCRIPTION("A simple hardware latency detector"); +MODULE_VERSION(VERSION); + +/* Module parameters */ + +static int debug; +static int enabled; +static int threshold; + +module_param(debug, int, 0); /* enable debug */ +module_param(enabled, int, 0); /* enable detector */ +module_param(threshold, int, 0); /* latency threshold */ + +/* Buffering and sampling */ + +static struct ring_buffer *ring_buffer; /* sample buffer */ +static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ +static unsigned long buf_size = BUF_SIZE_DEFAULT; +static struct task_struct *kthread; /* sampling thread */ + +/* DebugFS filesystem entries */ + +static struct dentry *debug_dir; /* debugfs directory */ +static struct dentry *debug_max; /* maximum TSC delta */ +static struct dentry *debug_count; /* total detect count */ +static struct dentry *debug_sample_width; /* sample width us */ +static struct dentry *debug_sample_window; /* sample window us */ +static struct dentry *debug_sample; /* raw samples us */ +static struct dentry *debug_threshold; /* threshold us */ +static struct dentry *debug_enable; /* enable/disable */ + +/* Individual samples and global state */ + +struct sample; /* latency sample */ +struct data; /* Global state */ + +/* Sampling functions */ +static int __buffer_add_sample(struct sample *sample); +static struct sample *buffer_get_sample(struct sample *sample); +static int get_sample(void *unused); + +/* Threading and state */ +static int kthread_fn(void *unused); +static int start_kthread(void); +static int stop_kthread(void); +static void __reset_stats(void); +static int init_stats(void); + +/* Debugfs interface */ +static ssize_t simple_data_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos, const u64 *entry); +static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos, u64 *entry); +static int debug_sample_fopen(struct inode *inode, struct file *filp); +static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos); +static int debug_sample_release(struct inode *inode, struct file *filp); +static int debug_enable_fopen(struct inode *inode, struct file *filp); +static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos); +static ssize_t debug_enable_fwrite(struct file *file, + const char __user *user_buffer, + size_t user_size, loff_t *offset); + +/* Initialization functions */ +static int init_debugfs(void); +static void free_debugfs(void); +static int detector_init(void); +static void detector_exit(void); + +/* Individual latency samples are stored here when detected and packed into + * the ring_buffer circular buffer, where they are overwritten when + * more than buf_size/sizeof(sample) samples are received. 
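+ *
+ * With the default buf_size (BUF_SIZE_DEFAULT) this works out to roughly
+ * 8K entries, matching the "8K samples" figure quoted in
+ * Documentation/hwlat_detector.txt; the exact count depends on
+ * sizeof(struct sample) on the architecture in question.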
*/ +struct sample { + u64 seqnum; /* unique sequence */ + u64 duration; /* ktime delta */ + struct timespec timestamp; /* wall time */ + unsigned long lost; +}; + +/* keep the global state somewhere. Mostly used under stop_machine. */ +static struct data { + + struct mutex lock; /* protect changes */ + + u64 count; /* total since reset */ + u64 max_sample; /* max hardware latency */ + u64 threshold; /* sample threshold level */ + + u64 sample_window; /* total sampling window (on+off) */ + u64 sample_width; /* active sampling portion of window */ + + atomic_t sample_open; /* whether the sample file is open */ + + wait_queue_head_t wq; /* waitqeue for new sample values */ + +} data; + +/** + * __buffer_add_sample - add a new latency sample recording to the ring buffer + * @sample: The new latency sample value + * + * This receives a new latency sample and records it in a global ring buffer. + * No additional locking is used in this case - suited for stop_machine use. + */ +static int __buffer_add_sample(struct sample *sample) +{ + return ring_buffer_write(ring_buffer, + sizeof(struct sample), sample); +} + +/** + * buffer_get_sample - remove a hardware latency sample from the ring buffer + * @sample: Pre-allocated storage for the sample + * + * This retrieves a hardware latency sample from the global circular buffer + */ +static struct sample *buffer_get_sample(struct sample *sample) +{ + struct ring_buffer_event *e = NULL; + struct sample *s = NULL; + unsigned int cpu = 0; + + if (!sample) + return NULL; + + mutex_lock(&ring_buffer_mutex); + for_each_online_cpu(cpu) { + e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); + if (e) + break; + } + + if (e) { + s = ring_buffer_event_data(e); + memcpy(sample, s, sizeof(struct sample)); + } else + sample = NULL; + mutex_unlock(&ring_buffer_mutex); + + return sample; +} + +/** + * get_sample - sample the CPU TSC and look for likely hardware latencies + * @unused: This is not used but is a part of the stop_machine API + * + * Used to repeatedly capture the CPU TSC (or similar), looking for potential + * hardware-induced latency. Called under stop_machine, with data.lock held. + */ +static int get_sample(void *unused) +{ + ktime_t start, t1, t2; + s64 diff, total = 0; + u64 sample = 0; + int ret = 1; + + start = ktime_get(); /* start timestamp */ + + do { + + t1 = ktime_get(); /* we'll look for a discontinuity */ + t2 = ktime_get(); + + total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ + diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ + + /* This shouldn't happen */ + if (diff < 0) { + pr_err(BANNER "time running backwards\n"); + goto out; + } + + if (diff > sample) + sample = diff; /* only want highest value */ + + } while (total <= data.sample_width); + + /* If we exceed the threshold value, we have found a hardware latency */ + if (sample > data.threshold) { + struct sample s; + + data.count++; + s.seqnum = data.count; + s.duration = sample; + s.timestamp = CURRENT_TIME; + __buffer_add_sample(&s); + + /* Keep a running maximum ever recorded hardware latency */ + if (sample > data.max_sample) + data.max_sample = sample; + } + + ret = 0; +out: + return ret; +} + +/* + * kthread_fn - The CPU time sampling/hardware latency detection kernel thread + * @unused: A required part of the kthread API. + * + * Used to periodically sample the CPU TSC via a call to get_sample. We + * use stop_machine, whith does (intentionally) introduce latency since we + * need to ensure nothing else might be running (and thus pre-empting). 
+ * Obviously this should never be used in production environments. + * + * stop_machine will schedule us typically only on CPU0 which is fine for + * almost every real-world hardware latency situation - but we might later + * generalize this if we find there are any actualy systems with alternate + * SMI delivery or other non CPU0 hardware latencies. + */ +static int kthread_fn(void *unused) +{ + int err = 0; + u64 interval = 0; + + while (!kthread_should_stop()) { + + mutex_lock(&data.lock); + + err = stop_machine(get_sample, unused, 0); + if (err) { + /* Houston, we have a problem */ + mutex_unlock(&data.lock); + goto err_out; + } + + wake_up(&data.wq); /* wake up reader(s) */ + + interval = data.sample_window - data.sample_width; + do_div(interval, USEC_PER_MSEC); /* modifies interval value */ + + mutex_unlock(&data.lock); + + if (msleep_interruptible(interval)) + goto out; + } + goto out; +err_out: + pr_err(BANNER "could not call stop_machine, disabling\n"); + enabled = 0; +out: + return err; + +} + +/** + * start_kthread - Kick off the hardware latency sampling/detector kthread + * + * This starts a kernel thread that will sit and sample the CPU timestamp + * counter (TSC or similar) and look for potential hardware latencies. + */ +static int start_kthread(void) +{ + kthread = kthread_run(kthread_fn, NULL, + DRVNAME); + if (IS_ERR(kthread)) { + pr_err(BANNER "could not start sampling thread\n"); + enabled = 0; + return -ENOMEM; + } + + return 0; +} + +/** + * stop_kthread - Inform the hardware latency samping/detector kthread to stop + * + * This kicks the running hardware latency sampling/detector kernel thread and + * tells it to stop sampling now. Use this on unload and at system shutdown. + */ +static int stop_kthread(void) +{ + int ret; + + ret = kthread_stop(kthread); + + return ret; +} + +/** + * __reset_stats - Reset statistics for the hardware latency detector + * + * We use data to store various statistics and global state. We call this + * function in order to reset those when "enable" is toggled on or off, and + * also at initialization. Should be called with data.lock held. + */ +static void __reset_stats(void) +{ + data.count = 0; + data.max_sample = 0; + ring_buffer_reset(ring_buffer); /* flush out old sample entries */ +} + +/** + * init_stats - Setup global state statistics for the hardware latency detector + * + * We use data to store various statistics and global state. We also use + * a global ring buffer (ring_buffer) to keep raw samples of detected hardware + * induced system latencies. This function initializes these structures and + * allocates the global ring buffer also. 
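+ *
+ * The defaults installed here are DEFAULT_LAT_THRESHOLD (10 us),
+ * DEFAULT_SAMPLE_WINDOW (1 s) and DEFAULT_SAMPLE_WIDTH (0.5 s), i.e. once
+ * enabled the detector samples for half of every one second window.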
+ */ +static int init_stats(void) +{ + int ret = -ENOMEM; + + mutex_init(&data.lock); + init_waitqueue_head(&data.wq); + atomic_set(&data.sample_open, 0); + + ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); + + if (WARN(!ring_buffer, KERN_ERR BANNER + "failed to allocate ring buffer!\n")) + goto out; + + __reset_stats(); + data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ + data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ + data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ + + ret = 0; + +out: + return ret; + +} + +/* + * simple_data_read - Wrapper read function for global state debugfs entries + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * @entry: The entry to read from + * + * This function provides a generic read implementation for the global state + * "data" structure debugfs filesystem entries. It would be nice to use + * simple_attr_read directly, but we need to make sure that the data.lock + * spinlock is held during the actual read (even though we likely won't ever + * actually race here as the updater runs under a stop_machine context). + */ +static ssize_t simple_data_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos, const u64 *entry) +{ + char buf[U64STR_SIZE]; + u64 val = 0; + int len = 0; + + memset(buf, 0, sizeof(buf)); + + if (!entry) + return -EFAULT; + + mutex_lock(&data.lock); + val = *entry; + mutex_unlock(&data.lock); + + len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); + +} + +/* + * simple_data_write - Wrapper write function for global state debugfs entries + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to write value from + * @cnt: The maximum number of bytes to write + * @ppos: The current "file" position + * @entry: The entry to write to + * + * This function provides a generic write implementation for the global state + * "data" structure debugfs filesystem entries. It would be nice to use + * simple_attr_write directly, but we need to make sure that the data.lock + * spinlock is held during the actual write (even though we likely won't ever + * actually race here as the updater runs under a stop_machine context). + */ +static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos, u64 *entry) +{ + char buf[U64STR_SIZE]; + int csize = min(cnt, sizeof(buf)); + u64 val = 0; + int err = 0; + + memset(buf, '\0', sizeof(buf)); + if (copy_from_user(buf, ubuf, csize)) + return -EFAULT; + + buf[U64STR_SIZE-1] = '\0'; /* just in case */ + err = kstrtoull(buf, 10, &val); + if (err) + return -EINVAL; + + mutex_lock(&data.lock); + *entry = val; + mutex_unlock(&data.lock); + + return csize; +} + +/** + * debug_count_fopen - Open function for "count" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "count" debugfs + * interface to the hardware latency detector. 
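+ *
+ * Typical use (assuming debugfs is mounted at /debug, as in
+ * Documentation/hwlat_detector.txt):
+ *
+ *	# cat /debug/hwlat_detector/count
+ *	# echo 0 > /debug/hwlat_detector/count		(reset the counter)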
+ */ +static int debug_count_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_count_fread - Read function for "count" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "count" debugfs + * interface to the hardware latency detector. Can be used to read the + * number of latency readings exceeding the configured threshold since + * the detector was last reset (e.g. by writing a zero into "count"). + */ +static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.count); +} + +/** + * debug_count_fwrite - Write function for "count" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "count" debugfs + * interface to the hardware latency detector. Can be used to write a + * desired value, especially to zero the total count. + */ +static ssize_t debug_count_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + return simple_data_write(filp, ubuf, cnt, ppos, &data.count); +} + +/** + * debug_enable_fopen - Dummy open function for "enable" debugfs interface + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "enable" debugfs + * interface to the hardware latency detector. + */ +static int debug_enable_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_enable_fread - Read function for "enable" debugfs interface + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "enable" debugfs + * interface to the hardware latency detector. Can be used to determine + * whether the detector is currently enabled ("0\n" or "1\n" returned). + */ +static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[4]; + + if ((cnt < sizeof(buf)) || (*ppos)) + return 0; + + buf[0] = enabled ? '1' : '0'; + buf[1] = '\n'; + buf[2] = '\0'; + if (copy_to_user(ubuf, buf, strlen(buf))) + return -EFAULT; + return *ppos = strlen(buf); +} + +/** + * debug_enable_fwrite - Write function for "enable" debugfs interface + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "enable" debugfs + * interface to the hardware latency detector. Can be used to enable or + * disable the detector, which will have the side-effect of possibly + * also resetting the global stats and kicking off the measuring + * kthread (on an enable) or the converse (upon a disable). 
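+ *
+ * For example (path as in Documentation/hwlat_detector.txt):
+ *
+ *	# echo 1 > /debug/hwlat_detector/enable		(start sampling)
+ *	# echo 0 > /debug/hwlat_detector/enable		(stop sampling)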
+ */ +static ssize_t debug_enable_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + char buf[4]; + int csize = min(cnt, sizeof(buf)); + long val = 0; + int err = 0; + + memset(buf, '\0', sizeof(buf)); + if (copy_from_user(buf, ubuf, csize)) + return -EFAULT; + + buf[sizeof(buf)-1] = '\0'; /* just in case */ + err = kstrtoul(buf, 10, &val); + if (0 != err) + return -EINVAL; + + if (val) { + if (enabled) + goto unlock; + enabled = 1; + __reset_stats(); + if (start_kthread()) + return -EFAULT; + } else { + if (!enabled) + goto unlock; + enabled = 0; + err = stop_kthread(); + if (err) { + pr_err(BANNER "cannot stop kthread\n"); + return -EFAULT; + } + wake_up(&data.wq); /* reader(s) should return */ + } +unlock: + return csize; +} + +/** + * debug_max_fopen - Open function for "max" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "max" debugfs + * interface to the hardware latency detector. + */ +static int debug_max_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_max_fread - Read function for "max" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "max" debugfs + * interface to the hardware latency detector. Can be used to determine + * the maximum latency value observed since it was last reset. + */ +static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); +} + +/** + * debug_max_fwrite - Write function for "max" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "max" debugfs + * interface to the hardware latency detector. Can be used to reset the + * maximum or set it to some other desired value - if, then, subsequent + * measurements exceed this value, the maximum will be updated. + */ +static ssize_t debug_max_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); +} + + +/** + * debug_sample_fopen - An open function for "sample" debugfs interface + * @inode: The in-kernel inode representation of this debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function handles opening the "sample" file within the hardware + * latency detector debugfs directory interface. This file is used to read + * raw samples from the global ring_buffer and allows the user to see a + * running latency history. Can be opened blocking or non-blocking, + * affecting whether it behaves as a buffer read pipe, or does not. + * Implements simple locking to prevent multiple simultaneous use. 
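+ *
+ * Only a single reader may have the file open at any one time; further
+ * opens fail with -EBUSY until the current reader releases it again
+ * (see the data.sample_open counter below).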
+ */ +static int debug_sample_fopen(struct inode *inode, struct file *filp) +{ + if (!atomic_add_unless(&data.sample_open, 1, 1)) + return -EBUSY; + else + return 0; +} + +/** + * debug_sample_fread - A read function for "sample" debugfs interface + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that will contain the samples read + * @cnt: The maximum bytes to read from the debugfs "file" + * @ppos: The current position in the debugfs "file" + * + * This function handles reading from the "sample" file within the hardware + * latency detector debugfs directory interface. This file is used to read + * raw samples from the global ring_buffer and allows the user to see a + * running latency history. By default this will block pending a new + * value written into the sample buffer, unless there are already a + * number of value(s) waiting in the buffer, or the sample file was + * previously opened in a non-blocking mode of operation. + */ +static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + int len = 0; + char buf[64]; + struct sample *sample = NULL; + + if (!enabled) + return 0; + + sample = kzalloc(sizeof(struct sample), GFP_KERNEL); + if (!sample) + return -ENOMEM; + + while (!buffer_get_sample(sample)) { + + DEFINE_WAIT(wait); + + if (filp->f_flags & O_NONBLOCK) { + len = -EAGAIN; + goto out; + } + + prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE); + schedule(); + finish_wait(&data.wq, &wait); + + if (signal_pending(current)) { + len = -EINTR; + goto out; + } + + if (!enabled) { /* enable was toggled */ + len = 0; + goto out; + } + } + + len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", + sample->timestamp.tv_sec, + sample->timestamp.tv_nsec, + sample->duration); + + + /* handling partial reads is more trouble than it's worth */ + if (len > cnt) + goto out; + + if (copy_to_user(ubuf, buf, len)) + len = -EFAULT; + +out: + kfree(sample); + return len; +} + +/** + * debug_sample_release - Release function for "sample" debugfs interface + * @inode: The in-kernel inode represenation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function completes the close of the debugfs interface "sample" file. + * Frees the sample_open "lock" so that other users may open the interface. + */ +static int debug_sample_release(struct inode *inode, struct file *filp) +{ + atomic_dec(&data.sample_open); + + return 0; +} + +/** + * debug_threshold_fopen - Open function for "threshold" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "threshold" debugfs + * interface to the hardware latency detector. + */ +static int debug_threshold_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_threshold_fread - Read function for "threshold" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "threshold" debugfs + * interface to the hardware latency detector. It can be used to determine + * the current threshold level at which a latency will be recorded in the + * global ring buffer, typically on the order of 10us. 
+ */ +static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold); +} + +/** + * debug_threshold_fwrite - Write function for "threshold" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "threshold" debugfs + * interface to the hardware latency detector. It can be used to configure + * the threshold level at which any subsequently detected latencies will + * be recorded into the global ring buffer. + */ +static ssize_t debug_threshold_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + int ret; + + ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold); + + if (enabled) + wake_up_process(kthread); + + return ret; +} + +/** + * debug_width_fopen - Open function for "width" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "width" debugfs + * interface to the hardware latency detector. + */ +static int debug_width_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_width_fread - Read function for "width" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "width" debugfs + * interface to the hardware latency detector. It can be used to determine + * for how many us of the total window us we will actively sample for any + * hardware-induced latecy periods. Obviously, it is not possible to + * sample constantly and have the system respond to a sample reader, or, + * worse, without having the system appear to have gone out to lunch. + */ +static ssize_t debug_width_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width); +} + +/** + * debug_width_fwrite - Write function for "width" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "width" debugfs + * interface to the hardware latency detector. It can be used to configure + * for how many us of the total window us we will actively sample for any + * hardware-induced latency periods. Obviously, it is not possible to + * sample constantly and have the system respond to a sample reader, or, + * worse, without having the system appear to have gone out to lunch. It + * is enforced that width is less that the total window size. 
+ */ +static ssize_t debug_width_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + char buf[U64STR_SIZE]; + int csize = min(cnt, sizeof(buf)); + u64 val = 0; + int err = 0; + + memset(buf, '\0', sizeof(buf)); + if (copy_from_user(buf, ubuf, csize)) + return -EFAULT; + + buf[U64STR_SIZE-1] = '\0'; /* just in case */ + err = kstrtoull(buf, 10, &val); + if (0 != err) + return -EINVAL; + + mutex_lock(&data.lock); + if (val < data.sample_window) + data.sample_width = val; + else { + mutex_unlock(&data.lock); + return -EINVAL; + } + mutex_unlock(&data.lock); + + if (enabled) + wake_up_process(kthread); + + return csize; +} + +/** + * debug_window_fopen - Open function for "window" debugfs entry + * @inode: The in-kernel inode representation of the debugfs "file" + * @filp: The active open file structure for the debugfs "file" + * + * This function provides an open implementation for the "window" debugfs + * interface to the hardware latency detector. The window is the total time + * in us that will be considered one sample period. Conceptually, windows + * occur back-to-back and contain a sample width period during which + * actual sampling occurs. + */ +static int debug_window_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + +/** + * debug_window_fread - Read function for "window" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The userspace provided buffer to read value into + * @cnt: The maximum number of bytes to read + * @ppos: The current "file" position + * + * This function provides a read implementation for the "window" debugfs + * interface to the hardware latency detector. The window is the total time + * in us that will be considered one sample period. Conceptually, windows + * occur back-to-back and contain a sample width period during which + * actual sampling occurs. Can be used to read the total window size. + */ +static ssize_t debug_window_fread(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window); +} + +/** + * debug_window_fwrite - Write function for "window" debugfs entry + * @filp: The active open file structure for the debugfs "file" + * @ubuf: The user buffer that contains the value to write + * @cnt: The maximum number of bytes to write to "file" + * @ppos: The current position in the debugfs "file" + * + * This function provides a write implementation for the "window" debufds + * interface to the hardware latency detetector. The window is the total time + * in us that will be considered one sample period. Conceptually, windows + * occur back-to-back and contain a sample width period during which + * actual sampling occurs. Can be used to write a new total window size. It + * is enfoced that any value written must be greater than the sample width + * size, or an error results. 
+ */ +static ssize_t debug_window_fwrite(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + char buf[U64STR_SIZE]; + int csize = min(cnt, sizeof(buf)); + u64 val = 0; + int err = 0; + + memset(buf, '\0', sizeof(buf)); + if (copy_from_user(buf, ubuf, csize)) + return -EFAULT; + + buf[U64STR_SIZE-1] = '\0'; /* just in case */ + err = kstrtoull(buf, 10, &val); + if (0 != err) + return -EINVAL; + + mutex_lock(&data.lock); + if (data.sample_width < val) + data.sample_window = val; + else { + mutex_unlock(&data.lock); + return -EINVAL; + } + mutex_unlock(&data.lock); + + return csize; +} + +/* + * Function pointers for the "count" debugfs file operations + */ +static const struct file_operations count_fops = { + .open = debug_count_fopen, + .read = debug_count_fread, + .write = debug_count_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "enable" debugfs file operations + */ +static const struct file_operations enable_fops = { + .open = debug_enable_fopen, + .read = debug_enable_fread, + .write = debug_enable_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "max" debugfs file operations + */ +static const struct file_operations max_fops = { + .open = debug_max_fopen, + .read = debug_max_fread, + .write = debug_max_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "sample" debugfs file operations + */ +static const struct file_operations sample_fops = { + .open = debug_sample_fopen, + .read = debug_sample_fread, + .release = debug_sample_release, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "threshold" debugfs file operations + */ +static const struct file_operations threshold_fops = { + .open = debug_threshold_fopen, + .read = debug_threshold_fread, + .write = debug_threshold_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "width" debugfs file operations + */ +static const struct file_operations width_fops = { + .open = debug_width_fopen, + .read = debug_width_fread, + .write = debug_width_fwrite, + .owner = THIS_MODULE, +}; + +/* + * Function pointers for the "window" debugfs file operations + */ +static const struct file_operations window_fops = { + .open = debug_window_fopen, + .read = debug_window_fread, + .write = debug_window_fwrite, + .owner = THIS_MODULE, +}; + +/** + * init_debugfs - A function to initialize the debugfs interface files + * + * This function creates entries in debugfs for "hwlat_detector", including + * files to read values from the detector, current samples, and the + * maximum sample that has been captured since the hardware latency + * dectector was started. 
+ */ +static int init_debugfs(void) +{ + int ret = -ENOMEM; + + debug_dir = debugfs_create_dir(DRVNAME, NULL); + if (!debug_dir) + goto err_debug_dir; + + debug_sample = debugfs_create_file("sample", 0444, + debug_dir, NULL, + &sample_fops); + if (!debug_sample) + goto err_sample; + + debug_count = debugfs_create_file("count", 0444, + debug_dir, NULL, + &count_fops); + if (!debug_count) + goto err_count; + + debug_max = debugfs_create_file("max", 0444, + debug_dir, NULL, + &max_fops); + if (!debug_max) + goto err_max; + + debug_sample_window = debugfs_create_file("window", 0644, + debug_dir, NULL, + &window_fops); + if (!debug_sample_window) + goto err_window; + + debug_sample_width = debugfs_create_file("width", 0644, + debug_dir, NULL, + &width_fops); + if (!debug_sample_width) + goto err_width; + + debug_threshold = debugfs_create_file("threshold", 0644, + debug_dir, NULL, + &threshold_fops); + if (!debug_threshold) + goto err_threshold; + + debug_enable = debugfs_create_file("enable", 0644, + debug_dir, &enabled, + &enable_fops); + if (!debug_enable) + goto err_enable; + + else { + ret = 0; + goto out; + } + +err_enable: + debugfs_remove(debug_threshold); +err_threshold: + debugfs_remove(debug_sample_width); +err_width: + debugfs_remove(debug_sample_window); +err_window: + debugfs_remove(debug_max); +err_max: + debugfs_remove(debug_count); +err_count: + debugfs_remove(debug_sample); +err_sample: + debugfs_remove(debug_dir); +err_debug_dir: +out: + return ret; +} + +/** + * free_debugfs - A function to cleanup the debugfs file interface + */ +static void free_debugfs(void) +{ + /* could also use a debugfs_remove_recursive */ + debugfs_remove(debug_enable); + debugfs_remove(debug_threshold); + debugfs_remove(debug_sample_width); + debugfs_remove(debug_sample_window); + debugfs_remove(debug_max); + debugfs_remove(debug_count); + debugfs_remove(debug_sample); + debugfs_remove(debug_dir); +} + +/** + * detector_init - Standard module initialization code + */ +static int detector_init(void) +{ + int ret = -ENOMEM; + + pr_info(BANNER "version %s\n", VERSION); + + ret = init_stats(); + if (0 != ret) + goto out; + + ret = init_debugfs(); + if (0 != ret) + goto err_stats; + + if (enabled) + ret = start_kthread(); + + goto out; + +err_stats: + ring_buffer_free(ring_buffer); +out: + return ret; + +} + +/** + * detector_exit - Standard module cleanup code + */ +static void detector_exit(void) +{ + int err; + + if (enabled) { + enabled = 0; + err = stop_kthread(); + if (err) + pr_err(BANNER "cannot stop kthread\n"); + } + + free_debugfs(); + ring_buffer_free(ring_buffer); /* free up the ring buffer */ + +} + +module_init(detector_init); +module_exit(detector_exit); -- cgit v0.10.2 From 4c7a7a6b6d4b63bc641c92bc13b2f033d4753bb9 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 19 Aug 2013 17:33:25 -0400 Subject: hwlat-detector: Update hwlat_detector to add outer loop detection The hwlat_detector reads two timestamps in a row, then reports any gap between those calls. The problem is, it misses everything between the second reading of the time stamp to the first reading of the time stamp in the next loop. That's were most of the time is spent, which means, chances are likely that it will miss all hardware latencies. This defeats the purpose. By also testing the first time stamp from the previous loop second time stamp (the outer loop), we are more likely to find a latency. 
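To see the two gaps concretely, here is a rough userspace sketch of the same loop shape, illustration only and not from the patch: clock_gettime() stands in for the kernel timestamp source, and the 500 ms budget and printed summary are made up for the demo. Anything that interrupts the loop between iterations shows up only in the outer number, which is exactly the window the detector used to ignore.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}

int main(void)
{
	uint64_t start = now_us(), last_t2 = 0;
	uint64_t inner_max = 0, outer_max = 0;

	do {
		uint64_t t1 = now_us();		/* two reads in a row: the inner gap */
		uint64_t t2 = now_us();

		/* outer gap: from the previous t2 up to this iteration's t1 */
		if (last_t2 && t1 - last_t2 > outer_max)
			outer_max = t1 - last_t2;
		if (t2 - t1 > inner_max)
			inner_max = t2 - t1;
		last_t2 = t2;
	} while (now_us() - start < 500000);	/* ~500 ms sample budget */

	printf("inner max %llu us, outer max %llu us\n",
	       (unsigned long long)inner_max, (unsigned long long)outer_max);
	return 0;
}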
Setting the threshold to 1, here's what the report now looks like: 1347415723.0232202770 0 2 1347415725.0234202822 0 2 1347415727.0236202875 0 2 1347415729.0238202928 0 2 1347415731.0240202980 0 2 1347415734.0243203061 0 2 1347415736.0245203113 0 2 1347415738.0247203166 2 0 1347415740.0249203219 0 3 1347415742.0251203272 0 3 1347415743.0252203299 0 3 1347415745.0254203351 0 2 1347415747.0256203404 0 2 1347415749.0258203457 0 2 1347415751.0260203510 0 2 1347415754.0263203589 0 2 1347415756.0265203642 0 2 1347415758.0267203695 0 2 1347415760.0269203748 0 2 1347415762.0271203801 0 2 1347415764.0273203853 2 0 There's some hardware latency that takes 2 microseconds to run. Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c index 6864f3c..c07e859 100644 --- a/drivers/misc/hwlat_detector.c +++ b/drivers/misc/hwlat_detector.c @@ -143,6 +143,7 @@ static void detector_exit(void); struct sample { u64 seqnum; /* unique sequence */ u64 duration; /* ktime delta */ + u64 outer_duration; /* ktime delta (outer loop) */ struct timespec timestamp; /* wall time */ unsigned long lost; }; @@ -219,11 +220,13 @@ static struct sample *buffer_get_sample(struct sample *sample) */ static int get_sample(void *unused) { - ktime_t start, t1, t2; + ktime_t start, t1, t2, last_t2; s64 diff, total = 0; u64 sample = 0; + u64 outer_sample = 0; int ret = 1; + last_t2.tv64 = 0; start = ktime_get(); /* start timestamp */ do { @@ -231,7 +234,22 @@ static int get_sample(void *unused) t1 = ktime_get(); /* we'll look for a discontinuity */ t2 = ktime_get(); + if (last_t2.tv64) { + /* Check the delta from outer loop (t2 to next t1) */ + diff = ktime_to_us(ktime_sub(t1, last_t2)); + /* This shouldn't happen */ + if (diff < 0) { + pr_err(BANNER "time running backwards\n"); + goto out; + } + if (diff > outer_sample) + outer_sample = diff; + } + last_t2 = t2; + total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ + + /* This checks the inner loop (t1 to t2) */ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ /* This shouldn't happen */ @@ -246,12 +264,13 @@ static int get_sample(void *unused) } while (total <= data.sample_width); /* If we exceed the threshold value, we have found a hardware latency */ - if (sample > data.threshold) { + if (sample > data.threshold || outer_sample > data.threshold) { struct sample s; data.count++; s.seqnum = data.count; s.duration = sample; + s.outer_duration = outer_sample; s.timestamp = CURRENT_TIME; __buffer_add_sample(&s); @@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, } } - len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", - sample->timestamp.tv_sec, - sample->timestamp.tv_nsec, - sample->duration); + len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n", + sample->timestamp.tv_sec, + sample->timestamp.tv_nsec, + sample->duration, + sample->outer_duration); /* handling partial reads is more trouble than it's worth */ -- cgit v0.10.2 From 191183bd4dd58398afa711a4457953b6bc2dd4c2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 19 Aug 2013 17:33:26 -0400 Subject: hwlat-detector: Use trace_clock_local if available As ktime_get() calls into the timing code which does a read_seq(), it may be affected by other CPUS that touch that lock. To remove this dependency, use the trace_clock_local() which is already exported for module use. If CONFIG_TRACING is enabled, use that as the clock, otherwise use ktime_get(). 
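The same "pick the clock with one switch" structure is easy to mirror in a userspace harness. The time_get()/time_sub()/time_to_us() names below copy the macros the patch introduces; the POSIX clocks and the two-reads demo are stand-ins chosen purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* #define USE_RAW_CLOCK 1 */	/* flip the switch to change every timestamp call */

#ifdef USE_RAW_CLOCK
# define time_get()	read_clock(CLOCK_MONOTONIC_RAW)
#else
# define time_get()	read_clock(CLOCK_MONOTONIC)
#endif
#define time_sub(a, b)	((a) - (b))
#define time_to_us(x)	((x) / 1000)

static uint64_t read_clock(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	uint64_t t1 = time_get();
	uint64_t t2 = time_get();

	printf("two back-to-back reads are %llu us apart\n",
	       (unsigned long long)time_to_us(time_sub(t2, t1)));
	return 0;
}

Flipping the single define swaps the clock everywhere at compile time, which is all the CONFIG_TRACING switch in the patch does with trace_clock_local() versus ktime_get().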
Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c index c07e859..0fcc0e3 100644 --- a/drivers/misc/hwlat_detector.c +++ b/drivers/misc/hwlat_detector.c @@ -51,6 +51,7 @@ #include #include #include +#include #define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ #define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ @@ -211,6 +212,21 @@ static struct sample *buffer_get_sample(struct sample *sample) return sample; } +#ifndef CONFIG_TRACING +#define time_type ktime_t +#define time_get() ktime_get() +#define time_to_us(x) ktime_to_us(x) +#define time_sub(a, b) ktime_sub(a, b) +#define init_time(a, b) (a).tv64 = b +#define time_u64(a) ((a).tv64) +#else +#define time_type u64 +#define time_get() trace_clock_local() +#define time_to_us(x) div_u64(x, 1000) +#define time_sub(a, b) ((a) - (b)) +#define init_time(a, b) (a = b) +#define time_u64(a) a +#endif /** * get_sample - sample the CPU TSC and look for likely hardware latencies * @unused: This is not used but is a part of the stop_machine API @@ -220,23 +236,23 @@ static struct sample *buffer_get_sample(struct sample *sample) */ static int get_sample(void *unused) { - ktime_t start, t1, t2, last_t2; + time_type start, t1, t2, last_t2; s64 diff, total = 0; u64 sample = 0; u64 outer_sample = 0; int ret = 1; - last_t2.tv64 = 0; - start = ktime_get(); /* start timestamp */ + init_time(last_t2, 0); + start = time_get(); /* start timestamp */ do { - t1 = ktime_get(); /* we'll look for a discontinuity */ - t2 = ktime_get(); + t1 = time_get(); /* we'll look for a discontinuity */ + t2 = time_get(); - if (last_t2.tv64) { + if (time_u64(last_t2)) { /* Check the delta from outer loop (t2 to next t1) */ - diff = ktime_to_us(ktime_sub(t1, last_t2)); + diff = time_to_us(time_sub(t1, last_t2)); /* This shouldn't happen */ if (diff < 0) { pr_err(BANNER "time running backwards\n"); @@ -247,10 +263,10 @@ static int get_sample(void *unused) } last_t2 = t2; - total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ + total = time_to_us(time_sub(t2, start)); /* sample width */ /* This checks the inner loop (t1 to t2) */ - diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ + diff = time_to_us(time_sub(t2, t1)); /* current diff */ /* This shouldn't happen */ if (diff < 0) { -- cgit v0.10.2 From 938b1fa9865f784988b6f92554f41e6b6f8d48f4 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 19 Aug 2013 17:33:27 -0400 Subject: hwlat-detector: Use thread instead of stop machine There's no reason to use stop machine to search for hardware latency. Simply disabling interrupts while running the loop will do enough to check if something comes in that wasn't disabled by interrupts being off, which is exactly what stop machine does. Instead of using stop machine, just have the thread disable interrupts while it checks for hardware latency. 
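For orientation, the resulting sampling rhythm is roughly the userspace loop below: busy-poll for the configured width, then sleep away the rest of the window, which is what the reworked kthread_fn() further down does with its (window - width) msleep_interruptible(). The local_irq_disable() around get_sample() has no userspace equivalent and is simply left out here, and the window/width numbers are arbitrary demo values rather than the driver defaults.

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define WINDOW_US	1000000ULL	/* demo values, not DEFAULT_SAMPLE_* */
#define WIDTH_US	 400000ULL

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}

int main(void)
{
	for (int round = 0; round < 3; round++) {
		uint64_t start = now_us(), prev = start, max_gap = 0;

		/* the "get_sample()" part: hunt for gaps for WIDTH_US */
		while (now_us() - start < WIDTH_US) {
			uint64_t t = now_us();

			if (t - prev > max_gap)
				max_gap = t - prev;
			prev = t;
		}
		printf("window %d: worst gap %llu us\n",
		       round, (unsigned long long)max_gap);

		/* idle for the remainder of the window, like the kthread */
		usleep(WINDOW_US - WIDTH_US);
	}
	return 0;
}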
Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c index 0fcc0e3..6e88113 100644 --- a/drivers/misc/hwlat_detector.c +++ b/drivers/misc/hwlat_detector.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -107,7 +106,6 @@ struct data; /* Global state */ /* Sampling functions */ static int __buffer_add_sample(struct sample *sample); static struct sample *buffer_get_sample(struct sample *sample); -static int get_sample(void *unused); /* Threading and state */ static int kthread_fn(void *unused); @@ -149,7 +147,7 @@ struct sample { unsigned long lost; }; -/* keep the global state somewhere. Mostly used under stop_machine. */ +/* keep the global state somewhere. */ static struct data { struct mutex lock; /* protect changes */ @@ -172,7 +170,7 @@ static struct data { * @sample: The new latency sample value * * This receives a new latency sample and records it in a global ring buffer. - * No additional locking is used in this case - suited for stop_machine use. + * No additional locking is used in this case. */ static int __buffer_add_sample(struct sample *sample) { @@ -229,18 +227,18 @@ static struct sample *buffer_get_sample(struct sample *sample) #endif /** * get_sample - sample the CPU TSC and look for likely hardware latencies - * @unused: This is not used but is a part of the stop_machine API * * Used to repeatedly capture the CPU TSC (or similar), looking for potential - * hardware-induced latency. Called under stop_machine, with data.lock held. + * hardware-induced latency. Called with interrupts disabled and with + * data.lock held. */ -static int get_sample(void *unused) +static int get_sample(void) { time_type start, t1, t2, last_t2; s64 diff, total = 0; u64 sample = 0; u64 outer_sample = 0; - int ret = 1; + int ret = -1; init_time(last_t2, 0); start = time_get(); /* start timestamp */ @@ -279,10 +277,14 @@ static int get_sample(void *unused) } while (total <= data.sample_width); + ret = 0; + /* If we exceed the threshold value, we have found a hardware latency */ if (sample > data.threshold || outer_sample > data.threshold) { struct sample s; + ret = 1; + data.count++; s.seqnum = data.count; s.duration = sample; @@ -295,7 +297,6 @@ static int get_sample(void *unused) data.max_sample = sample; } - ret = 0; out: return ret; } @@ -305,32 +306,30 @@ out: * @unused: A required part of the kthread API. * * Used to periodically sample the CPU TSC via a call to get_sample. We - * use stop_machine, whith does (intentionally) introduce latency since we + * disable interrupts, which does (intentionally) introduce latency since we * need to ensure nothing else might be running (and thus pre-empting). * Obviously this should never be used in production environments. * - * stop_machine will schedule us typically only on CPU0 which is fine for - * almost every real-world hardware latency situation - but we might later - * generalize this if we find there are any actualy systems with alternate - * SMI delivery or other non CPU0 hardware latencies. + * Currently this runs on which ever CPU it was scheduled on, but most + * real-worald hardware latency situations occur across several CPUs, + * but we might later generalize this if we find there are any actualy + * systems with alternate SMI delivery or other hardware latencies. 
*/ static int kthread_fn(void *unused) { - int err = 0; - u64 interval = 0; + int ret; + u64 interval; while (!kthread_should_stop()) { mutex_lock(&data.lock); - err = stop_machine(get_sample, unused, 0); - if (err) { - /* Houston, we have a problem */ - mutex_unlock(&data.lock); - goto err_out; - } + local_irq_disable(); + ret = get_sample(); + local_irq_enable(); - wake_up(&data.wq); /* wake up reader(s) */ + if (ret > 0) + wake_up(&data.wq); /* wake up reader(s) */ interval = data.sample_window - data.sample_width; do_div(interval, USEC_PER_MSEC); /* modifies interval value */ @@ -338,15 +337,10 @@ static int kthread_fn(void *unused) mutex_unlock(&data.lock); if (msleep_interruptible(interval)) - goto out; + break; } - goto out; -err_out: - pr_err(BANNER "could not call stop_machine, disabling\n"); - enabled = 0; -out: - return err; + return 0; } /** @@ -442,8 +436,7 @@ out: * This function provides a generic read implementation for the global state * "data" structure debugfs filesystem entries. It would be nice to use * simple_attr_read directly, but we need to make sure that the data.lock - * spinlock is held during the actual read (even though we likely won't ever - * actually race here as the updater runs under a stop_machine context). + * is held during the actual read. */ static ssize_t simple_data_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, const u64 *entry) @@ -478,8 +471,7 @@ static ssize_t simple_data_read(struct file *filp, char __user *ubuf, * This function provides a generic write implementation for the global state * "data" structure debugfs filesystem entries. It would be nice to use * simple_attr_write directly, but we need to make sure that the data.lock - * spinlock is held during the actual write (even though we likely won't ever - * actually race here as the updater runs under a stop_machine context). + * is held during the actual write. */ static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u64 *entry) -- cgit v0.10.2 From 017907f4541930de79139fbcbcd2c4b0bbea4e10 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 30 Aug 2013 07:57:25 +0200 Subject: hwlat-detector: Don't ignore threshold module parameter If the user specified a threshold at module load time, use it. Cc: stable-rt@vger.kernel.org Acked-by: Steven Rostedt Signed-off-by: Mike Galbraith Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c index 6e88113..2429c43 100644 --- a/drivers/misc/hwlat_detector.c +++ b/drivers/misc/hwlat_detector.c @@ -414,7 +414,7 @@ static int init_stats(void) goto out; __reset_stats(); - data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ + data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ -- cgit v0.10.2 From 765986731ec002f5608d3bd3d4a093b645fb40ed Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 22 Jul 2011 17:58:40 +0200 Subject: printk-kill.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/printk.h b/include/linux/printk.h index e6131a78..6add55e 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -101,9 +101,11 @@ int no_printk(const char *fmt, ...) 
extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); void early_vprintk(const char *fmt, va_list ap); +extern void printk_kill(void); #else static inline __printf(1, 2) __cold void early_printk(const char *s, ...) { } +static inline void printk_kill(void) { } #endif #ifdef CONFIG_PRINTK @@ -137,7 +139,6 @@ extern int __printk_ratelimit(const char *func); #define printk_ratelimit() __printk_ratelimit(__func__) extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); - extern int printk_delay_msec; extern int dmesg_restrict; extern int kptr_restrict; diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index b4e8500..68e3f1a 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1488,6 +1488,55 @@ static size_t cont_print_text(char *text, size_t size) return textlen; } +#ifdef CONFIG_EARLY_PRINTK +struct console *early_console; + +void early_vprintk(const char *fmt, va_list ap) +{ + if (early_console) { + char buf[512]; + int n = vscnprintf(buf, sizeof(buf), fmt, ap); + + early_console->write(early_console, buf, n); + } +} + +asmlinkage void early_printk(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + early_vprintk(fmt, ap); + va_end(ap); +} + +/* + * This is independent of any log levels - a global + * kill switch that turns off all of printk. + * + * Used by the NMI watchdog if early-printk is enabled. + */ +static bool __read_mostly printk_killswitch; + +void printk_kill(void) +{ + printk_killswitch = true; +} + +static int forced_early_printk(const char *fmt, va_list ap) +{ + if (!printk_killswitch) + return 0; + early_vprintk(fmt, ap); + return 1; +} +#else +static inline int forced_early_printk(const char *fmt, va_list ap) +{ + return 0; +} +#endif + asmlinkage int vprintk_emit(int facility, int level, const char *dict, size_t dictlen, const char *fmt, va_list args) @@ -1501,6 +1550,13 @@ asmlinkage int vprintk_emit(int facility, int level, int this_cpu; int printed_len = 0; + /* + * Fall back to early_printk if a debugging subsystem has + * killed printk output + */ + if (unlikely(forced_early_printk(fmt, args))) + return 1; + boot_delay_msec(level); printk_delay(); @@ -1723,29 +1779,6 @@ static size_t cont_print_text(char *text, size_t size) { return 0; } #endif /* CONFIG_PRINTK */ -#ifdef CONFIG_EARLY_PRINTK -struct console *early_console; - -void early_vprintk(const char *fmt, va_list ap) -{ - if (early_console) { - char buf[512]; - int n = vscnprintf(buf, sizeof(buf), fmt, ap); - - early_console->write(early_console, buf, n); - } -} - -asmlinkage void early_printk(const char *fmt, ...) 
-{ - va_list ap; - - va_start(ap, fmt); - early_vprintk(fmt, ap); - va_end(ap); -} -#endif - static int __add_preferred_console(char *name, int idx, char *options, char *brl_options) { diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 4431610..cbad091 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -205,6 +205,8 @@ static int is_softlockup(unsigned long touch_ts) #ifdef CONFIG_HARDLOCKUP_DETECTOR +static DEFINE_RAW_SPINLOCK(watchdog_output_lock); + static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, @@ -239,10 +241,19 @@ static void watchdog_overflow_callback(struct perf_event *event, if (__this_cpu_read(hard_watchdog_warn) == true) return; - if (hardlockup_panic) + /* + * If early-printk is enabled then make sure we do not + * lock up in printk() and kill console logging: + */ + printk_kill(); + + if (hardlockup_panic) { panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); - else + } else { + raw_spin_lock(&watchdog_output_lock); WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); + raw_spin_unlock(&watchdog_output_lock); + } __this_cpu_write(hard_watchdog_warn, true); return; -- cgit v0.10.2 From 28fb38e470bf1dfe86fac22f31dd9bacb5e68459 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 2 Sep 2011 14:29:33 +0200 Subject: printk: 'force_early_printk' boot param to help with debugging Gives me an option to screw printk and actually see what the machine says. Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1314967289.1301.11.camel@twins Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 68e3f1a..184e346 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1518,6 +1518,13 @@ asmlinkage void early_printk(const char *fmt, ...) */ static bool __read_mostly printk_killswitch; +static int __init force_early_printk_setup(char *str) +{ + printk_killswitch = true; + return 0; +} +early_param("force_early_printk", force_early_printk_setup); + void printk_kill(void) { printk_killswitch = true; -- cgit v0.10.2 From a27b4b43dcb6c792ab87796e8557362c9aa70a34 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 17 Jun 2011 12:39:57 +0200 Subject: rt-preempt-base-config.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 3f9c974..c669134 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -1,3 +1,10 @@ +config PREEMPT + bool + select PREEMPT_COUNT + +config PREEMPT_RT_BASE + bool + select PREEMPT choice prompt "Preemption Model" @@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. -config PREEMPT +config PREEMPT__LL bool "Preemptible Kernel (Low-Latency Desktop)" - select PREEMPT_COUNT + select PREEMPT select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK help This option reduces the latency of the kernel by making @@ -52,6 +59,14 @@ config PREEMPT embedded system with latency requirements in the milliseconds range. +config PREEMPT_RTB + bool "Preemptible Kernel (Basic RT)" + select PREEMPT_RT_BASE + help + This option is basically the same as (Low-Latency Desktop) but + enables changes which are preliminary for the full preemptible + RT kernel. 
+ endchoice config PREEMPT_COUNT -- cgit v0.10.2 From 015e3d89911fe6a0de5f8833fb3bf25aa15d77a3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 24 Jul 2011 12:11:43 +0200 Subject: kconfig-disable-a-few-options-rt.patch Disable stuff which is known to have issues on RT Signed-off-by: Thomas Gleixner diff --git a/arch/Kconfig b/arch/Kconfig index af2cc6e..77e7e80 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -6,6 +6,7 @@ config OPROFILE tristate "OProfile system profiling" depends on PROFILING depends on HAVE_OPROFILE + depends on !PREEMPT_RT_FULL select RING_BUFFER select RING_BUFFER_ALLOW_SWAP help diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b45b240..7bbdf63 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -160,6 +160,7 @@ config VXLAN config NETCONSOLE tristate "Network console logging support" + depends on !PREEMPT_RT_FULL ---help--- If you want to log kernel messages over the network, enable this. See for details. diff --git a/mm/Kconfig b/mm/Kconfig index 394838f..083685a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -384,7 +384,7 @@ config NOMMU_INITIAL_TRIM_EXCESS config TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" - depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE + depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL select COMPACTION help Transparent Hugepages allows the kernel to use huge pages and -- cgit v0.10.2 From 785e71ea04274fc750e39946dccd20323e93c00d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 14:58:57 +0200 Subject: kconfig-preempt-rt-full.patch Signed-off-by: Thomas Gleixner diff --git a/init/Makefile b/init/Makefile index 7bc47ee..88cf473 100644 --- a/init/Makefile +++ b/init/Makefile @@ -33,4 +33,4 @@ silent_chk_compile.h = : include/generated/compile.h: FORCE @$($(quiet)chk_compile.h) $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index c669134..f8a2982 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -67,6 +67,14 @@ config PREEMPT_RTB enables changes which are preliminary for the full preemptible RT kernel. 
+config PREEMPT_RT_FULL + bool "Fully Preemptible Kernel (RT)" + depends on IRQ_FORCED_THREADING + select PREEMPT_RT_BASE + select PREEMPT_RCU + help + All and everything + endchoice config PREEMPT_COUNT diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index f221ddf..5f44009 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -4,7 +4,8 @@ TARGET=$1 ARCH=$2 SMP=$3 PREEMPT=$4 -CC=$5 +RT=$5 +CC=$6 vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } @@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION" CONFIG_FLAGS="" if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi +if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length -- cgit v0.10.2 From 79477db346fa881485a3ca0d807d4c1e7fb4fab5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:58 -0500 Subject: bug: BUG_ON/WARN_ON variants dependend on RT/!RT Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 7d10f96..aee7fd2 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -202,6 +202,20 @@ extern void warn_slowpath_null(const char *file, const int line); # define WARN_ON_SMP(x) ({0;}) #endif +#ifdef CONFIG_PREEMPT_RT_BASE +# define BUG_ON_RT(c) BUG_ON(c) +# define BUG_ON_NONRT(c) do { } while (0) +# define WARN_ON_RT(condition) WARN_ON(condition) +# define WARN_ON_NONRT(condition) do { } while (0) +# define WARN_ON_ONCE_NONRT(condition) do { } while (0) +#else +# define BUG_ON_RT(c) do { } while (0) +# define BUG_ON_NONRT(c) BUG_ON(c) +# define WARN_ON_RT(condition) do { } while (0) +# define WARN_ON_NONRT(condition) WARN_ON(condition) +# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) +#endif + #endif /* __ASSEMBLY__ */ #endif -- cgit v0.10.2 From ad193b054a98ac8bcd759030cdf4557fde81cac5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 21 Jul 2009 22:34:14 +0200 Subject: rt: local_irq_* variants depending on RT/!RT Add local_irq_*_(no)rt variant which are mainly used to break interrupt disabled sections on PREEMPT_RT or to explicitely disable interrupts on PREEMPT_RT. 
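A compile-and-run toy showing nothing more than the selection mechanism: one source file, and the config symbol decides whether the section really masks interrupts or quietly relies on the surrounding (sleeping) lock. The macro bodies below are userspace stand-ins that only print what would happen, since the real local_irq primitives are kernel-only; only the names and the CONFIG_PREEMPT_RT_FULL guard follow the patch.

#include <stdio.h>

/* #define CONFIG_PREEMPT_RT_FULL 1 */	/* flip to see the RT side */

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	do { (void)(flags); } while (0)
# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
#else
# define local_irq_save_nort(flags)	\
	do { (void)(flags); puts("-> would mask interrupts"); } while (0)
# define local_irq_restore_nort(flags)	\
	do { (void)(flags); puts("-> would unmask interrupts"); } while (0)
#endif

int main(void)
{
	unsigned long flags = 0;

	local_irq_save_nort(flags);
	puts("critical section (stays preemptible on RT, the lock provides exclusion)");
	local_irq_restore_nort(flags);
	return 0;
}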
Signed-off-by: Thomas Gleixner diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 1f6367a..cd616f9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -176,7 +176,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); #ifdef CONFIG_LOCKDEP # define local_irq_enable_in_hardirq() do { } while (0) #else -# define local_irq_enable_in_hardirq() local_irq_enable() +# define local_irq_enable_in_hardirq() local_irq_enable_nort() #endif extern void disable_irq_nosync(unsigned int irq); diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index d176d65..cc05eb7 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -147,4 +147,23 @@ #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ +/* + * local_irq* variants depending on RT/!RT + */ +#ifdef CONFIG_PREEMPT_RT_FULL +# define local_irq_disable_nort() do { } while (0) +# define local_irq_enable_nort() do { } while (0) +# define local_irq_save_nort(flags) local_save_flags(flags) +# define local_irq_restore_nort(flags) (void)(flags) +# define local_irq_disable_rt() local_irq_disable() +# define local_irq_enable_rt() local_irq_enable() +#else +# define local_irq_disable_nort() local_irq_disable() +# define local_irq_enable_nort() local_irq_enable() +# define local_irq_save_nort(flags) local_irq_save(flags) +# define local_irq_restore_nort(flags) local_irq_restore(flags) +# define local_irq_disable_rt() do { } while (0) +# define local_irq_enable_rt() do { } while (0) +#endif + #endif -- cgit v0.10.2 From 4d109fc27cb9447f8ff514d697184410abca18d4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 24 Jul 2009 12:38:56 +0200 Subject: preempt: Provide preempt_*_(no)rt variants RT needs a few preempt_disable/enable points which are not necessary otherwise. Implement variants to avoid #ifdeffery. Signed-off-by: Thomas Gleixner diff --git a/include/linux/preempt.h b/include/linux/preempt.h index f5d4723..f237ebd 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -70,11 +70,15 @@ do { \ dec_preempt_count(); \ } while (0) -#define preempt_enable_no_resched() sched_preempt_enable_no_resched() +#ifndef CONFIG_PREEMPT_RT_BASE +# define preempt_enable_no_resched() sched_preempt_enable_no_resched() +#else +# define preempt_enable_no_resched() preempt_enable() +#endif #define preempt_enable() \ do { \ - preempt_enable_no_resched(); \ + sched_preempt_enable_no_resched(); \ barrier(); \ preempt_check_resched(); \ } while (0) @@ -126,6 +130,18 @@ do { \ #endif /* CONFIG_PREEMPT_COUNT */ +#ifdef CONFIG_PREEMPT_RT_FULL +# define preempt_disable_rt() preempt_disable() +# define preempt_enable_rt() preempt_enable() +# define preempt_disable_nort() barrier() +# define preempt_enable_nort() barrier() +#else +# define preempt_disable_rt() barrier() +# define preempt_enable_rt() barrier() +# define preempt_disable_nort() preempt_disable() +# define preempt_enable_nort() preempt_enable() +#endif + #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier; -- cgit v0.10.2 From 7f52437b2d8542b133c73d1f3759ed3e8892a126 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 3 Jul 2009 08:44:29 -0500 Subject: ata: Do not disable interrupts in ide code for preempt-rt Use the local_irq_*_nort variants. 
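This and the follow-up conversions all lean on the same argument: the data touched in the formerly irq-off sections is already guarded by a lock, and on RT that lock (now a sleeping lock) keeps providing the exclusion once the _nort() variants stop masking interrupts. A minimal userspace analogue of that reasoning, with a pthread mutex standing in for the kernel lock and an invented shared counter:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long shared;

static void *worker(void *unused)
{
	(void)unused;
	for (int i = 0; i < 1000000; i++) {
		/*
		 * No "disable interrupts" step here, just as the _nort()
		 * variants compile away on RT: the mutex alone keeps the
		 * update atomic with respect to the other thread.
		 */
		pthread_mutex_lock(&lock);
		shared++;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("shared = %lu (expect 2000000)\n", shared);
	return 0;
}

Built with a plain "cc file.c -pthread" this reliably prints 2000000; drop the lock and it usually does not, which is the whole point of keeping the kernel lock while letting interrupts stay enabled.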
Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index b603720..2afbd46 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned long flags; unsigned int consumed; - local_irq_save(flags); + local_irq_save_nort(flags); consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); - local_irq_restore(flags); + local_irq_restore_nort(flags); return consumed; } @@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) unsigned long flags; /* FIXME: use a bounce buffer */ - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page); /* do the actual data transfer */ @@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); kunmap_atomic(buf); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else { buf = page_address(page); ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, @@ -864,7 +864,7 @@ next_sg: unsigned long flags; /* FIXME: use bounce buffer */ - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page); /* do the actual data transfer */ @@ -872,7 +872,7 @@ next_sg: count, rw); kunmap_atomic(buf); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else { buf = page_address(page); consumed = ap->ops->sff_data_xfer(dev, buf + offset, -- cgit v0.10.2 From 5ac85e9cb009b5ecb2ce10f3ae2de210d2826719 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:16 -0500 Subject: ide: Do not disable interrupts for PREEMPT-RT Use the local_irq_*_nort variants. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c index 36f76e2..394f142f 100644 --- a/drivers/ide/alim15x3.c +++ b/drivers/ide/alim15x3.c @@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev) isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - local_irq_save(flags); + local_irq_save_nort(flags); if (m5229_revision < 0xC2) { /* @@ -325,7 +325,7 @@ out: } pci_dev_put(north); pci_dev_put(isa_dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); return 0; } diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index 696b6c1..0d0a966 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif, dma_old = inb(base + 2); - local_irq_save(flags); + local_irq_save_nort(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 
0x4b : 0x43, &masterdma); @@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif, if (dma_new != dma_old) outb(dma_new, base + 2); - local_irq_restore(flags); + local_irq_restore_nort(flags); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c index 1976397..4169433 100644 --- a/drivers/ide/ide-io-std.c +++ b/drivers/ide/ide-io-std.c @@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, insl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) - local_irq_restore(flags); + local_irq_restore_nort(flags); if (((len + 1) & 3) < 2) return; @@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, outsl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) - local_irq_restore(flags); + local_irq_restore_nort(flags); if (((len + 1) & 3) < 2) return; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 177db6d..079ae6b 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data) /* disable_irq_nosync ?? */ disable_irq(hwif->irq); /* local CPU only, as if we were handling an interrupt */ - local_irq_disable(); + local_irq_disable_nort(); if (hwif->polling) { startstop = handler(drive); } else if (drive_is_ready(drive)) { diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 376f2dc..f014dd1 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, if ((stat & ATA_BUSY) == 0) break; - local_irq_restore(flags); + local_irq_restore_nort(flags); *rstat = stat; return -EBUSY; } } - local_irq_restore(flags); + local_irq_restore_nort(flags); } /* * Allow status to settle, then read it again. 
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 2a744a9..0964500 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id) int bswap = 1; /* local CPU only; some systems need this */ - local_irq_save(flags); + local_irq_save_nort(flags); /* read 512 bytes of id info */ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); - local_irq_restore(flags); + local_irq_restore_nort(flags); drive->dev_flags |= IDE_DFLAG_ID_READ; #ifdef DEBUG diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index dabb88b..2cecea7 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, page_is_high = PageHighMem(page); if (page_is_high) - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page) + offset; @@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, kunmap_atomic(buf); if (page_is_high) - local_irq_restore(flags); + local_irq_restore_nort(flags); len -= nr_bytes; } @@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, } if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) - local_irq_disable(); + local_irq_disable_nort(); ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); -- cgit v0.10.2 From 45fe5b8669bd44fb46cef94ccdb38f1721d1a716 Mon Sep 17 00:00:00 2001 From: Sven-Thorsten Dietrich Date: Fri, 3 Jul 2009 08:30:35 -0500 Subject: infiniband: Mellanox IB driver patch use _nort() primitives Fixes in_atomic stack-dump, when Mellanox module is loaded into the RT Kernel. Michael S. Tsirkin sayeth: "Basically, if you just make spin_lock_irqsave (and spin_lock_irq) not disable interrupts for non-raw spinlocks, I think all of infiniband will be fine without changes." Signed-off-by: Sven-Thorsten Dietrich Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index cecb98a..3800ef5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -783,7 +783,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) ipoib_mcast_stop_thread(dev, 0); - local_irq_save(flags); + local_irq_save_nort(flags); netif_addr_lock(dev); spin_lock(&priv->lock); @@ -865,7 +865,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) spin_unlock(&priv->lock); netif_addr_unlock(dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); /* We have to cancel outside of the spinlock */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { -- cgit v0.10.2 From 986ce3bc56ce1db46b8950f9a8b5d11abe001fa0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:16 -0500 Subject: input: gameport: Do not disable interrupts on PREEMPT_RT Use the _nort() primitives. 
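One detail worth noting for the gameport change: gameport_measure_speed() already keeps the minimum over 50 runs, so once interrupts stay enabled on RT, a stray preemption inflates individual runs but not the reported figure. The same filtering trick in plain userspace C, with an arbitrary busy loop standing in for gameport_read() and the loop counts copied only for flavour:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	volatile unsigned long sink = 0;	/* keep the busy loop alive */
	uint64_t best = UINT64_MAX;

	for (int i = 0; i < 50; i++) {
		uint64_t t1 = now_ns();

		for (int j = 0; j < 50; j++)	/* stand-in for gameport_read() */
			sink += j;

		uint64_t t = now_ns() - t1;

		if (t < best)			/* keep the minimum: outliers don't count */
			best = t;
	}
	printf("best of 50 runs: %llu ns (sink=%lu)\n",
	       (unsigned long long)best, sink);
	return 0;
}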
Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 922a7fe..b4fe94b 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c @@ -87,12 +87,12 @@ static int gameport_measure_speed(struct gameport *gameport) tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); GET_TIME(t1); for (t = 0; t < 50; t++) gameport_read(gameport); GET_TIME(t2); GET_TIME(t3); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; } @@ -111,11 +111,11 @@ static int gameport_measure_speed(struct gameport *gameport) tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); rdtscl(t1); for (t = 0; t < 50; t++) gameport_read(gameport); rdtscl(t2); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if (t2 - t1 < tx) tx = t2 - t1; } -- cgit v0.10.2 From 65d58f69123315e4bc1a834701d3fa5683ab0b65 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 21 Jul 2009 23:06:05 +0200 Subject: core: Do not disable interrupts on RT in kernel/users.c Use the local_irq_*_nort variants to reduce latencies in RT. The code is serialized by the locks. No need to disable interrupts. Signed-off-by: Thomas Gleixner diff --git a/kernel/user.c b/kernel/user.c index 5bbb919..2800008 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -154,11 +154,11 @@ void free_uid(struct user_struct *up) if (!up) return; - local_irq_save(flags); + local_irq_save_nort(flags); if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) free_user(up, flags); else - local_irq_restore(flags); + local_irq_restore_nort(flags); } struct user_struct *alloc_uid(kuid_t uid) -- cgit v0.10.2 From 4731c5248147dc95de6dc93c7657059a3f5a4562 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:44:33 -0500 Subject: core: Do not disable interrupts on RT in res_counter.c Frederic Weisbecker reported this warning: [ 45.228562] BUG: sleeping function called from invalid context at kernel/rtmutex.c:683 [ 45.228571] in_atomic(): 0, irqs_disabled(): 1, pid: 4290, name: ntpdate [ 45.228576] INFO: lockdep is turned off. [ 45.228580] irq event stamp: 0 [ 45.228583] hardirqs last enabled at (0): [<(null)>] (null) [ 45.228589] hardirqs last disabled at (0): [] copy_process+0x68d/0x1500 [ 45.228602] softirqs last enabled at (0): [] copy_process+0x68d/0x1500 [ 45.228609] softirqs last disabled at (0): [<(null)>] (null) [ 45.228617] Pid: 4290, comm: ntpdate Tainted: G W 2.6.29-rc4-rt1-tip #1 [ 45.228622] Call Trace: [ 45.228632] [] ? print_irqtrace_events+0xd0/0xe0 [ 45.228639] [] __might_sleep+0x113/0x130 [ 45.228646] [] rt_spin_lock+0xa1/0xb0 [ 45.228653] [] res_counter_charge+0x5d/0x130 [ 45.228660] [] __mem_cgroup_try_charge+0x7f/0x180 [ 45.228667] [] mem_cgroup_charge_common+0x57/0x90 [ 45.228674] [] ? ftrace_call+0x5/0x2b [ 45.228680] [] mem_cgroup_newpage_charge+0x5d/0x60 [ 45.228688] [] __do_fault+0x29e/0x4c0 [ 45.228694] [] ? rt_spin_unlock+0x23/0x80 [ 45.228700] [] handle_mm_fault+0x205/0x890 [ 45.228707] [] ? ftrace_call+0x5/0x2b [ 45.228714] [] do_page_fault+0x11e/0x2a0 [ 45.228720] [] page_fault+0x25/0x30 [ 45.228727] [] ? __clear_user+0x3d/0x70 [ 45.228733] [] ? __clear_user+0x21/0x70 The reason is the raw IRQ flag use of kernel/res_counter.c. The irq flags tricks there seem a bit pointless: it cannot protect the c->parent linkage because local_irq_save() is only per CPU. 
So replace it with _nort(). This code needs a second look. Reported-by: Frederic Weisbecker Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/kernel/res_counter.c b/kernel/res_counter.c index 4aa8a30..3fbcb0d 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c @@ -49,7 +49,7 @@ static int __res_counter_charge(struct res_counter *counter, unsigned long val, r = ret = 0; *limit_fail_at = NULL; - local_irq_save(flags); + local_irq_save_nort(flags); for (c = counter; c != NULL; c = c->parent) { spin_lock(&c->lock); r = res_counter_charge_locked(c, val, force); @@ -69,7 +69,7 @@ static int __res_counter_charge(struct res_counter *counter, unsigned long val, spin_unlock(&u->lock); } } - local_irq_restore(flags); + local_irq_restore_nort(flags); return ret; } @@ -103,7 +103,7 @@ u64 res_counter_uncharge_until(struct res_counter *counter, struct res_counter *c; u64 ret = 0; - local_irq_save(flags); + local_irq_save_nort(flags); for (c = counter; c != top; c = c->parent) { u64 r; spin_lock(&c->lock); @@ -112,7 +112,7 @@ u64 res_counter_uncharge_until(struct res_counter *counter, ret = r; spin_unlock(&c->lock); } - local_irq_restore(flags); + local_irq_restore_nort(flags); return ret; } -- cgit v0.10.2 From e1068f7b978a8ac352dde5ee852bfb7f5acdccd7 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 3 Jul 2009 08:44:26 -0500 Subject: usb: Use local_irq_*_nort() variants [ tglx: Now that irqf_disabled is dead we should kill that ] Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index d6a8d23..6bb14e3 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2331,7 +2331,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) * when the first handler doesn't use it. So let's just * assume it's never used. */ - local_irq_save(flags); + local_irq_save_nort(flags); if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) rc = IRQ_NONE; @@ -2340,7 +2340,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) else rc = IRQ_HANDLED; - local_irq_restore(flags); + local_irq_restore_nort(flags); return rc; } EXPORT_SYMBOL_GPL(usb_hcd_irq); -- cgit v0.10.2 From 7f7aa2b3749cf27de22356841ce1f970d46c17d2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 8 Nov 2013 17:34:54 +0100 Subject: usb: use _nort in giveback Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet context") I see |BUG: sleeping function called from invalid context at kernel/rtmutex.c:673 |in_atomic(): 0, irqs_disabled(): 1, pid: 109, name: irq/11-uhci_hcd |no locks held by irq/11-uhci_hcd/109. 
|irq event stamp: 440 |hardirqs last enabled at (439): [] _raw_spin_unlock_irqrestore+0x75/0x90 |hardirqs last disabled at (440): [] __usb_hcd_giveback_urb+0x46/0xc0 |softirqs last enabled at (0): [] copy_process.part.52+0x511/0x1510 |softirqs last disabled at (0): [< (null)>] (null) |CPU: 3 PID: 109 Comm: irq/11-uhci_hcd Not tainted 3.12.0-rt0-rc1+ #13 |Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 | 0000000000000000 ffff8800db9ffbe0 ffffffff8169f064 0000000000000000 | ffff8800db9ffbf8 ffffffff810b2122 ffff88020f03e888 ffff8800db9ffc18 | ffffffff816a6944 ffffffff810b5748 ffff88020f03c000 ffff8800db9ffc50 |Call Trace: | [] dump_stack+0x4e/0x8f | [] __might_sleep+0x112/0x190 | [] rt_spin_lock+0x24/0x60 | [] hid_ctrl+0x3b/0x190 | [] __usb_hcd_giveback_urb+0x4f/0xc0 | [] usb_hcd_giveback_urb+0x3f/0x140 | [] uhci_giveback_urb+0xaf/0x280 | [] uhci_scan_schedule+0x47a/0xb10 | [] uhci_irq+0xa6/0x1a0 | [] usb_hcd_irq+0x28/0x40 | [] irq_forced_thread_fn+0x23/0x70 | [] irq_thread+0x10f/0x150 | [] kthread+0xcd/0xe0 | [] ret_from_fork+0x7c/0xb0 on -RT we run threaded so no need to disable interrupts. Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 6bb14e3..bf21a21 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1677,9 +1677,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb) * and no one may trigger the above deadlock situation when * running complete() in tasklet. */ - local_irq_save(flags); + local_irq_save_nort(flags); urb->complete(urb); - local_irq_restore(flags); + local_irq_restore_nort(flags); atomic_dec(&urb->use_count); if (unlikely(atomic_read(&urb->reject))) -- cgit v0.10.2 From a17deff6a99920caeaf97627463813984bc5dd22 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:34 -0500 Subject: mm: scatterlist dont disable irqs on RT Signed-off-by: Thomas Gleixner diff --git a/lib/scatterlist.c b/lib/scatterlist.c index d16fa29..9c1236e 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -582,7 +582,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { - WARN_ON_ONCE(preemptible()); + WARN_ON_ONCE(!pagefault_disabled()); kunmap_atomic(miter->addr); } else kunmap(miter->page); @@ -627,7 +627,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, if (!sg_miter_skip(&miter, skip)) return false; - local_irq_save(flags); + local_irq_save_nort(flags); while (sg_miter_next(&miter) && offset < buflen) { unsigned int len; @@ -644,7 +644,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, sg_miter_stop(&miter); - local_irq_restore(flags); + local_irq_restore_nort(flags); return offset; } -- cgit v0.10.2 From 088b044510c80b43638fc18882defc4e93e06f7d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 22 Jul 2011 08:07:08 +0200 Subject: signal-fix-up-rcu-wreckage.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/signal.c b/kernel/signal.c index e644f50..3d32f54 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1394,12 +1394,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, struct sighand_struct *sighand; for (;;) { - local_irq_save(*flags); + local_irq_save_nort(*flags); rcu_read_lock(); sighand = rcu_dereference(tsk->sighand); if (unlikely(sighand == NULL)) { rcu_read_unlock(); - local_irq_restore(*flags); + local_irq_restore_nort(*flags); break; } @@ -1410,7 +1410,7 @@ struct sighand_struct *__lock_task_sighand(struct 
task_struct *tsk, } spin_unlock(&sighand->siglock); rcu_read_unlock(); - local_irq_restore(*flags); + local_irq_restore_nort(*flags); } return sighand; -- cgit v0.10.2 From 9a984747422e7b6cb8754aa1074de225c9e23907 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Jul 2011 21:05:33 +0200 Subject: net-wireless-warn-nort.patch Signed-off-by: Thomas Gleixner diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1e5bd0d..62fba17 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3298,7 +3298,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) struct ieee80211_supported_band *sband; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - WARN_ON_ONCE(softirq_count() == 0); + WARN_ON_ONCE_NONRT(softirq_count() == 0); if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) goto drop; -- cgit v0.10.2 From effe862239f73c781d8738a138ea54959e54e267 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Aug 2009 09:56:42 +0200 Subject: mm: Replace cgroup_page bit spinlock Bit spinlocks are not working on RT. Replace them. Signed-off-by: Thomas Gleixner diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 777a524..ca67e80 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -24,6 +24,9 @@ enum { */ struct page_cgroup { unsigned long flags; +#ifdef CONFIG_PREEMPT_RT_BASE + spinlock_t pcg_lock; +#endif struct mem_cgroup *mem_cgroup; }; @@ -74,12 +77,20 @@ static inline void lock_page_cgroup(struct page_cgroup *pc) * Don't take this lock in IRQ context. * This lock is for pc->mem_cgroup, USED, MIGRATION */ +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(PCG_LOCK, &pc->flags); +#else + spin_lock(&pc->pcg_lock); +#endif } static inline void unlock_page_cgroup(struct page_cgroup *pc) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(PCG_LOCK, &pc->flags); +#else + spin_unlock(&pc->pcg_lock); +#endif } #else /* CONFIG_MEMCG */ @@ -102,6 +113,10 @@ static inline void __init page_cgroup_init_flatmem(void) { } +static inline void page_cgroup_lock_init(struct page_cgroup *pc) +{ +} + #endif /* CONFIG_MEMCG */ #include diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 6d757e3a..98caeee 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -13,6 +13,14 @@ static unsigned long total_usage; +static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages) +{ +#ifdef CONFIG_PREEMPT_RT_BASE + for (; nr_pages; nr_pages--, pc++) + spin_lock_init(&pc->pcg_lock); +#endif +} + #if !defined(CONFIG_SPARSEMEM) @@ -60,6 +68,7 @@ static int __init alloc_node_page_cgroup(int nid) return -ENOMEM; NODE_DATA(nid)->node_page_cgroup = base; total_usage += table_size; + page_cgroup_lock_init(base, nr_pages); return 0; } @@ -150,6 +159,8 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) return -ENOMEM; } + page_cgroup_lock_init(base, PAGES_PER_SECTION); + /* * The passed "pfn" may not be aligned to SECTION. For the calculation * we need to apply a mask. -- cgit v0.10.2 From 0892e3f09ea81f571afe974aea6fa147d81db68e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 09:18:52 +0100 Subject: buffer_head: Replace bh_uptodate_lock for -rt Wrap the bit_spin_lock calls into a separate inline and add the RT replacements with a real spinlock. 
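As a minimal sketch of the approach, with illustrative names (my_bh, MY_LOCK_BIT) rather than the real buffer_head fields: the helper keeps the local_irq_save() + bit_spin_lock() pair on !PREEMPT_RT and substitutes a real spinlock on PREEMPT_RT, which is the same shape as the bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() helpers added in the diff below.

/* Illustrative sketch only; structure, field and macro names are made up. */
struct my_bh {
	unsigned long	state;			/* bit MY_LOCK_BIT acts as the lock on !RT */
#ifdef CONFIG_PREEMPT_RT_BASE
	spinlock_t	uptodate_lock;		/* real lock used on RT */
#endif
};

static inline unsigned long my_bh_lock_irqsave(struct my_bh *bh)
{
	unsigned long flags;

#ifndef CONFIG_PREEMPT_RT_BASE
	local_irq_save(flags);
	bit_spin_lock(MY_LOCK_BIT, &bh->state);
#else
	spin_lock_irqsave(&bh->uptodate_lock, flags);
#endif
	return flags;
}

static inline void my_bh_unlock_irqrestore(struct my_bh *bh, unsigned long flags)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(MY_LOCK_BIT, &bh->state);
	local_irq_restore(flags);
#else
	spin_unlock_irqrestore(&bh->uptodate_lock, flags);
#endif
}

Callers take and drop the lock only through the helpers, which is exactly what the fs/buffer.c and fs/ntfs/aops.c hunks below switch to.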
Signed-off-by: Thomas Gleixner diff --git a/fs/buffer.c b/fs/buffer.c index aeeea65..e0fa2a4 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -322,8 +322,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) * decide that the page is now completely done. */ first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -336,8 +335,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); /* * If none of the buffers had errors and they are all @@ -349,9 +347,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } /* @@ -385,8 +381,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_write(bh); unlock_buffer(bh); @@ -398,15 +393,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); end_page_writeback(page); return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } EXPORT_SYMBOL(end_buffer_async_write); @@ -3336,6 +3328,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); + buffer_head_init_locks(ret); preempt_disable(); __this_cpu_inc(bh_accounting.nr); recalc_bh_state(); diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index d267ea6..98ec102 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) "0x%llx.", (unsigned long long)bh->b_blocknr); } first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); /* * If none of the buffers had errors then we can set the page uptodate, * but we first have to perform the post read mst fixups, if the @@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) unlock_page(page); return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } /** diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index d77797a..04fc97f 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -75,8 +75,42 @@ struct buffer_head { struct address_space *b_assoc_map; /* mapping this buffer is 
associated with */ atomic_t b_count; /* users using this buffer_head */ +#ifdef CONFIG_PREEMPT_RT_BASE + spinlock_t b_uptodate_lock; +#endif }; +static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) +{ + unsigned long flags; + +#ifndef CONFIG_PREEMPT_RT_BASE + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); +#else + spin_lock_irqsave(&bh->b_uptodate_lock, flags); +#endif + return flags; +} + +static inline void +bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) +{ +#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); + local_irq_restore(flags); +#else + spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); +#endif +} + +static inline void buffer_head_init_locks(struct buffer_head *bh) +{ +#ifdef CONFIG_PREEMPT_RT_BASE + spin_lock_init(&bh->b_uptodate_lock); +#endif +} + /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. -- cgit v0.10.2 From 0cc20d5bd9f65ac37d52d5e3750bac7ad5ef8c4c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 10:11:25 +0100 Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe bit_spin_locks break under RT. Based on a previous patch from Steven Rostedt Signed-off-by: Thomas Gleixner -- include/linux/buffer_head.h | 10 ++++++++++ include/linux/jbd_common.h | 24 ++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 04fc97f..cac4973 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -77,6 +77,11 @@ struct buffer_head { atomic_t b_count; /* users using this buffer_head */ #ifdef CONFIG_PREEMPT_RT_BASE spinlock_t b_uptodate_lock; +#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ + defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) + spinlock_t b_state_lock; + spinlock_t b_journal_head_lock; +#endif #endif }; @@ -108,6 +113,11 @@ static inline void buffer_head_init_locks(struct buffer_head *bh) { #ifdef CONFIG_PREEMPT_RT_BASE spin_lock_init(&bh->b_uptodate_lock); +#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ + defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) + spin_lock_init(&bh->b_state_lock); + spin_lock_init(&bh->b_journal_head_lock); +#endif #endif } diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h index 3dc5343..a90a6f5 100644 --- a/include/linux/jbd_common.h +++ b/include/linux/jbd_common.h @@ -15,32 +15,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) static inline void jbd_lock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(BH_State, &bh->b_state); +#else + spin_lock(&bh->b_state_lock); +#endif } static inline int jbd_trylock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE return bit_spin_trylock(BH_State, &bh->b_state); +#else + return spin_trylock(&bh->b_state_lock); +#endif } static inline int jbd_is_locked_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE return bit_spin_is_locked(BH_State, &bh->b_state); +#else + return spin_is_locked(&bh->b_state_lock); +#endif } static inline void jbd_unlock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(BH_State, &bh->b_state); +#else + spin_unlock(&bh->b_state_lock); +#endif } static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(BH_JournalHead, &bh->b_state); +#else + spin_lock(&bh->b_journal_head_lock); 
+#endif } static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(BH_JournalHead, &bh->b_state); +#else + spin_unlock(&bh->b_journal_head_lock); +#endif } #endif -- cgit v0.10.2 From 6ef7f90dc81c969632e0cfedcce6d8f4aafad71f Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Fri, 21 Jun 2013 15:07:25 -0400 Subject: list_bl.h: make list head locking RT safe As per changes in include/linux/jbd_common.h for avoiding the bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal head lock rt safe") we do the same thing here. We use the non atomic __set_bit and __clear_bit inside the scope of the lock to preserve the ability of the existing LIST_DEBUG code to use the zero'th bit in the sanity checks. As a bit spinlock, we had no lockdep visibility into the usage of the list head locking. Now, if we were to implement it as a standard non-raw spinlock, we would see: BUG: sleeping function called from invalid context at kernel/rtmutex.c:658 in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd 5 locks held by udevd/122: #0: (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [] lock_rename+0xe8/0xf0 #1: (rename_lock){+.+...}, at: [] d_move+0x2c/0x60 #2: (&dentry->d_lock){+.+...}, at: [] dentry_lock_for_move+0xf3/0x130 #3: (&dentry->d_lock/2){+.+...}, at: [] dentry_lock_for_move+0xc4/0x130 #4: (&dentry->d_lock/3){+.+...}, at: [] dentry_lock_for_move+0xd7/0x130 Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7 Call Trace: [] __might_sleep+0x134/0x1f0 [] rt_spin_lock+0x24/0x60 [] __d_shrink+0x5c/0xa0 [] __d_drop+0x1d/0x40 [] __d_move+0x8e/0x320 [] d_move+0x3e/0x60 [] vfs_rename+0x198/0x4c0 [] sys_renameat+0x213/0x240 [] ? _raw_spin_unlock+0x35/0x60 [] ? do_page_fault+0x1ec/0x4b0 [] ? retint_swapgs+0xe/0x13 [] ? trace_hardirqs_on_thunk+0x3a/0x3f [] sys_rename+0x1b/0x20 [] system_call_fastpath+0x1a/0x1f Since we are only taking the lock during short lived list operations, lets assume for now that it being raw won't be a significant latency concern. 
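As a usage illustration under that assumption (hypothetical hash-bucket code, not part of this patch): the work done under hlist_bl_lock() is a couple of pointer updates, which is why keeping the underlying lock raw on RT is expected to stay cheap.

/* Hypothetical example: insert an entry into a bit-locked hash bucket. */
static void my_hash_insert(struct hlist_bl_head *bucket,
			   struct hlist_bl_node *node)
{
	hlist_bl_lock(bucket);		/* bit 0 on !RT; raw spinlock + __set_bit(0) on RT */
	hlist_bl_add_head(node, bucket);
	hlist_bl_unlock(bucket);
}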
Cc: stable-rt@vger.kernel.org Signed-off-by: Paul Gortmaker Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index 2eb8855..d8876a0 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -2,6 +2,7 @@ #define _LINUX_LIST_BL_H #include +#include #include /* @@ -32,13 +33,22 @@ struct hlist_bl_head { struct hlist_bl_node *first; +#ifdef CONFIG_PREEMPT_RT_BASE + raw_spinlock_t lock; +#endif }; struct hlist_bl_node { struct hlist_bl_node *next, **pprev; }; -#define INIT_HLIST_BL_HEAD(ptr) \ - ((ptr)->first = NULL) + +static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) +{ + h->first = NULL; +#ifdef CONFIG_PREEMPT_RT_BASE + raw_spin_lock_init(&h->lock); +#endif +} static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) { @@ -117,12 +127,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n) static inline void hlist_bl_lock(struct hlist_bl_head *b) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(0, (unsigned long *)b); +#else + raw_spin_lock(&b->lock); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __set_bit(0, (unsigned long *)b); +#endif +#endif } static inline void hlist_bl_unlock(struct hlist_bl_head *b) { +#ifndef CONFIG_PREEMPT_RT_BASE __bit_spin_unlock(0, (unsigned long *)b); +#else +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __clear_bit(0, (unsigned long *)b); +#endif + raw_spin_unlock(&b->lock); +#endif } static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) -- cgit v0.10.2 From d56461fc6865f3d9a51dca4721f0ef032c8cf569 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 18 Mar 2011 10:22:04 +0100 Subject: genirq: Disable DEBUG_SHIRQ for rt Signed-off-by: Thomas Gleixner diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 094f315..bde5dd2 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -597,7 +597,7 @@ endmenu # "Memory Debugging" config DEBUG_SHIRQ bool "Debug shared IRQ handlers" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !PREEMPT_RT_BASE help Enable this to generate a spurious interrupt as soon as a shared interrupt handler is registered, and just before one is deregistered. 
-- cgit v0.10.2 From d7fb8670b999a78682ba6c7074889203e0e8247d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:57 -0500 Subject: genirq: disable irqpoll on -rt Creates long latencies for no value Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 7b5f012..e5a309a 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -340,6 +340,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); static int __init irqfixup_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT_BASE + pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); + return 1; +#endif irqfixup = 1; printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); printk(KERN_WARNING "This may impact system performance.\n"); @@ -352,6 +356,10 @@ module_param(irqfixup, int, 0644); static int __init irqpoll_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT_BASE + pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); + return 1; +#endif irqfixup = 2; printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); -- cgit v0.10.2 From cd0e40c6f0608c957b7fff6b452c2550a0a7148a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 3 Apr 2011 11:57:29 +0200 Subject: genirq-force-threading.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index cd616f9..1403b95 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -314,9 +314,13 @@ static inline int disable_irq_wake(unsigned int irq) #ifdef CONFIG_IRQ_FORCED_THREADING +# ifndef CONFIG_PREEMPT_RT_BASE extern bool force_irqthreads; +# else +# define force_irqthreads (true) +# endif #else -#define force_irqthreads (0) +#define force_irqthreads (false) #endif #ifndef __ARCH_SET_SOFTIRQ_PENDING diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 4c84746..ca9688b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -22,6 +22,7 @@ #include "internals.h" #ifdef CONFIG_IRQ_FORCED_THREADING +# ifndef CONFIG_PREEMPT_RT_BASE __read_mostly bool force_irqthreads; static int __init setup_forced_irqthreads(char *arg) @@ -30,6 +31,7 @@ static int __init setup_forced_irqthreads(char *arg) return 0; } early_param("threadirqs", setup_forced_irqthreads); +# endif #endif /** -- cgit v0.10.2 From e560bacfa970cf282e62596bebaaf4d50a76714c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 21 Aug 2013 17:48:46 +0200 Subject: genirq: do not invoke the affinity callback via a workqueue Joe Korty reported, that __irq_set_affinity_locked() schedules a workqueue while holding a rawlock which results in a might_sleep() warning. This patch moves the invokation into a process context so that we only wakeup() a process while holding the lock. 
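The pattern the diff below implements can be sketched generically: instead of calling schedule_work() while a raw spinlock is held (which ends up in a might_sleep() path on RT), the atomic context only queues the item on a list and does a wake_up_process() on a helper kthread created earlier with kthread_run(). All names in this sketch (pending_item, helper_thread, queue_item) are illustrative, not the functions added by the patch.

/* Generic sketch of "defer to a dedicated kthread" (illustrative names). */
struct pending_item {
	struct list_head	list;	/* INIT_LIST_HEAD()ed at setup time */
	void			(*func)(struct pending_item *);
};

static LIST_HEAD(pending_list);
static DEFINE_RAW_SPINLOCK(pending_lock);
static struct task_struct *helper_task;	/* created once via kthread_run() */

static int helper_thread(void *unused)
{
	while (!kthread_should_stop()) {
		struct pending_item *item = NULL;

		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_lock_irq(&pending_lock);
		if (!list_empty(&pending_list)) {
			item = list_first_entry(&pending_list,
						struct pending_item, list);
			list_del_init(&item->list);
		}
		raw_spin_unlock_irq(&pending_lock);

		if (!item) {
			schedule();		/* sleep until queue_item() wakes us */
			continue;
		}
		__set_current_state(TASK_RUNNING);
		item->func(item);		/* runs in process context, may sleep */
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Safe to call with raw locks held and interrupts disabled; never sleeps. */
static void queue_item(struct pending_item *item)
{
	raw_spin_lock(&pending_lock);
	if (list_empty(&item->list))
		list_add_tail(&item->list, &pending_list);
	raw_spin_unlock(&pending_lock);
	wake_up_process(helper_task);
}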
Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 1403b95..9fadfa2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -220,6 +220,7 @@ struct irq_affinity_notify { unsigned int irq; struct kref kref; struct work_struct work; + struct list_head list; void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); void (*release)(struct kref *ref); }; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ca9688b..a8352ae 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -164,6 +164,62 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, return ret; } +#ifdef CONFIG_PREEMPT_RT_FULL +static void _irq_affinity_notify(struct irq_affinity_notify *notify); +static struct task_struct *set_affinity_helper; +static LIST_HEAD(affinity_list); +static DEFINE_RAW_SPINLOCK(affinity_list_lock); + +static int set_affinity_thread(void *unused) +{ + while (1) { + struct irq_affinity_notify *notify; + int empty; + + set_current_state(TASK_INTERRUPTIBLE); + + raw_spin_lock_irq(&affinity_list_lock); + empty = list_empty(&affinity_list); + raw_spin_unlock_irq(&affinity_list_lock); + + if (empty) + schedule(); + if (kthread_should_stop()) + break; + set_current_state(TASK_RUNNING); +try_next: + notify = NULL; + + raw_spin_lock_irq(&affinity_list_lock); + if (!list_empty(&affinity_list)) { + notify = list_first_entry(&affinity_list, + struct irq_affinity_notify, list); + list_del_init(¬ify->list); + } + raw_spin_unlock_irq(&affinity_list_lock); + + if (!notify) + continue; + _irq_affinity_notify(notify); + goto try_next; + } + return 0; +} + +static void init_helper_thread(void) +{ + if (set_affinity_helper) + return; + set_affinity_helper = kthread_run(set_affinity_thread, NULL, + "affinity-cb"); + WARN_ON(IS_ERR(set_affinity_helper)); +} +#else + +static inline void init_helper_thread(void) { } + +#endif + int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) { struct irq_chip *chip = irq_data_get_irq_chip(data); @@ -182,7 +238,17 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) if (desc->affinity_notify) { kref_get(&desc->affinity_notify->kref); + +#ifdef CONFIG_PREEMPT_RT_FULL + raw_spin_lock(&affinity_list_lock); + if (list_empty(&desc->affinity_notify->list)) + list_add_tail(&affinity_list, + &desc->affinity_notify->list); + raw_spin_unlock(&affinity_list_lock); + wake_up_process(set_affinity_helper); +#else schedule_work(&desc->affinity_notify->work); +#endif } irqd_set(data, IRQD_AFFINITY_SET); @@ -223,10 +289,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) } EXPORT_SYMBOL_GPL(irq_set_affinity_hint); -static void irq_affinity_notify(struct work_struct *work) +static void _irq_affinity_notify(struct irq_affinity_notify *notify) { - struct irq_affinity_notify *notify = - container_of(work, struct irq_affinity_notify, work); struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; unsigned long flags; @@ -248,6 +312,13 @@ out: kref_put(¬ify->kref, notify->release); } +static void irq_affinity_notify(struct work_struct *work) +{ + struct irq_affinity_notify *notify = + container_of(work, struct irq_affinity_notify, work); + _irq_affinity_notify(notify); +} + /** * irq_set_affinity_notifier - control notification of IRQ affinity changes * @irq: Interrupt for which to enable/disable notification @@ -277,6 +348,8 @@ irq_set_affinity_notifier(unsigned int irq, struct 
irq_affinity_notify *notify) notify->irq = irq; kref_init(¬ify->kref); INIT_WORK(¬ify->work, irq_affinity_notify); + INIT_LIST_HEAD(¬ify->list); + init_helper_thread(); } raw_spin_lock_irqsave(&desc->lock, flags); -- cgit v0.10.2 From 5001984143b8eab3715681063220aa6f57d9ebb0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 20 Jun 2009 11:36:54 +0200 Subject: drivers/net: fix livelock issues Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro optimization. The reason is that the softirq thread is rescheduling itself on that return value. Depending on priorities it starts to monoplize the CPU and livelock on UP systems. Remove it. Signed-off-by: Thomas Gleixner diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index a36a760..34ebd1b 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2206,11 +2206,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, } tpd_req = atl1c_cal_tpd_req(skb); - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { - if (netif_msg_pktdata(adapter)) - dev_info(&adapter->pdev->dev, "tx locked\n"); - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&adapter->tx_lock, flags); if (atl1c_tpd_avail(adapter, type) < tpd_req) { /* no enough descriptor, just stop queue */ diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 1966444..d398960 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1838,8 +1838,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } tpd_req = atl1e_cal_tdp_req(skb); - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) - return NETDEV_TX_LOCKED; + spin_lock_irqsave(&adapter->tx_lock, flags); if (atl1e_tpd_avail(adapter) < tpd_req) { /* no enough descriptor, just stop queue */ diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 8061fb0..bfe5c72 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1665,8 +1665,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, struct cmdQ *q = &sge->cmdQ[qid]; unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - if (!spin_trylock(&q->lock)) - return NETDEV_TX_LOCKED; + spin_lock(&q->lock); reclaim_completed_tx(sge, q); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 51b0094..809d92c 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -4089,12 +4089,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) [skb->priority & (MAX_TX_FIFOS - 1)]; fifo = &mac_control->fifos[queue]; - if (do_spin_lock) - spin_lock_irqsave(&fifo->tx_lock, flags); - else { - if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&fifo->tx_lock, flags); if (sp->config.multiq) { if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5a0f04c..e732812 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2148,10 +2148,8 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct pch_gbe_tx_ring *tx_ring = 
adapter->tx_ring; unsigned long flags; - if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { - /* Collision - tell upper layer to requeue */ - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&tx_ring->tx_lock, flags); + if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); spin_unlock_irqrestore(&tx_ring->tx_lock, flags); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 61a1540..7edf580 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, unsigned long flags; ENTER; - local_irq_save(flags); - if (!spin_trylock(&priv->tx_lock)) { - local_irq_restore(flags); - DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", - BDX_DRV_NAME, ndev->name); - return NETDEV_TX_LOCKED; - } + + spin_lock_irqsave(&priv->tx_lock, flags); /* build tx descriptor */ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 6d1f6ed..aa9800e 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) unsigned long flags; int add_num = 1; - local_irq_save(flags); - if (!spin_trylock(&rnet->tx_lock)) { - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&rnet->tx_lock, flags); if (is_multicast_ether_addr(eth->h_dest)) add_num = nets[rnet->mport->id].nact; -- cgit v0.10.2 From d764e9136348e1d5c909b05bbad9103d21c5afdc Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 3 Jul 2009 08:30:00 -0500 Subject: drivers/net: vortex fix locking issues Argh, cut and paste wasn't enough... Use this patch instead. It needs an irq disable. But, believe it or not, on SMP this is actually better. If the irq is shared (as it is in Mark's case), we don't stop the irq of other devices from being handled on another CPU (unfortunately for Mark, he pinned all interrupts to one CPU). Signed-off-by: Steven Rostedt Signed-off-by: Thomas Gleixner drivers/net/ethernet/3com/3c59x.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) Signed-off-by: Ingo Molnar diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index ad5272b..55dd320 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #endif @@ -1917,12 +1917,12 @@ static void vortex_tx_timeout(struct net_device *dev) * Block interrupts because vortex_interrupt does a bare spin_lock() */ unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); if (vp->full_bus_master_tx) boomerang_interrupt(dev->irq, dev); else vortex_interrupt(dev->irq, dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } } -- cgit v0.10.2 From f1925bb8cdcc338b34714f4da94a9c2a7034d109 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Apr 2010 20:20:57 +0200 Subject: drivers: net: gianfar: Make RT aware The adjust_link() disables interrupts before taking the queue locks. 
On RT those locks are converted to "sleeping" locks and therefor the local_irq_save/restore must be converted to local_irq_save/restore_nort. Reported-by: Xianghua Xiao Signed-off-by: Thomas Gleixner Tested-by: Xianghua Xiao diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 9fbe4dd..de10ff3 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1701,7 +1701,7 @@ void stop_gfar(struct net_device *dev) /* Lock it down */ - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); lock_rx_qs(priv); @@ -1709,7 +1709,7 @@ void stop_gfar(struct net_device *dev) unlock_rx_qs(priv); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); /* Free the IRQs */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { @@ -3101,7 +3101,7 @@ static void adjust_link(struct net_device *dev) struct phy_device *phydev = priv->phydev; int new_state = 0; - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); if (phydev->link) { @@ -3175,7 +3175,7 @@ static void adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); } /* Update the hash table based on the current list of multicast -- cgit v0.10.2 From 1e30b92814b215db1eb5ef6575b2844b50a00e66 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 25 Mar 2014 18:34:20 +0100 Subject: net: gianfar: do not disable interrupts each per-queue lock is taken with spin_lock_irqsave() except in the case where all of them are taken for some kind of serialisation. As an optimisation local_irq_save() is used so that lock_tx_qs() and lock_rx_qs() can use just the spin_lock() variant instead. On RT local_irq_save() behaves differently so we use the nort() variant. Lockdep screems easily by "ethtool -K eth0 rx off tx off" What remains is missing lockdep annotation that makes lockdep think lock_tx_qs() may cause a dead lock. Cc: stable-rt@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index de10ff3..8f1afda 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1301,7 +1301,7 @@ static int gfar_suspend(struct device *dev) if (netif_running(ndev)) { - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); lock_rx_qs(priv); @@ -1319,7 +1319,7 @@ static int gfar_suspend(struct device *dev) unlock_rx_qs(priv); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); disable_napi(priv); @@ -1361,7 +1361,7 @@ static int gfar_resume(struct device *dev) /* Disable Magic Packet mode, in case something * else woke us up. 
*/ - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); lock_rx_qs(priv); @@ -1373,7 +1373,7 @@ static int gfar_resume(struct device *dev) unlock_rx_qs(priv); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); netif_device_attach(ndev); @@ -2387,7 +2387,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) u32 tempval; regs = priv->gfargrp[0].regs; - local_irq_save(flags); + local_irq_save_nort(flags); lock_rx_qs(priv); if (features & NETIF_F_HW_VLAN_CTAG_TX) { @@ -2420,7 +2420,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) gfar_change_mtu(dev, dev->mtu); unlock_rx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); } static int gfar_change_mtu(struct net_device *dev, int new_mtu) @@ -3381,14 +3381,14 @@ static irqreturn_t gfar_error(int irq, void *grp_id) dev->stats.tx_dropped++; atomic64_inc(&priv->extra_stats.tx_underrun); - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); /* Reactivate the Tx Queues */ gfar_write(®s->tstat, gfargrp->tstat); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); } netif_dbg(priv, tx_err, dev, "Transmit Error\n"); } diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index d3d7ede..95a1f62 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -501,7 +501,7 @@ static int gfar_sringparam(struct net_device *dev, /* Halt TX and RX, and process the frames which * have already been received */ - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); lock_rx_qs(priv); @@ -509,7 +509,7 @@ static int gfar_sringparam(struct net_device *dev, unlock_rx_qs(priv); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); for (i = 0; i < priv->num_rx_queues; i++) gfar_clean_rx_ring(priv->rx_queue[i], @@ -624,7 +624,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features) /* Halt TX and RX, and process the frames which * have already been received */ - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); lock_rx_qs(priv); @@ -632,7 +632,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features) unlock_tx_qs(priv); unlock_rx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); for (i = 0; i < priv->num_rx_queues; i++) gfar_clean_rx_ring(priv->rx_queue[i], diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c index acb55af..f0160e67 100644 --- a/drivers/net/ethernet/freescale/gianfar_sysfs.c +++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c @@ -68,7 +68,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev, return count; - local_irq_save(flags); + local_irq_save_nort(flags); lock_rx_qs(priv); /* Set the new stashing value */ @@ -84,7 +84,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev, gfar_write(®s->attr, temp); unlock_rx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); return count; } @@ -112,7 +112,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev, if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) return count; - local_irq_save(flags); + local_irq_save_nort(flags); lock_rx_qs(priv); if (length > priv->rx_buffer_size) @@ -140,7 +140,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev, out: unlock_rx_qs(priv); - local_irq_restore(flags); + 
local_irq_restore_nort(flags); return count; } @@ -171,7 +171,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev, if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) return count; - local_irq_save(flags); + local_irq_save_nort(flags); lock_rx_qs(priv); if (index > priv->rx_stash_size) @@ -189,7 +189,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev, out: unlock_rx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); return count; } @@ -219,7 +219,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev, if (length > GFAR_MAX_FIFO_THRESHOLD) return count; - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); priv->fifo_threshold = length; @@ -230,7 +230,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev, gfar_write(®s->fifo_tx_thr, temp); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); return count; } @@ -259,7 +259,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev, if (num > GFAR_MAX_FIFO_STARVE) return count; - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); priv->fifo_starve = num; @@ -270,7 +270,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev, gfar_write(®s->fifo_tx_starve, temp); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); return count; } @@ -300,7 +300,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev, if (num > GFAR_MAX_FIFO_STARVE_OFF) return count; - local_irq_save(flags); + local_irq_save_nort(flags); lock_tx_qs(priv); priv->fifo_starve_off = num; @@ -311,7 +311,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev, gfar_write(®s->fifo_tx_starve_shutoff, temp); unlock_tx_qs(priv); - local_irq_restore(flags); + local_irq_restore_nort(flags); return count; } -- cgit v0.10.2 From 6a683415da2b82f8290dad067ebb649b19de77b3 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 27 Mar 2014 14:09:02 +0100 Subject: net: gianfar: do not try to cleanup TX packets if they are not done What I observe is that the TX queue is not empty and does not make any progress. gfar_clean_tx_ring() does not clean up the packet because it is not completed yet. The root cause is that the DMA engine did not start yet (it was preempted before doing so) and that dumb loop, loops until that packet is gone. This is broken since c233cf4 ("gianfar: Fix tx napi polling"). What remains are spurious interrupts if CPU0 cleans up TX packages and CPU1 returns with IRQ_NONE. 
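A rough sketch of the resulting polling shape, assuming simplified made-up helpers (my_clean_tx_ring(), my_clean_rx_ring()) rather than the gianfar functions: TX cleanup frees only the descriptors the hardware has actually completed and reports how much it did, and the poll loop accounts that progress instead of spinning until a particular packet is gone.

/* Sketch only: TX cleanup reports progress instead of busy-waiting. */
static int my_clean_tx_ring(struct my_tx_queue *txq)
{
	int howmany = 0;

	/* Stop at the first descriptor still owned by the DMA engine. */
	while (tx_descriptor_done(txq)) {
		free_one_tx_descriptor(txq);
		howmany++;
	}
	return howmany;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = 0;

	if (my_clean_tx_ring(priv->tx_queue))
		work_done++;			/* TX made progress */

	work_done += my_clean_rx_ring(priv->rx_queue, budget - work_done);

	if (work_done < budget)
		napi_complete(napi);
	return work_done;
}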
Cc: stable-rt@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 8f1afda..091945c 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -134,7 +134,6 @@ static int gfar_poll_sq(struct napi_struct *napi, int budget); static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); -static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int amount_pull, struct napi_struct *napi); void gfar_halt(struct net_device *dev); @@ -2516,7 +2515,7 @@ static void gfar_align_skb(struct sk_buff *skb) } /* Interrupt Handler for Transmit complete */ -static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) +static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { struct net_device *dev = tx_queue->dev; struct netdev_queue *txq; @@ -2939,10 +2938,14 @@ static int gfar_poll(struct napi_struct *napi, int budget) tx_queue = priv->tx_queue[i]; /* run Tx cleanup to completion */ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { - gfar_clean_tx_ring(tx_queue); - has_tx_work = 1; + int ret; + + ret = gfar_clean_tx_ring(tx_queue); + if (ret) + has_tx_work++; } } + work_done += has_tx_work; for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { /* skip queue if not active */ -- cgit v0.10.2 From 5d9c00b689537c5e9c5e3d02eb19f7f2baf8ec86 Mon Sep 17 00:00:00 2001 From: Wu Zhangjin Date: Mon, 4 Jan 2010 11:33:02 +0800 Subject: USB: Fix the mouse problem when copying large amounts of data When copying large amounts of data between the USB storage devices and the hard disk, the USB mouse will not work, this patch fixes it. 
[NOTE: This problem have been found in the Loongson family machines, not sure whether it is producible on other platforms] Signed-off-by: Hu Hongbing Signed-off-by: Wu Zhangjin diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 604cad1..b691278 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -874,9 +874,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) } if (ints & OHCI_INTR_WDH) { - spin_lock (&ohci->lock); - dl_done_list (ohci); - spin_unlock (&ohci->lock); + if (ohci->hcca->done_head == 0) { + ints &= ~OHCI_INTR_WDH; + } else { + spin_lock (&ohci->lock); + dl_done_list (ohci); + spin_unlock (&ohci->lock); + } } if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) { -- cgit v0.10.2 From 8f27cf73e09803e638260c70ac0ec339c15331b3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 24 Jun 2011 18:40:37 +0200 Subject: local-var.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/percpu.h b/include/linux/percpu.h index c74088a..d273604 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -48,6 +48,11 @@ preempt_enable(); \ } while (0) +#define get_local_var(var) get_cpu_var(var) +#define put_local_var(var) put_cpu_var(var) +#define get_local_ptr(var) get_cpu_ptr(var) +#define put_local_ptr(var) put_cpu_ptr(var) + /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) -- cgit v0.10.2 From 6ef791606fc41b1871f99fa245d8e1f02a69556c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 20 Jun 2011 09:03:47 +0200 Subject: rt-local-irq-lock.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/locallock.h b/include/linux/locallock.h new file mode 100644 index 0000000..e7bd8be --- /dev/null +++ b/include/linux/locallock.h @@ -0,0 +1,254 @@ +#ifndef _LINUX_LOCALLOCK_H +#define _LINUX_LOCALLOCK_H + +#include +#include + +#ifdef CONFIG_PREEMPT_RT_BASE + +#ifdef CONFIG_DEBUG_SPINLOCK +# define LL_WARN(cond) WARN_ON(cond) +#else +# define LL_WARN(cond) do { } while (0) +#endif + +/* + * per cpu lock based substitute for local_irq_*() + */ +struct local_irq_lock { + spinlock_t lock; + struct task_struct *owner; + int nestcnt; + unsigned long flags; +}; + +#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ + DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ + .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } + +#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ + DECLARE_PER_CPU(struct local_irq_lock, lvar) + +#define local_irq_lock_init(lvar) \ + do { \ + int __cpu; \ + for_each_possible_cpu(__cpu) \ + spin_lock_init(&per_cpu(lvar, __cpu).lock); \ + } while (0) + +static inline void __local_lock(struct local_irq_lock *lv) +{ + if (lv->owner != current) { + spin_lock(&lv->lock); + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + } + lv->nestcnt++; +} + +#define local_lock(lvar) \ + do { __local_lock(&get_local_var(lvar)); } while (0) + +static inline int __local_trylock(struct local_irq_lock *lv) +{ + if (lv->owner != current && spin_trylock(&lv->lock)) { + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + lv->nestcnt = 1; + return 1; + } + return 0; +} + +#define local_trylock(lvar) \ + ({ \ + int __locked; \ + __locked = __local_trylock(&get_local_var(lvar)); \ + if (!__locked) \ + put_local_var(lvar); \ + __locked; \ + }) + +static inline void __local_unlock(struct local_irq_lock *lv) +{ + LL_WARN(lv->nestcnt == 0); + LL_WARN(lv->owner != current); + if (--lv->nestcnt) + return; + + lv->owner = NULL; + spin_unlock(&lv->lock); +} + 
+#define local_unlock(lvar) \ + do { \ + __local_unlock(&__get_cpu_var(lvar)); \ + put_local_var(lvar); \ + } while (0) + +static inline void __local_lock_irq(struct local_irq_lock *lv) +{ + spin_lock_irqsave(&lv->lock, lv->flags); + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + lv->nestcnt = 1; +} + +#define local_lock_irq(lvar) \ + do { __local_lock_irq(&get_local_var(lvar)); } while (0) + +#define local_lock_irq_on(lvar, cpu) \ + do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) + +static inline void __local_unlock_irq(struct local_irq_lock *lv) +{ + LL_WARN(!lv->nestcnt); + LL_WARN(lv->owner != current); + lv->owner = NULL; + lv->nestcnt = 0; + spin_unlock_irq(&lv->lock); +} + +#define local_unlock_irq(lvar) \ + do { \ + __local_unlock_irq(&__get_cpu_var(lvar)); \ + put_local_var(lvar); \ + } while (0) + +#define local_unlock_irq_on(lvar, cpu) \ + do { \ + __local_unlock_irq(&per_cpu(lvar, cpu)); \ + } while (0) + +static inline int __local_lock_irqsave(struct local_irq_lock *lv) +{ + if (lv->owner != current) { + __local_lock_irq(lv); + return 0; + } else { + lv->nestcnt++; + return 1; + } +} + +#define local_lock_irqsave(lvar, _flags) \ + do { \ + if (__local_lock_irqsave(&get_local_var(lvar))) \ + put_local_var(lvar); \ + _flags = __get_cpu_var(lvar).flags; \ + } while (0) + +#define local_lock_irqsave_on(lvar, _flags, cpu) \ + do { \ + __local_lock_irqsave(&per_cpu(lvar, cpu)); \ + _flags = per_cpu(lvar, cpu).flags; \ + } while (0) + +static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, + unsigned long flags) +{ + LL_WARN(!lv->nestcnt); + LL_WARN(lv->owner != current); + if (--lv->nestcnt) + return 0; + + lv->owner = NULL; + spin_unlock_irqrestore(&lv->lock, lv->flags); + return 1; +} + +#define local_unlock_irqrestore(lvar, flags) \ + do { \ + if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \ + put_local_var(lvar); \ + } while (0) + +#define local_unlock_irqrestore_on(lvar, flags, cpu) \ + do { \ + __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ + } while (0) + +#define local_spin_trylock_irq(lvar, lock) \ + ({ \ + int __locked; \ + local_lock_irq(lvar); \ + __locked = spin_trylock(lock); \ + if (!__locked) \ + local_unlock_irq(lvar); \ + __locked; \ + }) + +#define local_spin_lock_irq(lvar, lock) \ + do { \ + local_lock_irq(lvar); \ + spin_lock(lock); \ + } while (0) + +#define local_spin_unlock_irq(lvar, lock) \ + do { \ + spin_unlock(lock); \ + local_unlock_irq(lvar); \ + } while (0) + +#define local_spin_lock_irqsave(lvar, lock, flags) \ + do { \ + local_lock_irqsave(lvar, flags); \ + spin_lock(lock); \ + } while (0) + +#define local_spin_unlock_irqrestore(lvar, lock, flags) \ + do { \ + spin_unlock(lock); \ + local_unlock_irqrestore(lvar, flags); \ + } while (0) + +#define get_locked_var(lvar, var) \ + (*({ \ + local_lock(lvar); \ + &__get_cpu_var(var); \ + })) + +#define put_locked_var(lvar, var) local_unlock(lvar) + +#define local_lock_cpu(lvar) \ + ({ \ + local_lock(lvar); \ + smp_processor_id(); \ + }) + +#define local_unlock_cpu(lvar) local_unlock(lvar) + +#else /* PREEMPT_RT_BASE */ + +#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar +#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar + +static inline void local_irq_lock_init(int lvar) { } + +#define local_lock(lvar) preempt_disable() +#define local_unlock(lvar) preempt_enable() +#define local_lock_irq(lvar) local_irq_disable() +#define local_unlock_irq(lvar) local_irq_enable() +#define local_lock_irqsave(lvar, flags) 
local_irq_save(flags) +#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) + +#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) +#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) +#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) +#define local_spin_lock_irqsave(lvar, lock, flags) \ + spin_lock_irqsave(lock, flags) +#define local_spin_unlock_irqrestore(lvar, lock, flags) \ + spin_unlock_irqrestore(lock, flags) + +#define get_locked_var(lvar, var) get_cpu_var(var) +#define put_locked_var(lvar, var) put_cpu_var(var) + +#define local_lock_cpu(lvar) get_cpu() +#define local_unlock_cpu(lvar) put_cpu() + +#endif + +#endif -- cgit v0.10.2 From 73b79a694e6e9658207e68f9ddea57101e97ca4e Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Fri, 17 Jan 2014 20:41:58 +0100 Subject: use local spin_locks in local_lock Drop recursive call to migrate_disabel/enable for local_*lock* api reported by Steven Rostedt. local_lock will call migrate_disable via get_local_var - call tree is get_locked_var `-> local_lock(lvar) `-> __local_lock(&get_local_var(lvar)); `--> # define get_local_var(var) (*({ migrate_disable(); &__get_cpu_var(var); })) \ thus there should be no need to call migrate_disable/enable recursively in spin_try/lock/unlock. This patch addes a spin_trylock_local and replaces the migration disabling calls by the local calls. This patch is incomplete as it does not yet cover the _irq/_irqsave variants by local locks. This patch requires the API cleanup in kernel/softirq.c or it would break softirq_lock/unlock with respect to migration. Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/locallock.h b/include/linux/locallock.h index e7bd8be..eb338ce 100644 --- a/include/linux/locallock.h +++ b/include/linux/locallock.h @@ -36,10 +36,20 @@ struct local_irq_lock { spin_lock_init(&per_cpu(lvar, __cpu).lock); \ } while (0) +/* + * spin_lock|trylock|unlock_local flavour that does not migrate disable + * used for __local_lock|trylock|unlock where get_local_var/put_local_var + * already takes care of the migrate_disable/enable + * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. 
+ */ +# define spin_lock_local(lock) spin_lock(lock) +# define spin_trylock_local(lock) spin_trylock(lock) +# define spin_unlock_local(lock) spin_unlock(lock) + static inline void __local_lock(struct local_irq_lock *lv) { if (lv->owner != current) { - spin_lock(&lv->lock); + spin_lock_local(&lv->lock); LL_WARN(lv->owner); LL_WARN(lv->nestcnt); lv->owner = current; @@ -52,7 +62,7 @@ static inline void __local_lock(struct local_irq_lock *lv) static inline int __local_trylock(struct local_irq_lock *lv) { - if (lv->owner != current && spin_trylock(&lv->lock)) { + if (lv->owner != current && spin_trylock_local(&lv->lock)) { LL_WARN(lv->owner); LL_WARN(lv->nestcnt); lv->owner = current; @@ -79,7 +89,7 @@ static inline void __local_unlock(struct local_irq_lock *lv) return; lv->owner = NULL; - spin_unlock(&lv->lock); + spin_unlock_local(&lv->lock); } #define local_unlock(lvar) \ @@ -211,7 +221,7 @@ static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, &__get_cpu_var(var); \ })) -#define put_locked_var(lvar, var) local_unlock(lvar) +#define put_locked_var(lvar, var) local_unlock(lvar); #define local_lock_cpu(lvar) \ ({ \ -- cgit v0.10.2 From 360e2fa4cd8b6e594b773508fc3fc1b03d8a93cd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 17 Jun 2011 15:42:38 +0200 Subject: cpu-rt-variants.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/smp.h b/include/linux/smp.h index 731f523..f30c7b1 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -188,6 +188,14 @@ static inline void __smp_call_function_single(int cpuid, #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() +#ifndef CONFIG_PREEMPT_RT_FULL +# define get_cpu_light() get_cpu() +# define put_cpu_light() put_cpu() +#else +# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) +# define put_cpu_light() migrate_enable() +#endif + /* * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: -- cgit v0.10.2 From 918fce9adf90ea607f6eef32753bcbc0392779f2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:37 -0500 Subject: mm: page_alloc: rt-friendly per-cpu pages rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method into a preemptible, explicit-per-cpu-locks method. 
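A minimal usage sketch of the locallock API introduced above, assuming a hypothetical per-CPU counter rather than the page-allocator state: on !PREEMPT_RT local_lock_irqsave() compiles down to local_irq_save(), while on PREEMPT_RT it takes this CPU's spinlock with migration disabled, so the section stays preemptible yet still serialised per CPU. The pa_lock conversion in mm/page_alloc.c below applies exactly this to the per-cpu pagesets.

/* Hypothetical per-CPU statistic protected by a local lock. */
static DEFINE_LOCAL_IRQ_LOCK(my_lock);
static DEFINE_PER_CPU(unsigned long, my_counter);

static void my_account(unsigned long delta)
{
	unsigned long flags;

	/* !RT: irqs off; RT: per-CPU spinlock, task stays preemptible. */
	local_lock_irqsave(my_lock, flags);
	__get_cpu_var(my_counter) += delta;
	local_unlock_irqrestore(my_lock, flags);
}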
Contains fixes from: Peter Zijlstra Thomas Gleixner Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2bcb648..1abc312 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -61,6 +61,7 @@ #include #include #include +#include #include #include @@ -230,6 +231,18 @@ EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif +static DEFINE_LOCAL_IRQ_LOCK(pa_lock); + +#ifdef CONFIG_PREEMPT_RT_BASE +# define cpu_lock_irqsave(cpu, flags) \ + spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) +# define cpu_unlock_irqrestore(cpu, flags) \ + spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) +#else +# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) +# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) +#endif + int page_group_by_mobility_disabled __read_mostly; void set_pageblock_migratetype(struct page *page, int migratetype) @@ -742,12 +755,12 @@ static void __free_pages_ok(struct page *page, unsigned int order) if (!free_pages_prepare(page, order)) return; - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); __count_vm_events(PGFREE, 1 << order); migratetype = get_pageblock_migratetype(page); set_freepage_migratetype(page, migratetype); free_one_page(page_zone(page), page, order, migratetype); - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } void __init __free_pages_bootmem(struct page *page, unsigned int order) @@ -1205,7 +1218,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) int to_drain; unsigned long batch; - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); batch = ACCESS_ONCE(pcp->batch); if (pcp->count >= batch) to_drain = batch; @@ -1215,7 +1228,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) free_pcppages_bulk(zone, to_drain, pcp); pcp->count -= to_drain; } - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } static bool gfp_thisnode_allocation(gfp_t gfp_mask) { @@ -1244,7 +1257,7 @@ static void drain_pages(unsigned int cpu) struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; - local_irq_save(flags); + cpu_lock_irqsave(cpu, flags); pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; @@ -1252,7 +1265,7 @@ static void drain_pages(unsigned int cpu) free_pcppages_bulk(zone, pcp->count, pcp); pcp->count = 0; } - local_irq_restore(flags); + cpu_unlock_irqrestore(cpu, flags); } } @@ -1305,7 +1318,12 @@ void drain_all_pages(void) else cpumask_clear_cpu(cpu, &cpus_with_pcps); } +#ifndef CONFIG_PREEMPT_RT_BASE on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1); +#else + for_each_cpu(cpu, &cpus_with_pcps) + drain_pages(cpu); +#endif } #ifdef CONFIG_HIBERNATION @@ -1360,7 +1378,7 @@ void free_hot_cold_page(struct page *page, int cold) migratetype = get_pageblock_migratetype(page); set_freepage_migratetype(page, migratetype); - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); __count_vm_event(PGFREE); /* @@ -1391,7 +1409,7 @@ void free_hot_cold_page(struct page *page, int cold) } out: - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } /* @@ -1521,7 +1539,7 @@ again: struct per_cpu_pages *pcp; struct list_head *list; - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; if (list_empty(list)) { @@ -1553,13 +1571,15 @@ again: */ WARN_ON_ONCE(order > 1); } - spin_lock_irqsave(&zone->lock, flags); + local_spin_lock_irqsave(pa_lock, &zone->lock, flags); 
page = __rmqueue(zone, order, migratetype); - spin_unlock(&zone->lock); - if (!page) + if (!page) { + spin_unlock(&zone->lock); goto failed; + } __mod_zone_freepage_state(zone, -(1 << order), get_pageblock_migratetype(page)); + spin_unlock(&zone->lock); } /* @@ -1571,7 +1591,7 @@ again: __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); VM_BUG_ON(bad_range(zone, page)); if (prep_new_page(page, order, gfp_flags)) @@ -1579,7 +1599,7 @@ again: return page; failed: - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); return NULL; } @@ -5461,6 +5481,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self, void __init page_alloc_init(void) { hotcpu_notifier(page_alloc_cpu_notify, 0); + local_irq_lock_init(pa_lock); } /* @@ -6324,7 +6345,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); @@ -6333,7 +6354,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } #ifdef CONFIG_MEMORY_HOTREMOVE -- cgit v0.10.2 From e932235009885c9cfddfd5fb885cc21b1cf0cf47 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 3 Jul 2009 08:44:37 -0500 Subject: mm: page_alloc reduce lock sections further Split out the pages which are to be freed into a separate list and call free_pages_bulk() outside of the percpu page allocator locks. Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1abc312..c56085c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -646,7 +646,7 @@ static inline int free_pages_check(struct page *page) } /* - * Frees a number of pages from the PCP lists + * Frees a number of pages which have been collected from the pcp lists. * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. * @@ -657,15 +657,49 @@ static inline int free_pages_check(struct page *page) * pinned" detection logic. */ static void free_pcppages_bulk(struct zone *zone, int count, - struct per_cpu_pages *pcp) + struct list_head *list) { - int migratetype = 0; - int batch_free = 0; int to_free = count; + unsigned long flags; - spin_lock(&zone->lock); + spin_lock_irqsave(&zone->lock, flags); zone->pages_scanned = 0; + while (!list_empty(list)) { + struct page *page = list_first_entry(list, struct page, lru); + int mt; /* migratetype of the to-be-freed page */ + + /* must delete as __free_one_page list manipulates */ + list_del(&page->lru); + + mt = get_freepage_migratetype(page); + /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ + __free_one_page(page, zone, 0, mt); + trace_mm_page_pcpu_drain(page, 0, mt); + if (likely(!is_migrate_isolate_page(page))) { + __mod_zone_page_state(zone, NR_FREE_PAGES, 1); + if (is_migrate_cma(mt)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); + } + + to_free--; + } + WARN_ON(to_free != 0); + spin_unlock_irqrestore(&zone->lock, flags); +} + +/* + * Moves a number of pages from the PCP lists to free list which + * is freed outside of the locked region. + * + * Assumes all pages on list are in same zone, and of same order. + * count is the number of pages to free. 
+ */ +static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, + struct list_head *dst) +{ + int migratetype = 0, batch_free = 0; + while (to_free) { struct page *page; struct list_head *list; @@ -681,7 +715,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, batch_free++; if (++migratetype == MIGRATE_PCPTYPES) migratetype = 0; - list = &pcp->lists[migratetype]; + list = &src->lists[migratetype]; } while (list_empty(list)); /* This is the only non-empty list. Free them all. */ @@ -689,35 +723,25 @@ static void free_pcppages_bulk(struct zone *zone, int count, batch_free = to_free; do { - int mt; /* migratetype of the to-be-freed page */ - page = list_last_entry(list, struct page, lru); - /* must delete as __free_one_page list manipulates */ list_del(&page->lru); - mt = get_freepage_migratetype(page); - /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ - __free_one_page(page, zone, 0, mt); - trace_mm_page_pcpu_drain(page, 0, mt); - if (likely(!is_migrate_isolate_page(page))) { - __mod_zone_page_state(zone, NR_FREE_PAGES, 1); - if (is_migrate_cma(mt)) - __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); - } + list_add(&page->lru, dst); } while (--to_free && --batch_free && !list_empty(list)); } - spin_unlock(&zone->lock); } static void free_one_page(struct zone *zone, struct page *page, int order, int migratetype) { - spin_lock(&zone->lock); + unsigned long flags; + + spin_lock_irqsave(&zone->lock, flags); zone->pages_scanned = 0; __free_one_page(page, zone, order, migratetype); if (unlikely(!is_migrate_isolate(migratetype))) __mod_zone_freepage_state(zone, 1 << order, migratetype); - spin_unlock(&zone->lock); + spin_unlock_irqrestore(&zone->lock, flags); } static bool free_pages_prepare(struct page *page, unsigned int order) @@ -1215,6 +1239,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { unsigned long flags; + LIST_HEAD(dst); int to_drain; unsigned long batch; @@ -1225,10 +1250,11 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) else to_drain = pcp->count; if (to_drain > 0) { - free_pcppages_bulk(zone, to_drain, pcp); + isolate_pcp_pages(to_drain, pcp, &dst); pcp->count -= to_drain; } local_unlock_irqrestore(pa_lock, flags); + free_pcppages_bulk(zone, to_drain, &dst); } static bool gfp_thisnode_allocation(gfp_t gfp_mask) { @@ -1256,16 +1282,21 @@ static void drain_pages(unsigned int cpu) for_each_populated_zone(zone) { struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; + LIST_HEAD(dst); + int count; cpu_lock_irqsave(cpu, flags); pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; - if (pcp->count) { - free_pcppages_bulk(zone, pcp->count, pcp); + count = pcp->count; + if (count) { + isolate_pcp_pages(count, pcp, &dst); pcp->count = 0; } cpu_unlock_irqrestore(cpu, flags); + if (count) + free_pcppages_bulk(zone, count, &dst); } } @@ -1404,8 +1435,13 @@ void free_hot_cold_page(struct page *page, int cold) pcp->count++; if (pcp->count >= pcp->high) { unsigned long batch = ACCESS_ONCE(pcp->batch); - free_pcppages_bulk(zone, batch, pcp); + LIST_HEAD(dst); + + isolate_pcp_pages(batch, pcp, &dst); pcp->count -= batch; + local_unlock_irqrestore(pa_lock, flags); + free_pcppages_bulk(zone, batch, &dst); + return; } out: -- cgit v0.10.2 From f05840a218b39f15af19fc127a594eff8bc1a581 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Jul 2011 16:47:49 +0200 Subject: mm-page-alloc-fix.patch Signed-off-by: Thomas Gleixner diff --git 
a/mm/page_alloc.c b/mm/page_alloc.c index c56085c..f8db53c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2314,8 +2314,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct page *page; /* Page migration frees to the PCP lists but we want merging */ - drain_pages(get_cpu()); - put_cpu(); + drain_pages(get_cpu_light()); + put_cpu_light(); page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, -- cgit v0.10.2 From a89d3c3465267476f483daaf1764394fcf6a8753 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:51 -0500 Subject: mm: convert swap to percpu locked Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/mm/swap.c b/mm/swap.c index 0c8f7a4..b55ee9c 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "internal.h" @@ -45,6 +46,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_pvec); static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); +static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); + /* * This path almost never happens for VM activity - pages are normally * freed via pagevecs. But it gets used by networking. @@ -408,11 +412,11 @@ void rotate_reclaimable_page(struct page *page) unsigned long flags; page_cache_get(page); - local_irq_save(flags); + local_lock_irqsave(rotate_lock, flags); pvec = &__get_cpu_var(lru_rotate_pvecs); if (!pagevec_add(pvec, page)) pagevec_move_tail(pvec); - local_irq_restore(flags); + local_unlock_irqrestore(rotate_lock, flags); } } @@ -463,12 +467,13 @@ static bool need_activate_page_drain(int cpu) void activate_page(struct page *page) { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); + struct pagevec *pvec = &get_locked_var(swapvec_lock, + activate_page_pvecs); page_cache_get(page); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, __activate_page, NULL); - put_cpu_var(activate_page_pvecs); + put_locked_var(swapvec_lock, activate_page_pvecs); } } @@ -494,7 +499,7 @@ void activate_page(struct page *page) static void __lru_cache_activate_page(struct page *page) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); int i; /* @@ -516,7 +521,7 @@ static void __lru_cache_activate_page(struct page *page) } } - put_cpu_var(lru_add_pvec); + put_locked_var(swapvec_lock, lru_add_pvec); } /* @@ -556,13 +561,13 @@ EXPORT_SYMBOL(mark_page_accessed); */ void __lru_cache_add(struct page *page) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); page_cache_get(page); if (!pagevec_space(pvec)) __pagevec_lru_add(pvec); pagevec_add(pvec, page); - put_cpu_var(lru_add_pvec); + put_locked_var(swapvec_lock, lru_add_pvec); } EXPORT_SYMBOL(__lru_cache_add); @@ -685,9 +690,9 @@ void lru_add_drain_cpu(int cpu) unsigned long flags; /* No harm done if a racing interrupt already did this */ - local_irq_save(flags); + local_lock_irqsave(rotate_lock, flags); pagevec_move_tail(pvec); - local_irq_restore(flags); + local_unlock_irqrestore(rotate_lock, flags); } pvec = &per_cpu(lru_deactivate_pvecs, cpu); @@ -715,18 +720,19 @@ void deactivate_page(struct page *page) return; if (likely(get_page_unless_zero(page))) { - struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); + struct pagevec *pvec = &get_locked_var(swapvec_lock, 
+ lru_deactivate_pvecs); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); - put_cpu_var(lru_deactivate_pvecs); + put_locked_var(swapvec_lock, lru_deactivate_pvecs); } } void lru_add_drain(void) { - lru_add_drain_cpu(get_cpu()); - put_cpu(); + lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); + local_unlock_cpu(swapvec_lock); } static void lru_add_drain_per_cpu(struct work_struct *dummy) -- cgit v0.10.2 From d7872e48930012df3b50971d7ab34716bf5cd236 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:13 -0500 Subject: mm: make vmstat -rt aware Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index a67b384..1ea2fd5 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); static inline void __count_vm_event(enum vm_event_item item) { + preempt_disable_rt(); __this_cpu_inc(vm_event_states.event[item]); + preempt_enable_rt(); } static inline void count_vm_event(enum vm_event_item item) @@ -39,7 +41,9 @@ static inline void count_vm_event(enum vm_event_item item) static inline void __count_vm_events(enum vm_event_item item, long delta) { + preempt_disable_rt(); __this_cpu_add(vm_event_states.event[item], delta); + preempt_enable_rt(); } static inline void count_vm_events(enum vm_event_item item, long delta) diff --git a/mm/vmstat.c b/mm/vmstat.c index 5a442a7..efea337 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -217,6 +217,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long x; long t; + preempt_disable_rt(); x = delta + __this_cpu_read(*p); t = __this_cpu_read(pcp->stat_threshold); @@ -226,6 +227,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, x = 0; } __this_cpu_write(*p, x); + preempt_enable_rt(); } EXPORT_SYMBOL(__mod_zone_page_state); @@ -258,6 +260,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) s8 __percpu *p = pcp->vm_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_inc_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v > t)) { @@ -266,6 +269,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) zone_page_state_add(v + overstep, zone, item); __this_cpu_write(*p, -overstep); } + preempt_enable_rt(); } void __inc_zone_page_state(struct page *page, enum zone_stat_item item) @@ -280,6 +284,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) s8 __percpu *p = pcp->vm_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_dec_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v < - t)) { @@ -288,6 +293,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) zone_page_state_add(v - overstep, zone, item); __this_cpu_write(*p, overstep); } + preempt_enable_rt(); } void __dec_zone_page_state(struct page *page, enum zone_stat_item item) -- cgit v0.10.2 From 5e026c64d3221cef95ca9353f8f5f6c1b8fca2eb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 3 Jul 2009 08:44:54 -0500 Subject: mm: shrink the page frame to !-rt size He below is a boot-tested hack to shrink the page frame size back to normal. Should be a net win since there should be many less PTE-pages than page-frames. 
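For illustration only (not part of the patch), a caller written against the new contract would look roughly like the sketch below; the function name is hypothetical, modelled on a typical arch pte_alloc_one(). Because the RT variant of pgtable_page_ctor() may fail to kmalloc() the lock, it frees the page and NULLs the caller's variable, so the caller simply returns whatever is left in it:

/* Hypothetical caller sketch -- not part of this patch. */
static struct page *example_pte_alloc_one(void)
{
	struct page *pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

	if (pte)
		pgtable_page_ctor(pte);	/* on RT this may free pte and set it to NULL */
	return pte;
}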
Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner diff --git a/include/linux/mm.h b/include/linux/mm.h index 648bcb0..c7ebd78 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1242,27 +1242,59 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a * overflow into the next struct page (as it might with DEBUG_SPINLOCK). * When freeing, reset page->mapping so free_pages_check won't complain. */ +#ifndef CONFIG_PREEMPT_RT_FULL + #define __pte_lockptr(page) &((page)->ptl) -#define pte_lock_init(_page) do { \ - spin_lock_init(__pte_lockptr(_page)); \ -} while (0) + +static inline struct page *pte_lock_init(struct page *page) +{ + spin_lock_init(__pte_lockptr(page)); + return page; +} + #define pte_lock_deinit(page) ((page)->mapping = NULL) + +#else /* !PREEMPT_RT_FULL */ + +/* + * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the + * page frame, hence it only has a pointer and we need to dynamically + * allocate the lock when we allocate PTE-pages. + * + * This is an overall win, since only a small fraction of the pages + * will be PTE pages under normal circumstances. + */ + +#define __pte_lockptr(page) ((page)->ptl) + +extern struct page *pte_lock_init(struct page *page); +extern void pte_lock_deinit(struct page *page); + +#endif /* PREEMPT_RT_FULL */ + #define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) #else /* !USE_SPLIT_PTLOCKS */ /* * We use mm->page_table_lock to guard all pagetable pages of the mm. */ -#define pte_lock_init(page) do {} while (0) +static inline struct page *pte_lock_init(struct page *page) { return page; } #define pte_lock_deinit(page) do {} while (0) #define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) #endif /* USE_SPLIT_PTLOCKS */ -static inline void pgtable_page_ctor(struct page *page) +static inline struct page *__pgtable_page_ctor(struct page *page) { - pte_lock_init(page); - inc_zone_page_state(page, NR_PAGETABLE); + page = pte_lock_init(page); + if (page) + inc_zone_page_state(page, NR_PAGETABLE); + return page; } +#define pgtable_page_ctor(page) \ +do { \ + page = __pgtable_page_ctor(page); \ +} while (0) + static inline void pgtable_page_dtor(struct page *page) { pte_lock_deinit(page); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8e082f1..67689a5 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -142,7 +142,11 @@ struct page { * system if PG_buddy is set. */ #if USE_SPLIT_PTLOCKS +# ifndef CONFIG_PREEMPT_RT_FULL spinlock_t ptl; +# else + spinlock_t *ptl; +# endif #endif struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ struct page *first_page; /* Compound tail pages */ diff --git a/mm/memory.c b/mm/memory.c index f342221..5771e09 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4342,3 +4342,35 @@ void copy_user_huge_page(struct page *dst, struct page *src, } } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ + +#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0) +/* + * Heinous hack, relies on the caller doing something like: + * + * pte = alloc_pages(PGALLOC_GFP, 0); + * if (pte) + * pgtable_page_ctor(pte); + * return pte; + * + * This ensures we release the page and return NULL when the + * lock allocation fails. 
+ */ +struct page *pte_lock_init(struct page *page) +{ + page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); + if (page->ptl) { + spin_lock_init(__pte_lockptr(page)); + } else { + __free_page(page); + page = NULL; + } + return page; +} + +void pte_lock_deinit(struct page *page) +{ + kfree(page->ptl); + page->mapping = NULL; +} + +#endif -- cgit v0.10.2 From 973fa63162fb4aa8a1f5334bbed7b0e4fc8d0f51 Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Sat, 1 Oct 2011 18:58:13 -0700 Subject: ARM: Initialize ptl->lock for vector page Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if PREEMPT_RT_FULL=y because vectors_user_mapping() creates a VM_ALWAYSDUMP mapping of the vector page (address 0xffff0000), but no ptl->lock has been allocated for the page. An attempt to coredump that page will result in a kernel NULL pointer dereference when follow_page() attempts to lock the page. The call tree to the NULL pointer dereference is: do_notify_resume() get_signal_to_deliver() do_coredump() elf_core_dump() get_dump_page() __get_user_pages() follow_page() pte_offset_map_lock() <----- a #define ... rt_spin_lock() The underlying problem is exposed by mm-shrink-the-page-frame-to-rt-size.patch. Signed-off-by: Frank Rowand Cc: Frank Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/4E87C535.2030907@am.sony.com Signed-off-by: Thomas Gleixner diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 92f7b15..83af229 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -432,6 +432,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) } #ifdef CONFIG_MMU +/* + * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not + * initialized by pgtable_page_ctor() then a coredump of the vector page will + * fail. + */ +static int __init vectors_user_mapping_init_page(void) +{ + struct page *page; + unsigned long addr = 0xffff0000; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + page = pmd_page(*(pmd)); + + pgtable_page_ctor(page); + + return 0; +} +late_initcall(vectors_user_mapping_init_page); + #ifdef CONFIG_KUSER_HELPERS /* * The vectors page is always readable from user space for the -- cgit v0.10.2 From df39a2cb2bebc4ec91d13706528a3397fc357c26 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Jan 2013 10:33:09 +0100 Subject: mm: bounce: Use local_irq_save_nort kmap_atomic() is preemptible on RT. Signed-off-by: Thomas Gleixner diff --git a/mm/bounce.c b/mm/bounce.c index 5a7d58f..b09bb4e 100644 --- a/mm/bounce.c +++ b/mm/bounce.c @@ -51,11 +51,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) unsigned long flags; unsigned char *vto; - local_irq_save(flags); + local_irq_save_nort(flags); vto = kmap_atomic(to->bv_page); memcpy(vto + to->bv_offset, vfrom, to->bv_len); kunmap_atomic(vto); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #else /* CONFIG_HIGHMEM */ -- cgit v0.10.2 From d0a5eefa8a8d0188cfa13f3474ae54c2b28e1f67 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:44:03 -0500 Subject: mm: Allow only slub on RT Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/init/Kconfig b/init/Kconfig index 3ecd8a1..bf74b46 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1575,6 +1575,7 @@ choice config SLAB bool "SLAB" + depends on !PREEMPT_RT_FULL help The regular slab allocator that is established and known to work well in all environments. 
It organizes cache hot objects in @@ -1593,6 +1594,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" + depends on !PREEMPT_RT_FULL help SLOB replaces the stock allocator with a drastically simpler allocator. SLOB is generally more space efficient but -- cgit v0.10.2 From 0ded3f0df43579f5d190566490fda3a810ba27f9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 Oct 2012 10:32:35 +0100 Subject: mm: Enable SLUB for RT Make SLUB RT aware and remove the restriction in Kconfig. Signed-off-by: Thomas Gleixner diff --git a/mm/slab.h b/mm/slab.h index a535033..8ffb287 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -247,7 +247,11 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) * The slab lists for all objects. */ struct kmem_cache_node { +#ifdef CONFIG_SLUB + raw_spinlock_t list_lock; +#else spinlock_t list_lock; +#endif #ifdef CONFIG_SLAB struct list_head slabs_partial; /* partial list first, better asm code */ diff --git a/mm/slub.c b/mm/slub.c index 5c1343a..e228e45 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1087,7 +1087,7 @@ static noinline struct kmem_cache_node *free_debug_processing( { struct kmem_cache_node *n = get_node(s, page_to_nid(page)); - spin_lock_irqsave(&n->list_lock, *flags); + raw_spin_lock_irqsave(&n->list_lock, *flags); slab_lock(page); if (!check_slab(s, page)) @@ -1135,7 +1135,7 @@ out: fail: slab_unlock(page); - spin_unlock_irqrestore(&n->list_lock, *flags); + raw_spin_unlock_irqrestore(&n->list_lock, *flags); slab_fix(s, "Object at 0x%p not freed", object); return NULL; } @@ -1270,6 +1270,12 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) {} #endif /* CONFIG_SLUB_DEBUG */ +struct slub_free_list { + raw_spinlock_t lock; + struct list_head list; +}; +static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); + /* * Slab allocation and freeing */ @@ -1294,7 +1300,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) flags &= gfp_allowed_mask; +#ifdef CONFIG_PREEMPT_RT_FULL + if (system_state == SYSTEM_RUNNING) +#else if (flags & __GFP_WAIT) +#endif local_irq_enable(); flags |= s->allocflags; @@ -1334,7 +1344,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) kmemcheck_mark_unallocated_pages(page, pages); } +#ifdef CONFIG_PREEMPT_RT_FULL + if (system_state == SYSTEM_RUNNING) +#else if (flags & __GFP_WAIT) +#endif local_irq_disable(); if (!page) return NULL; @@ -1431,6 +1445,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page) __free_memcg_kmem_pages(page, order); } +static void free_delayed(struct list_head *h) +{ + while(!list_empty(h)) { + struct page *page = list_first_entry(h, struct page, lru); + + list_del(&page->lru); + __free_slab(page->slab_cache, page); + } +} + #define need_reserve_slab_rcu \ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) @@ -1465,6 +1489,12 @@ static void free_slab(struct kmem_cache *s, struct page *page) } call_rcu(head, rcu_free_slab); + } else if (irqs_disabled()) { + struct slub_free_list *f = &__get_cpu_var(slub_free_list); + + raw_spin_lock(&f->lock); + list_add(&page->lru, &f->list); + raw_spin_unlock(&f->lock); } else __free_slab(s, page); } @@ -1569,7 +1599,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, if (!n || !n->nr_partial) return NULL; - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); list_for_each_entry_safe(page, page2, &n->partial, lru) { void *t; @@ -1594,7 +1624,7 @@ static void 
*get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, break; } - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); return object; } @@ -1837,7 +1867,7 @@ redo: * that acquire_slab() will see a slab page that * is frozen */ - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } } else { m = M_FULL; @@ -1848,7 +1878,7 @@ redo: * slabs from diagnostic functions will not see * any frozen slabs. */ - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } } @@ -1883,7 +1913,7 @@ redo: goto redo; if (lock) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); if (m == M_FREE) { stat(s, DEACTIVATE_EMPTY); @@ -1915,10 +1945,10 @@ static void unfreeze_partials(struct kmem_cache *s, n2 = get_node(s, page_to_nid(page)); if (n != n2) { if (n) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); n = n2; - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } do { @@ -1947,7 +1977,7 @@ static void unfreeze_partials(struct kmem_cache *s, } if (n) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); while (discard_page) { page = discard_page; @@ -1985,14 +2015,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) pobjects = oldpage->pobjects; pages = oldpage->pages; if (drain && pobjects > s->cpu_partial) { + struct slub_free_list *f; unsigned long flags; + LIST_HEAD(tofree); /* * partial array is full. Move the existing * set to the per node partial list. */ local_irq_save(flags); unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); + f = &__get_cpu_var(slub_free_list); + raw_spin_lock(&f->lock); + list_splice_init(&f->list, &tofree); + raw_spin_unlock(&f->lock); local_irq_restore(flags); + free_delayed(&tofree); oldpage = NULL; pobjects = 0; pages = 0; @@ -2056,7 +2093,22 @@ static bool has_cpu_slab(int cpu, void *info) static void flush_all(struct kmem_cache *s) { + LIST_HEAD(tofree); + int cpu; + on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); + for_each_online_cpu(cpu) { + struct slub_free_list *f; + + if (!has_cpu_slab(cpu, s)) + continue; + + f = &per_cpu(slub_free_list, cpu); + raw_spin_lock_irq(&f->lock); + list_splice_init(&f->list, &tofree); + raw_spin_unlock_irq(&f->lock); + free_delayed(&tofree); + } } /* @@ -2084,10 +2136,10 @@ static unsigned long count_partial(struct kmem_cache_node *n, unsigned long x = 0; struct page *page; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) x += get_count(page); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return x; } @@ -2230,9 +2282,11 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long addr, struct kmem_cache_cpu *c) { + struct slub_free_list *f; void *freelist; struct page *page; unsigned long flags; + LIST_HEAD(tofree); local_irq_save(flags); #ifdef CONFIG_PREEMPT @@ -2295,7 +2349,13 @@ load_freelist: VM_BUG_ON(!c->page->frozen); c->freelist = get_freepointer(s, freelist); c->tid = next_tid(c->tid); +out: + f = &__get_cpu_var(slub_free_list); + raw_spin_lock(&f->lock); + list_splice_init(&f->list, &tofree); + raw_spin_unlock(&f->lock); local_irq_restore(flags); + free_delayed(&tofree); return freelist; new_slab: @@ -2313,9 +2373,7 @@ new_slab: if (unlikely(!freelist)) { if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) slab_out_of_memory(s, gfpflags, node); - - 
local_irq_restore(flags); - return NULL; + goto out; } page = c->page; @@ -2330,8 +2388,7 @@ new_slab: deactivate_slab(s, page, get_freepointer(s, freelist)); c->page = NULL; c->freelist = NULL; - local_irq_restore(flags); - return freelist; + goto out; } /* @@ -2503,7 +2560,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, do { if (unlikely(n)) { - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); n = NULL; } prior = page->freelist; @@ -2535,7 +2592,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, * Otherwise the list_lock will synchronize with * other processors updating the list of slabs. */ - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); } } @@ -2577,7 +2634,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, add_partial(n, page, DEACTIVATE_TO_TAIL); stat(s, FREE_ADD_PARTIAL); } - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return; slab_empty: @@ -2591,7 +2648,7 @@ slab_empty: /* Slab must be on the full list */ remove_full(s, page); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); stat(s, FREE_SLAB); discard_slab(s, page); } @@ -2793,7 +2850,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; - spin_lock_init(&n->list_lock); + raw_spin_lock_init(&n->list_lock); INIT_LIST_HEAD(&n->partial); #ifdef CONFIG_SLUB_DEBUG atomic_long_set(&n->nr_slabs, 0); @@ -3379,7 +3436,7 @@ int kmem_cache_shrink(struct kmem_cache *s) for (i = 0; i < objects; i++) INIT_LIST_HEAD(slabs_by_inuse + i); - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); /* * Build lists indexed by the items in use in each slab. 
@@ -3400,7 +3457,7 @@ int kmem_cache_shrink(struct kmem_cache *s) for (i = objects - 1; i > 0; i--) list_splice(slabs_by_inuse + i, n->partial.prev); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); /* Release empty slabs */ list_for_each_entry_safe(page, t, slabs_by_inuse, lru) @@ -3576,6 +3633,12 @@ void __init kmem_cache_init(void) { static __initdata struct kmem_cache boot_kmem_cache, boot_kmem_cache_node; + int cpu; + + for_each_possible_cpu(cpu) { + raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); + INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); + } if (debug_guardpage_minorder()) slub_max_order = 0; @@ -3880,7 +3943,7 @@ static int validate_slab_node(struct kmem_cache *s, struct page *page; unsigned long flags; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) { validate_slab_slab(s, page, map); @@ -3903,7 +3966,7 @@ static int validate_slab_node(struct kmem_cache *s, atomic_long_read(&n->nr_slabs)); out: - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return count; } @@ -4093,12 +4156,12 @@ static int list_locations(struct kmem_cache *s, char *buf, if (!atomic_long_read(&n->nr_slabs)) continue; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) process_slab(&t, s, page, alloc, map); list_for_each_entry(page, &n->full, lru) process_slab(&t, s, page, alloc, map); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); } for (i = 0; i < t.count; i++) { -- cgit v0.10.2 From 63ebbe8b3dfbf25e491260e102e71c52f49ca5bb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Jan 2013 12:08:15 +0100 Subject: slub: Enable irqs for __GFP_WAIT SYSTEM_RUNNING might be too late for enabling interrupts. Allocations with GFP_WAIT can happen before that. So use this as an indicator. 
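In short, the resulting predicate looks roughly like the sketch below (the helper name is made up; the patch open-codes it in allocate_slab()): interrupts are re-enabled around the page allocation whenever the caller may sleep, and on RT additionally once the system is fully up, since the locks involved are sleeping locks there anyway.

/* Sketch of the combined condition -- illustrative only. */
static inline bool may_enable_irqs(gfp_t flags)
{
	bool enableirqs = (flags & __GFP_WAIT) != 0;

#ifdef CONFIG_PREEMPT_RT_FULL
	enableirqs |= system_state == SYSTEM_RUNNING;
#endif
	return enableirqs;
}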
Signed-off-by: Thomas Gleixner diff --git a/mm/slub.c b/mm/slub.c index e228e45..b1c50f7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1297,14 +1297,15 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) struct page *page; struct kmem_cache_order_objects oo = s->oo; gfp_t alloc_gfp; + bool enableirqs; flags &= gfp_allowed_mask; + enableirqs = (flags & __GFP_WAIT) != 0; #ifdef CONFIG_PREEMPT_RT_FULL - if (system_state == SYSTEM_RUNNING) -#else - if (flags & __GFP_WAIT) + enableirqs |= system_state == SYSTEM_RUNNING; #endif + if (enableirqs) local_irq_enable(); flags |= s->allocflags; @@ -1344,11 +1345,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) kmemcheck_mark_unallocated_pages(page, pages); } -#ifdef CONFIG_PREEMPT_RT_FULL - if (system_state == SYSTEM_RUNNING) -#else - if (flags & __GFP_WAIT) -#endif + if (enableirqs) local_irq_disable(); if (!page) return NULL; -- cgit v0.10.2 From 62df817484fe9ace2b769f251846d4f18a9b0479 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 9 Apr 2014 19:19:02 -0500 Subject: slub: delay ctor until the object is requested It seems that allocation of plenty objects causes latency on ARM since that code can not be preempted Signed-off-by: Sebastian Andrzej Siewior diff --git a/mm/slub.c b/mm/slub.c index b1c50f7..a164648 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1363,8 +1363,10 @@ static void setup_object(struct kmem_cache *s, struct page *page, void *object) { setup_object_debug(s, page, object); +#ifndef CONFIG_PREEMPT_RT_FULL if (unlikely(s->ctor)) s->ctor(object); +#endif } static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) @@ -2470,6 +2472,10 @@ redo: if (unlikely(gfpflags & __GFP_ZERO) && object) memset(object, 0, s->object_size); +#ifdef CONFIG_PREEMPT_RT_FULL + if (unlikely(s->ctor) && object) + s->ctor(object); +#endif slab_post_alloc_hook(s, gfpflags, object); -- cgit v0.10.2 From ef45e779459a792498539970e471d19b314e7d72 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 27 Sep 2012 11:11:46 +0200 Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock The plain spinlock while sufficient does not update the local_lock internals. Use a proper local_lock function instead to ease debugging. 
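As I understand the local_lock primitives, the point is that local_lock_irqsave_on() goes through the local_irq_lock bookkeeping (owner, nesting, lockdep class) that the open-coded spin_lock_irqsave() on the embedded .lock bypassed. A rough sketch of how the cross-CPU form is meant to be used (illustrative only, function name hypothetical):

/* Sketch: operate on another CPU's pageset under pa_lock on PREEMPT_RT. */
static void remote_cpu_example(unsigned int cpu)
{
	unsigned long flags;

	cpu_lock_irqsave(cpu, flags);	/* local_lock_irqsave_on(pa_lock, flags, cpu) on RT */
	/* ... touch per_cpu(zone->pageset, cpu) while holding pa_lock of 'cpu' ... */
	cpu_unlock_irqrestore(cpu, flags);
}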
Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f8db53c..f200af2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -235,9 +235,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock); #ifdef CONFIG_PREEMPT_RT_BASE # define cpu_lock_irqsave(cpu, flags) \ - spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) + local_lock_irqsave_on(pa_lock, flags, cpu) # define cpu_unlock_irqrestore(cpu, flags) \ - spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) + local_unlock_irqrestore_on(pa_lock, flags, cpu) #else # define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) # define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) -- cgit v0.10.2 From 558c7c2c03e988ead1357c3ec6b08236aa727808 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 30 Oct 2013 11:48:33 -0700 Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context The following trace is triggered when running ltp oom test cases: BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 in_atomic(): 1, irqs_disabled(): 0, pid: 17188, name: oom03 Preemption disabled at:[] mem_cgroup_reclaim+0x90/0xe0 CPU: 2 PID: 17188 Comm: oom03 Not tainted 3.10.10-rt3 #2 Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 ffff88007684d730 ffff880070df9b58 ffffffff8169918d ffff880070df9b70 ffffffff8106db31 ffff88007688b4a0 ffff880070df9b88 ffffffff8169d9c0 ffff88007688b4a0 ffff880070df9bc8 ffffffff81059da1 0000000170df9bb0 Call Trace: [] dump_stack+0x19/0x1b [] __might_sleep+0xf1/0x170 [] rt_spin_lock+0x20/0x50 [] queue_work_on+0x61/0x100 [] drain_all_stock+0xe1/0x1c0 [] mem_cgroup_reclaim+0x90/0xe0 [] __mem_cgroup_try_charge+0x41a/0xc40 [] ? release_pages+0x1b1/0x1f0 [] ? sched_exec+0x40/0xb0 [] mem_cgroup_charge_common+0x37/0x70 [] mem_cgroup_newpage_charge+0x26/0x30 [] handle_pte_fault+0x618/0x840 [] ? unpin_current_cpu+0x16/0x70 [] ? migrate_enable+0xd4/0x200 [] handle_mm_fault+0x145/0x1e0 [] __do_page_fault+0x1a1/0x4c0 [] ? preempt_schedule_irq+0x4b/0x70 [] ? retint_kernel+0x37/0x40 [] do_page_fault+0xe/0x10 [] page_fault+0x22/0x30 So, to prevent schedule_work_on from being called in preempt disabled context, replace the pair of get/put_cpu() to get/put_cpu_light(). 
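The difference between the two pairs, as far as I can tell: get_cpu() disables preemption outright, while get_cpu_light() on RT only disables migration, so the section stays preemptible and is allowed to take sleeping locks such as the one inside queue_work_on(). A minimal sketch of the intended pattern (illustrative, not taken from the patch):

/* Sketch: queue per-cpu work without disabling preemption on RT. */
static void example_drain_all(struct work_struct __percpu *work)
{
	int cpu;

	get_cpu_light();			/* migrate_disable() on RT, still preemptible */
	for_each_online_cpu(cpu)
		schedule_work_on(cpu, per_cpu_ptr(work, cpu));	/* may sleep on RT */
	put_cpu_light();
}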
Cc: stable-rt@vger.kernel.org Signed-off-by: Yang Shi Signed-off-by: Sebastian Andrzej Siewior diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 15429b9..bc16ebc 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2473,7 +2473,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) /* Notify other cpus that system-wide "drain" is running */ get_online_cpus(); - curcpu = get_cpu(); + curcpu = get_cpu_light(); for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; @@ -2490,7 +2490,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) schedule_work_on(cpu, &stock->work); } } - put_cpu(); + put_cpu_light(); if (!sync) goto out; -- cgit v0.10.2 From 1badc49efd65489f4811eaf42a1f164665f93343 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:33:18 +0200 Subject: radix-tree-rt-aware.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 4039407..5b6d5b2 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -230,8 +230,13 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); unsigned long radix_tree_prev_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); +#ifndef CONFIG_PREEMPT_RT_FULL int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); +#else +static inline int radix_tree_preload(gfp_t gm) { return 0; } +static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; } +#endif void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); @@ -256,7 +261,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); static inline void radix_tree_preload_end(void) { - preempt_enable(); + preempt_enable_nort(); } /** diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 7811ed3..e7b61e8 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -221,12 +221,13 @@ radix_tree_node_alloc(struct radix_tree_root *root) * succeed in getting a node here (and never reach * kmem_cache_alloc) */ - rtp = &__get_cpu_var(radix_tree_preloads); + rtp = &get_cpu_var(radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes[rtp->nr - 1]; rtp->nodes[rtp->nr - 1] = NULL; rtp->nr--; } + put_cpu_var(radix_tree_preloads); } if (ret == NULL) ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); @@ -261,6 +262,7 @@ radix_tree_node_free(struct radix_tree_node *node) call_rcu(&node->rcu_head, radix_tree_node_rcu_free); } +#ifndef CONFIG_PREEMPT_RT_FULL /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. 
On @@ -326,6 +328,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) return 0; } EXPORT_SYMBOL(radix_tree_maybe_preload); +#endif /* * Return the maximum key which can be store into a -- cgit v0.10.2 From 1ba2a0b1f0b6ec80afc57debe8c46c42df9c78e2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Apr 2014 19:19:04 -0500 Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id diff --git a/kernel/panic.c b/kernel/panic.c index b6c482c..936d00f 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -368,9 +368,11 @@ static u64 oops_id; static int init_oops_id(void) { +#ifndef CONFIG_PREEMPT_RT_FULL if (!oops_id) get_random_bytes(&oops_id, sizeof(oops_id)); else +#endif oops_id++; return 0; -- cgit v0.10.2 From 2451925754ad11895bea6ad37dc13942b04641a8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:12 -0500 Subject: ipc: Make the ipc code -rt aware RT serializes the code with the (rt)spinlock but keeps preemption enabled. Some parts of the code need to be atomic nevertheless. Protect it with preempt_disable/enable_rt pairts. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/ipc/mqueue.c b/ipc/mqueue.c index bb0248f..af0c6bb 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -923,12 +923,17 @@ static inline void pipelined_send(struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { + /* + * Keep them in one critical section for PREEMPT_RT: + */ + preempt_disable_rt(); receiver->msg = message; list_del(&receiver->list); receiver->state = STATE_PENDING; wake_up_process(receiver->task); smp_wmb(); receiver->state = STATE_READY; + preempt_enable_rt(); } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() diff --git a/ipc/msg.c b/ipc/msg.c index 52770bf..1cf8b2c 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -253,10 +253,18 @@ static void expunge_all(struct msg_queue *msq, int res) struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { + /* + * Make sure that the wakeup doesnt preempt + * this CPU prematurely. (on PREEMPT_RT) + */ + preempt_disable_rt(); + msr->r_msg = NULL; wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = ERR_PTR(res); + + preempt_enable_rt(); } } @@ -636,6 +644,12 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { + /* + * Make sure that the wakeup doesnt preempt + * this CPU prematurely. (on PREEMPT_RT) + */ + preempt_disable_rt(); + list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; @@ -649,9 +663,11 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = msg; + preempt_enable_rt(); return 1; } + preempt_enable_rt(); } } return 0; -- cgit v0.10.2 From f88491346129d5c5fccf5696415ba4eb4c47c04c Mon Sep 17 00:00:00 2001 From: KOBAYASHI Yoshitake Date: Sat, 23 Jul 2011 11:57:36 +0900 Subject: ipc/mqueue: Add a critical section to avoid a deadlock (Repost for v3.0-rt1 and changed the distination addreses) I have tested the following patch on v3.0-rt1 with PREEMPT_RT_FULL. In POSIX message queue, if a sender process uses SCHED_FIFO and has a higher priority than a receiver process, the sender will be stuck at ipc/mqueue.c:452 452 while (ewp->state == STATE_PENDING) 453 cpu_relax(); Description of the problem (receiver process) 1. receiver changes sender's state to STATE_PENDING (mqueue.c:846) 2. 
wake up sender process and "switch to sender" (mqueue.c:847) Note: This context switch only happens in PREEMPT_RT_FULL kernel. (sender process) 3. sender check the own state in above loop (mqueue.c:452-453) *. receiver will never wake up and cannot change sender's state to STATE_READY because sender has higher priority Signed-off-by: Yoshitake Kobayashi Cc: viro@zeniv.linux.org.uk Cc: dchinner@redhat.com Cc: npiggin@kernel.dk Cc: hch@lst.de Cc: arnd@arndb.de Link: http://lkml.kernel.org/r/4E2A38A0.1090601@toshiba.co.jp Signed-off-by: Thomas Gleixner diff --git a/ipc/mqueue.c b/ipc/mqueue.c index af0c6bb..2276120 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -947,13 +947,18 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) wake_up_interruptible(&info->wait_q); return; } - if (msg_insert(sender->msg, info)) - return; - list_del(&sender->list); - sender->state = STATE_PENDING; - wake_up_process(sender->task); - smp_wmb(); - sender->state = STATE_READY; + /* + * Keep them in one critical section for PREEMPT_RT: + */ + preempt_disable_rt(); + if (!msg_insert(sender->msg, info)) { + list_del(&sender->list); + sender->state = STATE_PENDING; + wake_up_process(sender->task); + smp_wmb(); + sender->state = STATE_READY; + } + preempt_enable_rt(); } SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, -- cgit v0.10.2 From 6ab20341d88df6610a0d2cb02cba3c8a7746d9b8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:44:07 -0500 Subject: relay: fix timer madness remove timer calls (!!!) from deep within the tracing infrastructure. This was totally bogus code that can cause lockups and worse. Poll the buffer every 2 jiffies for now. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/kernel/relay.c b/kernel/relay.c index 5001c98..b915513 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -339,6 +339,10 @@ static void wakeup_readers(unsigned long data) { struct rchan_buf *buf = (struct rchan_buf *)data; wake_up_interruptible(&buf->read_wait); + /* + * Stupid polling for now: + */ + mod_timer(&buf->timer, jiffies + 1); } /** @@ -356,6 +360,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init) init_waitqueue_head(&buf->read_wait); kref_init(&buf->kref); setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); + mod_timer(&buf->timer, jiffies + 1); } else del_timer_sync(&buf->timer); @@ -739,15 +744,6 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) else buf->early_bytes += buf->chan->subbuf_size - buf->padding[old_subbuf]; - smp_mb(); - if (waitqueue_active(&buf->read_wait)) - /* - * Calling wake_up_interruptible() from here - * will deadlock if we happen to be logging - * from the scheduler (trying to re-grab - * rq->lock), so defer it. - */ - mod_timer(&buf->timer, jiffies + 1); } old = buf->data; -- cgit v0.10.2 From 5062db1dd6a5720edb3b310f6c89474b96853c6c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 Subject: timers: prepare for full preemption When softirqs can be preempted we need to make sure that cancelling the timer from the active thread can not deadlock vs. a running timer callback. Add a waitqueue to resolve that. 
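The deadlock being avoided, roughly: on RT the timer softirq runs in a preemptible thread, so a higher-priority task spinning in del_timer_sync() with cpu_relax() can starve the very thread that has to finish the callback. Condensed from the hunks below, the cancel loop takes this shape (sketch, not the complete function):

int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);

		if (ret >= 0)
			return ret;
		/* sleep on the base's waitqueue until __run_timers() wakes us,
		 * instead of busy-waiting with cpu_relax() */
		wait_for_running_timer(timer);
	}
}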
Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/include/linux/timer.h b/include/linux/timer.h index 8c5a197..5fcd72c 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) extern int del_timer_sync(struct timer_list *timer); #else # define del_timer_sync(t) del_timer(t) diff --git a/kernel/timer.c b/kernel/timer.c index 4296d13..cd92037 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -78,6 +78,7 @@ struct tvec_root { struct tvec_base { spinlock_t lock; struct timer_list *running_timer; + wait_queue_head_t wait_for_running_timer; unsigned long timer_jiffies; unsigned long next_timer; unsigned long active_timers; @@ -739,12 +740,15 @@ __mod_timer(struct timer_list *timer, unsigned long expires, debug_activate(timer, expires); + preempt_disable_rt(); cpu = smp_processor_id(); #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) cpu = get_nohz_timer_target(); #endif + preempt_enable_rt(); + new_base = per_cpu(tvec_bases, cpu); if (base != new_base) { @@ -945,6 +949,29 @@ void add_timer_on(struct timer_list *timer, int cpu) } EXPORT_SYMBOL_GPL(add_timer_on); +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * Wait for a running timer + */ +static void wait_for_running_timer(struct timer_list *timer) +{ + struct tvec_base *base = timer->base; + + if (base->running_timer == timer) + wait_event(base->wait_for_running_timer, + base->running_timer != timer); +} + +# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer) +#else +static inline void wait_for_running_timer(struct timer_list *timer) +{ + cpu_relax(); +} + +# define wakeup_timer_waiters(b) do { } while (0) +#endif + /** * del_timer - deactive a timer. * @timer: the timer to be deactivated @@ -1002,7 +1029,7 @@ int try_to_del_timer_sync(struct timer_list *timer) } EXPORT_SYMBOL(try_to_del_timer_sync); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) /** * del_timer_sync - deactivate a timer and wait for the handler to finish. * @timer: the timer to be deactivated @@ -1062,7 +1089,7 @@ int del_timer_sync(struct timer_list *timer) int ret = try_to_del_timer_sync(timer); if (ret >= 0) return ret; - cpu_relax(); + wait_for_running_timer(timer); } } EXPORT_SYMBOL(del_timer_sync); @@ -1179,15 +1206,17 @@ static inline void __run_timers(struct tvec_base *base) if (irqsafe) { spin_unlock(&base->lock); call_timer_fn(timer, fn, data); + base->running_timer = NULL; spin_lock(&base->lock); } else { spin_unlock_irq(&base->lock); call_timer_fn(timer, fn, data); + base->running_timer = NULL; spin_lock_irq(&base->lock); } } } - base->running_timer = NULL; + wake_up(&base->wait_for_running_timer); spin_unlock_irq(&base->lock); } @@ -1547,6 +1576,7 @@ static int init_timers_cpu(int cpu) base = per_cpu(tvec_bases, cpu); } + init_waitqueue_head(&base->wait_for_running_timer); for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); -- cgit v0.10.2 From d11e34b2f77cf1a97676bf2a798341e45ed7e6b3 Mon Sep 17 00:00:00 2001 From: Zhao Hongjiang Date: Wed, 17 Apr 2013 17:44:16 +0800 Subject: timers: prepare for full preemption improve wake_up should do nothing on the nort, so we should use wakeup_timer_waiters, also fix a spell mistake. 
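Put differently: the wake-up should be a no-op unless PREEMPT_RT_FULL is enabled, so non-RT kernels do not pay for a wake_up() on every __run_timers() pass, and the misspelled wait_for_tunning_timer reference is corrected. The resulting macro pair, condensed from the diff below:

#ifdef CONFIG_PREEMPT_RT_FULL
# define wakeup_timer_waiters(b)	wake_up(&(b)->wait_for_running_timer)
#else
# define wakeup_timer_waiters(b)	do { } while (0)
#endif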
Cc: stable-rt@vger.kernel.org Signed-off-by: Zhao Hongjiang [bigeasy: s/CONFIG_PREEMPT_RT_BASE/CONFIG_PREEMPT_RT_FULL/] Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/timer.c b/kernel/timer.c index cd92037..5f0a85b 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -78,7 +78,9 @@ struct tvec_root { struct tvec_base { spinlock_t lock; struct timer_list *running_timer; +#ifdef CONFIG_PREEMPT_RT_FULL wait_queue_head_t wait_for_running_timer; +#endif unsigned long timer_jiffies; unsigned long next_timer; unsigned long active_timers; @@ -962,7 +964,7 @@ static void wait_for_running_timer(struct timer_list *timer) base->running_timer != timer); } -# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer) +# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer) #else static inline void wait_for_running_timer(struct timer_list *timer) { @@ -1216,7 +1218,7 @@ static inline void __run_timers(struct tvec_base *base) } } } - wake_up(&base->wait_for_running_timer); + wakeup_timer_waiters(base); spin_unlock_irq(&base->lock); } @@ -1576,7 +1578,9 @@ static int init_timers_cpu(int cpu) base = per_cpu(tvec_bases, cpu); } +#ifdef CONFIG_PREEMPT_RT_FULL init_waitqueue_head(&base->wait_for_running_timer); +#endif for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); -- cgit v0.10.2 From a727c0bcff7677e5b33fc61a1144f82391f56102 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:20 -0500 Subject: timers: preempt-rt support Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/kernel/timer.c b/kernel/timer.c index 5f0a85b..d01c1c9 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1358,7 +1358,17 @@ unsigned long get_next_timer_interrupt(unsigned long now) if (cpu_is_offline(smp_processor_id())) return expires; +#ifdef CONFIG_PREEMPT_RT_FULL + /* + * On PREEMPT_RT we cannot sleep here. If the trylock does not + * succeed then we return the worst-case 'expires in 1 tick' + * value: + */ + if (!spin_trylock(&base->lock)) + return now + 1; +#else spin_lock(&base->lock); +#endif if (base->active_timers) { if (time_before_eq(base->next_timer, base->timer_jiffies)) base->next_timer = __next_timer_interrupt(base); @@ -1368,7 +1378,6 @@ unsigned long get_next_timer_interrupt(unsigned long now) if (time_before_eq(expires, now)) return now; - return cmp_next_hrtimer_event(now, expires); } #endif @@ -1619,7 +1628,7 @@ static void migrate_timers(int cpu) BUG_ON(cpu_online(cpu)); old_base = per_cpu(tvec_bases, cpu); - new_base = get_cpu_var(tvec_bases); + new_base = get_local_var(tvec_bases); /* * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. @@ -1640,7 +1649,7 @@ static void migrate_timers(int cpu) spin_unlock(&old_base->lock); spin_unlock_irq(&new_base->lock); - put_cpu_var(tvec_bases); + put_local_var(tvec_bases); } #endif /* CONFIG_HOTPLUG_CPU */ -- cgit v0.10.2 From e8cc66aeb40bde850f6dae015ebac87b34cfb978 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 21 Aug 2009 11:56:45 +0200 Subject: timer: delay waking softirqs from the jiffy tick People were complaining about broken balancing with the recent -rt series. 
A look at /proc/sched_debug yielded: cpu#0, 2393.874 MHz .nr_running : 0 .load : 0 .cpu_load[0] : 177522 .cpu_load[1] : 177522 .cpu_load[2] : 177522 .cpu_load[3] : 177522 .cpu_load[4] : 177522 cpu#1, 2393.874 MHz .nr_running : 4 .load : 4096 .cpu_load[0] : 181618 .cpu_load[1] : 180850 .cpu_load[2] : 180274 .cpu_load[3] : 179938 .cpu_load[4] : 179758 Which indicated the cpu_load computation was hosed, the 177522 value indicates that there is one RT task runnable. Initially I thought the old problem of calculating the cpu_load from a softirq had re-surfaced, however looking at the code shows its being done from scheduler_tick(). [ we really should fix this RT/cfs interaction some day... ] A few trace_printk()s later: sirq-timer/1-19 [001] 174.289744: 19: 50:S ==> [001] 0:140:R -0 [001] 174.290724: enqueue_task_rt: adding task: 19/sirq-timer/1 with load: 177522 -0 [001] 174.290725: 0:140:R + [001] 19: 50:S sirq-timer/1 -0 [001] 174.290730: scheduler_tick: current load: 177522 -0 [001] 174.290732: scheduler_tick: current: 0/swapper -0 [001] 174.290736: 0:140:R ==> [001] 19: 50:R sirq-timer/1 sirq-timer/1-19 [001] 174.290741: dequeue_task_rt: removing task: 19/sirq-timer/1 with load: 177522 sirq-timer/1-19 [001] 174.290743: 19: 50:S ==> [001] 0:140:R We see that we always raise the timer softirq before doing the load calculation. Avoid this by re-ordering the scheduler_tick() call in update_process_times() to occur before we deal with timers. This lowers the load back to sanity and restores regular load-balancing behaviour. Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner diff --git a/kernel/timer.c b/kernel/timer.c index d01c1c9..adde8bb 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1393,13 +1393,13 @@ void update_process_times(int user_tick) /* Note: this timer irq context must be accounted for as well. */ account_process_tick(p, user_tick); + scheduler_tick(); run_local_timers(); rcu_check_callbacks(cpu, user_tick); #ifdef CONFIG_IRQ_WORK if (in_irq()) irq_work_run(); #endif - scheduler_tick(); run_posix_cpu_timers(p); } -- cgit v0.10.2 From 3a522ffe61ee80adcc78f2fd25bae737a04b9b4c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Jul 2011 15:23:39 +0200 Subject: timers: Avoid the switch timers base set to NULL trick on RT On RT that code is preemptible, so we cannot assign NULL to timers base as a preempter would spin forever in lock_timer_base(). Signed-off-by: Thomas Gleixner diff --git a/kernel/timer.c b/kernel/timer.c index adde8bb..4cb850d 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -723,6 +723,36 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer, } } +#ifndef CONFIG_PREEMPT_RT_FULL +static inline struct tvec_base *switch_timer_base(struct timer_list *timer, + struct tvec_base *old, + struct tvec_base *new) +{ + /* See the comment in lock_timer_base() */ + timer_set_base(timer, NULL); + spin_unlock(&old->lock); + spin_lock(&new->lock); + timer_set_base(timer, new); + return new; +} +#else +static inline struct tvec_base *switch_timer_base(struct timer_list *timer, + struct tvec_base *old, + struct tvec_base *new) +{ + /* + * We cannot do the above because we might be preempted and + * then the preempter would see NULL and loop forever. 
+ */ + if (spin_trylock(&new->lock)) { + timer_set_base(timer, new); + spin_unlock(&old->lock); + return new; + } + return old; +} +#endif + static inline int __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only, int pinned) @@ -761,14 +791,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, * handler yet has not finished. This also guarantees that * the timer is serialized wrt itself. */ - if (likely(base->running_timer != timer)) { - /* See the comment in lock_timer_base() */ - timer_set_base(timer, NULL); - spin_unlock(&base->lock); - base = new_base; - spin_lock(&base->lock); - timer_set_base(timer, base); - } + if (likely(base->running_timer != timer)) + base = switch_timer_base(timer, base, new_base); } timer->expires = expires; -- cgit v0.10.2 From 00417c7d809cc169dabef50a51ea4912ac74c9d1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:29:34 -0500 Subject: hrtimers: prepare full preemption Make cancellation of a running callback in softirq context safe against preemption. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index f00fcfc..7bd7d73 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -193,6 +193,9 @@ struct hrtimer_cpu_base { unsigned long nr_hangs; ktime_t max_hang_time; #endif +#ifdef CONFIG_PREEMPT_RT_BASE + wait_queue_head_t wait; +#endif struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; }; @@ -388,6 +391,13 @@ static inline int hrtimer_restart(struct hrtimer *timer) return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } +/* Softirq preemption could deadlock timer removal */ +#ifdef CONFIG_PREEMPT_RT_BASE + extern void hrtimer_wait_for_timer(const struct hrtimer *timer); +#else +# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) +#endif + /* Query timers: */ extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 7c4ee68..86adf2b 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -871,6 +871,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) } EXPORT_SYMBOL_GPL(hrtimer_forward); +#ifdef CONFIG_PREEMPT_RT_BASE +# define wake_up_timer_waiters(b) wake_up(&(b)->wait) + +/** + * hrtimer_wait_for_timer - Wait for a running timer + * + * @timer: timer to wait for + * + * The function waits in case the timers callback function is + * currently executed on the waitqueue of the timer base. The + * waitqueue is woken up after the timer callback function has + * finished execution. 
+ */ +void hrtimer_wait_for_timer(const struct hrtimer *timer) +{ + struct hrtimer_clock_base *base = timer->base; + + if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base)) + wait_event(base->cpu_base->wait, + !(timer->state & HRTIMER_STATE_CALLBACK)); +} + +#else +# define wake_up_timer_waiters(b) do { } while (0) +#endif + /* * enqueue_hrtimer - internal function to (re)start a timer * @@ -1123,7 +1149,7 @@ int hrtimer_cancel(struct hrtimer *timer) if (ret >= 0) return ret; - cpu_relax(); + hrtimer_wait_for_timer(timer); } } EXPORT_SYMBOL_GPL(hrtimer_cancel); @@ -1533,6 +1559,8 @@ void hrtimer_run_queues(void) } raw_spin_unlock(&cpu_base->lock); } + + wake_up_timer_waiters(cpu_base); } /* @@ -1693,6 +1721,9 @@ static void init_hrtimers_cpu(int cpu) } hrtimer_init_hres(cpu_base); +#ifdef CONFIG_PREEMPT_RT_BASE + init_waitqueue_head(&cpu_base->wait); +#endif } #ifdef CONFIG_HOTPLUG_CPU diff --git a/kernel/itimer.c b/kernel/itimer.c index 8d262b4..d051390 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -213,6 +213,7 @@ again: /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { spin_unlock_irq(&tsk->sighand->siglock); + hrtimer_wait_for_timer(&tsk->signal->real_timer); goto again; } expires = timeval_to_ktime(value->it_value); diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 68af2dc..a22b931 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -818,6 +818,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) return overrun; } +/* + * Protected by RCU! + */ +static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr) +{ +#ifdef CONFIG_PREEMPT_RT_FULL + if (kc->timer_set == common_timer_set) + hrtimer_wait_for_timer(&timr->it.real.timer); + else + /* FIXME: Whacky hack for posix-cpu-timers */ + schedule_timeout(1); +#endif +} + /* Set a POSIX.1b interval timer. */ /* timr->it_lock is taken. */ static int @@ -895,6 +909,7 @@ retry: if (!timr) return -EINVAL; + rcu_read_lock(); kc = clockid_to_kclock(timr->it_clock); if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; @@ -903,9 +918,12 @@ retry: unlock_timer(timr, flag); if (error == TIMER_RETRY) { + timer_wait_for_callback(kc, timr); rtn = NULL; // We already got the old time... 
+ rcu_read_unlock(); goto retry; } + rcu_read_unlock(); if (old_setting && !error && copy_to_user(old_setting, &old_spec, sizeof (old_spec))) @@ -943,10 +961,15 @@ retry_delete: if (!timer) return -EINVAL; + rcu_read_lock(); if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); + timer_wait_for_callback(clockid_to_kclock(timer->it_clock), + timer); + rcu_read_unlock(); goto retry_delete; } + rcu_read_unlock(); spin_lock(¤t->sighand->siglock); list_del(&timer->list); @@ -972,8 +995,18 @@ static void itimer_delete(struct k_itimer *timer) retry_delete: spin_lock_irqsave(&timer->it_lock, flags); + /* On RT we can race with a deletion */ + if (!timer->it_signal) { + unlock_timer(timer, flags); + return; + } + if (timer_delete_hook(timer) == TIMER_RETRY) { + rcu_read_lock(); unlock_timer(timer, flags); + timer_wait_for_callback(clockid_to_kclock(timer->it_clock), + timer); + rcu_read_unlock(); goto retry_delete; } list_del(&timer->list); -- cgit v0.10.2 From 026ba355cd0372630c9266141a7ab45af1075020 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:31 -0500 Subject: hrtimer: fixup hrtimer callback changes for preempt-rt In preempt-rt we can not call the callbacks which take sleeping locks from the timer interrupt context. Bring back the softirq split for now, until we fixed the signal delivery problem for real. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 7bd7d73..79a7a35 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -111,6 +111,8 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; + struct list_head cb_entry; + int irqsafe; #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ktime_t praecox; #endif @@ -150,6 +152,7 @@ struct hrtimer_clock_base { int index; clockid_t clockid; struct timerqueue_head active; + struct list_head expired; ktime_t resolution; ktime_t (*get_time)(void); ktime_t softirq_time; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 86adf2b..0d7fac6 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -608,8 +608,7 @@ static int hrtimer_reprogram(struct hrtimer *timer, * When the callback is running, we do not reprogram the clock event * device. The timer callback is either running on a different CPU or * the callback is executed in the hrtimer_interrupt context. The - * reprogramming is handled either by the softirq, which called the - * callback or at the end of the hrtimer_interrupt. + * reprogramming is handled at the end of the hrtimer_interrupt. */ if (hrtimer_callback_running(timer)) return 0; @@ -644,6 +643,9 @@ static int hrtimer_reprogram(struct hrtimer *timer, return res; } +static void __run_hrtimer(struct hrtimer *timer, ktime_t *now); +static int hrtimer_rt_defer(struct hrtimer *timer); + /* * Initialize the high resolution related parts of cpu_base */ @@ -660,9 +662,18 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) * and expiry check is done in the hrtimer_interrupt or in the softirq. 
*/ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, - struct hrtimer_clock_base *base) + struct hrtimer_clock_base *base, + int wakeup) { - return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); + if (!(base->cpu_base->hres_active && hrtimer_reprogram(timer, base))) + return 0; + if (!wakeup) + return -ETIME; +#ifdef CONFIG_PREEMPT_RT_BASE + if (!hrtimer_rt_defer(timer)) + return -ETIME; +#endif + return 1; } static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) @@ -747,12 +758,18 @@ static inline int hrtimer_switch_to_hres(void) { return 0; } static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, - struct hrtimer_clock_base *base) + struct hrtimer_clock_base *base, + int wakeup) { return 0; } static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } static inline void retrigger_next_event(void *arg) { } +static inline int hrtimer_reprogram(struct hrtimer *timer, + struct hrtimer_clock_base *base) +{ + return 0; +} #endif /* CONFIG_HIGH_RES_TIMERS */ @@ -888,9 +905,9 @@ void hrtimer_wait_for_timer(const struct hrtimer *timer) { struct hrtimer_clock_base *base = timer->base; - if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base)) + if (base && base->cpu_base && !timer->irqsafe) wait_event(base->cpu_base->wait, - !(timer->state & HRTIMER_STATE_CALLBACK)); + !(timer->state & HRTIMER_STATE_CALLBACK)); } #else @@ -940,6 +957,11 @@ static void __remove_hrtimer(struct hrtimer *timer, if (!(timer->state & HRTIMER_STATE_ENQUEUED)) goto out; + if (unlikely(!list_empty(&timer->cb_entry))) { + list_del_init(&timer->cb_entry); + goto out; + } + next_timer = timerqueue_getnext(&base->active); timerqueue_del(&base->active, &timer->node); if (&timer->node == next_timer) { @@ -1047,9 +1069,19 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, * * XXX send_remote_softirq() ? */ - if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) - && hrtimer_enqueue_reprogram(timer, new_base)) { - if (wakeup) { + if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) { + ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup); + if (ret < 0) { + /* + * In case we failed to reprogram the timer (mostly + * because out current timer is already elapsed), + * remove it again and report a failure. This avoids + * stale base->first entries. + */ + debug_deactivate(timer); + __remove_hrtimer(timer, new_base, + timer->state & HRTIMER_STATE_CALLBACK, 0); + } else if (ret > 0) { /* * We need to drop cpu_base->lock to avoid a * lock ordering issue vs. rq->lock. 
@@ -1057,9 +1089,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, raw_spin_unlock(&new_base->cpu_base->lock); raise_softirq_irqoff(HRTIMER_SOFTIRQ); local_irq_restore(flags); - return ret; - } else { - __raise_softirq_irqoff(HRTIMER_SOFTIRQ); + return 0; } } @@ -1228,6 +1258,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, base = hrtimer_clockid_to_base(clock_id); timer->base = &cpu_base->clock_base[base]; + INIT_LIST_HEAD(&timer->cb_entry); timerqueue_init(&timer->node); #ifdef CONFIG_TIMER_STATS @@ -1311,10 +1342,128 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) timer->state &= ~HRTIMER_STATE_CALLBACK; } -#ifdef CONFIG_HIGH_RES_TIMERS - static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); +#ifdef CONFIG_PREEMPT_RT_BASE +static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, + struct hrtimer_clock_base *base) +{ + /* + * Note, we clear the callback flag before we requeue the + * timer otherwise we trigger the callback_running() check + * in hrtimer_reprogram(). + */ + timer->state &= ~HRTIMER_STATE_CALLBACK; + + if (restart != HRTIMER_NORESTART) { + BUG_ON(hrtimer_active(timer)); + /* + * Enqueue the timer, if it's the leftmost timer then + * we need to reprogram it. + */ + if (!enqueue_hrtimer(timer, base)) + return; + +#ifndef CONFIG_HIGH_RES_TIMERS + } +#else + if (base->cpu_base->hres_active && + hrtimer_reprogram(timer, base)) + goto requeue; + + } else if (hrtimer_active(timer)) { + /* + * If the timer was rearmed on another CPU, reprogram + * the event device. + */ + if (&timer->node == base->active.next && + base->cpu_base->hres_active && + hrtimer_reprogram(timer, base)) + goto requeue; + } + return; + +requeue: + /* + * Timer is expired. Thus move it from tree to pending list + * again. + */ + __remove_hrtimer(timer, base, timer->state, 0); + list_add_tail(&timer->cb_entry, &base->expired); +#endif +} + +/* + * The changes in mainline which removed the callback modes from + * hrtimer are not yet working with -rt. The non wakeup_process() + * based callbacks which involve sleeping locks need to be treated + * seperately. + */ +static void hrtimer_rt_run_pending(void) +{ + enum hrtimer_restart (*fn)(struct hrtimer *); + struct hrtimer_cpu_base *cpu_base; + struct hrtimer_clock_base *base; + struct hrtimer *timer; + int index, restart; + + local_irq_disable(); + cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); + + raw_spin_lock(&cpu_base->lock); + + for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { + base = &cpu_base->clock_base[index]; + + while (!list_empty(&base->expired)) { + timer = list_first_entry(&base->expired, + struct hrtimer, cb_entry); + + /* + * Same as the above __run_hrtimer function + * just we run with interrupts enabled. 
+ */ + debug_hrtimer_deactivate(timer); + __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); + timer_stats_account_hrtimer(timer); + fn = timer->function; + + raw_spin_unlock_irq(&cpu_base->lock); + restart = fn(timer); + raw_spin_lock_irq(&cpu_base->lock); + + hrtimer_rt_reprogram(restart, timer, base); + } + } + + raw_spin_unlock_irq(&cpu_base->lock); + + wake_up_timer_waiters(cpu_base); +} + +static int hrtimer_rt_defer(struct hrtimer *timer) +{ + if (timer->irqsafe) + return 0; + + __remove_hrtimer(timer, timer->base, timer->state, 0); + list_add_tail(&timer->cb_entry, &timer->base->expired); + return 1; +} + +#else + +static inline void hrtimer_rt_run_pending(void) +{ + hrtimer_peek_ahead_timers(); +} + +static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } + +#endif + +#ifdef CONFIG_HIGH_RES_TIMERS + /* * High resolution timer interrupt * Called with interrupts disabled @@ -1323,7 +1472,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) { struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); ktime_t expires_next, now, entry_time, delta; - int i, retries = 0; + int i, retries = 0, raise = 0; BUG_ON(!cpu_base->hres_active); cpu_base->nr_events++; @@ -1392,7 +1541,10 @@ retry: break; } - __run_hrtimer(timer, &basenow); + if (!hrtimer_rt_defer(timer)) + __run_hrtimer(timer, &basenow); + else + raise = 1; } } @@ -1407,6 +1559,10 @@ retry: if (expires_next.tv64 == KTIME_MAX || !tick_program_event(expires_next, 0)) { cpu_base->hang_detected = 0; + + if (raise) + raise_softirq_irqoff(HRTIMER_SOFTIRQ); + return; } @@ -1486,18 +1642,18 @@ void hrtimer_peek_ahead_timers(void) __hrtimer_peek_ahead_timers(); local_irq_restore(flags); } - -static void run_hrtimer_softirq(struct softirq_action *h) -{ - hrtimer_peek_ahead_timers(); -} - #else /* CONFIG_HIGH_RES_TIMERS */ static inline void __hrtimer_peek_ahead_timers(void) { } #endif /* !CONFIG_HIGH_RES_TIMERS */ + +static void run_hrtimer_softirq(struct softirq_action *h) +{ + hrtimer_rt_run_pending(); +} + /* * Called from timer softirq every jiffy, expire hrtimers: * @@ -1530,7 +1686,7 @@ void hrtimer_run_queues(void) struct timerqueue_node *node; struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); struct hrtimer_clock_base *base; - int index, gettime = 1; + int index, gettime = 1, raise = 0; if (hrtimer_hres_active()) return; @@ -1555,12 +1711,16 @@ void hrtimer_run_queues(void) hrtimer_get_expires_tv64(timer)) break; - __run_hrtimer(timer, &base->softirq_time); + if (!hrtimer_rt_defer(timer)) + __run_hrtimer(timer, &base->softirq_time); + else + raise = 1; } raw_spin_unlock(&cpu_base->lock); } - wake_up_timer_waiters(cpu_base); + if (raise) + raise_softirq_irqoff(HRTIMER_SOFTIRQ); } /* @@ -1582,6 +1742,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) { sl->timer.function = hrtimer_wakeup; + sl->timer.irqsafe = 1; sl->task = task; } EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); @@ -1718,6 +1879,7 @@ static void init_hrtimers_cpu(int cpu) for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { cpu_base->clock_base[i].cpu_base = cpu_base; timerqueue_init_head(&cpu_base->clock_base[i].active); + INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); } hrtimer_init_hres(cpu_base); @@ -1836,9 +1998,7 @@ void __init hrtimers_init(void) hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); register_cpu_notifier(&hrtimers_nb); -#ifdef CONFIG_HIGH_RES_TIMERS 
open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); -#endif } /** diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d995623..1f35ad6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -491,6 +491,7 @@ static void init_rq_hrtick(struct rq *rq) hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rq->hrtick_timer.function = hrtick; + rq->hrtick_timer.irqsafe = 1; } #else /* CONFIG_SCHED_HRTICK */ static inline void hrtick_clear(struct rq *rq) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index ff04e1a..240fc60 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -43,6 +43,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rt_b->rt_period_timer.irqsafe = 1; rt_b->rt_period_timer.function = sched_rt_period_timer; } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f12acf8..6c77b2d 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1116,6 +1116,7 @@ void tick_setup_sched_timer(void) * Emulate tick processing via per-CPU hrtimers: */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + ts->sched_timer.irqsafe = 1; ts->sched_timer.function = tick_sched_timer; /* Get the next period (per cpu) */ diff --git a/kernel/watchdog.c b/kernel/watchdog.c index cbad091..870b748 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -357,6 +357,7 @@ static void watchdog_enable(unsigned int cpu) /* kick off the timer for the hardlockup detector */ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer->function = watchdog_timer_fn; + hrtimer->irqsafe = 1; /* Enable the perf event */ watchdog_nmi_enable(cpu); -- cgit v0.10.2 From ea408e3da40df36ea828ac753c3d2978722d8dfa Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 25 Jan 2012 11:08:40 +0100 Subject: timer-fd: Prevent live lock If hrtimer_try_to_cancel() requires a retry, then depending on the priority setting te retry loop might prevent timer callback completion on RT. Prevent that by waiting for completion on RT, no change for a non RT kernel. Reported-by: Sankara Muthukrishnan Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/fs/timerfd.c b/fs/timerfd.c index 9293121..d20e45a 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -380,7 +380,10 @@ static int do_timerfd_settime(int ufd, int flags, break; } spin_unlock_irq(&ctx->wqh.lock); - cpu_relax(); + if (isalarm(ctx)) + hrtimer_wait_for_timer(&ctx->t.alarm.timer); + else + hrtimer_wait_for_timer(&ctx->t.tmr); } /* -- cgit v0.10.2 From 460934140b740df3fb6a9e1f0f872e6d13f844f0 Mon Sep 17 00:00:00 2001 From: Watanabe Date: Sun, 28 Oct 2012 11:13:44 +0100 Subject: hrtimer: Raise softirq if hrtimer irq stalled When the hrtimer stall detection hits the softirq is not raised. 
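For illustration only — a freestanding user-space sketch with invented names (irq_handler_sketch, raise_softirq_sketch), not the kernel code: the point of the change below is that both the fast path and the stall-detected slow path of the interrupt handler funnel through a single exit label, so a pending HRTIMER_SOFTIRQ raise for deferred (non-irqsafe) timers can no longer be skipped.

#include <stdbool.h>
#include <stdio.h>

static bool softirq_raised;

/* Stand-in for raise_softirq_irqoff(HRTIMER_SOFTIRQ). */
static void raise_softirq_sketch(void)
{
	softirq_raised = true;
}

/* Sketch of the control flow: every return path goes through "out". */
static void irq_handler_sketch(bool deferred_work, bool stalled)
{
	int raise = deferred_work;

	if (!stalled)
		goto out;		/* fast path */

	/* slow path: stall detected, rate-limited warning, etc. */
	printf("hrtimer: interrupt took too long\n");

out:
	if (raise)
		raise_softirq_sketch();
}

int main(void)
{
	irq_handler_sketch(true, true);	/* stalled, with deferred work pending */
	printf("softirq raised: %s\n", softirq_raised ? "yes" : "no");
	return 0;
}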
Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 0d7fac6..2ddbd6f 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1559,11 +1559,7 @@ retry: if (expires_next.tv64 == KTIME_MAX || !tick_program_event(expires_next, 0)) { cpu_base->hang_detected = 0; - - if (raise) - raise_softirq_irqoff(HRTIMER_SOFTIRQ); - - return; + goto out; } /* @@ -1607,6 +1603,9 @@ retry: tick_program_event(expires_next, 1); printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta)); +out: + if (raise) + raise_softirq_irqoff(HRTIMER_SOFTIRQ); } /* -- cgit v0.10.2 From 73b8091420beccbc51530e3de906f4b26176bc1b Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Mon, 16 Sep 2013 14:09:19 -0700 Subject: hrtimer: Move schedule_work call to helper thread When run ltp leapsec_timer test, the following call trace is caught: BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: swapper/1 Preemption disabled at:[] cpu_startup_entry+0x133/0x310 CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.10.10-rt3 #2 Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 ffffffff81c2f800 ffff880076843e40 ffffffff8169918d ffff880076843e58 ffffffff8106db31 ffff88007684b4a0 ffff880076843e70 ffffffff8169d9c0 ffff88007684b4a0 ffff880076843eb0 ffffffff81059da1 0000001876851200 Call Trace: [] dump_stack+0x19/0x1b [] __might_sleep+0xf1/0x170 [] rt_spin_lock+0x20/0x50 [] queue_work_on+0x61/0x100 [] clock_was_set_delayed+0x21/0x30 [] do_timer+0x40e/0x660 [] tick_do_update_jiffies64+0xf7/0x140 [] tick_check_idle+0x92/0xc0 [] irq_enter+0x57/0x70 [] smp_apic_timer_interrupt+0x3e/0x9b [] apic_timer_interrupt+0x6a/0x70 [] ? cpuidle_enter_state+0x4c/0xc0 [] cpuidle_idle_call+0xd8/0x2d0 [] arch_cpu_idle+0xe/0x30 [] cpu_startup_entry+0x19e/0x310 [] start_secondary+0x1ad/0x1b0 The clock_was_set_delayed is called in hard IRQ handler (timer interrupt), which calls schedule_work. Under PREEMPT_RT_FULL, schedule_work calls spinlocks which could sleep, so it's not safe to call schedule_work in interrupt context. Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca (rt,ntp: Move call to schedule_delayed_work() to helper thread) from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which makes a similar change. add a helper thread which does the call to schedule_work and wake up that thread instead of calling schedule_work directly. Cc: stable-rt@vger.kernel.org Signed-off-by: Yang Shi Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 2ddbd6f..c383841 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -741,6 +742,44 @@ static void clock_was_set_work(struct work_struct *work) static DECLARE_WORK(hrtimer_work, clock_was_set_work); +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * RT can not call schedule_work from real interrupt context. + * Need to make a thread to do the real work. 
+ */ +static struct task_struct *clock_set_delay_thread; +static bool do_clock_set_delay; + +static int run_clock_set_delay(void *ignore) +{ + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if (do_clock_set_delay) { + do_clock_set_delay = false; + schedule_work(&hrtimer_work); + } + schedule(); + } + __set_current_state(TASK_RUNNING); + return 0; +} + +void clock_was_set_delayed(void) +{ + do_clock_set_delay = true; + /* Make visible before waking up process */ + smp_wmb(); + wake_up_process(clock_set_delay_thread); +} + +static __init int create_clock_set_delay_thread(void) +{ + clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd"); + BUG_ON(!clock_set_delay_thread); + return 0; +} +early_initcall(create_clock_set_delay_thread); +#else /* PREEMPT_RT_FULL */ /* * Called from timekeeping and resume code to reprogramm the hrtimer * interrupt device on all cpus. @@ -749,6 +788,7 @@ void clock_was_set_delayed(void) { schedule_work(&hrtimer_work); } +#endif #else -- cgit v0.10.2 From 9bdfce64a9098cef4aad43746aa4602a7b6f0963 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 3 Jul 2009 08:29:58 -0500 Subject: posix-timers: thread posix-cpu-timers on -rt posix-cpu-timer code takes non -rt safe locks in hard irq context. Move it to a thread. [ 3.0 fixes from Peter Zijlstra ] Signed-off-by: John Stultz Signed-off-by: Thomas Gleixner diff --git a/include/linux/init_task.h b/include/linux/init_task.h index b59240b..766558a 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -143,6 +143,12 @@ extern struct task_group root_task_group; # define INIT_PERF_EVENTS(tsk) #endif +#ifdef CONFIG_PREEMPT_RT_BASE +# define INIT_TIMER_LIST .posix_timer_list = NULL, +#else +# define INIT_TIMER_LIST +#endif + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN # define INIT_VTIME(tsk) \ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ @@ -208,6 +214,7 @@ extern struct task_group root_task_group; .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .timer_slack_ns = 50000, /* 50 usec default slack */ \ + INIT_TIMER_LIST \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 705fbb0..a6dc6b6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1174,6 +1174,9 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; +#ifdef CONFIG_PREEMPT_RT_BASE + struct task_struct *posix_timer_list; +#endif /* process credentials */ const struct cred __rcu *real_cred; /* objective and real subjective task diff --git a/init/main.c b/init/main.c index 63d3e8f..33e96d2 100644 --- a/init/main.c +++ b/init/main.c @@ -6,7 +6,7 @@ * GK 2/5/95 - Changed to support mounting root fs via NFS * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96 * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96 - * Simplified starting of init: Michael A. Griffith + * Simplified starting of init: Michael A. 
Griffith */ #define DEBUG /* Enable initcall_debug */ @@ -74,6 +74,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/fork.c b/kernel/fork.c index afe158e..3ac5af8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1107,6 +1107,9 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p) */ static void posix_cpu_timers_init(struct task_struct *tsk) { +#ifdef CONFIG_PREEMPT_RT_BASE + tsk->posix_timer_list = NULL; +#endif tsk->cputime_expires.prof_exp = 0; tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index c7f31aa..57ce5af 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -3,6 +3,7 @@ */ #include +#include #include #include #include @@ -663,7 +664,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags, /* * Disarm any old timer after extracting its expiry time. */ - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); ret = 0; old_incr = timer->it.cpu.incr; @@ -1110,7 +1111,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) /* * Now re-arm for the new expiry time. */ - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); arm_timer(timer); spin_unlock(&p->sighand->siglock); @@ -1177,10 +1178,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk) sig = tsk->signal; if (sig->cputimer.running) { struct task_cputime group_sample; + unsigned long flags; - raw_spin_lock(&sig->cputimer.lock); + raw_spin_lock_irqsave(&sig->cputimer.lock, flags); group_sample = sig->cputimer.cputime; - raw_spin_unlock(&sig->cputimer.lock); + raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags); if (task_cputime_expired(&group_sample, &sig->cputime_expires)) return 1; @@ -1194,13 +1196,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk) * already updated our counts. We need to check if any timers fire now. * Interrupts are disabled. 
*/ -void run_posix_cpu_timers(struct task_struct *tsk) +static void __run_posix_cpu_timers(struct task_struct *tsk) { LIST_HEAD(firing); struct k_itimer *timer, *next; unsigned long flags; - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); /* * The fast path checks that there are no expired thread or thread @@ -1265,6 +1267,175 @@ void run_posix_cpu_timers(struct task_struct *tsk) posix_cpu_timer_kick_nohz(); } +#ifdef CONFIG_PREEMPT_RT_BASE +#include +#include +DEFINE_PER_CPU(struct task_struct *, posix_timer_task); +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); + +static int posix_cpu_timers_thread(void *data) +{ + int cpu = (long)data; + + BUG_ON(per_cpu(posix_timer_task,cpu) != current); + + while (!kthread_should_stop()) { + struct task_struct *tsk = NULL; + struct task_struct *next = NULL; + + if (cpu_is_offline(cpu)) + goto wait_to_die; + + /* grab task list */ + raw_local_irq_disable(); + tsk = per_cpu(posix_timer_tasklist, cpu); + per_cpu(posix_timer_tasklist, cpu) = NULL; + raw_local_irq_enable(); + + /* its possible the list is empty, just return */ + if (!tsk) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + __set_current_state(TASK_RUNNING); + continue; + } + + /* Process task list */ + while (1) { + /* save next */ + next = tsk->posix_timer_list; + + /* run the task timers, clear its ptr and + * unreference it + */ + __run_posix_cpu_timers(tsk); + tsk->posix_timer_list = NULL; + put_task_struct(tsk); + + /* check if this is the last on the list */ + if (next == tsk) + break; + tsk = next; + } + } + return 0; + +wait_to_die: + /* Wait for kthread_stop */ + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + return 0; +} + +void run_posix_cpu_timers(struct task_struct *tsk) +{ + unsigned long cpu = smp_processor_id(); + struct task_struct *tasklist; + + BUG_ON(!irqs_disabled()); + if(!per_cpu(posix_timer_task, cpu)) + return; + /* get per-cpu references */ + tasklist = per_cpu(posix_timer_tasklist, cpu); + + /* check to see if we're already queued */ + if (!tsk->posix_timer_list) { + get_task_struct(tsk); + if (tasklist) { + tsk->posix_timer_list = tasklist; + } else { + /* + * The list is terminated by a self-pointing + * task_struct + */ + tsk->posix_timer_list = tsk; + } + per_cpu(posix_timer_tasklist, cpu) = tsk; + } + /* XXX signal the thread somehow */ + wake_up_process(per_cpu(posix_timer_task, cpu)); +} + +/* + * posix_cpu_thread_call - callback that gets triggered when a CPU is added. + * Here we can start up the necessary migration thread for the new CPU. + */ +static int posix_cpu_thread_call(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + int cpu = (long)hcpu; + struct task_struct *p; + struct sched_param param; + + switch (action) { + case CPU_UP_PREPARE: + p = kthread_create(posix_cpu_timers_thread, hcpu, + "posix_cpu_timers/%d",cpu); + if (IS_ERR(p)) + return NOTIFY_BAD; + p->flags |= PF_NOFREEZE; + kthread_bind(p, cpu); + /* Must be high prio to avoid getting starved */ + param.sched_priority = MAX_RT_PRIO-1; + sched_setscheduler(p, SCHED_FIFO, ¶m); + per_cpu(posix_timer_task,cpu) = p; + break; + case CPU_ONLINE: + /* Strictly unneccessary, as first user will wake it. */ + wake_up_process(per_cpu(posix_timer_task,cpu)); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + /* Unbind it from offline cpu so it can run. Fall thru. 
*/ + kthread_bind(per_cpu(posix_timer_task, cpu), + cpumask_any(cpu_online_mask)); + kthread_stop(per_cpu(posix_timer_task,cpu)); + per_cpu(posix_timer_task,cpu) = NULL; + break; + case CPU_DEAD: + kthread_stop(per_cpu(posix_timer_task,cpu)); + per_cpu(posix_timer_task,cpu) = NULL; + break; +#endif + } + return NOTIFY_OK; +} + +/* Register at highest priority so that task migration (migrate_all_tasks) + * happens before everything else. + */ +static struct notifier_block posix_cpu_thread_notifier = { + .notifier_call = posix_cpu_thread_call, + .priority = 10 +}; + +static int __init posix_cpu_thread_init(void) +{ + void *hcpu = (void *)(long)smp_processor_id(); + /* Start one for boot CPU. */ + unsigned long cpu; + + /* init the per-cpu posix_timer_tasklets */ + for_each_possible_cpu(cpu) + per_cpu(posix_timer_tasklist, cpu) = NULL; + + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu); + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu); + register_cpu_notifier(&posix_cpu_thread_notifier); + return 0; +} +early_initcall(posix_cpu_thread_init); +#else /* CONFIG_PREEMPT_RT_BASE */ +void run_posix_cpu_timers(struct task_struct *tsk) +{ + __run_posix_cpu_timers(tsk); +} +#endif /* CONFIG_PREEMPT_RT_BASE */ + /* * Set one of the process-wide special case CPU timers or RLIMIT_CPU. * The tsk->sighand->siglock must be held by the caller. -- cgit v0.10.2 From 17483359a394fa36cb0ccdb0ee4d0ac4528d935d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 3 Jul 2009 08:30:00 -0500 Subject: posix-timers: Shorten posix_cpu_timers/ kernel thread names Shorten the softirq kernel thread names because they always overflow the limited comm length, appearing as "posix_cpu_timer" CPU# times. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 57ce5af..2d1963c 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1374,7 +1374,7 @@ static int posix_cpu_thread_call(struct notifier_block *nfb, switch (action) { case CPU_UP_PREPARE: p = kthread_create(posix_cpu_timers_thread, hcpu, - "posix_cpu_timers/%d",cpu); + "posixcputmr/%d",cpu); if (IS_ERR(p)) return NOTIFY_BAD; p->flags |= PF_NOFREEZE; -- cgit v0.10.2 From 35f715cc8726108d325aa563587eb5d8a9b32d91 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 08:44:44 -0500 Subject: posix-timers: Avoid wakeups when no timers are active Waking the thread even when no timers are scheduled is useless. 
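As a rough standalone illustration (task_sketch, timer_work_pending and the other names are invented for the example; this is not the kernel code), the change boils down to gating the wakeup of the per-CPU posix timer thread on whether any per-thread or process-wide CPU timer is actually armed.

#include <stdbool.h>
#include <stdio.h>

struct cputime { long prof, virt, sched; };

struct task_sketch {
	bool exiting;
	struct cputime expires;		/* per-thread expiries */
	struct cputime group_expires;	/* process-wide expiries */
	bool queued;
};

static bool cputime_zero(const struct cputime *c)
{
	return !c->prof && !c->virt && !c->sched;
}

/* Mirrors the intent of __fastpath_timer_check(): only wake the timer
 * thread when there is something it could possibly expire. */
static bool timer_work_pending(const struct task_sketch *t)
{
	if (t->exiting)
		return false;
	return !cputime_zero(&t->expires) || !cputime_zero(&t->group_expires);
}

static void run_posix_cpu_timers_sketch(struct task_sketch *t)
{
	if (!t->queued && timer_work_pending(t)) {
		t->queued = true;
		printf("queue task and wake posix timer thread\n");
	}
}

int main(void)
{
	struct task_sketch idle = { 0 };
	struct task_sketch armed = { .expires = { .prof = 100 } };

	run_posix_cpu_timers_sketch(&idle);	/* nothing armed: no wakeup */
	run_posix_cpu_timers_sketch(&armed);	/* armed timer: wakeup */
	return 0;
}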
Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 2d1963c..4bf82f8 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1331,6 +1331,21 @@ wait_to_die: return 0; } +static inline int __fastpath_timer_check(struct task_struct *tsk) +{ + /* tsk == current, ensure it is safe to use ->signal/sighand */ + if (unlikely(tsk->exit_state)) + return 0; + + if (!task_cputime_zero(&tsk->cputime_expires)) + return 1; + + if (!task_cputime_zero(&tsk->signal->cputime_expires)) + return 1; + + return 0; +} + void run_posix_cpu_timers(struct task_struct *tsk) { unsigned long cpu = smp_processor_id(); @@ -1343,7 +1358,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) tasklist = per_cpu(posix_timer_tasklist, cpu); /* check to see if we're already queued */ - if (!tsk->posix_timer_list) { + if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { get_task_struct(tsk); if (tasklist) { tsk->posix_timer_list = tasklist; @@ -1355,9 +1370,9 @@ void run_posix_cpu_timers(struct task_struct *tsk) tsk->posix_timer_list = tsk; } per_cpu(posix_timer_tasklist, cpu) = tsk; + + wake_up_process(per_cpu(posix_timer_task, cpu)); } - /* XXX signal the thread somehow */ - wake_up_process(per_cpu(posix_timer_task, cpu)); } /* -- cgit v0.10.2 From 0a1feb80217e70570ad262944432999c915ef89c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 May 2011 16:59:16 +0200 Subject: sched-delay-put-task.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index a6dc6b6..cf866a5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1430,6 +1430,9 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif +#ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head put_rcu; +#endif }; /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ @@ -1593,6 +1596,15 @@ extern struct pid *cad_pid; extern void free_task(struct task_struct *tsk); #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) +#ifdef CONFIG_PREEMPT_RT_BASE +extern void __put_task_struct_cb(struct rcu_head *rhp); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + call_rcu(&t->put_rcu, __put_task_struct_cb); +} +#else extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) @@ -1600,6 +1612,7 @@ static inline void put_task_struct(struct task_struct *t) if (atomic_dec_and_test(&t->usage)) __put_task_struct(t); } +#endif #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void task_cputime(struct task_struct *t, diff --git a/kernel/fork.c b/kernel/fork.c index 3ac5af8..d1e28a3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -230,7 +230,9 @@ static inline void put_signal_struct(struct signal_struct *sig) if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } - +#ifdef CONFIG_PREEMPT_RT_BASE +static +#endif void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); @@ -245,7 +247,18 @@ void __put_task_struct(struct task_struct *tsk) if (!profile_handoff_task(tsk)) free_task(tsk); } +#ifndef CONFIG_PREEMPT_RT_BASE EXPORT_SYMBOL_GPL(__put_task_struct); +#else +void __put_task_struct_cb(struct rcu_head *rhp) +{ + struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); + + __put_task_struct(tsk); + +} +EXPORT_SYMBOL_GPL(__put_task_struct_cb); +#endif void __init __weak arch_task_cache_init(void) { } -- cgit v0.10.2 From 85c522deb39a50e15271194e16eb80f5fe76ac51 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 12:12:51 +0200 Subject: sched-limit-nr-migrate.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1f35ad6..1553d27 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -272,7 +272,11 @@ late_initcall(sched_init_debug); * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ +#ifndef CONFIG_PREEMPT_RT_FULL const_debug unsigned int sysctl_sched_nr_migrate = 32; +#else +const_debug unsigned int sysctl_sched_nr_migrate = 8; +#endif /* * period over which we average the RT time consumption, measured -- cgit v0.10.2 From 967ee2862959218c3a4e92fcd90f6c92d4d7c79f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 6 Jun 2011 12:20:33 +0200 Subject: sched-mmdrop-delayed.patch Needs thread context (pgd_lock) -> ifdeffed. 
workqueues wont work with RT Signed-off-by: Thomas Gleixner diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 67689a5..d87823c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -448,6 +449,9 @@ struct mm_struct { bool tlb_flush_pending; #endif struct uprobes_state uprobes_state; +#ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head delayed_drop; +#endif }; /* first nid will either be a valid NID or one of these values */ diff --git a/include/linux/sched.h b/include/linux/sched.h index cf866a5..e74578a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2129,12 +2129,24 @@ extern struct mm_struct * mm_alloc(void); /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); + static inline void mmdrop(struct mm_struct * mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) __mmdrop(mm); } +#ifdef CONFIG_PREEMPT_RT_BASE +extern void __mmdrop_delayed(struct rcu_head *rhp); +static inline void mmdrop_delayed(struct mm_struct *mm) +{ + if (atomic_dec_and_test(&mm->mm_count)) + call_rcu(&mm->delayed_drop, __mmdrop_delayed); +} +#else +# define mmdrop_delayed(mm) mmdrop(mm) +#endif + /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ diff --git a/kernel/fork.c b/kernel/fork.c index d1e28a3..b681c38 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -611,6 +611,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); +#ifdef CONFIG_PREEMPT_RT_BASE +/* + * RCU callback for delayed mm drop. Not strictly rcu, but we don't + * want another facility to make this work. + */ +void __mmdrop_delayed(struct rcu_head *rhp) +{ + struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); + + __mmdrop(mm); +} +#endif + /* * Decrement the use count and release all resources for an mm. */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1553d27..4f8e309 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1905,8 +1905,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) finish_arch_post_lock_switch(); fire_sched_in_preempt_notifiers(current); + /* + * We use mmdrop_delayed() here so we don't have to do the + * full __mmdrop() when we are the last user. + */ if (mm) - mmdrop(mm); + mmdrop_delayed(mm); if (unlikely(prev_state == TASK_DEAD)) { /* * Remove function-return probe instances associated with this @@ -4437,6 +4441,8 @@ static int migration_cpu_stop(void *data) #ifdef CONFIG_HOTPLUG_CPU +static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); + /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. @@ -4449,7 +4455,12 @@ void idle_task_exit(void) if (mm != &init_mm) switch_mm(mm, &init_mm, current); - mmdrop(mm); + + /* + * Defer the cleanup to an alive cpu. On RT we can neither + * call mmdrop() nor mmdrop_delayed() from here. 
+ */ + per_cpu(idle_last_mm, smp_processor_id()) = mm; } /* @@ -4773,6 +4784,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DEAD: calc_load_migrate(rq); + if (per_cpu(idle_last_mm, cpu)) { + mmdrop(per_cpu(idle_last_mm, cpu)); + per_cpu(idle_last_mm, cpu) = NULL; + } break; #endif } -- cgit v0.10.2 From 8177eac023c79a28b67894531912f7c69d9e878d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 25 Jun 2011 09:21:04 +0200 Subject: sched-rt-mutex-wakeup.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index e74578a..51e84f0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1020,6 +1020,7 @@ enum perf_event_task_context { struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ + volatile long saved_state; /* saved state for "spinlock sleepers" */ void *stack; atomic_t usage; unsigned int flags; /* per process flags, defined below */ @@ -2015,6 +2016,7 @@ extern void xtime_update(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); +extern int wake_up_lock_sleeper(struct task_struct * tsk); extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4f8e309..44be15a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1504,8 +1504,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) */ smp_mb__before_spinlock(); raw_spin_lock_irqsave(&p->pi_lock, flags); - if (!(p->state & state)) + if (!(p->state & state)) { + /* + * The task might be running due to a spinlock sleeper + * wakeup. Check the saved state and set it to running + * if the wakeup condition is true. + */ + if (!(wake_flags & WF_LOCK_SLEEPER)) { + if (p->saved_state & state) + p->saved_state = TASK_RUNNING; + } goto out; + } + + /* + * If this is a regular wakeup, then we can unconditionally + * clear the saved state of a "lock sleeper". + */ + if (!(wake_flags & WF_LOCK_SLEEPER)) + p->saved_state = TASK_RUNNING; success = 1; /* we're going to change ->state */ cpu = task_cpu(p); @@ -1602,6 +1619,18 @@ int wake_up_process(struct task_struct *p) } EXPORT_SYMBOL(wake_up_process); +/** + * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" + * @p: The process to be woken up. + * + * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate + * the nature of the wakeup. 
+ */ +int wake_up_lock_sleeper(struct task_struct *p) +{ + return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); +} + int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 4f31059..ca61374 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -898,6 +898,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* child wakeup after fork */ #define WF_MIGRATED 0x4 /* internal use, task got migrated */ +#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ /* * To aid in avoiding the subversion of "niceness" due to uneven distribution -- cgit v0.10.2 From e189367f6b74bfe4d0c88cf1f19054fda795e540 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jun 2011 09:19:06 +0200 Subject: sched-might-sleep-do-not-account-rcu-depth.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index f1f1bc3..e56a6b2 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -190,6 +190,11 @@ void synchronize_rcu(void); * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() (current->rcu_read_lock_nesting) +#ifndef CONFIG_PREEMPT_RT_FULL +#define sched_rcu_preempt_depth() rcu_preempt_depth() +#else +static inline int sched_rcu_preempt_depth(void) { return 0; } +#endif #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -213,6 +218,8 @@ static inline int rcu_preempt_depth(void) return 0; } +#define sched_rcu_preempt_depth() rcu_preempt_depth() + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* Internal to kernel */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 44be15a..34247ee 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6689,7 +6689,8 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { - int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); + int nested = (preempt_count() & ~PREEMPT_ACTIVE) + + sched_rcu_preempt_depth(); return (nested == preempt_offset); } -- cgit v0.10.2 From 8a1275be207e70273c6170b24be2f553add716b4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Jun 2011 11:25:03 +0200 Subject: sched-cond-resched.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 34247ee..d9e3b58 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3903,9 +3903,17 @@ static inline int should_resched(void) static void __cond_resched(void) { - add_preempt_count(PREEMPT_ACTIVE); - __schedule(); - sub_preempt_count(PREEMPT_ACTIVE); + do { + add_preempt_count(PREEMPT_ACTIVE); + __schedule(); + sub_preempt_count(PREEMPT_ACTIVE); + /* + * Check again in case we missed a preemption + * opportunity between schedule and now. 
+ */ + barrier(); + + } while (need_resched()); } int __sched _cond_resched(void) -- cgit v0.10.2 From 9cdcfaed714ae3fac60287e712017a4a64adb4f8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 14 Jul 2011 09:56:44 +0200 Subject: cond-resched-softirq-fix.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index 51e84f0..a99e9e2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2539,12 +2539,16 @@ extern int __cond_resched_lock(spinlock_t *lock); __cond_resched_lock(lock); \ }) +#ifndef CONFIG_PREEMPT_RT_FULL extern int __cond_resched_softirq(void); #define cond_resched_softirq() ({ \ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ __cond_resched_softirq(); \ }) +#else +# define cond_resched_softirq() cond_resched() +#endif static inline void cond_resched_rcu(void) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d9e3b58..3ec1cdc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3954,6 +3954,7 @@ int __cond_resched_lock(spinlock_t *lock) } EXPORT_SYMBOL(__cond_resched_lock); +#ifndef CONFIG_PREEMPT_RT_FULL int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); @@ -3967,6 +3968,7 @@ int __sched __cond_resched_softirq(void) return 0; } EXPORT_SYMBOL(__cond_resched_softirq); +#endif /** * yield - yield the current processor to other threads. -- cgit v0.10.2 From 77cc862fa486b606a5bfb3aa81f808052b6e6219 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 22:51:33 +0200 Subject: cond-resched-lock-rt-tweak.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index a99e9e2..bf1cf6d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2528,7 +2528,7 @@ extern int _cond_resched(void); extern int __cond_resched_lock(spinlock_t *lock); -#ifdef CONFIG_PREEMPT_COUNT +#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL) #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET #else #define PREEMPT_LOCK_OFFSET 0 -- cgit v0.10.2 From edf8bf66d9d0c94af95c6b814174e5043a3e11f9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 13 Sep 2011 16:42:35 +0200 Subject: sched-disable-ttwu-queue.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 99399f8..938274c 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -50,11 +50,15 @@ SCHED_FEAT(LB_BIAS, true) */ SCHED_FEAT(NONTASK_POWER, true) +#ifndef CONFIG_PREEMPT_RT_FULL /* * Queue remote wakeups on the target CPU and process them * using the scheduler IPI. Reduces rq->lock contention/bounces. */ SCHED_FEAT(TTWU_QUEUE, true) +#else +SCHED_FEAT(TTWU_QUEUE, false) +#endif SCHED_FEAT(FORCE_SD_OVERLAP, false) SCHED_FEAT(RT_RUNTIME_SHARE, true) -- cgit v0.10.2 From f8bc4647787669af342c8d4009893ac6754d6383 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:03:52 +0200 Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT Carsten reported problems when running: taskset 01 chrt -f 1 sleep 1 from within rc.local on a F15 machine. The task stays running and never gets on the run queue because some of the run queues have rt_throttled=1 which does not go away. Works nice from a ssh login shell. Disabling CONFIG_RT_GROUP_SCHED solves that as well. 
Signed-off-by: Thomas Gleixner diff --git a/init/Kconfig b/init/Kconfig index bf74b46..a1798e8 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1042,6 +1042,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED + depends on !PREEMPT_RT_FULL default n help This feature lets you explicitly allocate real CPU bandwidth -- cgit v0.10.2 From 591311e6e58e2c171c237fb522aaadbd63b03483 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 13 Dec 2011 21:42:19 +0100 Subject: sched: ttwu: Return success when only changing the saved_state value When a task blocks on a rt lock, it saves the current state in p->saved_state, so a lock related wake up will not destroy the original state. When a real wakeup happens, while the task is running due to a lock wakeup already, we update p->saved_state to TASK_RUNNING, but we do not return success, which might cause another wakeup in the waitqueue code and the task remains in the waitqueue list. Return success in that case as well. Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3ec1cdc..9034918 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1511,8 +1511,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * if the wakeup condition is true. */ if (!(wake_flags & WF_LOCK_SLEEPER)) { - if (p->saved_state & state) + if (p->saved_state & state) { p->saved_state = TASK_RUNNING; + success = 1; + } } goto out; } -- cgit v0.10.2 From a6e1de1a21be70d251a0d2e017b062141c58bc85 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 18 Mar 2013 15:12:49 -0400 Subject: sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock In -rt, most spin_locks() turn into mutexes. One of these spin_lock conversions is performed on the workqueue gcwq->lock. When the idle worker is worken, the first thing it will do is grab that same lock and it too will block, possibly jumping into the same code, but because nr_running would already be decremented it prevents an infinite loop. But this is still a waste of CPU cycles, and it doesn't follow the method of mainline, as new workers should only be woken when a worker thread is truly going to sleep, and not just blocked on a spin_lock(). Check the saved_state too before waking up new workers. Cc: stable-rt@vger.kernel.org Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9034918..3d063e2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2475,8 +2475,10 @@ need_resched: * If a worker went to sleep, notify and ask workqueue * whether it wants to wake up a task to maintain * concurrency. + * Only call wake up if prev isn't blocked on a sleeping + * spin lock. */ - if (prev->flags & PF_WQ_WORKER) { + if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { struct task_struct *to_wakeup; to_wakeup = wq_worker_sleeping(prev, cpu); -- cgit v0.10.2 From 0beecd3a9e60f7d0aabbd36b40785ac40bd7913f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:27 -0500 Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT Instead of playing with non-preemption, introduce explicit startup serialization. This is more robust and cleaner as well. 
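A rough user-space sketch of that serialization (pthreads stand in for the per-CPU stopper threads; stopper_lock and the other names are purely illustrative, not quoted from the patch): the queueing side holds a mutex while it posts work to every worker, and each worker takes and immediately drops the same mutex before executing, so none of them can start until work has been queued everywhere.

#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_mutex_t stopper_lock = PTHREAD_MUTEX_INITIALIZER;
static int work_posted[NWORKERS];

static void *stopper_thread(void *arg)
{
	long id = (long)arg;

	/* Rendezvous: wait until the queueing side has posted work everywhere. */
	pthread_mutex_lock(&stopper_lock);
	pthread_mutex_unlock(&stopper_lock);

	printf("worker %ld runs, work_posted=%d\n", id, work_posted[id]);
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	long i;

	pthread_mutex_lock(&stopper_lock);	/* queueing phase begins */
	for (i = 0; i < NWORKERS; i++) {
		work_posted[i] = 1;
		pthread_create(&tid[i], NULL, stopper_thread, (void *)i);
	}
	pthread_mutex_unlock(&stopper_lock);	/* all work queued, let them run */

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Unlike preempt_disable(), the mutex can be held across the whole queueing phase on RT without disabling preemption, which is why the scheme is both sleepable and still provides the "all queued before any runs" guarantee.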
Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index c09f295..51f8088 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -137,6 +137,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, /* static data for stop_cpus */ static DEFINE_MUTEX(stop_cpus_mutex); +static DEFINE_MUTEX(stopper_lock); static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work); static void queue_stop_cpus_work(const struct cpumask *cpumask, @@ -155,14 +156,13 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask, } /* - * Disable preemption while queueing to avoid getting - * preempted by a stopper which might wait for other stoppers - * to enter @fn which can lead to deadlock. + * Make sure that all work is queued on all cpus before we + * any of the cpus can execute it. */ - preempt_disable(); + mutex_lock(&stopper_lock); for_each_cpu(cpu, cpumask) cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu)); - preempt_enable(); + mutex_unlock(&stopper_lock); } static int __stop_cpus(const struct cpumask *cpumask, @@ -279,6 +279,16 @@ repeat: struct cpu_stop_done *done = work->done; char ksym_buf[KSYM_NAME_LEN] __maybe_unused; + /* + * Wait until the stopper finished scheduling on all + * cpus + */ + mutex_lock(&stopper_lock); + /* + * Let other cpu threads continue as well + */ + mutex_unlock(&stopper_lock); + /* cpu stop callbacks are not allowed to sleep */ preempt_disable(); -- cgit v0.10.2 From 93dd9624644443b7e0aa847704e4987a7f03ff22 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 11:01:51 +0200 Subject: stomp-machine-raw-lock.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 51f8088..bbdef53 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -29,12 +29,12 @@ struct cpu_stop_done { atomic_t nr_todo; /* nr left to execute */ bool executed; /* actually executed? */ int ret; /* collected return value */ - struct completion completion; /* fired if nr_todo reaches 0 */ + struct task_struct *waiter; /* woken when nr_todo reaches 0 */ }; /* the actual stopper, one per every possible cpu, enabled on online cpus */ struct cpu_stopper { - spinlock_t lock; + raw_spinlock_t lock; bool enabled; /* is this stopper enabled? 
*/ struct list_head works; /* list of pending works */ }; @@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) { memset(done, 0, sizeof(*done)); atomic_set(&done->nr_todo, nr_todo); - init_completion(&done->completion); + done->waiter = current; } /* signal completion unless @done is NULL */ @@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed) if (done) { if (executed) done->executed = true; - if (atomic_dec_and_test(&done->nr_todo)) - complete(&done->completion); + if (atomic_dec_and_test(&done->nr_todo)) { + wake_up_process(done->waiter); + done->waiter = NULL; + } } } @@ -69,7 +71,7 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) unsigned long flags; - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); if (stopper->enabled) { list_add_tail(&work->list, &stopper->works); @@ -77,7 +79,23 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) } else cpu_stop_signal_done(work->done, false); - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); +} + +static void wait_for_stop_done(struct cpu_stop_done *done) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + while (atomic_read(&done->nr_todo)) { + schedule(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + /* + * We need to wait until cpu_stop_signal_done() has cleared + * done->waiter. + */ + while (done->waiter) + cpu_relax(); + set_current_state(TASK_RUNNING); } /** @@ -111,7 +129,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) cpu_stop_init_done(&done, 1); cpu_stop_queue_work(cpu, &work); - wait_for_completion(&done.completion); + wait_for_stop_done(&done); return done.executed ? done.ret : -ENOENT; } @@ -172,7 +190,7 @@ static int __stop_cpus(const struct cpumask *cpumask, cpu_stop_init_done(&done, cpumask_weight(cpumask)); queue_stop_cpus_work(cpumask, fn, arg, &done); - wait_for_completion(&done.completion); + wait_for_stop_done(&done); return done.executed ? done.ret : -ENOENT; } @@ -251,9 +269,9 @@ static int cpu_stop_should_run(unsigned int cpu) unsigned long flags; int run; - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); run = !list_empty(&stopper->works); - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); return run; } @@ -265,13 +283,13 @@ static void cpu_stopper_thread(unsigned int cpu) repeat: work = NULL; - spin_lock_irq(&stopper->lock); + raw_spin_lock_irq(&stopper->lock); if (!list_empty(&stopper->works)) { work = list_first_entry(&stopper->works, struct cpu_stop_work, list); list_del_init(&work->list); } - spin_unlock_irq(&stopper->lock); + raw_spin_unlock_irq(&stopper->lock); if (work) { cpu_stop_fn_t fn = work->fn; @@ -303,7 +321,13 @@ repeat: kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL, ksym_buf), arg); + /* + * Make sure that the wakeup and setting done->waiter + * to NULL is atomic. 
+ */ + local_irq_disable(); cpu_stop_signal_done(done, true); + local_irq_enable(); goto repeat; } } @@ -322,20 +346,20 @@ static void cpu_stop_park(unsigned int cpu) unsigned long flags; /* drain remaining works */ - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); list_for_each_entry(work, &stopper->works, list) cpu_stop_signal_done(work->done, false); stopper->enabled = false; - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); } static void cpu_stop_unpark(unsigned int cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - spin_lock_irq(&stopper->lock); + raw_spin_lock_irq(&stopper->lock); stopper->enabled = true; - spin_unlock_irq(&stopper->lock); + raw_spin_unlock_irq(&stopper->lock); } static struct smp_hotplug_thread cpu_stop_threads = { @@ -357,7 +381,7 @@ static int __init cpu_stop_init(void) for_each_possible_cpu(cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - spin_lock_init(&stopper->lock); + raw_spin_lock_init(&stopper->lock); INIT_LIST_HEAD(&stopper->works); } @@ -544,7 +568,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, ret = stop_machine_cpu_stop(&smdata); /* Busy wait for completion. */ - while (!completion_done(&done.completion)) + while (atomic_read(&done.nr_todo)) cpu_relax(); mutex_unlock(&stop_cpus_mutex); -- cgit v0.10.2 From fe3989bebde11ecb64dc78cbd568f0e4798b6d4a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 3 Oct 2012 17:21:53 +0100 Subject: stomp_machine: Use mutex_trylock when called from inactive cpu If the stop machinery is called from inactive CPU we cannot use mutex_lock, because some other stomp machine invokation might be in progress and the mutex can be contended. We cannot schedule from this context, so trylock and loop. Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index bbdef53..5f02a3f 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -160,7 +160,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work); static void queue_stop_cpus_work(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg, - struct cpu_stop_done *done) + struct cpu_stop_done *done, bool inactive) { struct cpu_stop_work *work; unsigned int cpu; @@ -177,7 +177,12 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask, * Make sure that all work is queued on all cpus before we * any of the cpus can execute it. */ - mutex_lock(&stopper_lock); + if (!inactive) { + mutex_lock(&stopper_lock); + } else { + while (!mutex_trylock(&stopper_lock)) + cpu_relax(); + } for_each_cpu(cpu, cpumask) cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu)); mutex_unlock(&stopper_lock); @@ -189,7 +194,7 @@ static int __stop_cpus(const struct cpumask *cpumask, struct cpu_stop_done done; cpu_stop_init_done(&done, cpumask_weight(cpumask)); - queue_stop_cpus_work(cpumask, fn, arg, &done); + queue_stop_cpus_work(cpumask, fn, arg, &done, false); wait_for_stop_done(&done); return done.executed ? done.ret : -ENOENT; } @@ -564,7 +569,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, set_state(&smdata, STOPMACHINE_PREPARE); cpu_stop_init_done(&done, num_active_cpus()); queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata, - &done); + &done, true); ret = stop_machine_cpu_stop(&smdata); /* Busy wait for completion. 
*/ -- cgit v0.10.2 From 3cd4c79971ed7c83706be4356eea67562661e5e4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 15 Jun 2011 12:36:06 +0200 Subject: hotplug: Lightweight get online cpus get_online_cpus() is a heavy weight function which involves a global mutex. migrate_disable() wants a simpler construct which prevents only a CPU from going doing while a task is in a migrate disabled section. Implement a per cpu lockless mechanism, which serializes only in the real unplug case on a global mutex. That serialization affects only tasks on the cpu which should be brought down. Signed-off-by: Thomas Gleixner diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 801ff9e..0c2b05c 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -179,6 +179,8 @@ extern void get_online_cpus(void); extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); +extern void pin_current_cpu(void); +extern void unpin_current_cpu(void); #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) @@ -206,6 +208,8 @@ static inline void cpu_hotplug_done(void) {} #define put_online_cpus() do { } while (0) #define cpu_hotplug_disable() do { } while (0) #define cpu_hotplug_enable() do { } while (0) +static inline void pin_current_cpu(void) { } +static inline void unpin_current_cpu(void) { } #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) /* These aren't inline functions due to a GCC bug. */ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) diff --git a/kernel/cpu.c b/kernel/cpu.c index d7f07a2..58e5322 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -63,6 +63,101 @@ static struct { .refcount = 0, }; +struct hotplug_pcp { + struct task_struct *unplug; + int refcount; + struct completion synced; +}; + +static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); + +/** + * pin_current_cpu - Prevent the current cpu from being unplugged + * + * Lightweight version of get_online_cpus() to prevent cpu from being + * unplugged when code runs in a migration disabled region. + * + * Must be called with preemption disabled (preempt_count = 1)! + */ +void pin_current_cpu(void) +{ + struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); + +retry: + if (!hp->unplug || hp->refcount || preempt_count() > 1 || + hp->unplug == current) { + hp->refcount++; + return; + } + preempt_enable(); + mutex_lock(&cpu_hotplug.lock); + mutex_unlock(&cpu_hotplug.lock); + preempt_disable(); + goto retry; +} + +/** + * unpin_current_cpu - Allow unplug of current cpu + * + * Must be called with preemption or interrupts disabled! + */ +void unpin_current_cpu(void) +{ + struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); + + WARN_ON(hp->refcount <= 0); + + /* This is safe. sync_unplug_thread is pinned to this cpu */ + if (!--hp->refcount && hp->unplug && hp->unplug != current) + wake_up_process(hp->unplug); +} + +/* + * FIXME: Is this really correct under all circumstances ? + */ +static int sync_unplug_thread(void *data) +{ + struct hotplug_pcp *hp = data; + + preempt_disable(); + hp->unplug = current; + set_current_state(TASK_UNINTERRUPTIBLE); + while (hp->refcount) { + schedule_preempt_disabled(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + preempt_enable(); + complete(&hp->synced); + return 0; +} + +/* + * Start the sync_unplug_thread on the target cpu and wait for it to + * complete. 
+ */ +static int cpu_unplug_begin(unsigned int cpu) +{ + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + struct task_struct *tsk; + + init_completion(&hp->synced); + tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu); + if (IS_ERR(tsk)) + return (PTR_ERR(tsk)); + kthread_bind(tsk, cpu); + wake_up_process(tsk); + wait_for_completion(&hp->synced); + return 0; +} + +static void cpu_unplug_done(unsigned int cpu) +{ + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + + hp->unplug = NULL; +} + void get_online_cpus(void) { might_sleep(); @@ -282,13 +377,14 @@ static int __ref take_cpu_down(void *_param) /* Requires cpu_add_remove_lock to be held */ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) { - int err, nr_calls = 0; + int mycpu, err, nr_calls = 0; void *hcpu = (void *)(long)cpu; unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; struct take_cpu_down_param tcd_param = { .mod = mod, .hcpu = hcpu, }; + cpumask_var_t cpumask; if (num_online_cpus() == 1) return -EBUSY; @@ -296,7 +392,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) if (!cpu_online(cpu)) return -EINVAL; - cpu_hotplug_begin(); + /* Move the downtaker off the unplug cpu */ + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; + cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); + set_cpus_allowed_ptr(current, cpumask); + free_cpumask_var(cpumask); + preempt_disable(); + mycpu = smp_processor_id(); + if (mycpu == cpu) { + printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); + preempt_enable(); + return -EBUSY; + } + preempt_enable(); err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err) { @@ -304,7 +413,16 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); printk("%s: attempt to take down CPU %u failed\n", __func__, cpu); - goto out_release; + goto out_cancel; + } + + cpu_hotplug_begin(); + err = cpu_unplug_begin(cpu); + if (err) { + nr_calls--; + __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); + printk("cpu_unplug_begin(%d) failed\n", cpu); + goto out_cancel; } smpboot_park_threads(cpu); @@ -336,6 +454,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) check_for_tasks(cpu); out_release: + cpu_unplug_done(cpu); +out_cancel: cpu_hotplug_done(); if (!err) cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); -- cgit v0.10.2 From 24ff554021113fcf827e3aac0bca57956fb169a3 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Sun, 16 Oct 2011 18:56:43 +0800 Subject: hotplug: sync_unplug: No "\n" in task name Otherwise the output will look a little odd. 
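For illustration only (background on where the name ends up, not part of the patch): the printf-style name handed to kthread_create() is formatted directly into the new thread's comm, so the stray "\n" shows up in every ps/ftrace line that prints it.

        /* before: comm becomes "sync_unplug/1\n" and output wraps oddly */
        tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);

        /* after:  comm becomes "sync_unplug/1" */
        tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
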
Signed-off-by: Yong Zhang Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com Signed-off-by: Thomas Gleixner diff --git a/kernel/cpu.c b/kernel/cpu.c index 58e5322..7de9868 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -142,7 +142,7 @@ static int cpu_unplug_begin(unsigned int cpu) struct task_struct *tsk; init_completion(&hp->synced); - tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu); + tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); if (IS_ERR(tsk)) return (PTR_ERR(tsk)); kthread_bind(tsk, cpu); -- cgit v0.10.2 From f31b617a9cfe6498b9ebc27187fee5385c17e337 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Thu, 28 Jul 2011 11:16:00 +0800 Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry When retry happens, it's likely that the task has been migrated to another cpu (except unplug failed), but it still derefernces the original hotplug_pcp per cpu data. Update the pointer to hotplug_pcp in the retry path, so it points to the current cpu. Signed-off-by: Yong Zhang Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110728031600.GA338@windriver.com Signed-off-by: Thomas Gleixner diff --git a/kernel/cpu.c b/kernel/cpu.c index 7de9868..b9c39b5 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -81,9 +81,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); */ void pin_current_cpu(void) { - struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); + struct hotplug_pcp *hp; retry: + hp = &__get_cpu_var(hotplug_pcp); + if (!hp->unplug || hp->refcount || preempt_count() > 1 || hp->unplug == current) { hp->refcount++; -- cgit v0.10.2 From 5f1a16bdb3b1bda23879ac0ff31739b3c255b03d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 16 Jun 2011 13:26:08 +0200 Subject: sched-migrate-disable.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/preempt.h b/include/linux/preempt.h index f237ebd..fbf934b 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -130,6 +130,14 @@ do { \ #endif /* CONFIG_PREEMPT_COUNT */ +#ifdef CONFIG_SMP +extern void migrate_disable(void); +extern void migrate_enable(void); +#else +# define migrate_disable() barrier() +# define migrate_enable() barrier() +#endif + #ifdef CONFIG_PREEMPT_RT_FULL # define preempt_disable_rt() preempt_disable() # define preempt_enable_rt() preempt_enable() diff --git a/include/linux/sched.h b/include/linux/sched.h index bf1cf6d..9861c9b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1063,6 +1063,7 @@ struct task_struct { #endif unsigned int policy; + int migrate_disable; int nr_cpus_allowed; cpumask_t cpus_allowed; @@ -1436,9 +1437,6 @@ struct task_struct { #endif }; -/* Future-safe accessor for struct task_struct's cpus_allowed. */ -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int node, int pages, bool migrated); extern void set_numabalancing_state(bool enabled); @@ -2734,6 +2732,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ +/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ +static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) +{ + if (p->migrate_disable) + return cpumask_of(task_cpu(p)); + + return &p->cpus_allowed; +} + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3d063e2..7ef7cd3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4345,11 +4345,12 @@ void init_idle(struct task_struct *idle, int cpu) #ifdef CONFIG_SMP void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { - if (p->sched_class && p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, new_mask); - + if (!p->migrate_disable) { + if (p->sched_class && p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, new_mask); + p->nr_cpus_allowed = cpumask_weight(new_mask); + } cpumask_copy(&p->cpus_allowed, new_mask); - p->nr_cpus_allowed = cpumask_weight(new_mask); } /* @@ -4395,7 +4396,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) do_set_cpus_allowed(p, new_mask); /* Can the task run on the task's current CPU? If so, we're done */ - if (cpumask_test_cpu(task_cpu(p), new_mask)) + if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable) goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); @@ -4414,6 +4415,83 @@ out: } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); +void migrate_disable(void) +{ + struct task_struct *p = current; + const struct cpumask *mask; + unsigned long flags; + struct rq *rq; + + preempt_disable(); + if (p->migrate_disable) { + p->migrate_disable++; + preempt_enable(); + return; + } + + pin_current_cpu(); + if (unlikely(!scheduler_running)) { + p->migrate_disable = 1; + preempt_enable(); + return; + } + rq = task_rq_lock(p, &flags); + p->migrate_disable = 1; + mask = tsk_cpus_allowed(p); + + WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); + + if (!cpumask_equal(&p->cpus_allowed, mask)) { + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); + } + task_rq_unlock(rq, p, &flags); + preempt_enable(); +} +EXPORT_SYMBOL(migrate_disable); + +void migrate_enable(void) +{ + struct task_struct *p = current; + const struct cpumask *mask; + unsigned long flags; + struct rq *rq; + + WARN_ON_ONCE(p->migrate_disable <= 0); + + preempt_disable(); + if (p->migrate_disable > 1) { + p->migrate_disable--; + preempt_enable(); + return; + } + + if (unlikely(!scheduler_running)) { + p->migrate_disable = 0; + unpin_current_cpu(); + preempt_enable(); + return; + } + + rq = task_rq_lock(p, &flags); + p->migrate_disable = 0; + mask = tsk_cpus_allowed(p); + + WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); + + if (!cpumask_equal(&p->cpus_allowed, mask)) { + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); + } + + task_rq_unlock(rq, p, &flags); + unpin_current_cpu(); + preempt_enable(); +} +EXPORT_SYMBOL(migrate_enable); + /* * Move (not current) task off this cpu, onto dest cpu. 
We're doing * this because either it can't run here any more (set_cpus_allowed() diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 4c0d0e5..0a846e7 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void) if (!printk_ratelimit()) goto out_enable; - printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " - "code: %s/%d\n", - preempt_count() - 1, current->comm, current->pid); + printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " + "code: %s/%d\n", preempt_count() - 1, + current->migrate_disable, current->comm, current->pid); print_symbol("caller is %s\n", (long)__builtin_return_address(0)); dump_stack(); -- cgit v0.10.2 From 7694f6caf8071e443eb1d4f9a9cf12c6b75f00e6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 19:35:29 +0200 Subject: hotplug-use-migrate-disable.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/cpu.c b/kernel/cpu.c index b9c39b5..ef0e472 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -400,14 +400,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); set_cpus_allowed_ptr(current, cpumask); free_cpumask_var(cpumask); - preempt_disable(); + migrate_disable(); mycpu = smp_processor_id(); if (mycpu == cpu) { printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); - preempt_enable(); + migrate_enable(); return -EBUSY; } - preempt_enable(); err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err) { @@ -458,6 +457,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) out_release: cpu_unplug_done(cpu); out_cancel: + migrate_enable(); cpu_hotplug_done(); if (!err) cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); -- cgit v0.10.2 From deeeefcd45210cc90d6b8640019c6ebe8c4d3830 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Sun, 16 Oct 2011 18:56:44 +0800 Subject: hotplug: Call cpu_unplug_begin() before DOWN_PREPARE cpu_unplug_begin() should be called before CPU_DOWN_PREPARE, because at CPU_DOWN_PREPARE cpu_active is cleared and sched_domain is rebuilt. Otherwise the 'sync_unplug' thread will be running on the cpu on which it's created and not bound on the cpu which is about to go down. I found that by an incorrect warning on smp_processor_id() called by sync_unplug/1, and trace shows below: (echo 1 > /sys/device/system/cpu/cpu1/online) bash-1664 [000] 83.136620: _cpu_down: Bind sync_unplug to cpu 1 bash-1664 [000] 83.136623: sched_wait_task: comm=sync_unplug/1 pid=1724 prio=120 bash-1664 [000] 83.136624: _cpu_down: Wake sync_unplug bash-1664 [000] 83.136629: sched_wakeup: comm=sync_unplug/1 pid=1724 prio=120 success=1 target_cpu=000 Wants to be folded back.... 
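A condensed sketch of the ordering this change establishes (illustrative only; names are taken from the diff below, the rest of _cpu_down() and its error unwinding are abbreviated):

        static int _cpu_down_ordering_sketch(unsigned int cpu, unsigned long mod,
                                             void *hcpu)
        {
                int err, nr_calls = 0;

                cpu_hotplug_begin();

                /*
                 * Start sync_unplug/<cpu> while the CPU is still in
                 * cpu_active_mask, so kthread_bind() really pins the thread
                 * to the CPU that is about to go down.
                 */
                err = cpu_unplug_begin(cpu);
                if (err) {
                        printk("cpu_unplug_begin(%d) failed\n", cpu);
                        goto out_cancel;
                }

                /* Only now clear cpu_active and rebuild the sched domains. */
                err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
                if (err) {
                        nr_calls--;
                        __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                        goto out_release;
                }

                /* ... smpboot_park_threads(), take_cpu_down(), etc. ... */
                return 0;

        out_release:
                cpu_unplug_done(cpu);
        out_cancel:
                cpu_hotplug_done();
                return err;
        }
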
Signed-off-by: Yong Zhang Link: http://lkml.kernel.org/r/1318762607-2261-3-git-send-email-yong.zhang0@gmail.com Signed-off-by: Thomas Gleixner diff --git a/kernel/cpu.c b/kernel/cpu.c index ef0e472..7e79306 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -408,22 +408,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) return -EBUSY; } - err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); + cpu_hotplug_begin(); + err = cpu_unplug_begin(cpu); if (err) { - nr_calls--; - __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); - printk("%s: attempt to take down CPU %u failed\n", - __func__, cpu); + printk("cpu_unplug_begin(%d) failed\n", cpu); goto out_cancel; } - cpu_hotplug_begin(); - err = cpu_unplug_begin(cpu); + err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err) { nr_calls--; __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); - printk("cpu_unplug_begin(%d) failed\n", cpu); - goto out_cancel; + printk("%s: attempt to take down CPU %u failed\n", + __func__, cpu); + goto out_release; } smpboot_park_threads(cpu); -- cgit v0.10.2 From ebbf5742fbb77eb1a6f69fd67b868e5de4fa6a09 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:56:42 +0200 Subject: ftrace-migrate-disable-tracing.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 5eaa746..cb2b03c 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -56,6 +56,8 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; + unsigned short migrate_disable; + unsigned short padding; }; #define FTRACE_MAX_EVENT \ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 138077b..4caf463 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -442,7 +442,7 @@ int __trace_puts(unsigned long ip, const char *str, int size) local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; - event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, + event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, irq_flags, preempt_count()); if (!event) return 0; @@ -1519,6 +1519,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); + + entry->migrate_disable = (tsk) ? 
tsk->migrate_disable & 0xFF : 0; } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); @@ -2413,9 +2415,10 @@ static void print_lat_help_header(struct seq_file *m) seq_puts(m, "# | / _----=> need-resched \n"); seq_puts(m, "# || / _---=> hardirq/softirq \n"); seq_puts(m, "# ||| / _--=> preempt-depth \n"); - seq_puts(m, "# |||| / delay \n"); - seq_puts(m, "# cmd pid ||||| time | caller \n"); - seq_puts(m, "# \\ / ||||| \\ | / \n"); + seq_puts(m, "# |||| / _--=> migrate-disable\n"); + seq_puts(m, "# ||||| / delay \n"); + seq_puts(m, "# cmd pid |||||| time | caller \n"); + seq_puts(m, "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index b03b1f8..271616c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -166,6 +166,7 @@ static int trace_define_common_fields(void) __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); + __common_field(unsigned short, migrate_disable); return ret; } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 34e7cba..b54b3c8 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -635,6 +635,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) else ret = trace_seq_putc(s, '.'); + if (entry->migrate_disable) + ret = trace_seq_printf(s, "%x", entry->migrate_disable); + else + ret = trace_seq_putc(s, '.'); + return ret; } -- cgit v0.10.2 From 71aef246dcf7c4054a3bae7bea40331d6e625e7e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 16 Nov 2011 13:19:35 -0500 Subject: tracing: Show padding as unsigned short RT added two bytes to trace migrate disable counting to the trace events and used two bytes of the padding to make the change. The structures and all were updated correctly, but the display in the event formats was not: cat /debug/tracing/events/sched/sched_switch/format name: sched_switch ID: 51 format: field:unsigned short common_type; offset:0; size:2; signed:0; field:unsigned char common_flags; offset:2; size:1; signed:0; field:unsigned char common_preempt_count; offset:3; size:1; signed:0; field:int common_pid; offset:4; size:4; signed:1; field:unsigned short common_migrate_disable; offset:8; size:2; signed:0; field:int common_padding; offset:10; size:2; signed:0; The field for common_padding has the correct size and offset, but the use of "int" might confuse some parsers (and people that are reading it). This needs to be changed to "unsigned short". 
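For illustration, the common header described by that format corresponds to the following layout (a sketch built only from the offsets and sizes shown above; the real definition is struct trace_entry plus the RT additions):

        struct trace_entry_common_sketch {
                unsigned short type;            /* offset 0,  size 2 */
                unsigned char  flags;           /* offset 2,  size 1 */
                unsigned char  preempt_count;   /* offset 3,  size 1 */
                int            pid;             /* offset 4,  size 4 */
                unsigned short migrate_disable; /* offset 8,  size 2 */
                unsigned short padding;         /* offset 10, size 2 - must be
                                                   exported as "unsigned short",
                                                   not "int" */
        };
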
Signed-off-by: Steven Rostedt Link: http://lkml.kernel.org/r/1321467575.4181.36.camel@frodo Cc: stable-rt@vger.kernel.org Signed-off-by: Thomas Gleixner diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 271616c..7531ded 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -167,6 +167,7 @@ static int trace_define_common_fields(void) __common_field(unsigned char, preempt_count); __common_field(int, pid); __common_field(unsigned short, migrate_disable); + __common_field(unsigned short, padding); return ret; } -- cgit v0.10.2 From 1a40cc806e7f6c16c63acbe6a542c3a4bc35bf8f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 19:48:20 +0200 Subject: migrate-disable-rt-variant.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/preempt.h b/include/linux/preempt.h index fbf934b..f906def 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -143,11 +143,15 @@ extern void migrate_enable(void); # define preempt_enable_rt() preempt_enable() # define preempt_disable_nort() barrier() # define preempt_enable_nort() barrier() +# define migrate_disable_rt() migrate_disable() +# define migrate_enable_rt() migrate_enable() #else # define preempt_disable_rt() barrier() # define preempt_enable_rt() barrier() # define preempt_disable_nort() preempt_disable() # define preempt_enable_nort() preempt_enable() +# define migrate_disable_rt() barrier() +# define migrate_enable_rt() barrier() #endif #ifdef CONFIG_PREEMPT_NOTIFIERS -- cgit v0.10.2 From 78211b80f38536b8e0e2dea5dddb665969fe4c0e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Aug 2011 15:03:35 +0200 Subject: sched: Optimize migrate_disable Change from task_rq_lock() to raw_spin_lock(&rq->lock) to avoid a few atomic ops. See comment on why it should be safe. Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7ef7cd3..8cdf838 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4435,7 +4435,19 @@ void migrate_disable(void) preempt_enable(); return; } - rq = task_rq_lock(p, &flags); + + /* + * Since this is always current we can get away with only locking + * rq->lock, the ->cpus_allowed value can normally only be changed + * while holding both p->pi_lock and rq->lock, but seeing that this + * it current, we cannot actually be waking up, so all code that + * relies on serialization against p->pi_lock is out of scope. + * + * Taking rq->lock serializes us against things like + * set_cpus_allowed_ptr() that can still happen concurrently. + */ + rq = this_rq(); + raw_spin_lock_irqsave(&rq->lock, flags); p->migrate_disable = 1; mask = tsk_cpus_allowed(p); @@ -4446,7 +4458,7 @@ void migrate_disable(void) p->sched_class->set_cpus_allowed(p, mask); p->nr_cpus_allowed = cpumask_weight(mask); } - task_rq_unlock(rq, p, &flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); preempt_enable(); } EXPORT_SYMBOL(migrate_disable); @@ -4474,7 +4486,11 @@ void migrate_enable(void) return; } - rq = task_rq_lock(p, &flags); + /* + * See comment in migrate_disable(). 
+ */ + rq = this_rq(); + raw_spin_lock_irqsave(&rq->lock, flags); p->migrate_disable = 0; mask = tsk_cpus_allowed(p); @@ -4486,7 +4502,7 @@ void migrate_enable(void) p->nr_cpus_allowed = cpumask_weight(mask); } - task_rq_unlock(rq, p, &flags); + raw_spin_unlock_irqrestore(&rq->lock, flags); unpin_current_cpu(); preempt_enable(); } -- cgit v0.10.2 From 2481a38554d43a00b23ecce4dd3175653b845e41 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Aug 2011 15:14:58 +0200 Subject: sched: Generic migrate_disable Make migrate_disable() be a preempt_disable() for !rt kernels. This allows generic code to use it but still enforces that these code sections stay relatively small. A preemptible migrate_disable() accessible for general use would allow people growing arbitrary per-cpu crap instead of clean these things up. Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org diff --git a/include/linux/preempt.h b/include/linux/preempt.h index f906def..116ac32 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -130,28 +130,25 @@ do { \ #endif /* CONFIG_PREEMPT_COUNT */ -#ifdef CONFIG_SMP -extern void migrate_disable(void); -extern void migrate_enable(void); -#else -# define migrate_disable() barrier() -# define migrate_enable() barrier() -#endif - #ifdef CONFIG_PREEMPT_RT_FULL # define preempt_disable_rt() preempt_disable() # define preempt_enable_rt() preempt_enable() # define preempt_disable_nort() barrier() # define preempt_enable_nort() barrier() -# define migrate_disable_rt() migrate_disable() -# define migrate_enable_rt() migrate_enable() +# ifdef CONFIG_SMP + extern void migrate_disable(void); + extern void migrate_enable(void); +# else /* CONFIG_SMP */ +# define migrate_disable() barrier() +# define migrate_enable() barrier() +# endif /* CONFIG_SMP */ #else # define preempt_disable_rt() barrier() # define preempt_enable_rt() barrier() # define preempt_disable_nort() preempt_disable() # define preempt_enable_nort() preempt_enable() -# define migrate_disable_rt() barrier() -# define migrate_enable_rt() barrier() +# define migrate_disable() preempt_disable() +# define migrate_enable() preempt_enable() #endif #ifdef CONFIG_PREEMPT_NOTIFIERS diff --git a/include/linux/sched.h b/include/linux/sched.h index 9861c9b..9ef7bee 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1063,7 +1063,9 @@ struct task_struct { #endif unsigned int policy; +#ifdef CONFIG_PREEMPT_RT_FULL int migrate_disable; +#endif int nr_cpus_allowed; cpumask_t cpus_allowed; @@ -2732,11 +2734,22 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ +static inline int __migrate_disabled(struct task_struct *p) +{ +#ifdef CONFIG_PREEMPT_RT_FULL + return p->migrate_disable; +#else + return 0; +#endif +} + /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) { +#ifdef CONFIG_PREEMPT_RT_FULL if (p->migrate_disable) return cpumask_of(task_cpu(p)); +#endif return &p->cpus_allowed; } diff --git a/include/linux/smp.h b/include/linux/smp.h index f30c7b1..e05b694 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -188,13 +188,8 @@ static inline void __smp_call_function_single(int cpuid, #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() -#ifndef CONFIG_PREEMPT_RT_FULL -# define get_cpu_light() get_cpu() -# define put_cpu_light() put_cpu() -#else -# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) -# define put_cpu_light() migrate_enable() -#endif +#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) +#define put_cpu_light() migrate_enable() /* * Callback to arch code if there's nosmp or maxcpus=0 on the diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8cdf838..17dc85f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4345,7 +4345,7 @@ void init_idle(struct task_struct *idle, int cpu) #ifdef CONFIG_SMP void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { - if (!p->migrate_disable) { + if (!__migrate_disabled(p)) { if (p->sched_class && p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, new_mask); p->nr_cpus_allowed = cpumask_weight(new_mask); @@ -4396,7 +4396,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) do_set_cpus_allowed(p, new_mask); /* Can the task run on the task's current CPU? If so, we're done */ - if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable) + if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); @@ -4415,6 +4415,7 @@ out: } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); +#ifdef CONFIG_PREEMPT_RT_FULL void migrate_disable(void) { struct task_struct *p = current; @@ -4507,6 +4508,7 @@ void migrate_enable(void) preempt_enable(); } EXPORT_SYMBOL(migrate_enable); +#endif /* CONFIG_PREEMPT_RT_FULL */ /* * Move (not current) task off this cpu, onto dest cpu. We're doing diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4caf463..402fcc6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1520,7 +1520,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); - entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0; + entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 0a846e7..dbb1570 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -41,7 +41,7 @@ notrace unsigned int debug_smp_processor_id(void) printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " "code: %s/%d\n", preempt_count() - 1, - current->migrate_disable, current->comm, current->pid); + __migrate_disabled(current), current->comm, current->pid); print_symbol("caller is %s\n", (long)__builtin_return_address(0)); dump_stack(); -- cgit v0.10.2 From c494ce00e3ab851b7d8ea24e15ec37b5e132c4c9 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 23 Aug 2011 16:12:43 +0200 Subject: sched, rt: Fix migrate_enable() thinko Assigning mask = tsk_cpus_allowed(p) after p->migrate_disable = 0 ensures that we won't see a mask change.. no push/pull, we stack tasks on one CPU. Also add a couple fields to sched_debug for the next guy. [ Build fix from Stratos Psomadakis ] Signed-off-by: Mike Galbraith Cc: Paul E. McKenney Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1314108763.6689.4.camel@marge.simson.net Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 17dc85f..09892ba 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4492,12 +4492,14 @@ void migrate_enable(void) */ rq = this_rq(); raw_spin_lock_irqsave(&rq->lock, flags); - p->migrate_disable = 0; mask = tsk_cpus_allowed(p); + p->migrate_disable = 0; WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); if (!cpumask_equal(&p->cpus_allowed, mask)) { + /* Get the mask now that migration is enabled */ + mask = tsk_cpus_allowed(p); if (p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, mask); p->nr_cpus_allowed = cpumask_weight(mask); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index fd9ca1d..70812af 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -256,6 +256,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) P(rt_throttled); PN(rt_time); PN(rt_runtime); +#ifdef CONFIG_SMP + P(rt_nr_migratory); +#endif #undef PN #undef P @@ -585,6 +588,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) #endif P(policy); P(prio); +#ifdef CONFIG_PREEMPT_RT_FULL + P(migrate_disable); +#endif + P(nr_cpus_allowed); #undef PN #undef __PN #undef P -- cgit v0.10.2 From dba592cc8ffefed504095146ace0956bbd4e4a55 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 2 Sep 2011 14:29:27 +0200 Subject: sched: teach migrate_disable about atomic contexts [] spin_bug+0x94/0xa8 [] do_raw_spin_lock+0x43/0xea [] _raw_spin_lock_irqsave+0x6b/0x85 [] ? migrate_disable+0x75/0x12d [] ? pin_current_cpu+0x36/0xb0 [] migrate_disable+0x75/0x12d [] pagefault_disable+0xe/0x1f [] copy_from_user_nmi+0x74/0xe6 [] perf_callchain_user+0xf3/0x135 Now clearly we can't go around taking locks from NMI context, cure this by short-circuiting migrate_disable() when we're in an atomic context already. 
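In sketch form the short-circuit looks like this (illustrative only; the diff below keeps the existing slow path and applies the same check to migrate_enable()):

        void migrate_disable(void)
        {
                struct task_struct *p = current;

                if (in_atomic()) {
                        /*
                         * NMI/hardirq/preempt-off context: we must not take
                         * rq->lock or block on the hotplug mutex here, and the
                         * task cannot be migrated anyway, so only count the
                         * call (a CONFIG_SCHED_DEBUG aid) and return.
                         */
                        p->migrate_disable_atomic++;
                        return;
                }

                /* ... the normal, possibly blocking, migrate-disable path ... */
        }
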
Add some extra debugging to avoid things like: preempt_disable() migrate_disable(); preempt_enable(); migrate_enable(); Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1314967297.1301.14.camel@twins Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org diff --git a/include/linux/sched.h b/include/linux/sched.h index 9ef7bee..6714813 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1065,6 +1065,9 @@ struct task_struct { unsigned int policy; #ifdef CONFIG_PREEMPT_RT_FULL int migrate_disable; +# ifdef CONFIG_SCHED_DEBUG + int migrate_disable_atomic; +# endif #endif int nr_cpus_allowed; cpumask_t cpus_allowed; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 09892ba..92538c0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4423,6 +4423,17 @@ void migrate_disable(void) unsigned long flags; struct rq *rq; + if (in_atomic()) { +#ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic++; +#endif + return; + } + +#ifdef CONFIG_SCHED_DEBUG + WARN_ON_ONCE(p->migrate_disable_atomic); +#endif + preempt_disable(); if (p->migrate_disable) { p->migrate_disable++; @@ -4471,6 +4482,16 @@ void migrate_enable(void) unsigned long flags; struct rq *rq; + if (in_atomic()) { +#ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic--; +#endif + return; + } + +#ifdef CONFIG_SCHED_DEBUG + WARN_ON_ONCE(p->migrate_disable_atomic); +#endif WARN_ON_ONCE(p->migrate_disable <= 0); preempt_disable(); -- cgit v0.10.2 From 5a5be9fd56add495878f09be0ae1882778d6ad7e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 27 Sep 2011 08:40:23 -0400 Subject: sched: Postpone actual migration disalbe to schedule The migrate_disable() can cause a bit of a overhead to the RT kernel, as changing the affinity is expensive to do at every lock encountered. As a running task can not migrate, the actual disabling of migration does not need to occur until the task is about to schedule out. In most cases, a task that disables migration will enable it before it schedules making this change improve performance tremendously. [ Frank Rowand: UP compile fix ] Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Clark Williams Link: http://lkml.kernel.org/r/20110927124422.779693167@goodmis.org Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 92538c0..b38ae06 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2364,6 +2364,135 @@ static inline void schedule_debug(struct task_struct *prev) schedstat_inc(this_rq(), sched_count); } +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) +#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */ +#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN) +#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN) + +static inline void update_migrate_disable(struct task_struct *p) +{ + const struct cpumask *mask; + + if (likely(!p->migrate_disable)) + return; + + /* Did we already update affinity? */ + if (unlikely(migrate_disabled_updated(p))) + return; + + /* + * Since this is always current we can get away with only locking + * rq->lock, the ->cpus_allowed value can normally only be changed + * while holding both p->pi_lock and rq->lock, but seeing that this + * is current, we cannot actually be waking up, so all code that + * relies on serialization against p->pi_lock is out of scope. 
+ * + * Having rq->lock serializes us against things like + * set_cpus_allowed_ptr() that can still happen concurrently. + */ + mask = tsk_cpus_allowed(p); + + WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); + + if (!cpumask_equal(&p->cpus_allowed, mask)) { + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); + + /* Let migrate_enable know to fix things back up */ + p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; + } +} + +void migrate_disable(void) +{ + struct task_struct *p = current; + + if (in_atomic()) { +#ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic++; +#endif + return; + } + +#ifdef CONFIG_SCHED_DEBUG + WARN_ON_ONCE(p->migrate_disable_atomic); +#endif + + preempt_disable(); + if (p->migrate_disable) { + p->migrate_disable++; + preempt_enable(); + return; + } + + pin_current_cpu(); + p->migrate_disable = 1; + preempt_enable(); +} +EXPORT_SYMBOL(migrate_disable); + +void migrate_enable(void) +{ + struct task_struct *p = current; + const struct cpumask *mask; + unsigned long flags; + struct rq *rq; + + if (in_atomic()) { +#ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic--; +#endif + return; + } + +#ifdef CONFIG_SCHED_DEBUG + WARN_ON_ONCE(p->migrate_disable_atomic); +#endif + WARN_ON_ONCE(p->migrate_disable <= 0); + + preempt_disable(); + if (migrate_disable_count(p) > 1) { + p->migrate_disable--; + preempt_enable(); + return; + } + + if (unlikely(migrate_disabled_updated(p))) { + /* + * See comment in update_migrate_disable() about locking. + */ + rq = this_rq(); + raw_spin_lock_irqsave(&rq->lock, flags); + mask = tsk_cpus_allowed(p); + /* + * Clearing migrate_disable causes tsk_cpus_allowed to + * show the tasks original cpu affinity. + */ + p->migrate_disable = 0; + + WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); + + if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) { + /* Get the mask now that migration is enabled */ + mask = tsk_cpus_allowed(p); + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); + } + raw_spin_unlock_irqrestore(&rq->lock, flags); + } else + p->migrate_disable = 0; + + unpin_current_cpu(); + preempt_enable(); +} +EXPORT_SYMBOL(migrate_enable); +#else +static inline void update_migrate_disable(struct task_struct *p) { } +#define migrate_disabled_updated(p) 0 +#endif + static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->on_rq || rq->skip_clock_update < 0) @@ -2463,6 +2592,8 @@ need_resched: smp_mb__before_spinlock(); raw_spin_lock_irq(&rq->lock); + update_migrate_disable(prev); + switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely(signal_pending_state(prev->state, prev))) { @@ -4345,7 +4476,7 @@ void init_idle(struct task_struct *idle, int cpu) #ifdef CONFIG_SMP void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { - if (!__migrate_disabled(p)) { + if (!migrate_disabled_updated(p)) { if (p->sched_class && p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, new_mask); p->nr_cpus_allowed = cpumask_weight(new_mask); @@ -4415,124 +4546,6 @@ out: } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); -#ifdef CONFIG_PREEMPT_RT_FULL -void migrate_disable(void) -{ - struct task_struct *p = current; - const struct cpumask *mask; - unsigned long flags; - struct rq *rq; - - if (in_atomic()) { -#ifdef CONFIG_SCHED_DEBUG - p->migrate_disable_atomic++; -#endif - return; - } - -#ifdef 
CONFIG_SCHED_DEBUG - WARN_ON_ONCE(p->migrate_disable_atomic); -#endif - - preempt_disable(); - if (p->migrate_disable) { - p->migrate_disable++; - preempt_enable(); - return; - } - - pin_current_cpu(); - if (unlikely(!scheduler_running)) { - p->migrate_disable = 1; - preempt_enable(); - return; - } - - /* - * Since this is always current we can get away with only locking - * rq->lock, the ->cpus_allowed value can normally only be changed - * while holding both p->pi_lock and rq->lock, but seeing that this - * it current, we cannot actually be waking up, so all code that - * relies on serialization against p->pi_lock is out of scope. - * - * Taking rq->lock serializes us against things like - * set_cpus_allowed_ptr() that can still happen concurrently. - */ - rq = this_rq(); - raw_spin_lock_irqsave(&rq->lock, flags); - p->migrate_disable = 1; - mask = tsk_cpus_allowed(p); - - WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); - - if (!cpumask_equal(&p->cpus_allowed, mask)) { - if (p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, mask); - p->nr_cpus_allowed = cpumask_weight(mask); - } - raw_spin_unlock_irqrestore(&rq->lock, flags); - preempt_enable(); -} -EXPORT_SYMBOL(migrate_disable); - -void migrate_enable(void) -{ - struct task_struct *p = current; - const struct cpumask *mask; - unsigned long flags; - struct rq *rq; - - if (in_atomic()) { -#ifdef CONFIG_SCHED_DEBUG - p->migrate_disable_atomic--; -#endif - return; - } - -#ifdef CONFIG_SCHED_DEBUG - WARN_ON_ONCE(p->migrate_disable_atomic); -#endif - WARN_ON_ONCE(p->migrate_disable <= 0); - - preempt_disable(); - if (p->migrate_disable > 1) { - p->migrate_disable--; - preempt_enable(); - return; - } - - if (unlikely(!scheduler_running)) { - p->migrate_disable = 0; - unpin_current_cpu(); - preempt_enable(); - return; - } - - /* - * See comment in migrate_disable(). - */ - rq = this_rq(); - raw_spin_lock_irqsave(&rq->lock, flags); - mask = tsk_cpus_allowed(p); - p->migrate_disable = 0; - - WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); - - if (!cpumask_equal(&p->cpus_allowed, mask)) { - /* Get the mask now that migration is enabled */ - mask = tsk_cpus_allowed(p); - if (p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, mask); - p->nr_cpus_allowed = cpumask_weight(mask); - } - - raw_spin_unlock_irqrestore(&rq->lock, flags); - unpin_current_cpu(); - preempt_enable(); -} -EXPORT_SYMBOL(migrate_enable); -#endif /* CONFIG_PREEMPT_RT_FULL */ - /* * Move (not current) task off this cpu, onto dest cpu. We're doing * this because either it can't run here any more (set_cpus_allowed() -- cgit v0.10.2 From bd3332707bb49a7e4d69ce375cb8d9988e543aad Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Wed, 20 Nov 2013 07:22:09 +0800 Subject: allow preemption in recursive migrate_disable call Minor cleanup in migrate_disable/migrate_enable. The recursive case does not need to disable preemption as it is "pinned" to the current cpu any way so it is safe to preempt it. 
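A minimal sketch of the resulting fast path (illustrative; the atomic-context and debug checks from the earlier patches are omitted):

        void migrate_disable(void)
        {
                struct task_struct *p = current;

                if (p->migrate_disable) {
                        /*
                         * Already pinned to this CPU: a nested call only bumps
                         * the counter; preemption can stay enabled because the
                         * task cannot leave the CPU either way.
                         */
                        p->migrate_disable++;
                        return;
                }

                /*
                 * First-level call: disable preemption only around pinning the
                 * CPU and setting the counter, so the two appear atomic.
                 */
                preempt_disable();
                pin_current_cpu();
                p->migrate_disable = 1;
                preempt_enable();
        }

migrate_enable() is adjusted symmetrically: the nested decrement no longer needs preempt_disable() either.
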
Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b38ae06..fdf5edd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2419,13 +2419,12 @@ void migrate_disable(void) WARN_ON_ONCE(p->migrate_disable_atomic); #endif - preempt_disable(); if (p->migrate_disable) { p->migrate_disable++; - preempt_enable(); return; } + preempt_disable(); pin_current_cpu(); p->migrate_disable = 1; preempt_enable(); @@ -2451,13 +2450,12 @@ void migrate_enable(void) #endif WARN_ON_ONCE(p->migrate_disable <= 0); - preempt_disable(); if (migrate_disable_count(p) > 1) { p->migrate_disable--; - preempt_enable(); return; } + preempt_disable(); if (unlikely(migrate_disabled_updated(p))) { /* * See comment in update_migrate_disable() about locking. -- cgit v0.10.2 From 0273ae2dd2669b017c4e11d3840a0b69e8dba2d6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 27 Sep 2011 08:40:24 -0400 Subject: sched: Do not compare cpu masks in scheduler Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Clark Williams Link: http://lkml.kernel.org/r/20110927124423.128129033@goodmis.org Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fdf5edd..7dde497 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2392,16 +2392,12 @@ static inline void update_migrate_disable(struct task_struct *p) */ mask = tsk_cpus_allowed(p); - WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); - if (!cpumask_equal(&p->cpus_allowed, mask)) { - if (p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, mask); - p->nr_cpus_allowed = cpumask_weight(mask); - - /* Let migrate_enable know to fix things back up */ - p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; - } + /* Let migrate_enable know to fix things back up */ + p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; } void migrate_disable(void) -- cgit v0.10.2 From 605cf87d062e92e121b4d7b5a70c388ec806c05c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 27 Sep 2011 08:40:25 -0400 Subject: sched: Have migrate_disable ignore bounded threads Signed-off-by: Peter Zijlstra Cc: Peter Zijlstra Cc: Clark Williams Link: http://lkml.kernel.org/r/20110927124423.567944215@goodmis.org Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7dde497..1105e72 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2404,7 +2404,7 @@ void migrate_disable(void) { struct task_struct *p = current; - if (in_atomic()) { + if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { #ifdef CONFIG_SCHED_DEBUG p->migrate_disable_atomic++; #endif @@ -2434,7 +2434,7 @@ void migrate_enable(void) unsigned long flags; struct rq *rq; - if (in_atomic()) { + if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { #ifdef CONFIG_SCHED_DEBUG p->migrate_disable_atomic--; #endif @@ -2454,26 +2454,21 @@ void migrate_enable(void) preempt_disable(); if (unlikely(migrate_disabled_updated(p))) { /* - * See comment in update_migrate_disable() about locking. + * Undo whatever update_migrate_disable() did, also see there + * about locking. */ rq = this_rq(); raw_spin_lock_irqsave(&rq->lock, flags); - mask = tsk_cpus_allowed(p); + /* * Clearing migrate_disable causes tsk_cpus_allowed to * show the tasks original cpu affinity. 
*/ p->migrate_disable = 0; - - WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); - - if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) { - /* Get the mask now that migration is enabled */ - mask = tsk_cpus_allowed(p); - if (p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, mask); - p->nr_cpus_allowed = cpumask_weight(mask); - } + mask = tsk_cpus_allowed(p); + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); raw_spin_unlock_irqrestore(&rq->lock, flags); } else p->migrate_disable = 0; -- cgit v0.10.2 From bbd2f8de86b29aa5e1ed2f0ab4e447cd22acb1e1 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Mon, 24 Mar 2014 13:18:48 +0100 Subject: sched: dont calculate hweight in update_migrate_disable() Proposal for a minor optimization in update_migrate_disable - its only a few instructions saved but those are in the hot path of locks so it might be worth it When being scheduled out while migrate_disable > 0 and migrate_disabled_updated is not yet set we end up here (kernel/sched/core.c): static inline void update_migrate_disable(struct task_struct *p) { ... mask = tsk_cpus_allowed(p); if (p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, mask); p->nr_cpus_allowed = cpumask_weight(mask); as we only can get here if migrate_disable > 0 there is no need to calculate the cpumask_weight(mask) as tsk_cpus_allowed in that case will return cpumask_of(task_cpu(p)) which only can have a hamming weight of 1 anyway. So we can simply do: p->nr_cpus_allowed = 1; without changing the behavior. Reviewed-by: Steven Rostedt Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1105e72..e98f555 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2394,7 +2394,8 @@ static inline void update_migrate_disable(struct task_struct *p) if (p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, mask); - p->nr_cpus_allowed = cpumask_weight(mask); + /* mask==cpumask_of(task_cpu(p)) which has a cpumask_weight==1 */ + p->nr_cpus_allowed = 1; /* Let migrate_enable know to fix things back up */ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; -- cgit v0.10.2 From ccd49065c0b73289fcc726c9e27fd22d7e02c4b5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 4 Nov 2011 20:48:36 +0100 Subject: sched-clear-pf-thread-bound-on-fallback-rq.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e98f555..c21721e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1247,6 +1247,12 @@ out: } } + /* + * Clear PF_NO_SETAFFINITY, otherwise we wreckage + * migrate_disable/enable. See optimization for + * PF_NO_SETAFFINITY tasks there. 
+ */ + p->flags &= ~PF_NO_SETAFFINITY; return dest_cpu; } -- cgit v0.10.2 From 086eb6f6bfee117803e858278dec495424bcefc1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 16:29:27 +0200 Subject: net-netif_rx_ni-migrate-disable.patch Signed-off-by: Thomas Gleixner diff --git a/net/core/dev.c b/net/core/dev.c index f9a892b..41ae38b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3267,11 +3267,11 @@ int netif_rx_ni(struct sk_buff *skb) { int err; - preempt_disable(); + migrate_disable(); err = netif_rx(skb); if (local_softirq_pending()) thread_do_softirq(); - preempt_enable(); + migrate_enable(); return err; } -- cgit v0.10.2 From 0be6e65c2f57d62663e4df7a516686aa421f5f89 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 3 Jul 2009 13:16:38 -0500 Subject: softirq: Sanitize softirq pending for NOHZ/RT Signed-off-by: Thomas Gleixner diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9fadfa2..674d90e 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -387,6 +387,8 @@ extern void __raise_softirq_irqoff(unsigned int nr); extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +extern void softirq_check_pending_idle(void); + /* This is the worklist that queues up per-cpu softirq work. * * send_remote_sendirq() adds work to these lists, and diff --git a/kernel/softirq.c b/kernel/softirq.c index c647956..a1c156a 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -62,6 +62,69 @@ char *softirq_to_name[NR_SOFTIRQS] = { "TASKLET", "SCHED", "HRTIMER", "RCU" }; +#ifdef CONFIG_NO_HZ_COMMON +# ifdef CONFIG_PREEMPT_RT_FULL +/* + * On preempt-rt a softirq might be blocked on a lock. There might be + * no other runnable task on this CPU because the lock owner runs on + * some other CPU. So we have to go into idle with the pending bit + * set. Therefor we need to check this otherwise we warn about false + * positives which confuses users and defeats the whole purpose of + * this test. + * + * This code is called with interrupts disabled. + */ +void softirq_check_pending_idle(void) +{ + static int rate_limit; + u32 warnpending = 0, pending; + + if (rate_limit >= 10) + return; + + pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; + if (pending) { + struct task_struct *tsk; + + tsk = __get_cpu_var(ksoftirqd); + /* + * The wakeup code in rtmutex.c wakes up the task + * _before_ it sets pi_blocked_on to NULL under + * tsk->pi_lock. So we need to check for both: state + * and pi_blocked_on. 
+ */ + raw_spin_lock(&tsk->pi_lock); + + if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING)) + warnpending = 1; + + raw_spin_unlock(&tsk->pi_lock); + } + + if (warnpending) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + pending); + rate_limit++; + } +} +# else +/* + * On !PREEMPT_RT we just printk rate limited: + */ +void softirq_check_pending_idle(void) +{ + static int rate_limit; + + if (rate_limit < 10 && + (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + local_softirq_pending()); + rate_limit++; + } +} +# endif +#endif + /* * we cannot loop indefinitely here to avoid userspace starvation, * but we also don't want to introduce a worst case 1/HZ latency diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6c77b2d..1f4299b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -724,14 +724,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return false; if (unlikely(local_softirq_pending() && cpu_online(cpu))) { - static int ratelimit; - - if (ratelimit < 10 && - (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { - pr_warn("NOHZ: local_softirq_pending %02x\n", - (unsigned int) local_softirq_pending()); - ratelimit++; - } + softirq_check_pending_idle(); return false; } -- cgit v0.10.2 From 469206117d04faef147507d77d36e99c05671f8a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 18:51:23 +0200 Subject: lockdep-rt.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index cc05eb7..0977829 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -25,8 +25,6 @@ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) -# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) -# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, #else # define trace_hardirqs_on() do { } while (0) @@ -39,9 +37,15 @@ # define trace_softirqs_enabled(p) 0 # define trace_hardirq_enter() do { } while (0) # define trace_hardirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS +#endif + +#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) +#else # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) -# define INIT_TRACE_IRQFLAGS #endif #if defined(CONFIG_IRQSOFF_TRACER) || \ diff --git a/kernel/lockdep.c b/kernel/lockdep.c index e16c45b..b74f7a5 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -3541,6 +3541,7 @@ static void check_flags(unsigned long flags) } } +#ifndef CONFIG_PREEMPT_RT_FULL /* * We dont accurately track softirq state in e.g. 
* hardirq contexts (such as on 4KSTACKS), so only @@ -3555,6 +3556,7 @@ static void check_flags(unsigned long flags) DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } +#endif if (!debug_locks) print_irqtrace_events(current); -- cgit v0.10.2 From 59fb3bb56b7ebeabe0e687995ea7d83e103df627 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:51:45 +0200 Subject: mutex-no-spin-on-rt.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index d2b32ac..8bb92eb 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -222,4 +222,4 @@ endif config MUTEX_SPIN_ON_OWNER def_bool y - depends on SMP && !DEBUG_MUTEXES + depends on SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL -- cgit v0.10.2 From 9959175020598a1c58949ac3f124e9a54b8edfc4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Jun 2011 15:57:18 +0200 Subject: softirq-local-lock.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 674d90e..f9f2407 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -379,7 +379,13 @@ struct softirq_action asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); + +#ifndef CONFIG_PREEMPT_RT_FULL static inline void thread_do_softirq(void) { do_softirq(); } +#else +extern void thread_do_softirq(void); +#endif + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); @@ -566,6 +572,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) tasklet_kill(&ttimer->tasklet); } +#ifdef CONFIG_PREEMPT_RT_FULL +extern void softirq_early_init(void); +#else +static inline void softirq_early_init(void) { } +#endif + /* * Autoprobing for irqs: * diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h index 931bc61..199f278 100644 --- a/include/linux/preempt_mask.h +++ b/include/linux/preempt_mask.h @@ -58,7 +58,11 @@ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) -#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) +#ifndef CONFIG_PREEMPT_RT_FULL +# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) +#else +# define SOFTIRQ_DISABLE_OFFSET (0) +#endif #ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 @@ -71,9 +75,15 @@ #endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) -#define softirq_count() (preempt_count() & SOFTIRQ_MASK) #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ | NMI_MASK)) +#ifndef CONFIG_PREEMPT_RT_FULL +# define softirq_count() (preempt_count() & SOFTIRQ_MASK) +# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) +#else +# define softirq_count() (0UL) +extern int in_serving_softirq(void); +#endif /* * Are we doing bottom half or hardware interrupt processing? @@ -84,7 +94,6 @@ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) -#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) /* * Are we in NMI context? 
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6714813..b6a1bfe 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1439,6 +1439,7 @@ struct task_struct { #endif #ifdef CONFIG_PREEMPT_RT_BASE struct rcu_head put_rcu; + int softirq_nestcnt; #endif }; diff --git a/init/main.c b/init/main.c index 33e96d2..69e4308 100644 --- a/init/main.c +++ b/init/main.c @@ -499,6 +499,7 @@ asmlinkage void __init start_kernel(void) * Interrupts are still disabled. Do necessary setups, then * enable them */ + softirq_early_init(); boot_cpu_init(); page_address_init(); pr_notice("%s", linux_banner); diff --git a/kernel/softirq.c b/kernel/softirq.c index a1c156a..2c329f2 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -25,6 +25,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -168,6 +169,7 @@ static void handle_pending_softirqs(u32 pending, int cpu) local_irq_disable(); } +#ifndef CONFIG_PREEMPT_RT_FULL /* * preempt_count and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving @@ -403,6 +405,163 @@ asmlinkage void do_softirq(void) #endif +static inline void local_bh_disable_nort(void) { local_bh_disable(); } +static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } + +#else /* !PREEMPT_RT_FULL */ + +/* + * On RT we serialize softirq execution with a cpu local lock + */ +static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); +static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); + +static void __do_softirq(void); + +void __init softirq_early_init(void) +{ + local_irq_lock_init(local_softirq_lock); +} + +void local_bh_disable(void) +{ + migrate_disable(); + current->softirq_nestcnt++; +} +EXPORT_SYMBOL(local_bh_disable); + +void local_bh_enable(void) +{ + if (WARN_ON(current->softirq_nestcnt == 0)) + return; + + if ((current->softirq_nestcnt == 1) && + local_softirq_pending() && + local_trylock(local_softirq_lock)) { + + local_irq_disable(); + if (local_softirq_pending()) + __do_softirq(); + local_irq_enable(); + local_unlock(local_softirq_lock); + WARN_ON(current->softirq_nestcnt != 1); + } + current->softirq_nestcnt--; + migrate_enable(); +} +EXPORT_SYMBOL(local_bh_enable); + +void local_bh_enable_ip(unsigned long ip) +{ + local_bh_enable(); +} +EXPORT_SYMBOL(local_bh_enable_ip); + +/* For tracing */ +int notrace __in_softirq(void) +{ + if (__get_cpu_var(local_softirq_lock).owner == current) + return __get_cpu_var(local_softirq_lock).nestcnt; + return 0; +} + +int in_serving_softirq(void) +{ + int res; + + preempt_disable(); + res = __get_cpu_var(local_softirq_runner) == current; + preempt_enable(); + return res; +} +EXPORT_SYMBOL(in_serving_softirq); + +/* + * Called with bh and local interrupts disabled. For full RT cpu must + * be pinned. + */ +static void __do_softirq(void) +{ + u32 pending = local_softirq_pending(); + int cpu = smp_processor_id(); + + current->softirq_nestcnt++; + + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + + __get_cpu_var(local_softirq_runner) = current; + + lockdep_softirq_enter(); + + handle_pending_softirqs(pending, cpu); + + pending = local_softirq_pending(); + if (pending) + wakeup_softirqd(); + + lockdep_softirq_exit(); + __get_cpu_var(local_softirq_runner) = NULL; + + current->softirq_nestcnt--; +} + +static int __thread_do_softirq(int cpu) +{ + /* + * Prevent the current cpu from going offline. + * pin_current_cpu() can reenable preemption and block on the + * hotplug mutex. 
When it returns, the current cpu is + * pinned. It might be the wrong one, but the offline check + * below catches that. + */ + pin_current_cpu(); + /* + * If called from ksoftirqd (cpu >= 0) we need to check + * whether we are on the wrong cpu due to cpu offlining. If + * called via thread_do_softirq() no action required. + */ + if (cpu >= 0 && cpu_is_offline(cpu)) { + unpin_current_cpu(); + return -1; + } + preempt_enable(); + local_lock(local_softirq_lock); + local_irq_disable(); + /* + * We cannot switch stacks on RT as we want to be able to + * schedule! + */ + if (local_softirq_pending()) + __do_softirq(); + local_unlock(local_softirq_lock); + unpin_current_cpu(); + preempt_disable(); + local_irq_enable(); + return 0; +} + +/* + * Called from netif_rx_ni(). Preemption enabled. + */ +void thread_do_softirq(void) +{ + if (!in_serving_softirq()) { + preempt_disable(); + __thread_do_softirq(-1); + preempt_enable(); + } +} + +static int ksoftirqd_do_softirq(int cpu) +{ + return __thread_do_softirq(cpu); +} + +static inline void local_bh_disable_nort(void) { } +static inline void _local_bh_enable_nort(void) { } + +#endif /* PREEMPT_RT_FULL */ /* * Enter an interrupt context. */ @@ -416,9 +575,9 @@ void irq_enter(void) * Prevent raise_softirq from needlessly waking up ksoftirqd * here, as softirq will be serviced on return from interrupt. */ - local_bh_disable(); + local_bh_disable_nort(); tick_check_idle(cpu); - _local_bh_enable(); + _local_bh_enable_nort(); } __irq_enter(); @@ -426,6 +585,7 @@ void irq_enter(void) static inline void invoke_softirq(void) { +#ifndef CONFIG_PREEMPT_RT_FULL if (!force_irqthreads) { lockdep_softirq_from_hardirq(); /* @@ -440,6 +600,9 @@ static inline void invoke_softirq(void) } else { wakeup_softirqd(); } +#else + wakeup_softirqd(); +#endif } static inline void tick_irq_exit(void) -- cgit v0.10.2 From 80b0a7953b1f13229e954b93d1b93627ff6a19ab Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 13:59:17 +0200 Subject: softirq-disable-softirq-stacks-for-rt.patch Signed-off-by: Thomas Gleixner diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index c7cb8c2..f3ed55a 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -594,6 +594,7 @@ void irq_ctx_init(void) } } +#ifndef CONFIG_PREEMPT_RT_FULL static inline void do_softirq_onstack(void) { struct thread_info *curtp, *irqtp; @@ -626,6 +627,7 @@ void do_softirq(void) local_irq_restore(flags); } +#endif irq_hw_number_t virq_to_hw(unsigned int virq) { diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index ace3413..84cbc94 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -40,6 +40,7 @@ * We store the saved ksp_limit in the unused part * of the STACK_FRAME_OVERHEAD */ +#ifndef CONFIG_PREEMPT_RT_FULL _GLOBAL(call_do_softirq) mflr r0 stw r0,4(r1) @@ -56,6 +57,7 @@ _GLOBAL(call_do_softirq) stw r10,THREAD+KSP_LIMIT(r2) mtlr r0 blr +#endif /* * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index e59caf8..f93987e 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -29,6 +29,7 @@ .text +#ifndef CONFIG_PREEMPT_RT_FULL _GLOBAL(call_do_softirq) mflr r0 std r0,16(r1) @@ -39,6 +40,7 @@ _GLOBAL(call_do_softirq) ld r0,16(r1) mtlr r0 blr +#endif _GLOBAL(call_do_irq) mflr r0 diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 063af10..ae4b141 100644 --- a/arch/sh/kernel/irq.c +++ 
b/arch/sh/kernel/irq.c @@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu) hardirq_ctx[cpu] = NULL; } +#ifndef CONFIG_PREEMPT_RT_FULL asmlinkage void do_softirq(void) { unsigned long flags; @@ -191,6 +192,7 @@ asmlinkage void do_softirq(void) local_irq_restore(flags); } +#endif #else static inline void handle_one_irq(unsigned int irq) { diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index d4840ce..d74fa7f 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -698,6 +698,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) set_irq_regs(old_regs); } +#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq(void) { unsigned long flags; @@ -723,6 +724,7 @@ void do_softirq(void) local_irq_restore(flags); } +#endif #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 9ce2567..0db3eeb 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1341,6 +1341,7 @@ bad_gs: jmp 2b .previous +#ifndef CONFIG_PREEMPT_RT_FULL /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(call_softirq) CFI_STARTPROC @@ -1360,6 +1361,7 @@ ENTRY(call_softirq) ret CFI_ENDPROC END(call_softirq) +#endif #ifdef CONFIG_XEN zeroentry xen_hypervisor_callback xen_do_hypervisor_callback diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 4186755..9da1bc7 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -149,6 +149,7 @@ void irq_ctx_init(int cpu) cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); } +#ifndef CONFIG_PREEMPT_RT_FULL asmlinkage void do_softirq(void) { unsigned long flags; @@ -179,6 +180,7 @@ asmlinkage void do_softirq(void) local_irq_restore(flags); } +#endif bool handle_irq(unsigned irq, struct pt_regs *regs) { diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index d04d3ec..831f247 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -88,7 +88,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) return true; } - +#ifndef CONFIG_PREEMPT_RT_FULL extern void call_softirq(void); asmlinkage void do_softirq(void) @@ -108,3 +108,4 @@ asmlinkage void do_softirq(void) } local_irq_restore(flags); } +#endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f9f2407..7e533d9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -377,10 +377,9 @@ struct softirq_action void (*action)(struct softirq_action *); }; +#ifndef CONFIG_PREEMPT_RT_FULL asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); - -#ifndef CONFIG_PREEMPT_RT_FULL static inline void thread_do_softirq(void) { do_softirq(); } #else extern void thread_do_softirq(void); -- cgit v0.10.2 From 07745acfcca104d91528f3fad08c8a6803902788 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Jul 2011 21:06:43 +0200 Subject: softirq-make-fifo.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/softirq.c b/kernel/softirq.c index 2c329f2..4202cf5 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -407,6 +407,8 @@ asmlinkage void do_softirq(void) static inline void local_bh_disable_nort(void) { local_bh_disable(); } static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } +static void ksoftirqd_set_sched_params(unsigned int cpu) { } +static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } #else /* !PREEMPT_RT_FULL */ @@ -561,6 +563,20 @@ static int ksoftirqd_do_softirq(int cpu) static inline void local_bh_disable_nort(void) { } static inline void 
_local_bh_enable_nort(void) { } +static inline void ksoftirqd_set_sched_params(unsigned int cpu) +{ + struct sched_param param = { .sched_priority = 1 }; + + sched_setscheduler(current, SCHED_FIFO, ¶m); +} + +static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) +{ + struct sched_param param = { .sched_priority = 0 }; + + sched_setscheduler(current, SCHED_NORMAL, ¶m); +} + #endif /* PREEMPT_RT_FULL */ /* * Enter an interrupt context. @@ -1122,6 +1138,8 @@ static struct notifier_block cpu_nfb = { static struct smp_hotplug_thread softirq_threads = { .store = &ksoftirqd, + .setup = ksoftirqd_set_sched_params, + .cleanup = ksoftirqd_clr_sched_params, .thread_should_run = ksoftirqd_should_run, .thread_fn = run_ksoftirqd, .thread_comm = "ksoftirqd/%u", -- cgit v0.10.2 From 0c0cdb175d31104430c3d944c634edd63ea0e673 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 29 Nov 2011 20:18:22 -0500 Subject: tasklet: Prevent tasklets from going into infinite spin in RT When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, and spinlocks turn are mutexes. But this can cause issues with tasks disabling tasklets. A tasklet runs under ksoftirqd, and if a tasklets are disabled with tasklet_disable(), the tasklet count is increased. When a tasklet runs, it checks this counter and if it is set, it adds itself back on the softirq queue and returns. The problem arises in RT because ksoftirq will see that a softirq is ready to run (the tasklet softirq just re-armed itself), and will not sleep, but instead run the softirqs again. The tasklet softirq will still see that the count is non-zero and will not execute the tasklet and requeue itself on the softirq again, which will cause ksoftirqd to run it again and again and again. It gets worse because ksoftirqd runs as a real-time thread. If it preempted the task that disabled tasklets, and that task has migration disabled, or can't run for other reasons, the tasklet softirq will never run because the count will never be zero, and ksoftirqd will go into an infinite loop. As an RT task, it this becomes a big problem. This is a hack solution to have tasklet_disable stop tasklets, and when a tasklet runs, instead of requeueing the tasklet softirqd it delays it. When tasklet_enable() is called, and tasklets are waiting, then the tasklet_enable() will kick the tasklets to continue. This prevents the lock up from ksoftirq going into an infinite loop. [ rostedt@goodmis.org: ported to 3.0-rt ] Signed-off-by: Ingo Molnar Signed-off-by: Steven Rostedt Signed-off-by: Thomas Gleixner diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 7e533d9..b218d23 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -434,8 +434,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu, to be executed on some cpu at least once after this. * If the tasklet is already scheduled, but its execution is still not started, it will be executed only once. - * If this tasklet is already running on another CPU (or schedule is called - from tasklet itself), it is rescheduled for later. + * If this tasklet is already running on another CPU, it is rescheduled + for later. + * Schedule must not be called from the tasklet itself (a lockup occurs) * Tasklet is strictly serialized wrt itself, but not wrt another tasklets. If client needs some intertask synchronization, he makes it with spinlocks. 
@@ -460,27 +461,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ - TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ + TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ + TASKLET_STATE_PENDING /* Tasklet is pending */ }; -#ifdef CONFIG_SMP +#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) +#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) + +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } +static inline int tasklet_tryunlock(struct tasklet_struct *t) +{ + return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; +} + static inline void tasklet_unlock(struct tasklet_struct *t) { smp_mb__before_clear_bit(); clear_bit(TASKLET_STATE_RUN, &(t)->state); } -static inline void tasklet_unlock_wait(struct tasklet_struct *t) -{ - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } -} +extern void tasklet_unlock_wait(struct tasklet_struct *t); + #else #define tasklet_trylock(t) 1 +#define tasklet_tryunlock(t) 1 #define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock(t) do { } while (0) #endif @@ -529,17 +539,8 @@ static inline void tasklet_disable(struct tasklet_struct *t) smp_mb(); } -static inline void tasklet_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic_dec(); - atomic_dec(&t->count); -} - -static inline void tasklet_hi_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic_dec(); - atomic_dec(&t->count); -} +extern void tasklet_enable(struct tasklet_struct *t); +extern void tasklet_hi_enable(struct tasklet_struct *t); extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); diff --git a/kernel/softirq.c b/kernel/softirq.c index 4202cf5..79d370b 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -707,15 +708,45 @@ struct tasklet_head static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); +static void inline +__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) +{ + if (tasklet_trylock(t)) { +again: + /* We may have been preempted before tasklet_trylock + * and __tasklet_action may have already run. + * So double check the sched bit while the takslet + * is locked before adding it to the list. + */ + if (test_bit(TASKLET_STATE_SCHED, &t->state)) { + t->next = NULL; + *head->tail = t; + head->tail = &(t->next); + raise_softirq_irqoff(nr); + tasklet_unlock(t); + } else { + /* This is subtle. If we hit the corner case above + * It is possible that we get preempted right here, + * and another task has successfully called + * tasklet_schedule(), then this function, and + * failed on the trylock. Thus we must be sure + * before releasing the tasklet lock, that the + * SCHED_BIT is clear. 
Otherwise the tasklet + * may get its SCHED_BIT set, but not added to the + * list + */ + if (!tasklet_tryunlock(t)) + goto again; + } + } +} + void __tasklet_schedule(struct tasklet_struct *t) { unsigned long flags; local_irq_save(flags); - t->next = NULL; - *__this_cpu_read(tasklet_vec.tail) = t; - __this_cpu_write(tasklet_vec.tail, &(t->next)); - raise_softirq_irqoff(TASKLET_SOFTIRQ); + __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ); local_irq_restore(flags); } @@ -726,10 +757,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) unsigned long flags; local_irq_save(flags); - t->next = NULL; - *__this_cpu_read(tasklet_hi_vec.tail) = t; - __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); - raise_softirq_irqoff(HI_SOFTIRQ); + __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ); local_irq_restore(flags); } @@ -737,50 +765,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); void __tasklet_hi_schedule_first(struct tasklet_struct *t) { - BUG_ON(!irqs_disabled()); - - t->next = __this_cpu_read(tasklet_hi_vec.head); - __this_cpu_write(tasklet_hi_vec.head, t); - __raise_softirq_irqoff(HI_SOFTIRQ); + __tasklet_hi_schedule(t); } EXPORT_SYMBOL(__tasklet_hi_schedule_first); -static void tasklet_action(struct softirq_action *a) +void tasklet_enable(struct tasklet_struct *t) { - struct tasklet_struct *list; + if (!atomic_dec_and_test(&t->count)) + return; + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) + tasklet_schedule(t); +} - local_irq_disable(); - list = __this_cpu_read(tasklet_vec.head); - __this_cpu_write(tasklet_vec.head, NULL); - __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); - local_irq_enable(); +EXPORT_SYMBOL(tasklet_enable); + +void tasklet_hi_enable(struct tasklet_struct *t) +{ + if (!atomic_dec_and_test(&t->count)) + return; + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) + tasklet_hi_schedule(t); +} + +EXPORT_SYMBOL(tasklet_hi_enable); + +static void +__tasklet_action(struct softirq_action *a, struct tasklet_struct *list) +{ + int loops = 1000000; while (list) { struct tasklet_struct *t = list; list = list->next; - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) - BUG(); - t->func(t->data); - tasklet_unlock(t); - continue; - } - tasklet_unlock(t); + /* + * Should always succeed - after a tasklist got on the + * list (after getting the SCHED bit set from 0 to 1), + * nothing but the tasklet softirq it got queued to can + * lock it: + */ + if (!tasklet_trylock(t)) { + WARN_ON(1); + continue; } - local_irq_disable(); t->next = NULL; - *__this_cpu_read(tasklet_vec.tail) = t; - __this_cpu_write(tasklet_vec.tail, &(t->next)); - __raise_softirq_irqoff(TASKLET_SOFTIRQ); - local_irq_enable(); + + /* + * If we cannot handle the tasklet because it's disabled, + * mark it as pending. tasklet_enable() will later + * re-schedule the tasklet. + */ + if (unlikely(atomic_read(&t->count))) { +out_disabled: + /* implicit unlock: */ + wmb(); + t->state = TASKLET_STATEF_PENDING; + continue; + } + + /* + * After this point on the tasklet might be rescheduled + * on another CPU, but it can only be added to another + * CPU's tasklet list if we unlock the tasklet (which we + * dont do yet). + */ + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + WARN_ON(1); + +again: + t->func(t->data); + + /* + * Try to unlock the tasklet. We must use cmpxchg, because + * another CPU might have scheduled or disabled the tasklet. 
+ * We only allow the STATE_RUN -> 0 transition here. + */ + while (!tasklet_tryunlock(t)) { + /* + * If it got disabled meanwhile, bail out: + */ + if (atomic_read(&t->count)) + goto out_disabled; + /* + * If it got scheduled meanwhile, re-execute + * the tasklet function: + */ + if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + goto again; + if (!--loops) { + printk("hm, tasklet state: %08lx\n", t->state); + WARN_ON(1); + tasklet_unlock(t); + break; + } + } } } +static void tasklet_action(struct softirq_action *a) +{ + struct tasklet_struct *list; + + local_irq_disable(); + list = __get_cpu_var(tasklet_vec).head; + __get_cpu_var(tasklet_vec).head = NULL; + __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; + local_irq_enable(); + + __tasklet_action(a, list); +} + static void tasklet_hi_action(struct softirq_action *a) { struct tasklet_struct *list; @@ -791,29 +888,7 @@ static void tasklet_hi_action(struct softirq_action *a) __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); local_irq_enable(); - while (list) { - struct tasklet_struct *t = list; - - list = list->next; - - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) - BUG(); - t->func(t->data); - tasklet_unlock(t); - continue; - } - tasklet_unlock(t); - } - - local_irq_disable(); - t->next = NULL; - *__this_cpu_read(tasklet_hi_vec.tail) = t; - __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); - __raise_softirq_irqoff(HI_SOFTIRQ); - local_irq_enable(); - } + __tasklet_action(a, list); } @@ -836,7 +911,7 @@ void tasklet_kill(struct tasklet_struct *t) while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { do { - yield(); + msleep(1); } while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); @@ -1040,6 +1115,23 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action); } +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) +void tasklet_unlock_wait(struct tasklet_struct *t) +{ + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { + /* + * Hack for now to avoid this busy-loop: + */ +#ifdef CONFIG_PREEMPT_RT_FULL + msleep(1); +#else + barrier(); +#endif + } +} +EXPORT_SYMBOL(tasklet_unlock_wait); +#endif + static int ksoftirqd_should_run(unsigned int cpu) { return local_softirq_pending(); -- cgit v0.10.2 From 07ffc758f648ade41f1ca85bcb7708d9c91bce53 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 Jan 2012 13:01:27 +0100 Subject: genirq: Allow disabling of softirq processing in irq thread context The processing of softirqs in irq thread context is a performance gain for the non-rt workloads of a system, but it's counterproductive for interrupts which are explicitely related to the realtime workload. Allow such interrupts to prevent softirq processing in their thread context. Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index b218d23..a2609fb 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -58,6 +58,7 @@ * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device * resume time. 
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) */ #define IRQF_DISABLED 0x00000020 #define IRQF_SHARED 0x00000080 @@ -71,6 +72,7 @@ #define IRQF_FORCE_RESUME 0x00008000 #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 +#define IRQF_NO_SOFTIRQ_CALL 0x00040000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) diff --git a/include/linux/irq.h b/include/linux/irq.h index 56bb0dc..e2d8789 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -70,6 +70,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context * IRQ_NESTED_TRHEAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable + * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -94,12 +95,14 @@ enum { IRQ_NESTED_THREAD = (1 << 15), IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_NO_SOFTIRQ_CALL = (1 << 18), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ + IRQ_NO_SOFTIRQ_CALL) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index a8352ae..b1b37ea 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -856,7 +856,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); - local_bh_enable(); + /* + * Interrupts which have real time requirements can be set up + * to avoid softirq processing in the thread handler. This is + * safe as these interrupts do not raise soft interrupts. 
+ */ + if (irq_settings_no_softirq_call(desc)) + _local_bh_enable(); + else + local_bh_enable(); return ret; } @@ -1201,6 +1209,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } + if (new->flags & IRQF_NO_SOFTIRQ_CALL) + irq_settings_set_no_softirq_call(desc); + /* Set default affinity mask once everything is setup */ setup_affinity(irq, desc, mask); diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 1162f10..0d2c381 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h @@ -14,6 +14,7 @@ enum { _IRQ_NO_BALANCING = IRQ_NO_BALANCING, _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, + _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, }; @@ -26,6 +27,7 @@ enum { #define IRQ_NOAUTOEN GOT_YOU_MORON #define IRQ_NESTED_THREAD GOT_YOU_MORON #define IRQ_PER_CPU_DEVID GOT_YOU_MORON +#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON #undef IRQF_MODIFY_MASK #define IRQF_MODIFY_MASK GOT_YOU_MORON @@ -36,6 +38,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); } +static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) +{ + return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; +} + +static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) +{ + desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; +} + static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) { return desc->status_use_accessors & _IRQ_PER_CPU; diff --git a/kernel/softirq.c b/kernel/softirq.c index 79d370b..37482da 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -460,6 +460,13 @@ void local_bh_enable_ip(unsigned long ip) } EXPORT_SYMBOL(local_bh_enable_ip); +void _local_bh_enable(void) +{ + current->softirq_nestcnt--; + migrate_enable(); +} +EXPORT_SYMBOL(_local_bh_enable); + /* For tracing */ int notrace __in_softirq(void) { -- cgit v0.10.2 From c3429f72c9ea824c08763c389bacf245f1c74452 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 28 Jun 2011 20:42:16 +0200 Subject: local-vars-migrate-disable.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/percpu.h b/include/linux/percpu.h index d273604..f05adf5 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -48,10 +48,30 @@ preempt_enable(); \ } while (0) -#define get_local_var(var) get_cpu_var(var) -#define put_local_var(var) put_cpu_var(var) -#define get_local_ptr(var) get_cpu_ptr(var) -#define put_local_ptr(var) put_cpu_ptr(var) +#ifndef CONFIG_PREEMPT_RT_FULL +# define get_local_var(var) get_cpu_var(var) +# define put_local_var(var) put_cpu_var(var) +# define get_local_ptr(var) get_cpu_ptr(var) +# define put_local_ptr(var) put_cpu_ptr(var) +#else +# define get_local_var(var) (*({ \ + migrate_disable(); \ + &__get_cpu_var(var); })) + +# define put_local_var(var) do { \ + (void)&(var); \ + migrate_enable(); \ +} while (0) + +# define get_local_ptr(var) ({ \ + migrate_disable(); \ + this_cpu_ptr(var); }) + +# define put_local_ptr(var) do { \ + (void)(var); \ + migrate_enable(); \ +} while (0) +#endif /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) -- cgit v0.10.2 From a73bde4dcfa135b3611f33157755f7acc5fa7ddf Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 6 Apr 2010 16:51:31 +0200 Subject: md: raid5: Make raid5_percpu handling RT aware __raid_run_ops() disables preemption with get_cpu() around the 
access to the raid5_percpu variables. That causes scheduling while atomic spews on RT. Serialize the access to the percpu data with a lock and keep the code preemptible. Reported-by: Udo van den Heuvel Signed-off-by: Thomas Gleixner Tested-by: Udo van den Heuvel diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3ecfb06..ab00c1e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1541,8 +1541,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) struct raid5_percpu *percpu; unsigned long cpu; - cpu = get_cpu(); + cpu = get_cpu_light(); percpu = per_cpu_ptr(conf->percpu, cpu); + spin_lock(&percpu->lock); if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { ops_run_biofill(sh); overlap_clear++; @@ -1594,7 +1595,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) if (test_and_clear_bit(R5_Overlap, &dev->flags)) wake_up(&sh->raid_conf->wait_for_overlap); } - put_cpu(); + spin_unlock(&percpu->lock); + put_cpu_light(); } static int grow_one_stripe(struct r5conf *conf) @@ -5456,6 +5458,7 @@ static int raid5_alloc_percpu(struct r5conf *conf) __func__, cpu); break; } + spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); } put_online_cpus(); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2113ffa..0a5e1e1 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -444,6 +444,7 @@ struct r5conf { int recovery_disabled; /* per cpu variables */ struct raid5_percpu { + spinlock_t lock; /* Protection for -RT */ struct page *spare_page; /* Used when checking P/Q in raid6 */ void *scribble; /* space for constructing buffer * lists and performing address -- cgit v0.10.2 From 0a13c973854b639f936c9a34a8ef5f0df5e75c79 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 10 Jun 2011 11:04:15 +0200 Subject: rtmutex-futex-prepare-rt.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/futex.c b/kernel/futex.c index 2317548..919b7f3 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1447,6 +1447,16 @@ retry_private: requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; + } else if (ret == -EAGAIN) { + /* + * Waiter was woken by timeout or + * signal and has set pi_blocked_on to + * PI_WAKEUP_INPROGRESS before we + * tried to enqueue it on the rtmutex. + */ + this->pi_state = NULL; + free_pi_state(pi_state); + continue; } else if (ret) { /* -EDEADLK */ this->pi_state = NULL; @@ -2290,7 +2300,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; - struct futex_hash_bucket *hb; + struct futex_hash_bucket *hb, *hb2; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; @@ -2337,20 +2347,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); - spin_lock(&hb->lock); - ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); - spin_unlock(&hb->lock); - if (ret) - goto out_put_keys; + /* + * On RT we must avoid races with requeue and trying to block + * on two mutexes (hb->lock and uaddr2's rtmutex) by + * serializing access to pi_blocked_on with pi_lock. + */ + raw_spin_lock_irq(¤t->pi_lock); + if (current->pi_blocked_on) { + /* + * We have been requeued or are in the process of + * being requeued. 
+ */ + raw_spin_unlock_irq(¤t->pi_lock); + } else { + /* + * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS + * prevents a concurrent requeue from moving us to the + * uaddr2 rtmutex. After that we can safely acquire + * (and possibly block on) hb->lock. + */ + current->pi_blocked_on = PI_WAKEUP_INPROGRESS; + raw_spin_unlock_irq(¤t->pi_lock); + + spin_lock(&hb->lock); + + /* + * Clean up pi_blocked_on. We might leak it otherwise + * when we succeeded with the hb->lock in the fast + * path. + */ + raw_spin_lock_irq(¤t->pi_lock); + current->pi_blocked_on = NULL; + raw_spin_unlock_irq(¤t->pi_lock); + + ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); + spin_unlock(&hb->lock); + if (ret) + goto out_put_keys; + } /* - * In order for us to be here, we know our q.key == key2, and since - * we took the hb->lock above, we also know that futex_requeue() has - * completed and we no longer have to concern ourselves with a wakeup - * race with the atomic proxy lock acquisition by the requeue code. The - * futex_requeue dropped our key1 reference and incremented our key2 - * reference count. + * In order to be here, we have either been requeued, are in + * the process of being requeued, or requeue successfully + * acquired uaddr2 on our behalf. If pi_blocked_on was + * non-null above, we may be racing with a requeue. Do not + * rely on q->lock_ptr to be hb2->lock until after blocking on + * hb->lock or hb2->lock. The futex_requeue dropped our key1 + * reference and incremented our key2 reference count. */ + hb2 = hash_futex(&key2); /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { @@ -2359,9 +2404,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { - spin_lock(q.lock_ptr); + spin_lock(&hb2->lock); + BUG_ON(&hb2->lock != q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); - spin_unlock(q.lock_ptr); + spin_unlock(&hb2->lock); } } else { /* @@ -2374,7 +2420,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); - spin_lock(q.lock_ptr); + spin_lock(&hb2->lock); + BUG_ON(&hb2->lock != q.lock_ptr); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 2656896..65ff892 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -68,6 +68,11 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) clear_rt_mutex_waiters(lock); } +static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) +{ + return waiter && waiter != PI_WAKEUP_INPROGRESS; +} + /* * We can speed up the acquire/release, if the architecture * supports cmpxchg and if there's no debugging state to be set up @@ -222,7 +227,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * reached or the state of the chain has changed while we * dropped the locks. */ - if (!waiter) + if (!rt_mutex_real_waiter(waiter)) goto out_unlock_pi; /* @@ -425,6 +430,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, int chain_walk = 0, res; raw_spin_lock_irqsave(&task->pi_lock, flags); + + /* + * In the case of futex requeue PI, this will be a proxy + * lock. The task will wake unaware that it is enqueueed on + * this lock. Avoid blocking on two locks and corrupting + * pi_blocked_on via the PI_WAKEUP_INPROGRESS + * flag. 
futex_wait_requeue_pi() sets this when it wakes up + * before requeue (due to a signal or timeout). Do not enqueue + * the task if PI_WAKEUP_INPROGRESS is set. + */ + if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + return -EAGAIN; + } + + BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); + __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; @@ -449,7 +471,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); - if (owner->pi_blocked_on) + if (rt_mutex_real_waiter(owner->pi_blocked_on)) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } @@ -543,7 +565,7 @@ static void remove_waiter(struct rt_mutex *lock, } __rt_mutex_adjust_prio(owner); - if (owner->pi_blocked_on) + if (rt_mutex_real_waiter(owner->pi_blocked_on)) chain_walk = 1; raw_spin_unlock_irqrestore(&owner->pi_lock, flags); @@ -577,7 +599,8 @@ void rt_mutex_adjust_pi(struct task_struct *task) raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; - if (!waiter || waiter->list_entry.prio == task->prio) { + if (!rt_mutex_real_waiter(waiter) || + waiter->list_entry.prio == task->prio) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h index 53a66c8..b43d832 100644 --- a/kernel/rtmutex_common.h +++ b/kernel/rtmutex_common.h @@ -103,6 +103,8 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) /* * PI-futex support (proxy locking functions, etc.): */ +#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); -- cgit v0.10.2 From 89c8df1fae8dce3c0651fcf90dcabb8b8e7a10e0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 9 Apr 2014 19:19:23 -0500 Subject: futex: Fix bug on when a requeued RT task times out Requeue with timeout causes a bug with PREEMPT_RT_FULL. The bug comes from a timed out condition. TASK 1 TASK 2 ------ ------ futex_wait_requeue_pi() futex_wait_queue_me() double_lock_hb(); raw_spin_lock(pi_lock); if (current->pi_blocked_on) { } else { current->pi_blocked_on = PI_WAKE_INPROGRESS; run_spin_unlock(pi_lock); spin_lock(hb->lock); <-- blocked! plist_for_each_entry_safe(this) { rt_mutex_start_proxy_lock(); task_blocks_on_rt_mutex(); BUG_ON(task->pi_blocked_on)!!!! The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to grab the hb->lock, which it fails to do so. As the hb->lock is a mutex, it will block and set the "pi_blocked_on" to the hb->lock. When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGESS fails because the task1's pi_blocked_on is no longer set to that, but instead, set to the hb->lock. The fix: When calling rt_mutex_start_proxy_lock() a check is made to see if the proxy tasks pi_blocked_on is set. If so, exit out early. Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies the proxy task that it is being requeued, and will handle things appropriately. 
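Taken together with the PI_WAKEUP_INPROGRESS handling introduced by the previous patch, the two sides of the handshake reduce to roughly the following. This is a simplified sketch only; wait_lock handling, the later clearing of the marker, and the error paths are abbreviated, the complete code is in the hunks above and below.

	/* Waiter side, futex_wait_requeue_pi(), woken by timeout/signal: */
	raw_spin_lock_irq(&current->pi_lock);
	if (!current->pi_blocked_on)
		/* Not requeued yet: fence off a concurrent requeue */
		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
	raw_spin_unlock_irq(&current->pi_lock);
	/*
	 * The waiter may now block on hb->lock, which is an rtmutex on
	 * -rt and therefore overwrites pi_blocked_on - hence the check
	 * on the requeue side must accept any non-NULL value.
	 */

	/* Requeue side, rt_mutex_start_proxy_lock(), PREEMPT_RT_FULL only: */
	raw_spin_lock_irq(&task->pi_lock);
	if (task->pi_blocked_on) {		/* waiter already woke up */
		raw_spin_unlock_irq(&task->pi_lock);
		raw_spin_unlock(&lock->wait_lock);
		return -EAGAIN;			/* futex_requeue() drops this waiter */
	}
	task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
	raw_spin_unlock_irq(&task->pi_lock);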
Cc: stable-rt@vger.kernel.org Signed-off-by: Steven Rostedt Signed-off-by: Thomas Gleixner diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 65ff892..ad0aaa1 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -70,7 +70,8 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) { - return waiter && waiter != PI_WAKEUP_INPROGRESS; + return waiter && waiter != PI_WAKEUP_INPROGRESS && + waiter != PI_REQUEUE_INPROGRESS; } /* @@ -1007,6 +1008,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, return 1; } +#ifdef CONFIG_PREEMPT_RT_FULL + /* + * In PREEMPT_RT there's an added race. + * If the task, that we are about to requeue, times out, + * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue + * to skip this task. But right after the task sets + * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then + * block on the spin_lock(&hb->lock), which in RT is an rtmutex. + * This will replace the PI_WAKEUP_INPROGRESS with the actual + * lock that it blocks on. We *must not* place this task + * on this proxy lock in that case. + * + * To prevent this race, we first take the task's pi_lock + * and check if it has updated its pi_blocked_on. If it has, + * we assume that it woke up and we return -EAGAIN. + * Otherwise, we set the task's pi_blocked_on to + * PI_REQUEUE_INPROGRESS, so that if the task is waking up + * it will know that we are in the process of requeuing it. + */ + raw_spin_lock_irq(&task->pi_lock); + if (task->pi_blocked_on) { + raw_spin_unlock_irq(&task->pi_lock); + raw_spin_unlock(&lock->wait_lock); + return -EAGAIN; + } + task->pi_blocked_on = PI_REQUEUE_INPROGRESS; + raw_spin_unlock_irq(&task->pi_lock); +#endif + ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); if (ret && !rt_mutex_owner(lock)) { diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h index b43d832..47290ec 100644 --- a/kernel/rtmutex_common.h +++ b/kernel/rtmutex_common.h @@ -104,6 +104,7 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) * PI-futex support (proxy locking functions, etc.): */ #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) +#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, -- cgit v0.10.2 From d22bb423d5eef26976bd0f6dbb50fbcfc2410ce6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Mar 2013 11:17:42 +0100 Subject: futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock In exit_pi_state_list() we have the following locking construct: spin_lock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); ... spin_unlock(&hb->lock); In !RT this works, but on RT the migrate_enable() function which is called from spin_unlock() sees atomic context due to the held pi_lock and just decrements the migrate_disable_atomic counter of the task. Now the next call to migrate_disable() sees the counter being negative and issues a warning. That check should be in migrate_enable() already. Fix this by dropping pi_lock before unlocking hb->lock and reaquire pi_lock after that again. This is safe as the loop code reevaluates head again under the pi_lock. 
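Concretely, the unlock order in exit_pi_state_list() becomes the following (a sketch of the hunk below):

	if (head->next != next) {
		/*
		 * Drop pi_lock before releasing the sleeping hb->lock:
		 * on -rt, spin_unlock() calls migrate_enable(), which
		 * must not run with pi_lock (a raw lock) held.
		 */
		raw_spin_unlock_irq(&curr->pi_lock);
		spin_unlock(&hb->lock);
		raw_spin_lock_irq(&curr->pi_lock);
		continue;
	}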
Reported-by: Yong Zhang Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/futex.c b/kernel/futex.c index 919b7f3..eb58a33 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -573,7 +573,9 @@ void exit_pi_state_list(struct task_struct *curr) * task still owns the PI-state: */ if (head->next != next) { + raw_spin_unlock_irq(&curr->pi_lock); spin_unlock(&hb->lock); + raw_spin_lock_irq(&curr->pi_lock); continue; } -- cgit v0.10.2 From 3104e8e17bb899c4bae1d9fb4688ddfba853a710 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 9 Jun 2011 11:43:52 +0200 Subject: rtmutex-lock-killable.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index de17134..3561eb2 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -90,6 +90,7 @@ extern void rt_mutex_destroy(struct rt_mutex *lock); extern void rt_mutex_lock(struct rt_mutex *lock); extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, int detect_deadlock); +extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, int detect_deadlock); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index ad0aaa1..0d67faa 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -841,12 +841,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); /** * rt_mutex_lock_interruptible - lock a rt_mutex interruptible * - * @lock: the rt_mutex to be locked + * @lock: the rt_mutex to be locked * @detect_deadlock: deadlock detection on/off * * Returns: - * 0 on success - * -EINTR when interrupted by a signal + * 0 on success + * -EINTR when interrupted by a signal * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, @@ -860,17 +860,38 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); /** + * rt_mutex_lock_killable - lock a rt_mutex killable + * + * @lock: the rt_mutex to be locked + * @detect_deadlock: deadlock detection on/off + * + * Returns: + * 0 on success + * -EINTR when interrupted by a signal + * -EDEADLK when the lock would deadlock (when deadlock detection is on) + */ +int __sched rt_mutex_lock_killable(struct rt_mutex *lock, + int detect_deadlock) +{ + might_sleep(); + + return rt_mutex_fastlock(lock, TASK_KILLABLE, + detect_deadlock, rt_mutex_slowlock); +} +EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); + +/** * rt_mutex_timed_lock - lock a rt_mutex interruptible * the timeout structure is provided * by the caller * - * @lock: the rt_mutex to be locked + * @lock: the rt_mutex to be locked * @timeout: timeout structure or NULL (no timeout) * @detect_deadlock: deadlock detection on/off * * Returns: - * 0 on success - * -EINTR when interrupted by a signal + * 0 on success + * -EINTR when interrupted by a signal * -ETIMEDOUT when the timeout expired * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ -- cgit v0.10.2 From eaefaf4919f7477a4b3f3a7290371f51772902c8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 10 Jun 2011 11:21:25 +0200 Subject: rt-mutex-add-sleeping-spinlocks-support.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 3561eb2..d7cfa71 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -18,6 +18,10 @@ extern int max_lock_depth; /* for sysctl */ +#ifdef CONFIG_DEBUG_MUTEXES +#include +#endif + /** * 
The rt_mutex structure * @@ -29,9 +33,10 @@ struct rt_mutex { raw_spinlock_t wait_lock; struct plist_head wait_list; struct task_struct *owner; -#ifdef CONFIG_DEBUG_RT_MUTEXES int save_state; - const char *name, *file; +#ifdef CONFIG_DEBUG_RT_MUTEXES + const char *file; + const char *name; int line; void *magic; #endif @@ -56,19 +61,39 @@ struct hrtimer_sleeper; #ifdef CONFIG_DEBUG_RT_MUTEXES # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ , .name = #mutexname, .file = __FILE__, .line = __LINE__ -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) + +# define rt_mutex_init(mutex) \ + do { \ + raw_spin_lock_init(&(mutex)->wait_lock); \ + __rt_mutex_init(mutex, #mutex); \ + } while (0) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) + +# define rt_mutex_init(mutex) \ + do { \ + raw_spin_lock_init(&(mutex)->wait_lock); \ + __rt_mutex_init(mutex, #mutex); \ + } while (0) + # define rt_mutex_debug_task_free(t) do { } while (0) #endif -#define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ +#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \ , .owner = NULL \ - __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} + __DEBUG_RT_MUTEX_INITIALIZER(mutexname) + + +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } + +#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ + , .save_state = 1 } #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) diff --git a/kernel/futex.c b/kernel/futex.c index eb58a33..3b85a95 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2327,8 +2327,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ - debug_rt_mutex_init_waiter(&rt_waiter); - rt_waiter.task = NULL; + rt_mutex_init_waiter(&rt_waiter, false); ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 0d67faa..1c4808f 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -8,6 +8,12 @@ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen * + * Adaptive Spinlocks: + * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, + * and Peter Morreale, + * Adaptive Spinlocks simplification: + * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt + * * See Documentation/rt-mutex-design.txt for details. 
*/ #include @@ -97,6 +103,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) } #endif +static inline void init_lists(struct rt_mutex *lock) +{ + if (unlikely(!lock->wait_list.node_list.prev)) + plist_head_init(&lock->wait_list); +} + /* * Calculate task priority from the waiter list priority * @@ -155,6 +167,14 @@ static void rt_mutex_adjust_prio(struct task_struct *task) raw_spin_unlock_irqrestore(&task->pi_lock, flags); } +static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) +{ + if (waiter->savestate) + wake_up_lock_sleeper(waiter->task); + else + wake_up_process(waiter->task); +} + /* * Max number of times we'll walk the boosting chain: */ @@ -279,13 +299,15 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* Release the task */ raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (!rt_mutex_owner(lock)) { + struct rt_mutex_waiter *lock_top_waiter; + /* * If the requeue above changed the top waiter, then we need * to wake the new top waiter up to try to get the lock. */ - - if (top_waiter != rt_mutex_top_waiter(lock)) - wake_up_process(rt_mutex_top_waiter(lock)->task); + lock_top_waiter = rt_mutex_top_waiter(lock); + if (top_waiter != lock_top_waiter) + rt_mutex_wake_waiter(lock_top_waiter); raw_spin_unlock(&lock->wait_lock); goto out_put_task; } @@ -330,6 +352,25 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, return ret; } + +#define STEAL_NORMAL 0 +#define STEAL_LATERAL 1 + +/* + * Note that RT tasks are excluded from lateral-steals to prevent the + * introduction of an unbounded latency + */ +static inline int lock_is_stealable(struct task_struct *task, + struct task_struct *pendowner, int mode) +{ + if (mode == STEAL_NORMAL || rt_task(task)) { + if (task->prio >= pendowner->prio) + return 0; + } else if (task->prio > pendowner->prio) + return 0; + return 1; +} + /* * Try to take an rt-mutex * @@ -339,8 +380,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * @task: the task which wants to acquire the lock * @waiter: the waiter that is queued to the lock's wait list. (could be NULL) */ -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) +static int +__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter, int mode) { /* * We have to be careful here if the atomic speedups are @@ -373,12 +415,14 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, * 3) it is top waiter */ if (rt_mutex_has_waiters(lock)) { - if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) { - if (!waiter || waiter != rt_mutex_top_waiter(lock)) - return 0; - } + struct task_struct *pown = rt_mutex_top_waiter(lock)->task; + + if (task != pown && !lock_is_stealable(task, pown, mode)) + return 0; } + /* We got the lock. */ + if (waiter || rt_mutex_has_waiters(lock)) { unsigned long flags; struct rt_mutex_waiter *top; @@ -403,7 +447,6 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, raw_spin_unlock_irqrestore(&task->pi_lock, flags); } - /* We got the lock. 
*/ debug_rt_mutex_lock(lock); rt_mutex_set_owner(lock, task); @@ -413,6 +456,13 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, return 1; } +static inline int +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter) +{ + return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); +} + /* * Task blocks on lock. * @@ -527,7 +577,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) raw_spin_unlock_irqrestore(¤t->pi_lock, flags); - wake_up_process(waiter->task); + rt_mutex_wake_waiter(waiter); } /* @@ -606,18 +656,315 @@ void rt_mutex_adjust_pi(struct task_struct *task) return; } - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); } +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * preemptible spin_lock functions: + */ +static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, + void (*slowfn)(struct rt_mutex *lock)) +{ + might_sleep(); + + if (likely(rt_mutex_cmpxchg(lock, NULL, current))) + rt_mutex_deadlock_account_lock(lock, current); + else + slowfn(lock); +} + +static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, + void (*slowfn)(struct rt_mutex *lock)) +{ + if (likely(rt_mutex_cmpxchg(lock, current, NULL))) + rt_mutex_deadlock_account_unlock(current); + else + slowfn(lock); +} + +#ifdef CONFIG_SMP +/* + * Note that owner is a speculative pointer and dereferencing relies + * on rcu_read_lock() and the check against the lock owner. + */ +static int adaptive_wait(struct rt_mutex *lock, + struct task_struct *owner) +{ + int res = 0; + + rcu_read_lock(); + for (;;) { + if (owner != rt_mutex_owner(lock)) + break; + /* + * Ensure that owner->on_cpu is dereferenced _after_ + * checking the above to be valid. + */ + barrier(); + if (!owner->on_cpu) { + res = 1; + break; + } + cpu_relax(); + } + rcu_read_unlock(); + return res; +} +#else +static int adaptive_wait(struct rt_mutex *lock, + struct task_struct *orig_owner) +{ + return 1; +} +#endif + +# define pi_lock(lock) raw_spin_lock_irq(lock) +# define pi_unlock(lock) raw_spin_unlock_irq(lock) + +/* + * Slow path lock function spin_lock style: this variant is very + * careful not to miss any non-lock wakeups. + * + * We store the current state under p->pi_lock in p->saved_state and + * the try_to_wake_up() code handles this accordingly. + */ +static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) +{ + struct task_struct *lock_owner, *self = current; + struct rt_mutex_waiter waiter, *top_waiter; + int ret; + + rt_mutex_init_waiter(&waiter, true); + + raw_spin_lock(&lock->wait_lock); + init_lists(lock); + + if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { + raw_spin_unlock(&lock->wait_lock); + return; + } + + BUG_ON(rt_mutex_owner(lock) == self); + + /* + * We save whatever state the task is in and we'll restore it + * after acquiring the lock taking real wakeups into account + * as well. We are serialized via pi_lock against wakeups. See + * try_to_wake_up(). + */ + pi_lock(&self->pi_lock); + self->saved_state = self->state; + __set_current_state(TASK_UNINTERRUPTIBLE); + pi_unlock(&self->pi_lock); + + ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0); + BUG_ON(ret); + + for (;;) { + /* Try to acquire the lock again. 
*/ + if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) + break; + + top_waiter = rt_mutex_top_waiter(lock); + lock_owner = rt_mutex_owner(lock); + + raw_spin_unlock(&lock->wait_lock); + + debug_rt_mutex_print_deadlock(&waiter); + + if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) + schedule_rt_mutex(lock); + + raw_spin_lock(&lock->wait_lock); + + pi_lock(&self->pi_lock); + __set_current_state(TASK_UNINTERRUPTIBLE); + pi_unlock(&self->pi_lock); + } + + /* + * Restore the task state to current->saved_state. We set it + * to the original state above and the try_to_wake_up() code + * has possibly updated it when a real (non-rtmutex) wakeup + * happened while we were blocked. Clear saved_state so + * try_to_wakeup() does not get confused. + */ + pi_lock(&self->pi_lock); + __set_current_state(self->saved_state); + self->saved_state = TASK_RUNNING; + pi_unlock(&self->pi_lock); + + /* + * try_to_take_rt_mutex() sets the waiter bit + * unconditionally. We might have to fix that up: + */ + fixup_rt_mutex_waiters(lock); + + BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); + BUG_ON(!plist_node_empty(&waiter.list_entry)); + + raw_spin_unlock(&lock->wait_lock); + + debug_rt_mutex_free_waiter(&waiter); +} + +/* + * Slow path to release a rt_mutex spin_lock style + */ +static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) +{ + raw_spin_lock(&lock->wait_lock); + + debug_rt_mutex_unlock(lock); + + rt_mutex_deadlock_account_unlock(current); + + if (!rt_mutex_has_waiters(lock)) { + lock->owner = NULL; + raw_spin_unlock(&lock->wait_lock); + return; + } + + wakeup_next_waiter(lock); + + raw_spin_unlock(&lock->wait_lock); + + /* Undo pi boosting.when necessary */ + rt_mutex_adjust_prio(current); +} + +void __lockfunc rt_spin_lock(spinlock_t *lock) +{ + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); +} +EXPORT_SYMBOL(rt_spin_lock); + +void __lockfunc __rt_spin_lock(struct rt_mutex *lock) +{ + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); +} +EXPORT_SYMBOL(__rt_spin_lock); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) +{ + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); +} +EXPORT_SYMBOL(rt_spin_lock_nested); +#endif + +void __lockfunc rt_spin_unlock(spinlock_t *lock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + spin_release(&lock->dep_map, 1, _RET_IP_); + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(rt_spin_unlock); + +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) +{ + rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(__rt_spin_unlock); + +/* + * Wait for the lock to get unlocked: instead of polling for an unlock + * (like raw spinlocks do), we lock and unlock, to force the kernel to + * schedule if there's contention: + */ +void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) +{ + spin_lock(lock); + spin_unlock(lock); +} +EXPORT_SYMBOL(rt_spin_unlock_wait); + +int __lockfunc rt_spin_trylock(spinlock_t *lock) +{ + int ret; + + migrate_disable(); + ret = rt_mutex_trylock(&lock->lock); + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + else + migrate_enable(); + + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock); + +int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) +{ + int ret; + + local_bh_disable(); + ret = rt_mutex_trylock(&lock->lock); + if (ret) { 
+ migrate_disable(); + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + } else + local_bh_enable(); + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock_bh); + +int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) +{ + int ret; + + *flags = 0; + migrate_disable(); + ret = rt_mutex_trylock(&lock->lock); + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + else + migrate_enable(); + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock_irqsave); + +int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) +{ + /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ + if (atomic_add_unless(atomic, -1, 1)) + return 0; + migrate_disable(); + rt_spin_lock(lock); + if (atomic_dec_and_test(atomic)) + return 1; + rt_spin_unlock(lock); + migrate_enable(); + return 0; +} +EXPORT_SYMBOL(atomic_dec_and_spin_lock); + +void +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif +} +EXPORT_SYMBOL(__rt_spin_lock_init); + +#endif /* PREEMPT_RT_FULL */ + /** * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take * @state: the state the task should block in (TASK_INTERRUPTIBLE - * or TASK_UNINTERRUPTIBLE) + * or TASK_UNINTERRUPTIBLE) * @timeout: the pre-initialized and started timer, or NULL for none * @waiter: the pre-initialized rt_mutex_waiter * @@ -673,9 +1020,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, struct rt_mutex_waiter waiter; int ret = 0; - debug_rt_mutex_init_waiter(&waiter); + rt_mutex_init_waiter(&waiter, false); raw_spin_lock(&lock->wait_lock); + init_lists(lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock, current, NULL)) { @@ -728,6 +1076,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) int ret = 0; raw_spin_lock(&lock->wait_lock); + init_lists(lock); if (likely(rt_mutex_owner(lock) != current)) { @@ -960,12 +1309,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); void __rt_mutex_init(struct rt_mutex *lock, const char *name) { lock->owner = NULL; - raw_spin_lock_init(&lock->wait_lock); plist_head_init(&lock->wait_list); debug_rt_mutex_init(lock, name); } -EXPORT_SYMBOL_GPL(__rt_mutex_init); +EXPORT_SYMBOL(__rt_mutex_init); /** * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a @@ -980,7 +1328,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { - __rt_mutex_init(lock, NULL); + rt_mutex_init(lock); debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); rt_mutex_deadlock_account_lock(lock, proxy_owner); diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h index 47290ec..6ec3dc1 100644 --- a/kernel/rtmutex_common.h +++ b/kernel/rtmutex_common.h @@ -49,6 +49,7 @@ struct rt_mutex_waiter { struct plist_node pi_list_entry; struct task_struct *task; struct rt_mutex *lock; + bool savestate; #ifdef CONFIG_DEBUG_RT_MUTEXES unsigned long ip; struct pid *deadlock_task_pid; @@ -126,4 +127,12 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, # include "rtmutex.h" #endif +static inline void +rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) +{ + debug_rt_mutex_init_waiter(waiter); + waiter->task = NULL; + waiter->savestate = savestate; +} + #endif -- cgit v0.10.2 From 
bd7c52f1ea553397b1d846082e6f659f76ce7b80 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 19:34:01 +0200 Subject: spinlock-types-separate-raw.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index cc0072e..5317cd9 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,6 +1,10 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H +#if !defined(__LINUX_SPINLOCK_TYPES_H) +# error "Do not include directly, include spinlock_types.h" +#endif + /* * include/linux/rwlock_types.h - generic rwlock type definitions * and initializers diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 73548eb..5c8664d 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,79 +9,9 @@ * Released under the General Public License (GPL). */ -#if defined(CONFIG_SMP) -# include -#else -# include -#endif +#include -#include - -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK - unsigned int break_lock; -#endif -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPINLOCK_OWNER_INIT ((void *)-1L) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -#else -# define SPIN_DEP_MAP_INIT(lockname) -#endif - -#ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ - .owner = SPINLOCK_OWNER_INIT, -#else -# define SPIN_DEBUG_INIT(lockname) -#endif - -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - SPIN_DEP_MAP_INIT(lockname) } - -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) - -typedef struct spinlock { - union { - struct raw_spinlock rlock; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) - struct { - u8 __padding[LOCK_PADSIZE]; - struct lockdep_map dep_map; - }; -#endif - }; -} spinlock_t; - -#define __SPIN_LOCK_INITIALIZER(lockname) \ - { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } - -#define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#include #include diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h new file mode 100644 index 0000000..f1dac1f --- /dev/null +++ b/include/linux/spinlock_types_nort.h @@ -0,0 +1,33 @@ +#ifndef __LINUX_SPINLOCK_TYPES_NORT_H +#define __LINUX_SPINLOCK_TYPES_NORT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. 
Include spinlock_types.h instead" +#endif + +/* + * The non RT version maps spinlocks to raw_spinlocks + */ +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; +#endif + }; +} spinlock_t; + +#define __SPIN_LOCK_INITIALIZER(lockname) \ + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + +#define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) + +#endif diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h new file mode 100644 index 0000000..edffc4d --- /dev/null +++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,56 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +#define __LINUX_SPINLOCK_TYPES_RAW_H + +#if defined(CONFIG_SMP) +# include +#else +# include +#endif + +#include + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +#endif -- cgit v0.10.2 From 43f1a0816535bc361df956a0966cd92b63cd8aa3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 20:06:39 +0200 Subject: rtmutex-avoid-include-hell.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index d7cfa71..fa18682 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -14,7 +14,7 @@ #include #include -#include +#include extern int max_lock_depth; /* for sysctl */ -- cgit v0.10.2 From 1265501a1d96f1104da0e96a353b7fcd1a4e545c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 19:43:35 +0200 Subject: rt-add-rt-spinlocks.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h new file mode 100644 index 0000000..b138321 --- /dev/null +++ b/include/linux/rwlock_types_rt.h @@ -0,0 +1,33 @@ +#ifndef __LINUX_RWLOCK_TYPES_RT_H +#define __LINUX_RWLOCK_TYPES_RT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. 
Include spinlock_types.h instead" +#endif + +/* + * rwlocks - rtmutex which allows single reader recursion + */ +typedef struct { + struct rt_mutex lock; + int read_depth; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#define __RW_LOCK_UNLOCKED(name) \ + { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ + RW_DEP_MAP_INIT(name) } + +#define DEFINE_RWLOCK(name) \ + rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + +#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 5c8664d..10bac71 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -11,8 +11,13 @@ #include -#include - -#include +#ifndef CONFIG_PREEMPT_RT_FULL +# include +# include +#else +# include +# include +# include +#endif #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h new file mode 100644 index 0000000..9fd4319 --- /dev/null +++ b/include/linux/spinlock_types_rt.h @@ -0,0 +1,51 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RT_H +#define __LINUX_SPINLOCK_TYPES_RT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. Include spinlock_types.h instead" +#endif + +#include + +/* + * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: + */ +typedef struct spinlock { + struct rt_mutex lock; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __RT_SPIN_INITIALIZER(name) \ + { \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ + .save_state = 1, \ + .file = __FILE__, \ + .line = __LINE__ , \ + } +#else +# define __RT_SPIN_INITIALIZER(name) \ + { \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ + .save_state = 1, \ + } +#endif + +/* +.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) +*/ + +#define __SPIN_LOCK_UNLOCKED(name) \ + { .lock = __RT_SPIN_INITIALIZER(name.lock), \ + SPIN_DEP_MAP_INIT(name) } + +#define __DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) + +#endif -- cgit v0.10.2 From 5cb7a82d082067227c7de798f1a5ee8a3c9bbf2e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 20:56:22 +0200 Subject: rt-add-rt-to-mutex-headers.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/mutex.h b/include/linux/mutex.h index bab49da..79e172a 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -18,6 +18,17 @@ #include #include +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#ifdef CONFIG_PREEMPT_RT_FULL +# include +#else + /* * Simple, straightforward mutexes with strict semantics: * @@ -99,13 +110,6 @@ do { \ static inline void mutex_destroy(struct mutex *lock) {} #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { .name = #lockname } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - #define __MUTEX_INITIALIZER(lockname) \ { .count = ATOMIC_INIT(1) \ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ @@ -173,6 
+177,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); +#endif /* !PREEMPT_RT_FULL */ + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); #ifndef arch_mutex_cpu_relax diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h new file mode 100644 index 0000000..c38a44b --- /dev/null +++ b/include/linux/mutex_rt.h @@ -0,0 +1,84 @@ +#ifndef __LINUX_MUTEX_RT_H +#define __LINUX_MUTEX_RT_H + +#ifndef __LINUX_MUTEX_H +#error "Please include mutex.h" +#endif + +#include + +/* FIXME: Just for __lockfunc */ +#include + +struct mutex { + struct rt_mutex lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ + { \ + .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ + } + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); +extern void __lockfunc _mutex_lock(struct mutex *lock); +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); +extern int __lockfunc _mutex_lock_killable(struct mutex *lock); +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); +extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_trylock(struct mutex *lock); +extern void __lockfunc _mutex_unlock(struct mutex *lock); + +#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) +#define mutex_lock(l) _mutex_lock(l) +#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) +#define mutex_lock_killable(l) _mutex_lock_killable(l) +#define mutex_trylock(l) _mutex_trylock(l) +#define mutex_unlock(l) _mutex_unlock(l) +#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible_nested(l, s) +# define mutex_lock_killable_nested(l, s) \ + _mutex_lock_killable_nested(l, s) + +# define mutex_lock_nest_lock(lock, nest_lock) \ +do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ +} while (0) + +#else +# define mutex_lock_nested(l, s) _mutex_lock(l) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible(l) +# define mutex_lock_killable_nested(l, s) \ + _mutex_lock_killable(l) +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) +#endif + +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_init(&(mutex)->lock); \ + __mutex_do_init((mutex), #mutex, &__key); \ +} while (0) + +# define __mutex_init(mutex, name, key) \ +do { \ + rt_mutex_init(&(mutex)->lock); \ + __mutex_do_init((mutex), name, key); \ +} while (0) + +#endif -- cgit v0.10.2 From a82ee54429c434ae1b54481ca5afb9f04e4840e4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jun 2011 21:02:53 +0200 Subject: rwsem-add-rt-variant.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 0616ffe..0ad6070 100644 --- a/include/linux/rwsem.h +++ 
b/include/linux/rwsem.h @@ -16,6 +16,10 @@ #include +#ifdef CONFIG_PREEMPT_RT_FULL +#include +#else /* PREEMPT_RT_FULL */ + struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK @@ -149,4 +153,6 @@ extern void up_read_non_owner(struct rw_semaphore *sem); # define up_read_non_owner(sem) up_read(sem) #endif +#endif /* !PREEMPT_RT_FULL */ + #endif /* _LINUX_RWSEM_H */ diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h new file mode 100644 index 0000000..e94d945 --- /dev/null +++ b/include/linux/rwsem_rt.h @@ -0,0 +1,128 @@ +#ifndef _LINUX_RWSEM_RT_H +#define _LINUX_RWSEM_RT_H + +#ifndef _LINUX_RWSEM_H +#error "Include rwsem.h" +#endif + +/* + * RW-semaphores are a spinlock plus a reader-depth count. + * + * Note that the semantics are different from the usual + * Linux rw-sems, in PREEMPT_RT mode we do not allow + * multiple readers to hold the lock at once, we only allow + * a read-lock owner to read-lock recursively. This is + * better for latency, makes the implementation inherently + * fair and makes it simpler as well. + */ + +#include + +struct rw_semaphore { + struct rt_mutex lock; + int read_depth; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __RWSEM_INITIALIZER(name) \ + { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ + RW_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key); + +#define __rt_init_rwsem(sem, name, key) \ + do { \ + rt_mutex_init(&(sem)->lock); \ + __rt_rwsem_init((sem), (name), (key));\ + } while (0) + +#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) + +# define rt_init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __rt_init_rwsem((sem), #sem, &__key); \ +} while (0) + +extern void rt_down_write(struct rw_semaphore *rwsem); +extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); +extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); +extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, + struct lockdep_map *nest); +extern void rt_down_read(struct rw_semaphore *rwsem); +extern int rt_down_write_trylock(struct rw_semaphore *rwsem); +extern int rt_down_read_trylock(struct rw_semaphore *rwsem); +extern void rt_up_read(struct rw_semaphore *rwsem); +extern void rt_up_write(struct rw_semaphore *rwsem); +extern void rt_downgrade_write(struct rw_semaphore *rwsem); + +#define init_rwsem(sem) rt_init_rwsem(sem) +#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) + +static inline void down_read(struct rw_semaphore *sem) +{ + rt_down_read(sem); +} + +static inline int down_read_trylock(struct rw_semaphore *sem) +{ + return rt_down_read_trylock(sem); +} + +static inline void down_write(struct rw_semaphore *sem) +{ + rt_down_write(sem); +} + +static inline int down_write_trylock(struct rw_semaphore *sem) +{ + return rt_down_write_trylock(sem); +} + +static inline void up_read(struct rw_semaphore *sem) +{ + rt_up_read(sem); +} + +static inline void up_write(struct rw_semaphore *sem) +{ + rt_up_write(sem); +} + +static inline void downgrade_write(struct rw_semaphore *sem) +{ + rt_downgrade_write(sem); +} + +static inline void down_read_nested(struct rw_semaphore *sem, int subclass) +{ + return rt_down_read_nested(sem, subclass); +} + +static inline void down_write_nested(struct rw_semaphore *sem, int subclass) +{ + rt_down_write_nested(sem, subclass); +} 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC +static inline void down_write_nest_lock(struct rw_semaphore *sem, + struct rw_semaphore *nest_lock) +{ + rt_down_write_nested_lock(sem, &nest_lock->dep_map); +} + +#else + +static inline void down_write_nest_lock(struct rw_semaphore *sem, + struct rw_semaphore *nest_lock) +{ + rt_down_write_nested_lock(sem, NULL); +} +#endif +#endif diff --git a/lib/Makefile b/lib/Makefile index 6af6fbb..4461595 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -43,8 +43,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o + +ifneq ($(CONFIG_PREEMPT_RT_FULL),y) lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o +endif lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o GCOV_PROFILE_hweight.o := n -- cgit v0.10.2 From 73ae55fd4fdfc9fc7c07e86fb7278db47edf11fb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 26 Jul 2009 19:39:56 +0200 Subject: rt: Add the preempt-rt lock replacement APIs Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex based locking functions for preempt-rt. Signed-off-by: Thomas Gleixner diff --git a/include/linux/locallock.h b/include/linux/locallock.h index eb338ce..21653e9 100644 --- a/include/linux/locallock.h +++ b/include/linux/locallock.h @@ -42,9 +42,15 @@ struct local_irq_lock { * already takes care of the migrate_disable/enable * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. */ +#ifdef CONFIG_PREEMPT_RT_FULL +# define spin_lock_local(lock) rt_spin_lock(lock) +# define spin_trylock_local(lock) rt_spin_trylock(lock) +# define spin_unlock_local(lock) rt_spin_unlock(lock) +#else # define spin_lock_local(lock) spin_lock(lock) # define spin_trylock_local(lock) spin_trylock(lock) # define spin_unlock_local(lock) spin_unlock(lock) +#endif static inline void __local_lock(struct local_irq_lock *lv) { diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h new file mode 100644 index 0000000..853ee36 --- /dev/null +++ b/include/linux/rwlock_rt.h @@ -0,0 +1,123 @@ +#ifndef __LINUX_RWLOCK_RT_H +#define __LINUX_RWLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. 
Use spinlock.h +#endif + +#define rwlock_init(rwl) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_init(&(rwl)->lock); \ + __rt_rwlock_init(rwl, #rwl, &__key); \ +} while (0) + +extern void __lockfunc rt_write_lock(rwlock_t *rwlock); +extern void __lockfunc rt_read_lock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); +extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); +extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); +extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); +extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); +extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); +extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); + +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) + +#define write_trylock_irqsave(lock, flags) \ + __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + migrate_disable(); \ + flags = rt_read_lock_irqsave(lock); \ + } while (0) + +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + migrate_disable(); \ + flags = rt_write_lock_irqsave(lock); \ + } while (0) + +#define read_lock(lock) \ + do { \ + migrate_disable(); \ + rt_read_lock(lock); \ + } while (0) + +#define read_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + migrate_disable(); \ + rt_read_lock(lock); \ + } while (0) + +#define read_lock_irq(lock) read_lock(lock) + +#define write_lock(lock) \ + do { \ + migrate_disable(); \ + rt_write_lock(lock); \ + } while (0) + +#define write_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + migrate_disable(); \ + rt_write_lock(lock); \ + } while (0) + +#define write_lock_irq(lock) write_lock(lock) + +#define read_unlock(lock) \ + do { \ + rt_read_unlock(lock); \ + migrate_enable(); \ + } while (0) + +#define read_unlock_bh(lock) \ + do { \ + rt_read_unlock(lock); \ + migrate_enable(); \ + local_bh_enable(); \ + } while (0) + +#define read_unlock_irq(lock) read_unlock(lock) + +#define write_unlock(lock) \ + do { \ + rt_write_unlock(lock); \ + migrate_enable(); \ + } while (0) + +#define write_unlock_bh(lock) \ + do { \ + rt_write_unlock(lock); \ + migrate_enable(); \ + local_bh_enable(); \ + } while (0) + +#define write_unlock_irq(lock) write_unlock(lock) + +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + rt_read_unlock(lock); \ + migrate_enable(); \ + } while (0) + +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + rt_write_unlock(lock); \ + migrate_enable(); \ + } while (0) + +#endif diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 75f3494..a124f92 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -262,7 +262,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) /* Include rwlock functions */ -#include +#ifdef CONFIG_PREEMPT_RT_FULL +# include +#else +# include +#endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -273,6 +277,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include #endif +#ifdef 
CONFIG_PREEMPT_RT_FULL +# include +#else /* PREEMPT_RT_FULL */ + /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -402,4 +410,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) +#endif /* !PREEMPT_RT_FULL */ + #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index bdb9993..1356078 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } -#include +#ifndef CONFIG_PREEMPT_RT_FULL +# include +#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h new file mode 100644 index 0000000..621d857 --- /dev/null +++ b/include/linux/spinlock_rt.h @@ -0,0 +1,155 @@ +#ifndef __LINUX_SPINLOCK_RT_H +#define __LINUX_SPINLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. Use spinlock.h +#endif + +#include + +extern void +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); + +#define spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key); \ +} while (0) + +extern void __lockfunc rt_spin_lock(spinlock_t *lock); +extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void __lockfunc rt_spin_unlock(spinlock_t *lock); +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); +extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); +extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); +extern int __lockfunc rt_spin_trylock(spinlock_t *lock); +extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); + +/* + * lockdep-less calls, for derived types like rwlock: + * (for trylock they can use rt_mutex_trylock() directly. 
+ */ +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); + +#define spin_lock(lock) \ + do { \ + migrate_disable(); \ + rt_spin_lock(lock); \ + } while (0) + +#define spin_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + migrate_disable(); \ + rt_spin_lock(lock); \ + } while (0) + +#define spin_lock_irq(lock) spin_lock(lock) + +#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) + +#ifdef CONFIG_LOCKDEP +# define spin_lock_nested(lock, subclass) \ + do { \ + migrate_disable(); \ + rt_spin_lock_nested(lock, subclass); \ + } while (0) + +# define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + migrate_disable(); \ + rt_spin_lock_nested(lock, subclass); \ + } while (0) +#else +# define spin_lock_nested(lock, subclass) spin_lock(lock) + +# define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) +#endif + +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) + +static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) +{ + unsigned long flags = 0; +#ifdef CONFIG_TRACE_IRQFLAGS + flags = rt_spin_lock_trace_flags(lock); +#else + spin_lock(lock); /* lock_local */ +#endif + return flags; +} + +/* FIXME: we need rt_spin_lock_nest_lock */ +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) + +#define spin_unlock(lock) \ + do { \ + rt_spin_unlock(lock); \ + migrate_enable(); \ + } while (0) + +#define spin_unlock_bh(lock) \ + do { \ + rt_spin_unlock(lock); \ + migrate_enable(); \ + local_bh_enable(); \ + } while (0) + +#define spin_unlock_irq(lock) spin_unlock(lock) + +#define spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + spin_unlock(lock); \ + } while (0) + +#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) +#define spin_trylock_irq(lock) spin_trylock(lock) + +#define spin_trylock_irqsave(lock, flags) \ + rt_spin_trylock_irqsave(lock, &(flags)) + +#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) + +#ifdef CONFIG_GENERIC_LOCKBREAK +# define spin_is_contended(lock) ((lock)->break_lock) +#else +# define spin_is_contended(lock) (((void)(lock), 0)) +#endif + +static inline int spin_can_lock(spinlock_t *lock) +{ + return !rt_mutex_is_locked(&lock->lock); +} + +static inline int spin_is_locked(spinlock_t *lock) +{ + return rt_mutex_is_locked(&lock->lock); +} + +static inline void assert_spin_locked(spinlock_t *lock) +{ + BUG_ON(!spin_is_locked(lock)); +} + +#define atomic_dec_and_lock(atomic, lock) \ + atomic_dec_and_spin_lock(atomic, lock) + +#endif diff --git a/kernel/Makefile b/kernel/Makefile index 1ce4755..7fc87d5 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -7,8 +7,8 @@ obj-y = fork.o exec_domain.o panic.o \ sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ rcupdate.o extable.o params.o posix-timers.o \ - kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \ - hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ + kthread.o wait.o sys_ni.o posix-cpu-timers.o \ + hrtimer.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o cred.o reboot.o \ async.o range.o groups.o lglock.o smpboot.o @@ -33,7 +33,11 @@ obj-$(CONFIG_FREEZER) += freezer.o obj-$(CONFIG_PROFILING) += profile.o 
obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ +ifneq ($(CONFIG_PREEMPT_RT_FULL),y) +obj-y += mutex.o obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o +obj-y += rwsem.o +endif obj-$(CONFIG_LOCKDEP) += lockdep.o ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o @@ -45,6 +49,7 @@ endif obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o +obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += smp.o ifneq ($(CONFIG_SMP),y) diff --git a/kernel/rt.c b/kernel/rt.c new file mode 100644 index 0000000..92a16e1 --- /dev/null +++ b/kernel/rt.c @@ -0,0 +1,442 @@ +/* + * kernel/rt.c + * + * Real-Time Preemption Support + * + * started by Ingo Molnar: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * + * historic credit for proving that Linux spinlocks can be implemented via + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow + * and others) who prototyped it on 2.4 and did lots of comparative + * research and analysis; TimeSys, for proving that you can implement a + * fully preemptible kernel via the use of IRQ threading and mutexes; + * Bill Huey for persuasively arguing on lkml that the mutex model is the + * right one; and to MontaVista, who ported pmutexes to 2.6. + * + * This code is a from-scratch implementation and is not based on pmutexes, + * but the idea of converting spinlocks to mutexes is used here too. + * + * lock debugging, locking tree, deadlock detection: + * + * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey + * Released under the General Public License (GPL). + * + * Includes portions of the generic R/W semaphore implementation from: + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). + * - Derived partially from idea by Andrea Arcangeli + * - Derived also from comments by Linus + * + * Pending ownership of locks and ownership stealing: + * + * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt + * + * (also by Steven Rostedt) + * - Converted single pi_lock to individual task locks. + * + * By Esben Nielsen: + * Doing priority inheritance with help of the scheduler. + * + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner + * - major rework based on Esben Nielsens initial patch + * - replaced thread_info references by task_struct refs + * - removed task->pending_owner dependency + * - BKL drop/reacquire for semaphore style locks to avoid deadlocks + * in the scheduler return path as discussed with Steven Rostedt + * + * Copyright (C) 2006, Kihon Technologies Inc. + * Steven Rostedt + * - debugged and patched Thomas Gleixner's rework. + * - added back the cmpxchg to the rework. + * - turned atomic require back on for SMP. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rtmutex_common.h" + +/* + * struct mutex functions + */ +void __mutex_do_init(struct mutex *mutex, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); + lockdep_init_map(&mutex->dep_map, name, key, 0); +#endif + mutex->lock.save_state = 0; +} +EXPORT_SYMBOL(__mutex_do_init); + +void __lockfunc _mutex_lock(struct mutex *lock) +{ + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock); + +int __lockfunc _mutex_lock_interruptible(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_lock_interruptible(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible); + +int __lockfunc _mutex_lock_killable(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_lock_killable(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_killable); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) +{ + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock_nested); + +void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) +{ + mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock_nest_lock); + +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); + ret = rt_mutex_lock_interruptible(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible_nested); + +int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + ret = rt_mutex_lock_killable(&lock->lock, 0); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_killable_nested); +#endif + +int __lockfunc _mutex_trylock(struct mutex *lock) +{ + int ret = rt_mutex_trylock(&lock->lock); + + if (ret) + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(_mutex_trylock); + +void __lockfunc _mutex_unlock(struct mutex *lock) +{ + mutex_release(&lock->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_unlock); + +/* + * rwlock_t functions + */ +int __lockfunc rt_write_trylock(rwlock_t *rwlock) +{ + int ret = rt_mutex_trylock(&rwlock->lock); + + migrate_disable(); + if (ret) + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); + else + migrate_enable(); + + return ret; +} +EXPORT_SYMBOL(rt_write_trylock); + +int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) +{ + int ret; + + *flags = 0; + migrate_disable(); + ret = rt_write_trylock(rwlock); + if (!ret) + migrate_enable(); + return ret; +} +EXPORT_SYMBOL(rt_write_trylock_irqsave); + +int __lockfunc rt_read_trylock(rwlock_t *rwlock) +{ + struct rt_mutex *lock = &rwlock->lock; + int ret = 1; + + /* + * recursive read locks succeed when 
current owns the lock, + * but not when read_depth == 0 which means that the lock is + * write locked. + */ + migrate_disable(); + if (rt_mutex_owner(lock) != current) + ret = rt_mutex_trylock(lock); + else if (!rwlock->read_depth) + ret = 0; + + if (ret) { + rwlock->read_depth++; + rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); + } else + migrate_enable(); + + return ret; +} +EXPORT_SYMBOL(rt_read_trylock); + +void __lockfunc rt_write_lock(rwlock_t *rwlock) +{ + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + __rt_spin_lock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_write_lock); + +void __lockfunc rt_read_lock(rwlock_t *rwlock) +{ + struct rt_mutex *lock = &rwlock->lock; + + rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); + + /* + * recursive read locks succeed when current owns the lock + */ + if (rt_mutex_owner(lock) != current) + __rt_spin_lock(lock); + rwlock->read_depth++; +} + +EXPORT_SYMBOL(rt_read_lock); + +void __lockfunc rt_write_unlock(rwlock_t *rwlock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + __rt_spin_unlock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_write_unlock); + +void __lockfunc rt_read_unlock(rwlock_t *rwlock) +{ + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + + /* Release the lock only when read_depth is down to 0 */ + if (--rwlock->read_depth == 0) + __rt_spin_unlock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_read_unlock); + +unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); + + return 0; +} +EXPORT_SYMBOL(rt_write_lock_irqsave); + +unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); + + return 0; +} +EXPORT_SYMBOL(rt_read_lock_irqsave); + +void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); + lockdep_init_map(&rwlock->dep_map, name, key, 0); +#endif + rwlock->lock.save_state = 1; + rwlock->read_depth = 0; +} +EXPORT_SYMBOL(__rt_rwlock_init); + +/* + * rw_semaphores + */ + +void rt_up_write(struct rw_semaphore *rwsem) +{ + rwsem_release(&rwsem->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_up_write); + +void rt_up_read(struct rw_semaphore *rwsem) +{ + rwsem_release(&rwsem->dep_map, 1, _RET_IP_); + if (--rwsem->read_depth == 0) + rt_mutex_unlock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_up_read); + +/* + * downgrade a write lock into a read lock + * - just wake up any readers at the front of the queue + */ +void rt_downgrade_write(struct rw_semaphore *rwsem) +{ + BUG_ON(rt_mutex_owner(&rwsem->lock) != current); + rwsem->read_depth = 1; +} +EXPORT_SYMBOL(rt_downgrade_write); + +int rt_down_write_trylock(struct rw_semaphore *rwsem) +{ + int ret = rt_mutex_trylock(&rwsem->lock); + + if (ret) + rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(rt_down_write_trylock); + +void rt_down_write(struct rw_semaphore *rwsem) +{ + rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); + rt_mutex_lock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_down_write); + +void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) +{ + rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); + rt_mutex_lock(&rwsem->lock); +} +EXPORT_SYMBOL(rt_down_write_nested); + +int rt_down_read_trylock(struct rw_semaphore *rwsem) +{ + struct rt_mutex *lock = &rwsem->lock; + int ret = 1; + + /* + * recursive read locks 
succeed when current owns the rwsem, + * but not when read_depth == 0 which means that the rwsem is + * write locked. + */ + if (rt_mutex_owner(lock) != current) + ret = rt_mutex_trylock(&rwsem->lock); + else if (!rwsem->read_depth) + ret = 0; + + if (ret) { + rwsem->read_depth++; + rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); + } + return ret; +} +EXPORT_SYMBOL(rt_down_read_trylock); + +static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) +{ + struct rt_mutex *lock = &rwsem->lock; + + rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); + + if (rt_mutex_owner(lock) != current) + rt_mutex_lock(&rwsem->lock); + rwsem->read_depth++; +} + +void rt_down_read(struct rw_semaphore *rwsem) +{ + __rt_down_read(rwsem, 0); +} +EXPORT_SYMBOL(rt_down_read); + +void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) +{ + __rt_down_read(rwsem, subclass); +} +EXPORT_SYMBOL(rt_down_read_nested); + +void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); + lockdep_init_map(&rwsem->dep_map, name, key, 0); +#endif + rwsem->read_depth = 0; + rwsem->lock.save_state = 0; +} +EXPORT_SYMBOL(__rt_rwsem_init); + +/** + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 + * @cnt: the atomic which we are to dec + * @lock: the mutex to return holding if we dec to 0 + * + * return true and hold lock if we dec to 0, return false otherwise + */ +int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) +{ + /* dec if we can't possibly hit 0 */ + if (atomic_add_unless(cnt, -1, 1)) + return 0; + /* we might hit 0, so take the lock */ + mutex_lock(lock); + if (!atomic_dec_and_test(cnt)) { + /* when we actually did the dec, we didn't hit 0 */ + mutex_unlock(lock); + return 0; + } + /* we hit 0, and we hold the lock */ + return 1; +} +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 4b082b5..5c76166 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ * __[spin|read|write]_lock_bh() */ BUILD_LOCK_OPS(spin, raw_spinlock); + +#ifndef CONFIG_PREEMPT_RT_FULL BUILD_LOCK_OPS(read, rwlock); BUILD_LOCK_OPS(write, rwlock); +#endif #endif @@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif +#ifndef CONFIG_PREEMPT_RT_FULL + #ifndef CONFIG_INLINE_READ_TRYLOCK int __lockfunc _raw_read_trylock(rwlock_t *lock) { @@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) EXPORT_SYMBOL(_raw_write_unlock_bh); #endif +#endif /* !PREEMPT_RT_FULL */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 0374a59..9497033 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, EXPORT_SYMBOL(__raw_spin_lock_init); +#ifndef CONFIG_PREEMPT_RT_FULL void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key) { @@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, } EXPORT_SYMBOL(__rwlock_init); +#endif static void spin_dump(raw_spinlock_t *lock, const char *msg) { @@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock) 
arch_spin_unlock(&lock->raw_lock); } +#ifndef CONFIG_PREEMPT_RT_FULL static void rwlock_bug(rwlock_t *lock, const char *msg) { if (!debug_locks_off()) @@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock) debug_write_unlock(lock); arch_write_unlock(&lock->raw_lock); } + +#endif -- cgit v0.10.2 From ea6767f8b4469261eb3b5e9308ff170bcfbfa7bf Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Thu, 21 Nov 2013 22:52:30 -0500 Subject: condition migration_disable on lock acquisition No need to unconditionally migrate_disable (what is it protecting ?) and re-enable on failure to acquire the lock. This patch moves the migrate_disable to be conditioned on sucessful lock acquisition only. Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rt.c b/kernel/rt.c index 92a16e1..ff56d3c 100644 --- a/kernel/rt.c +++ b/kernel/rt.c @@ -182,11 +182,10 @@ int __lockfunc rt_write_trylock(rwlock_t *rwlock) { int ret = rt_mutex_trylock(&rwlock->lock); - migrate_disable(); - if (ret) + if (ret) { rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); - else - migrate_enable(); + migrate_disable(); + } return ret; } -- cgit v0.10.2 From 9205c26d46c2ead7e5c842a0e017ab7198a441a4 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Fri, 29 Nov 2013 00:19:41 -0500 Subject: migrate_disable pushd down in atomic_dec_and_spin_lock Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 1c4808f..f6b6ebb 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -935,12 +935,12 @@ int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ if (atomic_add_unless(atomic, -1, 1)) return 0; - migrate_disable(); rt_spin_lock(lock); - if (atomic_dec_and_test(atomic)) + if (atomic_dec_and_test(atomic)){ + migrate_disable(); return 1; + } rt_spin_unlock(lock); - migrate_enable(); return 0; } EXPORT_SYMBOL(atomic_dec_and_spin_lock); -- cgit v0.10.2 From 9fb5fd596d3eddfa2131b1a0773964a8e5e8e6de Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Fri, 29 Nov 2013 00:17:27 -0500 Subject: migrate_disable pushd down in rt_spin_trylock_irqsave Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index f6b6ebb..ef42b8a 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -920,12 +920,11 @@ int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) int ret; *flags = 0; - migrate_disable(); ret = rt_mutex_trylock(&lock->lock); - if (ret) + if (ret) { + migrate_disable(); spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - else - migrate_enable(); + } return ret; } EXPORT_SYMBOL(rt_spin_trylock_irqsave); -- cgit v0.10.2 From ad8d43dfe324184a28daaf994aab03e8a3a3513d Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Fri, 29 Nov 2013 00:21:59 -0500 Subject: migrate_disable pushd down in rt_write_trylock_irqsave Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rt.c b/kernel/rt.c index ff56d3c..d58b410 100644 --- a/kernel/rt.c +++ b/kernel/rt.c @@ -196,10 +196,9 @@ int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) int ret; *flags = 0; - migrate_disable(); ret = rt_write_trylock(rwlock); - if (!ret) - migrate_enable(); + if (ret) + migrate_disable(); return ret; } EXPORT_SYMBOL(rt_write_trylock_irqsave); -- cgit v0.10.2 From 3c5a1447ccb6f866dbf2ad1e4470d2734b67fce4 Mon Sep 17 00:00:00 2001 
From: Nicholas Mc Guire Date: Thu, 2 Jan 2014 10:18:42 +0100 Subject: write_lock migrate_disable pushdown to rt_write_lock pushdown of migrate_disable/enable from write_*lock* to the rt_write_*lock* api level general mapping of write_*lock* to mutexes: write_*lock* `-> rt_write_*lock* `-> __spin_lock (the sleeping __spin_lock) `-> rt_mutex write_*lock*s are non-recursive so we have two lock chains to consider - write_trylock*/write_unlock - write_lock*/wirte_unlock for both paths the migration_disable/enable must be balanced. write_trylock* mapping: write_trylock_irqsave `-> rt_write_trylock_irqsave write_trylock \ `--------> rt_write_trylock ret = rt_mutex_trylock rt_mutex_fasttrylock rt_mutex_cmpxchg if (ret) migrate_disable write_lock* mapping: write_lock_irqsave `-> rt_write_lock_irqsave write_lock_irq -> write_lock ----. \ write_lock_bh -+ \ `-> rt_write_lock __rt_spin_lock() rt_spin_lock_fastlock() rt_mutex_cmpxchg() migrate_disable() write_unlock* mapping: write_unlock_irqrestore. write_unlock_bh -------+ write_unlock_irq -> write_unlock ----------+ `-> rt_write_unlock() __rt_spin_unlock() rt_spin_lock_fastunlock() rt_mutex_cmpxchg() migrate_enable() So calls to migrate_disable/enable() are better placed at the rt_write_* level of lock/trylock/unlock as all of the write_*lock* API has this as a common path. This approach to write_*_bh also eliminates the concerns raised with regards to api inbalances (write_lock_bh -> write_unlock+local_bh_enable) Tested-by: Carsten Emde Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h index 853ee36..a276fae 100644 --- a/include/linux/rwlock_rt.h +++ b/include/linux/rwlock_rt.h @@ -40,7 +40,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define write_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - migrate_disable(); \ flags = rt_write_lock_irqsave(lock); \ } while (0) @@ -61,14 +60,12 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define write_lock(lock) \ do { \ - migrate_disable(); \ rt_write_lock(lock); \ } while (0) #define write_lock_bh(lock) \ do { \ local_bh_disable(); \ - migrate_disable(); \ rt_write_lock(lock); \ } while (0) @@ -92,13 +89,11 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define write_unlock(lock) \ do { \ rt_write_unlock(lock); \ - migrate_enable(); \ } while (0) #define write_unlock_bh(lock) \ do { \ rt_write_unlock(lock); \ - migrate_enable(); \ local_bh_enable(); \ } while (0) @@ -117,7 +112,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key typecheck(unsigned long, flags); \ (void) flags; \ rt_write_unlock(lock); \ - migrate_enable(); \ } while (0) #endif diff --git a/kernel/rt.c b/kernel/rt.c index d58b410..d712968 100644 --- a/kernel/rt.c +++ b/kernel/rt.c @@ -197,8 +197,6 @@ int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) *flags = 0; ret = rt_write_trylock(rwlock); - if (ret) - migrate_disable(); return ret; } EXPORT_SYMBOL(rt_write_trylock_irqsave); @@ -232,6 +230,7 @@ EXPORT_SYMBOL(rt_read_trylock); void __lockfunc rt_write_lock(rwlock_t *rwlock) { rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + migrate_disable(); __rt_spin_lock(&rwlock->lock); } EXPORT_SYMBOL(rt_write_lock); @@ -257,6 +256,7 @@ void __lockfunc rt_write_unlock(rwlock_t *rwlock) /* NOTE: we always pass in '1' for nested, for simplicity */ 
rwlock_release(&rwlock->dep_map, 1, _RET_IP_); __rt_spin_unlock(&rwlock->lock); + migrate_enable(); } EXPORT_SYMBOL(rt_write_unlock); -- cgit v0.10.2 From c237dbc7612732a96d7b51e95be60579a2769daa Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Thu, 2 Jan 2014 10:19:15 +0100 Subject: read_lock migrate_disable pushdown to rt_read_lock pushdown of migrate_disable/enable from read_*lock* to the rt_read_*lock* api level general mapping to mutexes: read_*lock* `-> rt_read_*lock* `-> __spin_lock (the sleeping spin locks) `-> rt_mutex The real read_lock* mapping: read_lock_irqsave -. read_lock_irq `-> rt_read_lock_irqsave() `->read_lock ---------. \ read_lock_bh ------+ \ `--> rt_read_lock() if (rt_mutex_owner(lock) != current){ `-> __rt_spin_lock() rt_spin_lock_fastlock() `->rt_mutex_cmpxchg() migrate_disable() } rwlock->read_depth++; read_trylock mapping: read_trylock `-> rt_read_trylock if (rt_mutex_owner(lock) != current){ `-> rt_mutex_trylock() rt_mutex_fasttrylock() rt_mutex_cmpxchg() migrate_disable() } rwlock->read_depth++; read_unlock* mapping: read_unlock_bh --------+ read_unlock_irq -------+ read_unlock_irqrestore + read_unlock -----------+ `-> rt_read_unlock() if(--rwlock->read_depth==0){ `-> __rt_spin_unlock() rt_spin_lock_fastunlock() `-> rt_mutex_cmpxchg() migrate_disable() } So calls to migrate_disable/enable() are better placed at the rt_read_* level of lock/trylock/unlock as all of the read_*lock* API has this as a common path. In the rt_read* API of lock/trylock/unlock the nesting level is already being recorded in rwlock->read_depth, so we can push down the migrate disable/enable to that level and condition it on the read_depth going from 0 to 1 -> migrate_disable and 1 to 0 -> migrate_enable. This eliminates the recursive calls that were needed when migrate_disable/enable was done at the read_*lock* level. 
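In code, the end state described above looks roughly as follows (a minimal sketch only, with the __lockfunc markers and lockdep annotations omitted; the authoritative change is the kernel/rt.c hunk below):

void rt_read_lock(rwlock_t *rwlock)
{
	/*
	 * Recursive read lock: only the first acquisition (read_depth
	 * going 0 -> 1) takes the underlying rt_mutex and disables
	 * migration.
	 */
	if (rt_mutex_owner(&rwlock->lock) != current) {
		__rt_spin_lock(&rwlock->lock);
		migrate_disable();
	}
	rwlock->read_depth++;
}

void rt_read_unlock(rwlock_t *rwlock)
{
	/*
	 * Only the final release (read_depth going 1 -> 0) drops the
	 * rt_mutex and re-enables migration.
	 */
	if (--rwlock->read_depth == 0) {
		__rt_spin_unlock(&rwlock->lock);
		migrate_enable();
	}
}
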
The approach to read_*_bh also eliminates the concerns raised with the regards to api inbalances (read_lock_bh -> read_unlock+local_bh_enable) Tested-by: Carsten Emde Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h index a276fae..e85a5df 100644 --- a/include/linux/rwlock_rt.h +++ b/include/linux/rwlock_rt.h @@ -33,7 +33,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define read_lock_irqsave(lock, flags) \ do { \ typecheck(unsigned long, flags); \ - migrate_disable(); \ flags = rt_read_lock_irqsave(lock); \ } while (0) @@ -45,14 +44,12 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define read_lock(lock) \ do { \ - migrate_disable(); \ rt_read_lock(lock); \ } while (0) #define read_lock_bh(lock) \ do { \ local_bh_disable(); \ - migrate_disable(); \ rt_read_lock(lock); \ } while (0) @@ -74,13 +71,11 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define read_unlock(lock) \ do { \ rt_read_unlock(lock); \ - migrate_enable(); \ } while (0) #define read_unlock_bh(lock) \ do { \ rt_read_unlock(lock); \ - migrate_enable(); \ local_bh_enable(); \ } while (0) @@ -104,7 +99,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key typecheck(unsigned long, flags); \ (void) flags; \ rt_read_unlock(lock); \ - migrate_enable(); \ } while (0) #define write_unlock_irqrestore(lock, flags) \ diff --git a/kernel/rt.c b/kernel/rt.c index d712968..a9925c6 100644 --- a/kernel/rt.c +++ b/kernel/rt.c @@ -211,17 +211,19 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock) * but not when read_depth == 0 which means that the lock is * write locked. */ - migrate_disable(); - if (rt_mutex_owner(lock) != current) + if (rt_mutex_owner(lock) != current) { ret = rt_mutex_trylock(lock); - else if (!rwlock->read_depth) + if (ret) + migrate_disable(); + + } else if (!rwlock->read_depth) { ret = 0; + } if (ret) { rwlock->read_depth++; rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); - } else - migrate_enable(); + } return ret; } @@ -244,8 +246,10 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock) /* * recursive read locks succeed when current owns the lock */ - if (rt_mutex_owner(lock) != current) + if (rt_mutex_owner(lock) != current) { __rt_spin_lock(lock); + migrate_disable(); + } rwlock->read_depth++; } @@ -265,8 +269,10 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock) rwlock_release(&rwlock->dep_map, 1, _RET_IP_); /* Release the lock only when read_depth is down to 0 */ - if (--rwlock->read_depth == 0) + if (--rwlock->read_depth == 0) { __rt_spin_unlock(&rwlock->lock); + migrate_enable(); + } } EXPORT_SYMBOL(rt_read_unlock); -- cgit v0.10.2 From bfa7924c5caf99cff122eaa3205596199e8c772a Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sat, 8 Feb 2014 12:39:20 +0100 Subject: rt: Cleanup of unnecessary do while 0 in read/write _lock() With the migration pushdonw a few of the do{ }while(0) loops became obsolete but got left over - this patch only removes this fallout. 
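For illustration, the kind of wrapper this makes obsolete (a sketch of the pattern; the actual hunks follow) is

	#define read_lock(lock)			\
		do {				\
			rt_read_lock(lock);	\
		} while (0)

which, now that migrate_disable()/enable() live inside rt_read_lock()/rt_read_unlock(), can collapse to a plain

	#define read_lock(lock)		rt_read_lock(lock)
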
Patch applies on top of 3.12.9-rt13 Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h index e85a5df..49ed2d4 100644 --- a/include/linux/rwlock_rt.h +++ b/include/linux/rwlock_rt.h @@ -42,10 +42,7 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key flags = rt_write_lock_irqsave(lock); \ } while (0) -#define read_lock(lock) \ - do { \ - rt_read_lock(lock); \ - } while (0) +#define read_lock(lock) rt_read_lock(lock) #define read_lock_bh(lock) \ do { \ @@ -55,10 +52,7 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define read_lock_irq(lock) read_lock(lock) -#define write_lock(lock) \ - do { \ - rt_write_lock(lock); \ - } while (0) +#define write_lock(lock) rt_write_lock(lock) #define write_lock_bh(lock) \ do { \ @@ -68,10 +62,7 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define write_lock_irq(lock) write_lock(lock) -#define read_unlock(lock) \ - do { \ - rt_read_unlock(lock); \ - } while (0) +#define read_unlock(lock) rt_read_unlock(lock) #define read_unlock_bh(lock) \ do { \ @@ -81,10 +72,7 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key #define read_unlock_irq(lock) read_unlock(lock) -#define write_unlock(lock) \ - do { \ - rt_write_unlock(lock); \ - } while (0) +#define write_unlock(lock) rt_write_unlock(lock) #define write_unlock_bh(lock) \ do { \ -- cgit v0.10.2 From bf9426905ed74a205621dd183a61d31c109830f2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 8 Apr 2013 16:08:46 +0200 Subject: percpu-rwsem: compile fix The shortcut on mainline skip lockdep. No idea why this is a good thing. Signed-off-by: Sebastian Andrzej Siewior diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c index 652a8ee..2db0f42 100644 --- a/lib/percpu-rwsem.c +++ b/lib/percpu-rwsem.c @@ -84,8 +84,12 @@ void percpu_down_read(struct percpu_rw_semaphore *brw) down_read(&brw->rw_sem); atomic_inc(&brw->slow_read_ctr); +#ifdef CONFIG_PREEMPT_RT_FULL + up_read(&brw->rw_sem); +#else /* avoid up_read()->rwsem_release() */ __up_read(&brw->rw_sem); +#endif } void percpu_up_read(struct percpu_rw_semaphore *brw) -- cgit v0.10.2 From dba6ea4e8211132d178c3ca705bc94ba4cdc7c09 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 28 Oct 2013 09:36:37 +0100 Subject: rtmutex: add a first shot of ww_mutex lockdep says: | -------------------------------------------------------------------------- | | Wound/wait tests | | --------------------- | ww api failures: ok | ok | ok | | ww contexts mixing: ok | ok | | finishing ww context: ok | ok | ok | ok | | locking mismatches: ok | ok | ok | | EDEADLK handling: ok | ok | ok | ok | ok | ok | ok | ok | ok | ok | | spinlock nest unlocked: ok | | ----------------------------------------------------- | |block | try |context| | ----------------------------------------------------- | context: ok | ok | ok | | try: ok | ok | ok | | block: ok | ok | ok | | spinlock: ok | ok | ok | Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index ef42b8a..1bb34e9 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "rtmutex_common.h" @@ -959,6 +960,39 @@ EXPORT_SYMBOL(__rt_spin_lock_init); #endif /* PREEMPT_RT_FULL */ +#ifdef CONFIG_PREEMPT_RT_FULL +static inline int __sched +__mutex_lock_check_stamp(struct rt_mutex *lock, struct 
ww_acquire_ctx *ctx) +{ + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); + struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); + + if (!hold_ctx) + return 0; + + if (unlikely(ctx == hold_ctx)) + return -EALREADY; + + if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && + (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(ctx->contending_lock); + ctx->contending_lock = ww; +#endif + return -EDEADLK; + } + + return 0; +} +#else +static inline int __sched +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) +{ + BUG(); +} + +#endif + /** * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take @@ -972,7 +1006,8 @@ EXPORT_SYMBOL(__rt_spin_lock_init); static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - struct rt_mutex_waiter *waiter) + struct rt_mutex_waiter *waiter, + struct ww_acquire_ctx *ww_ctx) { int ret = 0; @@ -995,6 +1030,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, break; } + if (ww_ctx && ww_ctx->acquired > 0) { + ret = __mutex_lock_check_stamp(lock, ww_ctx); + if (ret) + break; + } + raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); @@ -1008,13 +1049,89 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, return ret; } +static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, + struct ww_acquire_ctx *ww_ctx) +{ +#ifdef CONFIG_DEBUG_MUTEXES + /* + * If this WARN_ON triggers, you used ww_mutex_lock to acquire, + * but released with a normal mutex_unlock in this call. + * + * This should never happen, always use ww_mutex_unlock. + */ + DEBUG_LOCKS_WARN_ON(ww->ctx); + + /* + * Not quite done after calling ww_acquire_done() ? + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); + + if (ww_ctx->contending_lock) { + /* + * After -EDEADLK you tried to + * acquire a different ww_mutex? Bad! + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); + + /* + * You called ww_mutex_lock after receiving -EDEADLK, + * but 'forgot' to unlock everything else first? + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); + ww_ctx->contending_lock = NULL; + } + + /* + * Naughty, using a different class will lead to undefined behavior! + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); +#endif + ww_ctx->acquired++; +} + +#ifdef CONFIG_PREEMPT_RT_FULL +static void ww_mutex_account_lock(struct rt_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); + struct rt_mutex_waiter *waiter; + + /* + * This branch gets optimized out for the common case, + * and is only important for ww_mutex_lock. + */ + ww_mutex_lock_acquired(ww, ww_ctx); + ww->ctx = ww_ctx; + + /* + * Give any possible sleeping processes the chance to wake up, + * so they can recheck if they have to back off. 
+ */ + plist_for_each_entry(waiter, &lock->wait_list, list_entry) { + + /* XXX debug rt mutex waiter wakeup */ + + BUG_ON(waiter->lock != lock); + rt_mutex_wake_waiter(waiter); + } +} + +#else + +static void ww_mutex_account_lock(struct rt_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ + BUG(); +} +#endif + /* * Slow path lock function: */ static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock) + int detect_deadlock, struct ww_acquire_ctx *ww_ctx) { struct rt_mutex_waiter waiter; int ret = 0; @@ -1026,6 +1143,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock, current, NULL)) { + if (ww_ctx) + ww_mutex_account_lock(lock, ww_ctx); raw_spin_unlock(&lock->wait_lock); return 0; } @@ -1042,12 +1161,14 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock); if (likely(!ret)) - ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); + ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ww_ctx); set_current_state(TASK_RUNNING); if (unlikely(ret)) remove_waiter(lock, &waiter); + else if (ww_ctx) + ww_mutex_account_lock(lock, ww_ctx); /* * try_to_take_rt_mutex() sets the waiter bit @@ -1126,30 +1247,33 @@ rt_mutex_slowunlock(struct rt_mutex *lock) */ static inline int rt_mutex_fastlock(struct rt_mutex *lock, int state, - int detect_deadlock, + int detect_deadlock, struct ww_acquire_ctx *ww_ctx, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock)) + int detect_deadlock, + struct ww_acquire_ctx *ww_ctx)) { if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_deadlock_account_lock(lock, current); return 0; } else - return slowfn(lock, state, NULL, detect_deadlock); + return slowfn(lock, state, NULL, detect_deadlock, ww_ctx); } static inline int rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, int detect_deadlock, + struct ww_acquire_ctx *ww_ctx, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - int detect_deadlock)) + int detect_deadlock, + struct ww_acquire_ctx *ww_ctx)) { if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_deadlock_account_lock(lock, current); return 0; } else - return slowfn(lock, state, timeout, detect_deadlock); + return slowfn(lock, state, timeout, detect_deadlock, ww_ctx); } static inline int @@ -1182,7 +1306,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock) { might_sleep(); - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock); + rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_lock); @@ -1203,7 +1327,7 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, might_sleep(); return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, - detect_deadlock, rt_mutex_slowlock); + detect_deadlock, NULL, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); @@ -1224,7 +1348,7 @@ int __sched rt_mutex_lock_killable(struct rt_mutex *lock, might_sleep(); return rt_mutex_fastlock(lock, TASK_KILLABLE, - detect_deadlock, rt_mutex_slowlock); + detect_deadlock, NULL, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); @@ -1250,7 +1374,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, might_sleep(); return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, 
timeout, - detect_deadlock, rt_mutex_slowlock); + detect_deadlock, NULL, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); @@ -1474,7 +1598,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, set_current_state(TASK_INTERRUPTIBLE); - ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); + ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); set_current_state(TASK_RUNNING); @@ -1491,3 +1615,88 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, return ret; } + +static inline int +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +{ +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH + unsigned tmp; + + if (ctx->deadlock_inject_countdown-- == 0) { + tmp = ctx->deadlock_inject_interval; + if (tmp > UINT_MAX/4) + tmp = UINT_MAX; + else + tmp = tmp*2 + tmp + tmp/2; + + ctx->deadlock_inject_interval = tmp; + ctx->deadlock_inject_countdown = tmp; + ctx->contending_lock = lock; + + ww_mutex_unlock(lock); + + return -EDEADLK; + } +#endif + + return 0; +} + +#ifdef CONFIG_PREEMPT_RT_FULL +int __sched +__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) +{ + int ret; + + might_sleep(); + + mutex_acquire(&lock->base.dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx); + if (ret) + mutex_release(&lock->base.dep_map, 1, _RET_IP_); + else if (!ret && ww_ctx->acquired > 1) + return ww_mutex_deadlock_injection(lock, ww_ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible); + +int __sched +__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) +{ + int ret; + + might_sleep(); + + mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, + _RET_IP_); + ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx); + if (ret) + mutex_release(&lock->base.dep_map, 1, _RET_IP_); + else if (!ret && ww_ctx->acquired > 1) + return ww_mutex_deadlock_injection(lock, ww_ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(__ww_mutex_lock); + +void __sched ww_mutex_unlock(struct ww_mutex *lock) +{ + /* + * The unlocking fastpath is the 0->1 transition from 'locked' + * into 'unlocked' state: + */ + if (lock->ctx) { +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); +#endif + if (lock->ctx->acquired > 0) + lock->ctx->acquired--; + lock->ctx = NULL; + } + + mutex_release(&lock->base.dep_map, 1, _RET_IP_); + rt_mutex_unlock(&lock->base.lock); +} +EXPORT_SYMBOL(ww_mutex_unlock); +#endif -- cgit v0.10.2 From 52a2b76df4527d329f656e57c85cd4334c0a6b9b Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sun, 15 Dec 2013 15:40:47 +0100 Subject: bad return value in __mutex_lock_check_stamp Bad return value in _mutex_lock_check_stamp - this problem only would show up with 3.12.1 rt4 applied but CONFIG_PREEMPT_RT_FULL not enabled currently it would be returning what ever vprintk_emit ended up with (atleast on x86), which probably is not the intended behavior. Added a return 0; as in the case with CONFIG_PREEMPT_RT_FULL enabled. 
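A minimal illustration of the failure mode, using hypothetical function names that are not taken from the kernel: when control falls off the end of a non-void function the behaviour is undefined, and on x86 the caller typically reads back whatever the last call left in the return register.

int stamp_check_broken(void)
{
	report_error();		/* hypothetical helper; its return value is
				 * whatever happens to sit in %eax afterwards */
	/* missing return: the caller sees that leftover value */
}

int stamp_check_fixed(void)
{
	report_error();
	return 0;		/* return value is now well defined */
}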
Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 1bb34e9..d3d0c72 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -989,6 +989,7 @@ static inline int __sched __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) { BUG(); + return 0; } #endif -- cgit v0.10.2 From 974238cc2d9ba6d79575f764bc76ccc7d2b63d27 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Mon, 19 Sep 2011 11:09:27 +0200 Subject: rwlocks: Fix section mismatch This fixes the following build error for the preempt-rt kernel. make kernel/fork.o CC kernel/fork.o kernel/fork.c:90: error: section of tasklist_lock conflicts with previous declaration make[2]: *** [kernel/fork.o] Error 1 make[1]: *** [kernel/fork.o] Error 2 The rt kernel cache aligns the RWLOCK in DEFINE_RWLOCK by default. The non-rt kernels explicitly cache align only the tasklist_lock in kernel/fork.c That can create a build conflict. This fixes the build problem by making the non-rt kernels cache align RWLOCKs by default. The side effect is that the other RWLOCKs are also cache aligned for non-rt. This is a short term solution for rt only. The longer term solution would be to push the cache aligned DEFINE_RWLOCK to mainline. If there are objections, then we could create a DEFINE_RWLOCK_CACHE_ALIGNED or something of that nature. Comments? Objections? Signed-off-by: John Kacur Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/alpine.LFD.2.00.1109191104010.23118@localhost6.localdomain6 Signed-off-by: Thomas Gleixner diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 5317cd9..d0da966 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -47,6 +47,7 @@ typedef struct { RW_DEP_MAP_INIT(lockname) } #endif -#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) +#define DEFINE_RWLOCK(name) \ + rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) #endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/kernel/fork.c b/kernel/fork.c index b681c38..ae9a1a4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -94,7 +94,7 @@ int max_threads; /* tunable limit on nr_threads */ DEFINE_PER_CPU(unsigned long, process_counts) = 0; -__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ +DEFINE_RWLOCK(tasklist_lock); /* outer */ #ifdef CONFIG_PROVE_RCU int lockdep_tasklist_lock_is_held(void) @@ -1690,7 +1690,7 @@ SYSCALL_DEFINE0(fork) #ifdef __ARCH_WANT_SYS_VFORK SYSCALL_DEFINE0(vfork) { - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, NULL, NULL); } #endif -- cgit v0.10.2 From 3040e993d5358ce7362d791f465f0b0c42b5fb3a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 22:08:38 +0200 Subject: timer-handle-idle-trylock-in-get-next-timer-irq.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h index 621d857..447c457 100644 --- a/include/linux/spinlock_rt.h +++ b/include/linux/spinlock_rt.h @@ -50,7 +50,17 @@ extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); #define spin_lock_irq(lock) spin_lock(lock) -#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) +#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) + +#define spin_trylock(lock) \ +({ \ + int __locked; \ + migrate_disable(); \ + __locked = spin_do_trylock(lock); \ + if (!__locked) \ + migrate_enable(); \ + __locked; \ +}) #ifdef CONFIG_LOCKDEP # define spin_lock_nested(lock, 
subclass) \ diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index d3d0c72..5aa004d 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -888,15 +888,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait); int __lockfunc rt_spin_trylock(spinlock_t *lock) { - int ret; + int ret = rt_mutex_trylock(&lock->lock); - migrate_disable(); - ret = rt_mutex_trylock(&lock->lock); if (ret) spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - else - migrate_enable(); - return ret; } EXPORT_SYMBOL(rt_spin_trylock); diff --git a/kernel/timer.c b/kernel/timer.c index 4cb850d..bbe6d43 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1386,9 +1386,10 @@ unsigned long get_next_timer_interrupt(unsigned long now) /* * On PREEMPT_RT we cannot sleep here. If the trylock does not * succeed then we return the worst-case 'expires in 1 tick' - * value: + * value. We use the rt functions here directly to avoid a + * migrate_disable() call. */ - if (!spin_trylock(&base->lock)) + if (!spin_do_trylock(&base->lock)) return now + 1; #else spin_lock(&base->lock); @@ -1398,7 +1399,11 @@ unsigned long get_next_timer_interrupt(unsigned long now) base->next_timer = __next_timer_interrupt(base); expires = base->next_timer; } +#ifdef CONFIG_PREEMPT_RT_FULL + rt_spin_unlock(&base->lock); +#else spin_unlock(&base->lock); +#endif if (time_before_eq(expires, now)) return now; -- cgit v0.10.2 From aadc46ed4f64c95ebacd908e32eb4b2b8325d3cc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 7 Nov 2013 12:21:11 +0100 Subject: timers: do not raise softirq unconditionally Mike, On Thu, 7 Nov 2013, Mike Galbraith wrote: > On Thu, 2013-11-07 at 04:26 +0100, Mike Galbraith wrote: > > On Wed, 2013-11-06 at 18:49 +0100, Thomas Gleixner wrote: > > > > I bet you are trying to work around some of the side effects of the > > > occasional tick which is still necessary despite of full nohz, right? > > > > Nope, I wanted to check out cost of nohz_full for rt, and found that it > > doesn't work at all instead, looked, and found that the sole running > > task has just awakened ksoftirqd when it wants to shut the tick down, so > > that shutdown never happens. > > Like so in virgin 3.10-rt. Box is x3550 M3 booted nowatchdog > rcu_nocbs=1-3 nohz_full=1-3, and CPUs1-3 are completely isolated via > cpusets as well. well, that very same problem is in mainline if you add "threadirqs" to the command line. But we can be smart about this. The untested patch below should address that issue. If that works on mainline we can adapt it for RT (needs a trylock(&base->lock) there). Though it's not a full solution. It needs some thought versus the softirq code of timers. Assume we have only one timer queued 1000 ticks into the future. So this change will cause the timer softirq not to be called until that timer expires and then the timer softirq is going to do 1000 loops until it catches up with jiffies. That's anything but pretty ... What worries me more is this one: pert-5229 [003] d..h1.. 684.482618: softirq_raise: vec=9 [action=RCU] The CPU has no callbacks as you shoved them over to cpu 0, so why is the RCU softirq raised? 
Thanks, tglx ------------------ Message-id: |CONFIG_NO_HZ_FULL + CONFIG_PREEMPT_RT_FULL = nogo Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 79a7a35..bdbf77db 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -461,9 +461,8 @@ extern int schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta, const enum hrtimer_mode mode, int clock); extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); -/* Soft interrupt function to run the hrtimer queues: */ +/* Called from the periodic timer tick */ extern void hrtimer_run_queues(void); -extern void hrtimer_run_pending(void); /* Bootup initialization: */ extern void __init hrtimers_init(void); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index c383841..7aa442e 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1694,30 +1694,6 @@ static void run_hrtimer_softirq(struct softirq_action *h) } /* - * Called from timer softirq every jiffy, expire hrtimers: - * - * For HRT its the fall back code to run the softirq in the timer - * softirq context in case the hrtimer initialization failed or has - * not been done yet. - */ -void hrtimer_run_pending(void) -{ - if (hrtimer_hres_active()) - return; - - /* - * This _is_ ugly: We have to check in the softirq context, - * whether we can switch to highres and / or nohz mode. The - * clocksource switch happens in the timer interrupt with - * xtime_lock held. Notification from there only sets the - * check bit in the tick_oneshot code, otherwise we might - * deadlock vs. xtime_lock. - */ - if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) - hrtimer_switch_to_hres(); -} - -/* * Called from hardirq context every jiffy */ void hrtimer_run_queues(void) @@ -1730,6 +1706,13 @@ void hrtimer_run_queues(void) if (hrtimer_hres_active()) return; + /* + * Check whether we can switch to highres mode. + */ + if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()) + && hrtimer_switch_to_hres()) + return; + for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { base = &cpu_base->clock_base[index]; if (!timerqueue_getnext(&base->active)) diff --git a/kernel/timer.c b/kernel/timer.c index bbe6d43..0906351 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1439,8 +1439,6 @@ static void run_timer_softirq(struct softirq_action *h) { struct tvec_base *base = __this_cpu_read(tvec_bases); - hrtimer_run_pending(); - if (time_after_eq(jiffies, base->timer_jiffies)) __run_timers(base); } @@ -1450,8 +1448,32 @@ static void run_timer_softirq(struct softirq_action *h) */ void run_local_timers(void) { + struct tvec_base *base = __this_cpu_read(tvec_bases); + hrtimer_run_queues(); - raise_softirq(TIMER_SOFTIRQ); + /* + * We can access this lockless as we are in the timer + * interrupt. If there are no timers queued, nothing to do in + * the timer softirq. 
+ */ +#ifdef CONFIG_PREEMPT_RT_FULL + if (!spin_do_trylock(&base->lock)) { + raise_softirq(TIMER_SOFTIRQ); + return; + } +#endif + if (!base->active_timers) + goto out; + + /* Check whether the next pending timer has expired */ + if (time_before_eq(base->next_timer, jiffies)) + raise_softirq(TIMER_SOFTIRQ); +out: +#ifdef CONFIG_PREEMPT_RT_FULL + rt_spin_unlock_after_trylock_in_irq(&base->lock); +#endif + /* The ; ensures that gcc won't complain in the !RT case */ + ; } #ifdef __ARCH_WANT_SYS_ALARM -- cgit v0.10.2 From 3d5290963bde738abce3e3886f56f0f069d712cd Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 24 Jan 2014 15:09:33 -0500 Subject: timer: Raise softirq if there's irq_work [ Talking with Sebastian on IRC, it seems that doing the irq_work_run() from the interrupt in -rt is a bad thing. Here we simply raise the softirq if there's irq work to do. This too boots on my i7 ] After trying hard to figure out why my i7 box was locking up with the new active_timers code, that does not run the timer softirq if there are no active timers, I took an extra look at the softirq handler and noticed that it doesn't just run timer softirqs, it also runs irq work. This was the bug that was locking up the system. It wasn't missing a timer, it was missing irq work. By always doing the irq work callbacks, the system boots fine. The missing irq work callback was the RCU's sp_wakeup() function. No need to check for defined(CONFIG_IRQ_WORK). When that's not set the "irq_work_needs_cpu()" is a static inline that returns false. Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/timer.c b/kernel/timer.c index 0906351..ed9d3bd 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1462,8 +1462,13 @@ void run_local_timers(void) return; } #endif - if (!base->active_timers) - goto out; + if (!base->active_timers) { +#ifdef CONFIG_PREEMPT_RT_FULL + /* On RT, irq work runs from softirq */ + if (!irq_work_needs_cpu()) +#endif + goto out; + } /* Check whether the next pending timer has expired */ if (time_before_eq(base->next_timer, jiffies)) -- cgit v0.10.2 From 9621d858da62206b8f25d6d89502d6c7bee191af Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 31 Jan 2014 12:07:57 -0500 Subject: timer/rt: Always raise the softirq if there's irq_work to be done It was previously discovered that some systems would hang on boot up with a previous version of 3.12-rt. This was due to RCU using irq_work, and RT defers the irq_work to a softirq. But if there's no active timers, the softirq will not be raised, and RCU work will not get done, causing the system to hang. The fix was to check that if there was no active timers but irq_work to be done, then we should raise the softirq. But this fix was not 100% correct. It left out the case that there were active timers that were not expired yet. This would have the softirq not get raised even if there was irq work to be done. If there is irq_work to be done, then we must raise the timer softirq regardless of if there is active timers or whether they are expired or not. The softirq can handle those cases. But we can never ignore irq_work. As it is only PREEMPT_RT_FULL that requires irq_work to be done in the softirq, we can pull out the check in the active_timers condition, and make the code a bit cleaner by having the irq_work check separate, and put the code in with the other #ifdef PREEMPT_RT. If there is irq_work to be done, there's no need to check the active timers or if they are expired. 
Just raise the time softirq and be done with it. Otherwise, we can do the timer checks just like we do with non -rt. Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/timer.c b/kernel/timer.c index ed9d3bd..31757c0 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1457,18 +1457,20 @@ void run_local_timers(void) * the timer softirq. */ #ifdef CONFIG_PREEMPT_RT_FULL + /* On RT, irq work runs from softirq */ + if (irq_work_needs_cpu()) { + raise_softirq(TIMER_SOFTIRQ); + return; + } + if (!spin_do_trylock(&base->lock)) { raise_softirq(TIMER_SOFTIRQ); return; } #endif - if (!base->active_timers) { -#ifdef CONFIG_PREEMPT_RT_FULL - /* On RT, irq work runs from softirq */ - if (!irq_work_needs_cpu()) -#endif - goto out; - } + + if (!base->active_timers) + goto out; /* Check whether the next pending timer has expired */ if (time_before_eq(base->next_timer, jiffies)) -- cgit v0.10.2 From 6c9fe1cf1a3d8c68f104dd8472059890e3fc6d87 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 15 Nov 2013 15:46:50 +0100 Subject: rtmutex: use a trylock for waiter lock in trylock Mike Galbraith captered the following: | >#11 [ffff88017b243e90] _raw_spin_lock at ffffffff815d2596 | >#12 [ffff88017b243e90] rt_mutex_trylock at ffffffff815d15be | >#13 [ffff88017b243eb0] get_next_timer_interrupt at ffffffff81063b42 | >#14 [ffff88017b243f00] tick_nohz_stop_sched_tick at ffffffff810bd1fd | >#15 [ffff88017b243f70] tick_nohz_irq_exit at ffffffff810bd7d2 | >#16 [ffff88017b243f90] irq_exit at ffffffff8105b02d | >#17 [ffff88017b243fb0] reschedule_interrupt at ffffffff815db3dd | >--- --- | >#18 [ffff88017a2a9bc8] reschedule_interrupt at ffffffff815db3dd | > [exception RIP: task_blocks_on_rt_mutex+51] | >#19 [ffff88017a2a9ce0] rt_spin_lock_slowlock at ffffffff815d183c | >#20 [ffff88017a2a9da0] lock_timer_base.isra.35 at ffffffff81061cbf | >#21 [ffff88017a2a9dd0] schedule_timeout at ffffffff815cf1ce | >#22 [ffff88017a2a9e50] rcu_gp_kthread at ffffffff810f9bbb | >#23 [ffff88017a2a9ed0] kthread at ffffffff810796d5 | >#24 [ffff88017a2a9f50] ret_from_fork at ffffffff815da04c lock_timer_base() does a try_lock() which deadlocks on the waiter lock not the lock itself. This patch takes the waiter_lock with trylock so it should work from interrupt context as well. If the fastpath doesn't work and the waiter_lock itself is taken then it seems that the lock itself taken. This patch also adds "rt_spin_unlock_after_trylock_in_irq" to keep lockdep happy. If we managed to take the wait_lock in the first place we should also be able to take it in the unlock path. 
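Put together with the get_next_timer_interrupt() hunk below, the intended irq-context usage on -rt looks roughly like this sketch. It condenses the CONFIG_PREEMPT_RT_FULL path only; next_timer_sketch() is not a real kernel function.

static unsigned long next_timer_sketch(struct tvec_base *base, unsigned long now)
{
	unsigned long expires;

	/*
	 * Hard irq context on -rt: never sleep on the sleeping spinlock,
	 * just report the worst case if the trylock fails.
	 */
	if (!spin_do_trylock(&base->lock))
		return now + 1;

	expires = base->next_timer;	/* scan the wheel under base->lock */

	/* Matches the trylock above and keeps lockdep's release balanced. */
	rt_spin_unlock_after_trylock_in_irq(&base->lock);

	return expires;
}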
Cc: stable-rt@vger.kernel.org Reported-by: Mike Galbraith Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h index 447c457..ac6f08b 100644 --- a/include/linux/spinlock_rt.h +++ b/include/linux/spinlock_rt.h @@ -22,6 +22,7 @@ extern void __lockfunc rt_spin_lock(spinlock_t *lock); extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); extern void __lockfunc rt_spin_unlock(spinlock_t *lock); +extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock); extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 5aa004d..4057bc6 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -816,10 +816,8 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) /* * Slow path to release a rt_mutex spin_lock style */ -static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) +static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock) { - raw_spin_lock(&lock->wait_lock); - debug_rt_mutex_unlock(lock); rt_mutex_deadlock_account_unlock(current); @@ -838,6 +836,23 @@ static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) rt_mutex_adjust_prio(current); } +static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) +{ + raw_spin_lock(&lock->wait_lock); + __rt_spin_lock_slowunlock(lock); +} + +static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock) +{ + int ret; + + do { + ret = raw_spin_trylock(&lock->wait_lock); + } while (!ret); + + __rt_spin_lock_slowunlock(lock); +} + void __lockfunc rt_spin_lock(spinlock_t *lock) { rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); @@ -868,6 +883,13 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock) } EXPORT_SYMBOL(rt_spin_unlock); +void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + spin_release(&lock->dep_map, 1, _RET_IP_); + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq); +} + void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) { rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); @@ -1191,7 +1213,8 @@ rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = 0; - raw_spin_lock(&lock->wait_lock); + if (!raw_spin_trylock(&lock->wait_lock)) + return ret; init_lists(lock); if (likely(rt_mutex_owner(lock) != current)) { diff --git a/kernel/timer.c b/kernel/timer.c index 31757c0..b4f860c 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1400,7 +1400,7 @@ unsigned long get_next_timer_interrupt(unsigned long now) expires = base->next_timer; } #ifdef CONFIG_PREEMPT_RT_FULL - rt_spin_unlock(&base->lock); + rt_spin_unlock_after_trylock_in_irq(&base->lock); #else spin_unlock(&base->lock); #endif -- cgit v0.10.2 From 4d85f8825a4e7d261aec080a07c94f1a6c63493a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 13 Aug 2011 00:23:17 +0200 Subject: rcu: Frob softirq test With RT_FULL we get the below wreckage: [ 126.060484] ======================================================= [ 126.060486] [ INFO: possible circular locking dependency detected ] [ 126.060489] 3.0.1-rt10+ #30 [ 126.060490] ------------------------------------------------------- [ 126.060492] irq/24-eth0/1235 
is trying to acquire lock: [ 126.060495] (&(lock)->wait_lock#2){+.+...}, at: [] rt_mutex_slowunlock+0x16/0x55 [ 126.060503] [ 126.060504] but task is already holding lock: [ 126.060506] (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 [ 126.060511] [ 126.060511] which lock already depends on the new lock. [ 126.060513] [ 126.060514] [ 126.060514] the existing dependency chain (in reverse order) is: [ 126.060516] [ 126.060516] -> #1 (&p->pi_lock){-...-.}: [ 126.060519] [] lock_acquire+0x145/0x18a [ 126.060524] [] _raw_spin_lock_irqsave+0x4b/0x85 [ 126.060527] [] task_blocks_on_rt_mutex+0x36/0x20f [ 126.060531] [] rt_mutex_slowlock+0xd1/0x15a [ 126.060534] [] rt_mutex_lock+0x2d/0x2f [ 126.060537] [] rcu_boost+0xad/0xde [ 126.060541] [] rcu_boost_kthread+0x7d/0x9b [ 126.060544] [] kthread+0x99/0xa1 [ 126.060547] [] kernel_thread_helper+0x4/0x10 [ 126.060551] [ 126.060552] -> #0 (&(lock)->wait_lock#2){+.+...}: [ 126.060555] [] __lock_acquire+0x1157/0x1816 [ 126.060558] [] lock_acquire+0x145/0x18a [ 126.060561] [] _raw_spin_lock+0x40/0x73 [ 126.060564] [] rt_mutex_slowunlock+0x16/0x55 [ 126.060566] [] rt_mutex_unlock+0x27/0x29 [ 126.060569] [] rcu_read_unlock_special+0x17e/0x1c4 [ 126.060573] [] __rcu_read_unlock+0x48/0x89 [ 126.060576] [] select_task_rq_rt+0xc7/0xd5 [ 126.060580] [] try_to_wake_up+0x175/0x429 [ 126.060583] [] wake_up_process+0x15/0x17 [ 126.060585] [] wakeup_softirqd+0x24/0x26 [ 126.060590] [] irq_exit+0x49/0x55 [ 126.060593] [] smp_apic_timer_interrupt+0x8a/0x98 [ 126.060597] [] apic_timer_interrupt+0x13/0x20 [ 126.060600] [] irq_forced_thread_fn+0x1b/0x44 [ 126.060603] [] irq_thread+0xde/0x1af [ 126.060606] [] kthread+0x99/0xa1 [ 126.060608] [] kernel_thread_helper+0x4/0x10 [ 126.060611] [ 126.060612] other info that might help us debug this: [ 126.060614] [ 126.060615] Possible unsafe locking scenario: [ 126.060616] [ 126.060617] CPU0 CPU1 [ 126.060619] ---- ---- [ 126.060620] lock(&p->pi_lock); [ 126.060623] lock(&(lock)->wait_lock); [ 126.060625] lock(&p->pi_lock); [ 126.060627] lock(&(lock)->wait_lock); [ 126.060629] [ 126.060629] *** DEADLOCK *** [ 126.060630] [ 126.060632] 1 lock held by irq/24-eth0/1235: [ 126.060633] #0: (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 [ 126.060638] [ 126.060638] stack backtrace: [ 126.060641] Pid: 1235, comm: irq/24-eth0 Not tainted 3.0.1-rt10+ #30 [ 126.060643] Call Trace: [ 126.060644] [] print_circular_bug+0x289/0x29a [ 126.060651] [] __lock_acquire+0x1157/0x1816 [ 126.060655] [] ? trace_hardirqs_off_caller+0x1f/0x99 [ 126.060658] [] ? rt_mutex_slowunlock+0x16/0x55 [ 126.060661] [] lock_acquire+0x145/0x18a [ 126.060664] [] ? rt_mutex_slowunlock+0x16/0x55 [ 126.060668] [] _raw_spin_lock+0x40/0x73 [ 126.060671] [] ? rt_mutex_slowunlock+0x16/0x55 [ 126.060674] [] ? rcu_report_qs_rsp+0x87/0x8c [ 126.060677] [] rt_mutex_slowunlock+0x16/0x55 [ 126.060680] [] ? rcu_read_unlock_special+0x9b/0x1c4 [ 126.060683] [] rt_mutex_unlock+0x27/0x29 [ 126.060687] [] rcu_read_unlock_special+0x17e/0x1c4 [ 126.060690] [] __rcu_read_unlock+0x48/0x89 [ 126.060693] [] select_task_rq_rt+0xc7/0xd5 [ 126.060696] [] ? select_task_rq_rt+0x27/0xd5 [ 126.060701] [] ? clockevents_program_event+0x8e/0x90 [ 126.060704] [] try_to_wake_up+0x175/0x429 [ 126.060708] [] ? tick_program_event+0x1f/0x21 [ 126.060711] [] wake_up_process+0x15/0x17 [ 126.060715] [] wakeup_softirqd+0x24/0x26 [ 126.060718] [] irq_exit+0x49/0x55 [ 126.060721] [] smp_apic_timer_interrupt+0x8a/0x98 [ 126.060724] [] apic_timer_interrupt+0x13/0x20 [ 126.060726] [] ? 
migrate_disable+0x75/0x12d [ 126.060733] [] ? local_bh_disable+0xe/0x1f [ 126.060736] [] ? local_bh_disable+0x1d/0x1f [ 126.060739] [] irq_forced_thread_fn+0x1b/0x44 [ 126.060742] [] ? _raw_spin_unlock_irq+0x3b/0x59 [ 126.060745] [] irq_thread+0xde/0x1af [ 126.060748] [] ? irq_thread_fn+0x3a/0x3a [ 126.060751] [] ? irq_finalize_oneshot+0xd1/0xd1 [ 126.060754] [] ? irq_finalize_oneshot+0xd1/0xd1 [ 126.060757] [] kthread+0x99/0xa1 [ 126.060761] [] kernel_thread_helper+0x4/0x10 [ 126.060764] [] ? finish_task_switch+0x87/0x10a [ 126.060768] [] ? retint_restore_args+0xe/0xe [ 126.060771] [] ? __init_kthread_worker+0x8c/0x8c [ 126.060774] [] ? gs_change+0xb/0xb Because irq_exit() does: void irq_exit(void) { account_system_vtime(current); trace_hardirq_exit(); sub_preempt_count(IRQ_EXIT_OFFSET); if (!in_interrupt() && local_softirq_pending()) invoke_softirq(); ... } Which triggers a wakeup, which uses RCU, now if the interrupted task has t->rcu_read_unlock_special set, the rcu usage from the wakeup will end up in rcu_read_unlock_special(). rcu_read_unlock_special() will test for in_irq(), which will fail as we just decremented preempt_count with IRQ_EXIT_OFFSET, and in_sering_softirq(), which for PREEMPT_RT_FULL reads: int in_serving_softirq(void) { int res; preempt_disable(); res = __get_cpu_var(local_softirq_runner) == current; preempt_enable(); return res; } Which will thus also fail, resulting in the above wreckage. The 'somewhat' ugly solution is to open-code the preempt_count() test in rcu_read_unlock_special(). Also, we're not at all sure how ->rcu_read_unlock_special gets set here... so this is very likely a bandaid and more thought is required. Cc: Paul E. McKenney Signed-off-by: Peter Zijlstra diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 22232f8..d2da952 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -359,7 +359,7 @@ void rcu_read_unlock_special(struct task_struct *t) } /* Hardware IRQ handlers cannot block. */ - if (in_irq() || in_serving_softirq()) { + if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { local_irq_restore(flags); return; } -- cgit v0.10.2 From df063b3e0f2dda8825336c34116cadd07a929757 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Oct 2011 11:59:38 -0700 Subject: rcu: Merge RCU-bh into RCU-preempt The Linux kernel has long RCU-bh read-side critical sections that intolerably increase scheduling latency under mainline's RCU-bh rules, which include RCU-bh read-side critical sections being non-preemptible. This patch therefore arranges for RCU-bh to be implemented in terms of RCU-preempt for CONFIG_PREEMPT_RT_FULL=y. This has the downside of defeating the purpose of RCU-bh, namely, handling the case where the system is subjected to a network-based denial-of-service attack that keeps at least one CPU doing full-time softirq processing. This issue will be fixed by a later commit. The current commit will need some work to make it appropriate for mainline use, for example, it needs to be extended to cover Tiny RCU. [ paulmck: Added a useful changelog ] Signed-off-by: Thomas Gleixner Signed-off-by: Paul E. 
McKenney Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com Signed-off-by: Thomas Gleixner diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index e56a6b2..7d754bf 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -128,6 +128,9 @@ extern void call_rcu(struct rcu_head *head, #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_PREEMPT_RT_FULL +#define call_rcu_bh call_rcu +#else /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. @@ -151,6 +154,7 @@ extern void call_rcu(struct rcu_head *head, */ extern void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head)); +#endif /** * call_rcu_sched() - Queue an RCU for invocation after sched grace period. @@ -225,7 +229,13 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ extern void rcu_init(void); extern void rcu_sched_qs(int cpu); + +#ifdef CONFIG_PREEMPT_RT_FULL +static inline void rcu_bh_qs(int cpu) { } +#else extern void rcu_bh_qs(int cpu); +#endif + extern void rcu_check_callbacks(int cpu, int user); struct notifier_block; extern void rcu_idle_enter(void); @@ -369,7 +379,14 @@ static inline int rcu_read_lock_held(void) * rcu_read_lock_bh_held() is defined out of line to avoid #include-file * hell. */ +#ifdef CONFIG_PREEMPT_RT_FULL +static inline int rcu_read_lock_bh_held(void) +{ + return rcu_read_lock_held(); +} +#else extern int rcu_read_lock_bh_held(void); +#endif /** * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? @@ -826,10 +843,14 @@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); +#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_lock(); +#else __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); rcu_lockdep_assert(!rcu_is_cpu_idle(), "rcu_read_lock_bh() used illegally while idle"); +#endif } /* @@ -839,10 +860,14 @@ static inline void rcu_read_lock_bh(void) */ static inline void rcu_read_unlock_bh(void) { +#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_unlock(); +#else rcu_lockdep_assert(!rcu_is_cpu_idle(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); +#endif local_bh_enable(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 226169d..91333de 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(cpu); } +#ifdef CONFIG_PREEMPT_RT_FULL +# define synchronize_rcu_bh synchronize_rcu +#else extern void synchronize_rcu_bh(void); +#endif extern void synchronize_sched_expedited(void); extern void synchronize_rcu_expedited(void); @@ -72,17 +76,19 @@ static inline void synchronize_rcu_bh_expedited(void) } extern void rcu_barrier(void); +#ifdef CONFIG_PREEMPT_RT_FULL +# define rcu_barrier_bh rcu_barrier +#else extern void rcu_barrier_bh(void); +#endif extern void rcu_barrier_sched(void); extern unsigned long rcutorture_testseq; extern unsigned long rcutorture_vernum; extern long rcu_batches_completed(void); -extern long rcu_batches_completed_bh(void); extern long rcu_batches_completed_sched(void); extern void rcu_force_quiescent_state(void); -extern void rcu_bh_force_quiescent_state(void); extern void rcu_sched_force_quiescent_state(void); extern void exit_rcu(void); @@ -90,4 +96,12 @@ extern void exit_rcu(void); extern void rcu_scheduler_starting(void); extern int rcu_scheduler_active 
__read_mostly; +#ifndef CONFIG_PREEMPT_RT_FULL +extern void rcu_bh_force_quiescent_state(void); +extern long rcu_batches_completed_bh(void); +#else +# define rcu_bh_force_quiescent_state rcu_force_quiescent_state +# define rcu_batches_completed_bh rcu_batches_completed +#endif + #endif /* __LINUX_RCUTREE_H */ diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index b02a339..7e1dd3e 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -129,6 +129,7 @@ int notrace debug_lockdep_rcu_enabled(void) } EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? * @@ -155,6 +156,7 @@ int rcu_read_lock_bh_held(void) return in_softirq() || irqs_disabled(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); +#endif #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ diff --git a/kernel/rcutree.c b/kernel/rcutree.c index ee4de3f..d52e23b 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -199,6 +199,7 @@ void rcu_sched_qs(int cpu) rdp->passed_quiesce = 1; } +#ifndef CONFIG_PREEMPT_RT_FULL void rcu_bh_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); @@ -207,6 +208,7 @@ void rcu_bh_qs(int cpu) trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs")); rdp->passed_quiesce = 1; } +#endif /* * Note a context switch. This is a quiescent state for RCU-sched, @@ -263,6 +265,7 @@ long rcu_batches_completed_sched(void) } EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Return the number of RCU BH batches processed thus far for debug & stats. */ @@ -280,6 +283,7 @@ void rcu_bh_force_quiescent_state(void) force_quiescent_state(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); +#endif /* * Record the number of times rcutorture tests have been initiated and @@ -2373,6 +2377,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) } EXPORT_SYMBOL_GPL(call_rcu_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Queue an RCU callback for invocation after a quicker grace period. */ @@ -2381,6 +2386,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) __call_rcu(head, func, &rcu_bh_state, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu_bh); +#endif /* * Because a context switch is a grace period for RCU-sched and RCU-bh, @@ -2458,6 +2464,7 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * @@ -2484,6 +2491,7 @@ void synchronize_rcu_bh(void) wait_rcu_gp(call_rcu_bh); } EXPORT_SYMBOL_GPL(synchronize_rcu_bh); +#endif static int synchronize_sched_expedited_cpu_stop(void *data) { @@ -2895,6 +2903,7 @@ static void _rcu_barrier(struct rcu_state *rsp) mutex_unlock(&rsp->barrier_mutex); } +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. */ @@ -2903,6 +2912,7 @@ void rcu_barrier_bh(void) _rcu_barrier(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); +#endif /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. 
-- cgit v0.10.2 From 0c648c875792e72e55b57d88457c7efa1d60223b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 14 Nov 2011 10:57:54 +0100 Subject: rcu-more-fallout.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 9ed6075..f202b26 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c @@ -369,6 +369,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) } EXPORT_SYMBOL_GPL(call_rcu_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Post an RCU bottom-half callback to be invoked after any subsequent * quiescent state. @@ -378,6 +379,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) __call_rcu(head, func, &rcu_bh_ctrlblk); } EXPORT_SYMBOL_GPL(call_rcu_bh); +#endif void rcu_init(void) { -- cgit v0.10.2 From 61f4e30bfb1b99f73366e3564ab173def41a5a4c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 5 Oct 2011 11:45:18 -0700 Subject: rcu: Make ksoftirqd do RCU quiescent states Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable to network-based denial-of-service attacks. This patch therefore makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq() is running in ksoftirqd context. A wrapper layer in interposed so that other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying function __do_softirq_common() does the actual work. The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is that there might be a local_bh_enable() inside an RCU-preempt read-side critical section. This local_bh_enable() can invoke __do_softirq() directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be an illegal RCU-preempt quiescent state in the middle of an RCU-preempt read-side critical section. Therefore, quiescent states can only happen in cases where __do_softirq() is invoked directly from ksoftirqd. Signed-off-by: Paul E. 
McKenney Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com Signed-off-by: Thomas Gleixner diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 7d754bf..8b2693d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -229,13 +229,7 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ extern void rcu_init(void); extern void rcu_sched_qs(int cpu); - -#ifdef CONFIG_PREEMPT_RT_FULL -static inline void rcu_bh_qs(int cpu) { } -#else extern void rcu_bh_qs(int cpu); -#endif - extern void rcu_check_callbacks(int cpu, int user); struct notifier_block; extern void rcu_idle_enter(void); diff --git a/kernel/rcutree.c b/kernel/rcutree.c index d52e23b..8104cb2 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -199,7 +199,14 @@ void rcu_sched_qs(int cpu) rdp->passed_quiesce = 1; } -#ifndef CONFIG_PREEMPT_RT_FULL +#ifdef CONFIG_PREEMPT_RT_FULL +static void rcu_preempt_qs(int cpu); + +void rcu_bh_qs(int cpu) +{ + rcu_preempt_qs(cpu); +} +#else void rcu_bh_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index d2da952..63e0520 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -1553,7 +1553,7 @@ static void rcu_prepare_kthreads(int cpu) #endif /* #else #ifdef CONFIG_RCU_BOOST */ -#if !defined(CONFIG_RCU_FAST_NO_HZ) +#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) /* * Check to see if any future RCU-related work will need to be done @@ -1569,6 +1569,9 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) *delta_jiffies = ULONG_MAX; return rcu_cpu_has_callbacks(cpu, NULL); } +#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ + +#if !defined(CONFIG_RCU_FAST_NO_HZ) /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up @@ -1666,6 +1669,8 @@ static bool rcu_try_advance_all_cbs(void) return cbs_ready; } +#ifndef CONFIG_PREEMPT_RT_FULL + /* * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready * to invoke. If the CPU has callbacks, try to advance them. Tell the @@ -1704,6 +1709,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj) } return 0; } +#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ /* * Prepare a CPU for idle from an RCU perspective. 
The first major task diff --git a/kernel/softirq.c b/kernel/softirq.c index 37482da..8692908 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -142,7 +142,7 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } -static void handle_pending_softirqs(u32 pending, int cpu) +static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) { struct softirq_action *h = softirq_vec; unsigned int prev_count = preempt_count(); @@ -165,7 +165,8 @@ static void handle_pending_softirqs(u32 pending, int cpu) prev_count, (unsigned int) preempt_count()); preempt_count() = prev_count; } - rcu_bh_qs(cpu); + if (need_rcu_bh_qs) + rcu_bh_qs(cpu); } local_irq_disable(); } @@ -367,7 +368,7 @@ restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); - handle_pending_softirqs(pending, cpu); + handle_pending_softirqs(pending, cpu, 1); pending = local_softirq_pending(); if (pending) { @@ -419,7 +420,12 @@ static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); -static void __do_softirq(void); +static void __do_softirq_common(int need_rcu_bh_qs); + +void __do_softirq(void) +{ + __do_softirq_common(0); +} void __init softirq_early_init(void) { @@ -490,7 +496,7 @@ EXPORT_SYMBOL(in_serving_softirq); * Called with bh and local interrupts disabled. For full RT cpu must * be pinned. */ -static void __do_softirq(void) +static void __do_softirq_common(int need_rcu_bh_qs) { u32 pending = local_softirq_pending(); int cpu = smp_processor_id(); @@ -504,7 +510,7 @@ static void __do_softirq(void) lockdep_softirq_enter(); - handle_pending_softirqs(pending, cpu); + handle_pending_softirqs(pending, cpu, need_rcu_bh_qs); pending = local_softirq_pending(); if (pending) @@ -543,7 +549,7 @@ static int __thread_do_softirq(int cpu) * schedule! */ if (local_softirq_pending()) - __do_softirq(); + __do_softirq_common(cpu >= 0); local_unlock(local_softirq_lock); unpin_current_cpu(); preempt_disable(); -- cgit v0.10.2 From 854fa50d6557c610f4c247601da99c6cb286abc3 Mon Sep 17 00:00:00 2001 From: Tiejun Chen Date: Wed, 18 Dec 2013 17:51:49 +0800 Subject: rcutree/rcu_bh_qs: disable irq while calling rcu_preempt_qs() Any callers to the function rcu_preempt_qs() must disable irqs in order to protect the assignment to ->rcu_read_unlock_special. In RT case, rcu_bh_qs() as the wrapper of rcu_preempt_qs() is called in some scenarios where irq is enabled, like this path, do_single_softirq() | + local_irq_enable(); + handle_softirq() | | | + rcu_bh_qs() | | | + rcu_preempt_qs() | + local_irq_disable() So here we'd better disable irq directly inside of rcu_bh_qs() to fix this, otherwise the kernel may be freezable sometimes as observed. And especially this way is also kind and safe for the potential rcu_bh_qs() usage elsewhere in the future. Cc: stable-rt@vger.kernel.org Signed-off-by: Tiejun Chen Signed-off-by: Bin Jiang Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 8104cb2..cae59e3 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -204,7 +204,12 @@ static void rcu_preempt_qs(int cpu); void rcu_bh_qs(int cpu) { + unsigned long flags; + + /* Callers to this function, rcu_preempt_qs(), must disable irqs. 
*/ + local_irq_save(flags); rcu_preempt_qs(cpu); + local_irq_restore(flags); } #else void rcu_bh_qs(int cpu) -- cgit v0.10.2 From 9db18e76ef43081555be1fe93d7df83f39e4df6b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 15 Jun 2011 11:02:21 +0200 Subject: lglocks-rt.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/lglock.h b/include/linux/lglock.h index 0d24e93..d2c0d6d 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h @@ -42,22 +42,37 @@ #endif struct lglock { +#ifndef CONFIG_PREEMPT_RT_FULL arch_spinlock_t __percpu *lock; +#else + struct rt_mutex __percpu *lock; +#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lock_key; struct lockdep_map lock_dep_map; #endif }; -#define DEFINE_LGLOCK(name) \ +#ifndef CONFIG_PREEMPT_RT_FULL +# define DEFINE_LGLOCK(name) \ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ = __ARCH_SPIN_LOCK_UNLOCKED; \ struct lglock name = { .lock = &name ## _lock } -#define DEFINE_STATIC_LGLOCK(name) \ +# define DEFINE_STATIC_LGLOCK(name) \ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ = __ARCH_SPIN_LOCK_UNLOCKED; \ static struct lglock name = { .lock = &name ## _lock } +#else + +# define DEFINE_LGLOCK(name) \ + static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ + struct lglock name = { .lock = &name ## _lock } + +# define DEFINE_STATIC_LGLOCK(name) \ + static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ + static struct lglock name = { .lock = &name ## _lock } +#endif void lg_lock_init(struct lglock *lg, char *name); void lg_local_lock(struct lglock *lg); diff --git a/kernel/lglock.c b/kernel/lglock.c index 86ae2ae..f2356df 100644 --- a/kernel/lglock.c +++ b/kernel/lglock.c @@ -4,6 +4,15 @@ #include #include +#ifndef CONFIG_PREEMPT_RT_FULL +# define lg_lock_ptr arch_spinlock_t +# define lg_do_lock(l) arch_spin_lock(l) +# define lg_do_unlock(l) arch_spin_unlock(l) +#else +# define lg_lock_ptr struct rt_mutex +# define lg_do_lock(l) __rt_spin_lock(l) +# define lg_do_unlock(l) __rt_spin_unlock(l) +#endif /* * Note there is no uninit, so lglocks cannot be defined in * modules (but it's fine to use them from there) @@ -12,51 +21,60 @@ void lg_lock_init(struct lglock *lg, char *name) { +#ifdef CONFIG_PREEMPT_RT_FULL + int i; + + for_each_possible_cpu(i) { + struct rt_mutex *lock = per_cpu_ptr(lg->lock, i); + + rt_mutex_init(lock); + } +#endif LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0); } EXPORT_SYMBOL(lg_lock_init); void lg_local_lock(struct lglock *lg) { - arch_spinlock_t *lock; + lg_lock_ptr *lock; - preempt_disable(); + migrate_disable(); lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); lock = this_cpu_ptr(lg->lock); - arch_spin_lock(lock); + lg_do_lock(lock); } EXPORT_SYMBOL(lg_local_lock); void lg_local_unlock(struct lglock *lg) { - arch_spinlock_t *lock; + lg_lock_ptr *lock; lock_release(&lg->lock_dep_map, 1, _RET_IP_); lock = this_cpu_ptr(lg->lock); - arch_spin_unlock(lock); - preempt_enable(); + lg_do_unlock(lock); + migrate_enable(); } EXPORT_SYMBOL(lg_local_unlock); void lg_local_lock_cpu(struct lglock *lg, int cpu) { - arch_spinlock_t *lock; + lg_lock_ptr *lock; - preempt_disable(); + preempt_disable_nort(); lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); lock = per_cpu_ptr(lg->lock, cpu); - arch_spin_lock(lock); + lg_do_lock(lock); } EXPORT_SYMBOL(lg_local_lock_cpu); void lg_local_unlock_cpu(struct lglock *lg, int cpu) { - arch_spinlock_t *lock; + lg_lock_ptr *lock; lock_release(&lg->lock_dep_map, 1, _RET_IP_); lock = per_cpu_ptr(lg->lock, cpu); 
- arch_spin_unlock(lock); - preempt_enable(); + lg_do_unlock(lock); + preempt_enable_nort(); } EXPORT_SYMBOL(lg_local_unlock_cpu); @@ -64,12 +82,12 @@ void lg_global_lock(struct lglock *lg) { int i; - preempt_disable(); + preempt_disable_nort(); lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); for_each_possible_cpu(i) { - arch_spinlock_t *lock; + lg_lock_ptr *lock; lock = per_cpu_ptr(lg->lock, i); - arch_spin_lock(lock); + lg_do_lock(lock); } } EXPORT_SYMBOL(lg_global_lock); @@ -80,10 +98,10 @@ void lg_global_unlock(struct lglock *lg) lock_release(&lg->lock_dep_map, 1, _RET_IP_); for_each_possible_cpu(i) { - arch_spinlock_t *lock; + lg_lock_ptr *lock; lock = per_cpu_ptr(lg->lock, i); - arch_spin_unlock(lock); + lg_do_unlock(lock); } - preempt_enable(); + preempt_enable_nort(); } EXPORT_SYMBOL(lg_global_unlock); -- cgit v0.10.2 From ea4c77aee40edb037398c0b65dd0c3ee742a11d1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:01 -0500 Subject: serial: 8250: Clean up the locking for -rt In -RT the spin_lock_irqsave() does not spin but sleep if the lock is taken. Before that, local_irq_save() is invoked which disables interrupts even on -RT. Therefore local_irq_save() + spin_lock() does not work. In the ->sysrq and oops_in_progress case it is save to trylock the lock i.e. this is what we do now anyway except for ->sysrq where we assume that the lock is already taken. The spin_lock_irqsave() grabs the lock and disables the interrupts on vanilla (the same behavior) and on -RT it won't disable interrupts. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner [bigeasy: add a patch description] Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 4f6e01c..f8fde64 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -2864,14 +2864,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) touch_nmi_watchdog(); - local_irq_save(flags); - if (port->sysrq) { - /* serial8250_handle_irq() already took the lock */ - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&port->lock); - } else - spin_lock(&port->lock); + if (port->sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); /* * First save the IER then disable the interrupts @@ -2903,8 +2899,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) serial8250_modem_status(up); if (locked) - spin_unlock(&port->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&port->lock, flags); } static int __init serial8250_console_setup(struct console *co, char *options) -- cgit v0.10.2 From 7782904ee7d801ebae6ac2721b71587b930d5fa3 Mon Sep 17 00:00:00 2001 From: David Miller Date: Tue, 4 Mar 2014 20:28:35 +0000 Subject: sparc: serial: Clean up the locking for -rt Signed-off-by: David S. 
Miller Tested-by: Allen Pais diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index cf86e72..dc697ce 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -433,13 +433,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign unsigned long flags; int locked = 1; - local_irq_save(flags); - if (port->sysrq) { - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&port->lock); - } else - spin_lock(&port->lock); + if (port->sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); while (n > 0) { unsigned long ra = __pa(con_write_page); @@ -470,8 +467,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign } if (locked) - spin_unlock(&port->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&port->lock, flags); } static inline void sunhv_console_putchar(struct uart_port *port, char c) @@ -492,7 +488,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig unsigned long flags; int i, locked = 1; - local_irq_save(flags); + if (port->sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); if (port->sysrq) { locked = 0; } else if (oops_in_progress) { @@ -507,8 +506,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig } if (locked) - spin_unlock(&port->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&port->lock, flags); } static struct console sunhv_console = { diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 5d6136b..1bf2774 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c @@ -844,20 +844,16 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n) unsigned long flags; int locked = 1; - local_irq_save(flags); - if (up->port.sysrq) { - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&up->port.lock); - } else - spin_lock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); uart_console_write(&up->port, s, n, sunsab_console_putchar); sunsab_tec_wait(up); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int sunsab_console_setup(struct console *con, char *options) diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 699cc1b..d88fb63 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -1295,13 +1295,10 @@ static void sunsu_console_write(struct console *co, const char *s, unsigned int ier; int locked = 1; - local_irq_save(flags); - if (up->port.sysrq) { - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&up->port.lock); - } else - spin_lock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); /* * First save the UER then disable the interrupts @@ -1319,8 +1316,7 @@ static void sunsu_console_write(struct console *co, const char *s, serial_out(up, UART_IER, ier); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } /* diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index 135a152..3103c3b 100644 --- 
a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c @@ -1195,20 +1195,16 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count) unsigned long flags; int locked = 1; - local_irq_save(flags); - if (up->port.sysrq) { - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&up->port.lock); - } else - spin_lock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); uart_console_write(&up->port, s, count, sunzilog_putchar); udelay(2); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int __init sunzilog_console_setup(struct console *con, char *options) -- cgit v0.10.2 From be183d523a633c383405f20797b1680ff4bdc859 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 3 Jul 2009 08:30:01 -0500 Subject: serial: 8250: Call flush_to_ldisc when the irq is threaded Signed-off-by: Ingo Molnar diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index c043136f..8a3b2a0 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -509,10 +509,14 @@ void tty_flip_buffer_push(struct tty_port *port) buf->tail->commit = buf->tail->used; +#ifndef CONFIG_PREEMPT_RT_FULL if (port->low_latency) flush_to_ldisc(&buf->work); else schedule_work(&buf->work); +#else + schedule_work(&buf->work); +#endif } EXPORT_SYMBOL(tty_flip_buffer_push); -- cgit v0.10.2 From f0b8a563433a1e137a9a010bee8891631afda63c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 28 Jul 2011 13:32:57 +0200 Subject: drivers-tty-fix-omap-lock-crap.patch Signed-off-by: Thomas Gleixner diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 816d1a2..22c8149 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1232,13 +1232,10 @@ serial_omap_console_write(struct console *co, const char *s, pm_runtime_get_sync(up->dev); - local_irq_save(flags); - if (up->port.sysrq) - locked = 0; - else if (oops_in_progress) - locked = spin_trylock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); else - spin_lock(&up->port.lock); + spin_lock_irqsave(&up->port.lock, flags); /* * First save the IER then disable the interrupts @@ -1267,8 +1264,7 @@ serial_omap_console_write(struct console *co, const char *s, pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int __init -- cgit v0.10.2 From 7bec8512ea842c8b255d5a468640b31dc35aad18 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 8 Jan 2013 21:36:51 +0100 Subject: drivers-tty-pl011-irq-disable-madness.patch Signed-off-by: Thomas Gleixner diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 1440d0b..729198c 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1916,13 +1916,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_enable(uap->clk); - local_irq_save(flags); + /* + * local_irq_save(flags); + * + * This local_irq_save() is nonsense. If we come in via sysrq + * handling then interrupts are already disabled. Aside of + * that the port.sysrq check is racy on SMP regardless. 
+ */ if (uap->port.sysrq) locked = 0; else if (oops_in_progress) - locked = spin_trylock(&uap->port.lock); + locked = spin_trylock_irqsave(&uap->port.lock, flags); else - spin_lock(&uap->port.lock); + spin_lock_irqsave(&uap->port.lock, flags); /* * First save the CR then disable the interrupts @@ -1944,8 +1950,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) writew(old_cr, uap->port.membase + UART011_CR); if (locked) - spin_unlock(&uap->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&uap->port.lock, flags); clk_disable(uap->clk); } -- cgit v0.10.2 From 83e3597c4eba0a2f56c3f296bad875d163929cbd Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 14 Dec 2011 13:05:54 +0100 Subject: rt: Improve the serial console PASS_LIMIT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Beyond the warning: drivers/tty/serial/8250/8250.c:1613:6: warning: unused variable ‘pass_counter’ [-Wunused-variable] the solution of just looping infinitely was ugly - up it to 1 million to give it a chance to continue in some really ugly situation. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index f8fde64..73a05af 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -80,7 +80,16 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ #define DEBUG_INTR(fmt...) do { } while (0) #endif -#define PASS_LIMIT 512 +/* + * On -rt we can have a more delays, and legitimately + * so - so don't drop work spuriously and spam the + * syslog: + */ +#ifdef CONFIG_PREEMPT_RT_FULL +# define PASS_LIMIT 1000000 +#else +# define PASS_LIMIT 512 +#endif #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) -- cgit v0.10.2 From de92ac57a46708314fd323475d2351ad485a8156 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 19 Jul 2009 08:44:27 -0500 Subject: fs: namespace preemption fix On RT we cannot loop with preemption disabled here as mnt_make_readonly() might have been preempted. We can safely enable preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT as well. Signed-off-by: Thomas Gleixner diff --git a/fs/namespace.c b/fs/namespace.c index 84447db..6ed0fb1 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -315,8 +315,11 @@ int __mnt_want_write(struct vfsmount *m) * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); - while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) + while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { + preempt_enable(); cpu_relax(); + preempt_disable(); + } /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. 
So we must not load that until -- cgit v0.10.2 From 03ca605b77fefb3ca3453a81eaf9472d893d0efe Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Tue, 15 May 2012 13:53:56 +0800 Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt() User preempt_*_rt instead of local_irq_*_rt or otherwise there will be warning on ARM like below: WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264() Modules linked in: [] (unwind_backtrace+0x0/0xe4) from [] (warn_slowpath_common+0x4c/0x64) [] (warn_slowpath_common+0x4c/0x64) from [] (warn_slowpath_null+0x18/0x1c) [] (warn_slowpath_null+0x18/0x1c) from [](smp_call_function_many+0x98/0x264) [] (smp_call_function_many+0x98/0x264) from [] (smp_call_function+0x44/0x6c) [] (smp_call_function+0x44/0x6c) from [] (__new_context+0xbc/0x124) [] (__new_context+0xbc/0x124) from [] (flush_old_exec+0x460/0x5e4) [] (flush_old_exec+0x460/0x5e4) from [] (load_elf_binary+0x2e0/0x11ac) [] (load_elf_binary+0x2e0/0x11ac) from [] (search_binary_handler+0x94/0x2a4) [] (search_binary_handler+0x94/0x2a4) from [] (do_execve+0x254/0x364) [] (do_execve+0x254/0x364) from [] (sys_execve+0x34/0x54) [] (sys_execve+0x34/0x54) from [] (ret_fast_syscall+0x0/0x30) ---[ end trace 0000000000000002 ]--- The reason is that ARM need irq enabled when doing activate_mm(). According to mm-protect-activate-switch-mm.patch, actually preempt_[disable|enable]_rt() is sufficient. Inspired-by: Steven Rostedt Signed-off-by: Yong Zhang Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com Signed-off-by: Thomas Gleixner diff --git a/fs/exec.c b/fs/exec.c index bb8afc1..eb02518 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -840,10 +840,12 @@ static int exec_mmap(struct mm_struct *mm) } } task_lock(tsk); + preempt_disable_rt(); active_mm = tsk->active_mm; tsk->mm = mm; tsk->active_mm = mm; activate_mm(active_mm, mm); + preempt_enable_rt(); task_unlock(tsk); arch_pick_mmap_layout(mm); if (old_mm) { diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 8a8cd02..adfce87 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm) struct task_struct *tsk = current; task_lock(tsk); + preempt_disable_rt(); active_mm = tsk->active_mm; if (active_mm != mm) { atomic_inc(&mm->mm_count); @@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm) } tsk->mm = mm; switch_mm(active_mm, mm, tsk); + preempt_enable_rt(); task_unlock(tsk); if (active_mm != mm) -- cgit v0.10.2 From 760e7cdee7f2b8d37783aa0e47b4ef7d469953af Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 14 Jun 2011 17:05:09 +0200 Subject: fs-block-rt-support.patch Signed-off-by: Thomas Gleixner diff --git a/block/blk-core.c b/block/blk-core.c index 4c7ee30..f703f97 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -232,7 +232,7 @@ EXPORT_SYMBOL(blk_delay_queue); **/ void blk_start_queue(struct request_queue *q) { - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); __blk_run_queue(q); -- cgit v0.10.2 From 3db80947b547c8f76582f02811349f7573dee831 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Fri, 3 Jul 2009 08:44:12 -0500 Subject: fs: ntfs: disable interrupt only on !RT On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote: > * Nick Piggin wrote: > > > > [10138.175796] [] show_trace+0x12/0x14 > > > [10138.180291] [] dump_stack+0x16/0x18 > > > [10138.184769] [] native_smp_call_function_mask+0x138/0x13d > > > [10138.191117] [] smp_call_function+0x1e/0x24 > > > [10138.196210] [] 
on_each_cpu+0x25/0x50 > > > [10138.200807] [] flush_tlb_all+0x1e/0x20 > > > [10138.205553] [] kmap_high+0x1b6/0x417 > > > [10138.210118] [] kmap+0x4d/0x4f > > > [10138.214102] [] ntfs_end_buffer_async_read+0x228/0x2f9 > > > [10138.220163] [] end_bio_bh_io_sync+0x26/0x3f > > > [10138.225352] [] bio_endio+0x42/0x6d > > > [10138.229769] [] __end_that_request_first+0x115/0x4ac > > > [10138.235682] [] end_that_request_chunk+0x8/0xa > > > [10138.241052] [] ide_end_request+0x55/0x10a > > > [10138.246058] [] ide_dma_intr+0x6f/0xac > > > [10138.250727] [] ide_intr+0x93/0x1e0 > > > [10138.255125] [] handle_IRQ_event+0x5c/0xc9 > > > > Looks like ntfs is kmap()ing from interrupt context. Should be using > > kmap_atomic instead, I think. > > it's not atomic interrupt context but irq thread context - and -rt > remaps kmap_atomic() to kmap() internally. Hm. Looking at the change to mm/bounce.c, perhaps I should do this instead? Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 98ec102..33f0d86 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) recs = PAGE_CACHE_SIZE / rec_size; /* Should have been verified before we got here... */ BUG_ON(!recs); - local_irq_save(flags); + local_irq_save_nort(flags); kaddr = kmap_atomic(page); for (i = 0; i < recs; i++) post_read_mst_fixup((NTFS_RECORD*)(kaddr + i * rec_size), rec_size); kunmap_atomic(kaddr); - local_irq_restore(flags); + local_irq_restore_nort(flags); flush_dcache_page(page); if (likely(page_uptodate && !PageError(page))) SetPageUptodate(page); -- cgit v0.10.2 From 36264b24dae7b283a7a67718a2112efc4138f853 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 11 Jul 2012 22:05:20 +0000 Subject: fs, jbd: pull your plug when waiting for space With an -rt kernel, and a heavy sync IO load, tasks can jam up on journal locks without unplugging, which can lead to terminal IO starvation. Unplug and schedule when waiting for space. Signed-off-by: Mike Galbraith Cc: Steven Rostedt Cc: Theodore Tso Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net Signed-off-by: Thomas Gleixner diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 08c0304..95debd7 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *journal) if (journal->j_flags & JFS_ABORT) return; spin_unlock(&journal->j_state_lock); + if (current->plug) + io_schedule(); mutex_lock(&journal->j_checkpoint_mutex); /* -- cgit v0.10.2 From 578bfff9b7013e30b60649b24b75ab79c634525f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 17 Feb 2014 17:30:03 +0100 Subject: fs: jbd2: pull your plug when waiting for space Two cps in parallel managed to stall the the ext4 fs. It seems that journal code is either waiting for locks or sleeping waiting for something to happen. This seems similar to what Mike observed on ext3, here is his description: |With an -rt kernel, and a heavy sync IO load, tasks can jam |up on journal locks without unplugging, which can lead to |terminal IO starvation. Unplug and schedule when waiting |for space. 
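As a minimal sketch of the pattern, assuming the journal_t field names from the diff and <linux/sched.h>/<linux/blkdev.h> for current->plug and io_schedule() (the real function keeps all of its checkpointing and retry logic):

static void wait_for_space_sketch(journal_t *journal)
{
        /*
         * If this task still has bios queued on its per-task plug list,
         * go through io_schedule(): it calls blk_flush_plug() and yields,
         * so the I/O we are about to wait for is actually submitted
         * before we block on the checkpoint mutex.
         */
        if (current->plug)
                io_schedule();

        mutex_lock(&journal->j_checkpoint_mutex);
        /* ... checkpoint and retry logic elided ... */
        mutex_unlock(&journal->j_checkpoint_mutex);
}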
Cc: stable-rt@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 7f34f47..8c40400 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -125,6 +125,8 @@ void __jbd2_log_wait_for_space(journal_t *journal) if (journal->j_flags & JBD2_ABORT) return; write_unlock(&journal->j_state_lock); + if (current->plug) + io_schedule(); mutex_lock(&journal->j_checkpoint_mutex); /* -- cgit v0.10.2 From ba989baf3928a0c76976f81af14c7d192b4686ad Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 26 Oct 2013 00:01:58 +0200 Subject: lockref: disable 64bit cmpxchg optimization on RT One of the requirements is that the lock has to fit in an u32 which does not work with the sleeping locks. We would have to use the bare raw locks for this and I would like to avoid this. Signed-off-by: Sebastian Andrzej Siewior diff --git a/lib/Kconfig b/lib/Kconfig index b3c8be0..c742a2d 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -57,6 +57,7 @@ config CMPXCHG_LOCKREF depends on !GENERIC_LOCKBREAK depends on !DEBUG_SPINLOCK depends on !DEBUG_LOCK_ALLOC + depends on !PREEMPT_RT_BASE config CRC_CCITT tristate "CRC-CCITT functions" -- cgit v0.10.2 From df36f3d72f5dcb3b838e2e8cce042cd5f1e25ddc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 13 Dec 2010 16:33:39 +0100 Subject: x86: Convert mce timer to hrtimer mce_timer is started in atomic contexts of cpu bringup. This results in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to avoid this. Signed-off-by: Thomas Gleixner diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index b3218cd..534b46b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -1268,7 +1269,7 @@ void mce_log_therm_throt_event(__u64 status) static unsigned long check_interval = 5 * 60; /* 5 minutes */ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ -static DEFINE_PER_CPU(struct timer_list, mce_timer); +static DEFINE_PER_CPU(struct hrtimer, mce_timer); static unsigned long mce_adjust_timer_default(unsigned long interval) { @@ -1278,13 +1279,10 @@ static unsigned long mce_adjust_timer_default(unsigned long interval) static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; -static void mce_timer_fn(unsigned long data) +static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) { - struct timer_list *t = &__get_cpu_var(mce_timer); unsigned long iv; - WARN_ON(smp_processor_id() != data); - if (mce_available(__this_cpu_ptr(&cpu_info))) { machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks)); @@ -1305,9 +1303,10 @@ static void mce_timer_fn(unsigned long data) __this_cpu_write(mce_next_interval, iv); /* Might have become 0 after CMCI storm subsided */ if (iv) { - t->expires = jiffies + iv; - add_timer_on(t, smp_processor_id()); + hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_usecs(iv))); + return HRTIMER_RESTART; } + return HRTIMER_NORESTART; } /* @@ -1315,28 +1314,37 @@ static void mce_timer_fn(unsigned long data) */ void mce_timer_kick(unsigned long interval) { - struct timer_list *t = &__get_cpu_var(mce_timer); - unsigned long when = jiffies + interval; + struct hrtimer *t = &__get_cpu_var(mce_timer); unsigned long iv = __this_cpu_read(mce_next_interval); - if (timer_pending(t)) { - if (time_before(when, t->expires)) - mod_timer_pinned(t, when); + if (hrtimer_active(t)) { + s64 exp; + 
s64 intv_us; + + intv_us = jiffies_to_usecs(interval); + exp = ktime_to_us(hrtimer_expires_remaining(t)); + if (intv_us < exp) { + hrtimer_cancel(t); + hrtimer_start_range_ns(t, + ns_to_ktime(intv_us * 1000), + 0, HRTIMER_MODE_REL_PINNED); + } } else { - t->expires = round_jiffies(when); - add_timer_on(t, smp_processor_id()); + hrtimer_start_range_ns(t, + ns_to_ktime(jiffies_to_usecs(interval) * 1000), + 0, HRTIMER_MODE_REL_PINNED); } if (interval < iv) __this_cpu_write(mce_next_interval, interval); } -/* Must not be called in IRQ context where del_timer_sync() can deadlock */ +/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */ static void mce_timer_delete_all(void) { int cpu; for_each_online_cpu(cpu) - del_timer_sync(&per_cpu(mce_timer, cpu)); + hrtimer_cancel(&per_cpu(mce_timer, cpu)); } static void mce_do_trigger(struct work_struct *work) @@ -1636,7 +1644,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) } } -static void mce_start_timer(unsigned int cpu, struct timer_list *t) +static void mce_start_timer(unsigned int cpu, struct hrtimer *t) { unsigned long iv = mce_adjust_timer(check_interval * HZ); @@ -1645,16 +1653,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t) if (mca_cfg.ignore_ce || !iv) return; - t->expires = round_jiffies(jiffies + iv); - add_timer_on(t, smp_processor_id()); + hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000), + 0, HRTIMER_MODE_REL_PINNED); } static void __mcheck_cpu_init_timer(void) { - struct timer_list *t = &__get_cpu_var(mce_timer); + struct hrtimer *t = &__get_cpu_var(mce_timer); unsigned int cpu = smp_processor_id(); - setup_timer(t, mce_timer_fn, cpu); + hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + t->function = mce_timer_fn; mce_start_timer(cpu, t); } @@ -2329,6 +2338,8 @@ static void mce_disable_cpu(void *h) if (!mce_available(__this_cpu_ptr(&cpu_info))) return; + hrtimer_cancel(&__get_cpu_var(mce_timer)); + if (!(action & CPU_TASKS_FROZEN)) cmci_clear(); for (i = 0; i < mca_cfg.banks; i++) { @@ -2355,6 +2366,7 @@ static void mce_reenable_cpu(void *h) if (b->init) wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); } + __mcheck_cpu_init_timer(); } /* Get notified when a cpu comes on/off. Be hotplug friendly. */ @@ -2362,7 +2374,6 @@ static int mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct timer_list *t = &per_cpu(mce_timer, cpu); switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: @@ -2378,11 +2389,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, mce_disable_cpu, &action, 1); - del_timer_sync(t); break; case CPU_DOWN_FAILED: smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); - mce_start_timer(cpu, t); break; } -- cgit v0.10.2 From df6508878d3996ca0c78a4330498add799c28f7d Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 29 May 2013 13:52:13 +0200 Subject: x86/mce: fix mce timer interval Seems mce timer fire at the wrong frequency in -rt kernels since roughly forever due to 32 bit overflow. 3.8-rt is also missing a multiplier. Add missing us -> ns conversion and 32 bit overflow prevention. 
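The wrap is easy to reproduce in plain userspace C; this standalone illustration (not kernel code) mimics jiffies_to_usecs() returning an unsigned int for the default 5 minute check interval:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int usecs = 5u * 60u * 1000000u;  /* 5 minutes in microseconds */
        uint64_t wrong = usecs * 1000;             /* 32-bit multiply, wraps */
        uint64_t right = usecs * 1000ULL;          /* 64-bit multiply, as in the fix */

        printf("wrong: %llu ns (~%llu s)\n", (unsigned long long)wrong,
               (unsigned long long)(wrong / 1000000000ULL));
        printf("right: %llu ns (~%llu s)\n", (unsigned long long)right,
               (unsigned long long)(right / 1000000000ULL));
        return 0;
}

With the 32-bit multiply a 300 second interval collapses to roughly 3.6 seconds, which is the wrong firing frequency described above; the ULL suffix forces the multiplication into 64 bit before ns_to_ktime() sees the value.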
Cc: stable-rt@vger.kernel.org Signed-off-by: Mike Galbraith [bigeasy: use ULL instead of u64 cast] Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 534b46b..933a842 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1303,7 +1303,8 @@ static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) __this_cpu_write(mce_next_interval, iv); /* Might have become 0 after CMCI storm subsided */ if (iv) { - hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_usecs(iv))); + hrtimer_forward_now(timer, ns_to_ktime( + jiffies_to_usecs(iv) * 1000ULL)); return HRTIMER_RESTART; } return HRTIMER_NORESTART; @@ -1331,7 +1332,7 @@ void mce_timer_kick(unsigned long interval) } } else { hrtimer_start_range_ns(t, - ns_to_ktime(jiffies_to_usecs(interval) * 1000), + ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL), 0, HRTIMER_MODE_REL_PINNED); } if (interval < iv) @@ -1653,7 +1654,7 @@ static void mce_start_timer(unsigned int cpu, struct hrtimer *t) if (mca_cfg.ignore_ce || !iv) return; - hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000), + hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), 0, HRTIMER_MODE_REL_PINNED); } -- cgit v0.10.2 From e1e38268de0ea7a7ac27de362b155e944e411259 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 11 Apr 2013 14:33:34 -0400 Subject: x86/mce: Defer mce wakeups to threads for PREEMPT_RT We had a customer report a lockup on a 3.0-rt kernel that had the following backtrace: [ffff88107fca3e80] rt_spin_lock_slowlock at ffffffff81499113 [ffff88107fca3f40] rt_spin_lock at ffffffff81499a56 [ffff88107fca3f50] __wake_up at ffffffff81043379 [ffff88107fca3f80] mce_notify_irq at ffffffff81017328 [ffff88107fca3f90] intel_threshold_interrupt at ffffffff81019508 [ffff88107fca3fa0] smp_threshold_interrupt at ffffffff81019fc1 [ffff88107fca3fb0] threshold_interrupt at ffffffff814a1853 It actually bugged because the lock was taken by the same owner that already had that lock. What happened was the thread that was setting itself on a wait queue had the lock when an MCE triggered. The MCE interrupt does a wake up on its wait list and grabs the same lock. NOTE: THIS IS NOT A BUG ON MAINLINE Sorry for yelling, but as I Cc'd mainline maintainers I want them to know that this is an PREEMPT_RT bug only. I only Cc'd them for advice. On PREEMPT_RT the wait queue locks are converted from normal "spin_locks" into an rt_mutex (see the rt_spin_lock_slowlock above). These are not to be taken by hard interrupt context. This usually isn't a problem as most all interrupts in PREEMPT_RT are converted into schedulable threads. Unfortunately that's not the case with the MCE irq. As wait queue locks are notorious for long hold times, we can not convert them to raw_spin_locks without causing issues with -rt. But Thomas has created a "simple-wait" structure that uses raw spin locks which may have been a good fit. Unfortunately, wait queues are not the only issue, as the mce_notify_irq also does a schedule_work(), which grabs the workqueue spin locks that have the exact same issue. Thus, this patch I'm proposing is to move the actual work of the MCE interrupt into a helper thread that gets woken up on the MCE interrupt and does the work in a schedulable context. NOTE: THIS PATCH ONLY CHANGES THE BEHAVIOR WHEN PREEMPT_RT IS SET Oops, sorry for yelling again, but I want to stress that I keep the same behavior of mainline when PREEMPT_RT is not set. 
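The deferral itself is the standard wake-a-helper-kthread pattern; a compressed sketch with illustrative names (the actual mce.c symbols are in the diff below), assuming <linux/kthread.h>, <linux/sched.h> and <linux/err.h>:

static struct task_struct *notify_helper;

static int notify_helper_thread(void *unused)
{
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                if (kthread_should_stop())
                        break;
                /*
                 * Process context: wait-queue wakeups, schedule_work()
                 * and ratelimited printk may all take sleeping locks here.
                 */
                do_notify_work();       /* hypothetical stand-in for the old irq-time work */
        }
        return 0;
}

/* Hard interrupt context: nothing but a wake-up, no sleeping locks. */
static void notify_from_irq(void)
{
        wake_up_process(notify_helper);
}

static int __init notify_helper_init(void)
{
        notify_helper = kthread_run(notify_helper_thread, NULL, "mce-notify");
        return IS_ERR(notify_helper) ? PTR_ERR(notify_helper) : 0;
}

wake_up_process() is safe from hard interrupt context, which is the whole point of the indirection.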
Thus, this only changes the MCE behavior when PREEMPT_RT is configured. Signed-off-by: Steven Rostedt [bigeasy@linutronix: make mce_notify_work() a proper prototype, use kthread_run()] Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 933a842..3a7ab0b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -1355,6 +1356,63 @@ static void mce_do_trigger(struct work_struct *work) static DECLARE_WORK(mce_trigger_work, mce_do_trigger); +static void __mce_notify_work(void) +{ + /* Not more than two messages every minute */ + static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); + + /* wake processes polling /dev/mcelog */ + wake_up_interruptible(&mce_chrdev_wait); + + /* + * There is no risk of missing notifications because + * work_pending is always cleared before the function is + * executed. + */ + if (mce_helper[0] && !work_pending(&mce_trigger_work)) + schedule_work(&mce_trigger_work); + + if (__ratelimit(&ratelimit)) + pr_info(HW_ERR "Machine check events logged\n"); +} + +#ifdef CONFIG_PREEMPT_RT_FULL +struct task_struct *mce_notify_helper; + +static int mce_notify_helper_thread(void *unused) +{ + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + if (kthread_should_stop()) + break; + __mce_notify_work(); + } + return 0; +} + +static int mce_notify_work_init(void) +{ + mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL, + "mce-notify"); + if (!mce_notify_helper) + return -ENOMEM; + + return 0; +} + +static void mce_notify_work(void) +{ + wake_up_process(mce_notify_helper); +} +#else +static void mce_notify_work(void) +{ + __mce_notify_work(); +} +static inline int mce_notify_work_init(void) { return 0; } +#endif + /* * Notify the user(s) about new machine check events. * Can be called from interrupt context, but not from machine check/NMI @@ -1362,19 +1420,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger); */ int mce_notify_irq(void) { - /* Not more than two messages every minute */ - static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); - if (test_and_clear_bit(0, &mce_need_notify)) { - /* wake processes polling /dev/mcelog */ - wake_up_interruptible(&mce_chrdev_wait); - - if (mce_helper[0]) - schedule_work(&mce_trigger_work); - - if (__ratelimit(&ratelimit)) - pr_info(HW_ERR "Machine check events logged\n"); - + mce_notify_work(); return 1; } return 0; @@ -2454,6 +2501,8 @@ static __init int mcheck_init_device(void) /* register character device /dev/mcelog */ misc_register(&mce_chrdev_device); + err = mce_notify_work_init(); + return err; } device_initcall_sync(mcheck_init_device); -- cgit v0.10.2 From 85c416cd2bc39b9227692e73507e6aaefc450afe Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 16 Dec 2010 14:25:18 +0100 Subject: x86: stackprotector: Avoid random pool on rt CPU bringup calls into the random pool to initialize the stack canary. During boot that works nicely even on RT as the might sleep checks are disabled. During CPU hotplug the might sleep checks trigger. Making the locks in random raw is a major PITA, so avoid the call on RT is the only sensible solution. This is basically the same randomness which we get during boot where the random pool has no entropy and we rely on the TSC randomnness. 
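What remains on RT is the TSC mixing the function already does; as a sketch (read_tsc() stands in for __native_read_tsc(), and the canary is initialised to zero here for clarity - upstream leaves it uninitialized so whatever happens to be on the stack adds a little extra entropy):

static u64 rt_boot_canary(void)
{
        u64 canary = 0;                 /* get_random_bytes() skipped on RT */
        u64 tsc = read_tsc();           /* hypothetical rdtsc wrapper */

        canary += tsc + (tsc << 32UL);  /* same mixing as boot_init_stack_canary() */
        return canary;
}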
Reported-by: Carsten Emde Signed-off-by: Thomas Gleixner diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 6a99859..64fb5cb 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -57,7 +57,7 @@ */ static __always_inline void boot_init_stack_canary(void) { - u64 canary; + u64 uninitialized_var(canary); u64 tsc; #ifdef CONFIG_X86_64 @@ -68,8 +68,16 @@ static __always_inline void boot_init_stack_canary(void) * of randomness. The TSC only matters for very early init, * there it already has some randomness on most systems. Later * on during the bootup the random pool has true entropy too. + * + * For preempt-rt we need to weaken the randomness a bit, as + * we can't call into the random generator from atomic context + * due to locking constraints. We just leave canary + * uninitialized and use the TSC based randomness on top of + * it. */ +#ifndef CONFIG_PREEMPT_RT_FULL get_random_bytes(&canary, sizeof(canary)); +#endif tsc = __native_read_tsc(); canary += tsc + (tsc << 32UL); -- cgit v0.10.2 From 05b36fd9529169aad2cdc5437644f2b72eee0e8e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 26 Jul 2009 02:21:32 +0200 Subject: x86: Use generic rwsem_spinlocks on -rt Simplifies the separation of anon_rw_semaphores and rw_semaphores for -rt. Signed-off-by: Thomas Gleixner diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f67e839..7a6b720 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -179,8 +179,11 @@ config ARCH_MAY_HAVE_PC_FDC def_bool y depends on ISA_DMA_API +config RWSEM_GENERIC_SPINLOCK + def_bool PREEMPT_RT_FULL + config RWSEM_XCHGADD_ALGORITHM - def_bool y + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL config GENERIC_CALIBRATE_DELAY def_bool y @@ -470,7 +473,7 @@ config X86_MDFLD select MFD_INTEL_MSIC ---help--- Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin - Internet Device(MID) platform. + Internet Device(MID) platform. Unlike standard x86 PCs, Medfield does not have many legacy devices nor standard legacy replacement devices/features. e.g. Medfield does not contain i8259, i8254, HPET, legacy BIOS, most of the io ports. -- cgit v0.10.2 From f6418898d37e7db8aa8ff32c2b12a6f3a3789d39 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 3 Jul 2009 08:44:10 -0500 Subject: x86: Disable IST stacks for debug/int 3/stack fault for PREEMPT_RT Normally the x86-64 trap handlers for debug/int 3/stack fault run on a special interrupt stack to make them more robust when dealing with kernel code. The PREEMPT_RT kernel can sleep in locks even while allocating GFP_ATOMIC memory. When one of these trap handlers needs to send real time signals for ptrace it allocates memory and could then try to to schedule. But it is not allowed to schedule on a IST stack. This can cause warnings and hangs. This patch disables the IST stacks for these handlers for PREEMPT_RT kernel. Instead let them run on the normal process stack. The kernel only really needs the ISTs here to make kernel debuggers more robust in case someone sets a break point somewhere where the stack is invalid. But there are no kernel debuggers in the standard kernel that do this. It also means kprobes cannot be set in situations with invalid stack; but that sounds like a reasonable restriction. The stack fault change could minimally impact oops quality, but not very much because stack faults are fairly rare. 
A better solution would be to use similar logic as the NMI "paranoid" path: check if signal is for user space, if yes go back to entry.S, switch stack, call sync_regs, then do the signal sending etc. But this patch is much simpler and should work too with minimal impact. Signed-off-by: Andi Kleen Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 43dcd80..695e04d 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -14,12 +14,21 @@ #define IRQ_STACK_ORDER 2 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) -#define STACKFAULT_STACK 1 -#define DOUBLEFAULT_STACK 2 -#define NMI_STACK 3 -#define DEBUG_STACK 4 -#define MCE_STACK 5 -#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#ifdef CONFIG_PREEMPT_RT_FULL +# define STACKFAULT_STACK 0 +# define DOUBLEFAULT_STACK 1 +# define NMI_STACK 2 +# define DEBUG_STACK 0 +# define MCE_STACK 3 +# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */ +#else +# define STACKFAULT_STACK 1 +# define DOUBLEFAULT_STACK 2 +# define NMI_STACK 3 +# define DEBUG_STACK 4 +# define MCE_STACK 5 +# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#endif #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3533e2c..d4e8eb8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1110,7 +1110,9 @@ DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); */ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, +#if DEBUG_STACK > 0 [DEBUG_STACK - 1] = DEBUG_STKSZ +#endif }; static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index addb207..52b4bcd 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -21,10 +21,14 @@ (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) static char x86_stack_ids[][8] = { +#if DEBUG_STACK > 0 [ DEBUG_STACK-1 ] = "#DB", +#endif [ NMI_STACK-1 ] = "NMI", [ DOUBLEFAULT_STACK-1 ] = "#DF", +#if STACKFAULT_STACK > 0 [ STACKFAULT_STACK-1 ] = "#SS", +#endif [ MCE_STACK-1 ] = "#MC", #if DEBUG_STKSZ > EXCEPTION_STKSZ [ N_EXCEPTION_STACKS ... -- cgit v0.10.2 From 6f72edd223def8b879b10745239b9b7e2f16788f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 3 Jan 2014 14:55:48 +0100 Subject: Revert "x86: Disable IST stacks for debug/int 3/stack fault for PREEMPT_RT" where do I start. Let me explain what is going on here. The code sequence | pushf | pop %edx | or $0x1,%dh | push %edx | mov $0xe0,%eax | popf | sysenter triggers the bug. On 64bit kernel we see the double fault (with 32bit and 64bit userland) and on 32bit kernel there is no problem. The reporter said that double fault does not happen on 64bit kernel with 64bit userland and this is because in that case the VDSO uses the "syscall" interface instead of "sysenter". The bug. "popf" loads the flags with the TF bit set which enables "single stepping" and this leads to a debug exception. Usually on 64bit we have a special IST stack for the debug exception. Due to patch [0] we do not use the IST stack but the kernel stack instead. On 64bit the sysenter instruction starts in kernel with the stack address NULL. 
The code sequence above enters the debug exception (TF flag) after the sysenter instruction was executed which sets the stack pointer to NULL and we have a fault (it seems that the debug exception saves some bytes on the stack). To fix the double fault I'm going to drop patch [0]. It is completely pointless. In do_debug() and do_stack_segment() we disable preemption which means the task can't leave the CPU. So it does not matter if we run on IST or on kernel stack. There is a patch [1] which drops preempt_disable() call for a 32bit kernel but not for 64bit so there should be no regression. And [1] seems valid even for this code sequence. We enter the debug exception with a 256bytes long per cpu stack and migrate to the kernel stack before calling do_debug(). [0] x86-disable-debug-stack.patch [1] fix-rt-int3-x86_32-3.2-rt.patch Cc: stable-rt@vger.kernel.org Reported-by: Brian Silverman Cc: Andi Kleen Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 695e04d..43dcd80 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -14,21 +14,12 @@ #define IRQ_STACK_ORDER 2 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) -#ifdef CONFIG_PREEMPT_RT_FULL -# define STACKFAULT_STACK 0 -# define DOUBLEFAULT_STACK 1 -# define NMI_STACK 2 -# define DEBUG_STACK 0 -# define MCE_STACK 3 -# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */ -#else -# define STACKFAULT_STACK 1 -# define DOUBLEFAULT_STACK 2 -# define NMI_STACK 3 -# define DEBUG_STACK 4 -# define MCE_STACK 5 -# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ -#endif +#define STACKFAULT_STACK 1 +#define DOUBLEFAULT_STACK 2 +#define NMI_STACK 3 +#define DEBUG_STACK 4 +#define MCE_STACK 5 +#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d4e8eb8..3533e2c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1110,9 +1110,7 @@ DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); */ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, -#if DEBUG_STACK > 0 [DEBUG_STACK - 1] = DEBUG_STKSZ -#endif }; static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 52b4bcd..addb207 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -21,14 +21,10 @@ (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) static char x86_stack_ids[][8] = { -#if DEBUG_STACK > 0 [ DEBUG_STACK-1 ] = "#DB", -#endif [ NMI_STACK-1 ] = "NMI", [ DOUBLEFAULT_STACK-1 ] = "#DF", -#if STACKFAULT_STACK > 0 [ STACKFAULT_STACK-1 ] = "#SS", -#endif [ MCE_STACK-1 ] = "#MC", #if DEBUG_STKSZ > EXCEPTION_STKSZ [ N_EXCEPTION_STACKS ... 
-- cgit v0.10.2 From c5c2cb498c4b1f3ea880025199f0016a734c027f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Jul 2011 16:35:35 +0200 Subject: epoll.patch Signed-off-by: Thomas Gleixner diff --git a/fs/eventpoll.c b/fs/eventpoll.c index d76c974..9691a6e 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests) */ static void ep_poll_safewake(wait_queue_head_t *wq) { - int this_cpu = get_cpu(); + int this_cpu = get_cpu_light(); ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); - put_cpu(); + put_cpu_light(); } static void ep_remove_wait_queue(struct eppoll_entry *pwq) -- cgit v0.10.2 From a98f37c17e021ac4bf1bfa589a40a32ffa117a03 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Jul 2011 11:39:36 +0200 Subject: mm-vmalloc.patch Signed-off-by: Thomas Gleixner diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 1074543..d64289d 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -790,7 +790,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask) struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; - int node, err; + int node, err, cpu; node = numa_node_id(); @@ -828,11 +828,12 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask) BUG_ON(err); radix_tree_preload_end(); - vbq = &get_cpu_var(vmap_block_queue); + cpu = get_cpu_light(); + vbq = &__get_cpu_var(vmap_block_queue); spin_lock(&vbq->lock); list_add_rcu(&vb->free_list, &vbq->free); spin_unlock(&vbq->lock); - put_cpu_var(vmap_block_queue); + put_cpu_light(); return vb; } @@ -900,6 +901,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) struct vmap_block *vb; unsigned long addr = 0; unsigned int order; + int cpu = 0; BUG_ON(size & ~PAGE_MASK); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); @@ -915,7 +917,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) again: rcu_read_lock(); - vbq = &get_cpu_var(vmap_block_queue); + cpu = get_cpu_light(); + vbq = &__get_cpu_var(vmap_block_queue); list_for_each_entry_rcu(vb, &vbq->free, free_list) { int i; @@ -939,7 +942,7 @@ next: spin_unlock(&vb->lock); } - put_cpu_var(vmap_block_queue); + put_cpu_light(); rcu_read_unlock(); if (!addr) { -- cgit v0.10.2 From dfb845d5e9b283cf0c189d6f289722f5b67ddd1a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 20:51:03 +0100 Subject: rt: Introduce cpu_chill() Retry loops on RT might loop forever when the modifying side was preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill() defaults to cpu_relax() for non RT. On RT it puts the looping task to sleep for a tick so the preempted task can make progress. Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/include/linux/delay.h b/include/linux/delay.h index a6ecb34..e23a7c0 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds) msleep(seconds * 1000); } +#ifdef CONFIG_PREEMPT_RT_FULL +# define cpu_chill() msleep(1) +#else +# define cpu_chill() cpu_relax() +#endif + #endif /* defined(_LINUX_DELAY_H) */ -- cgit v0.10.2 From 5fbd616799e3176f69a2389522581d1740c55419 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 5 Feb 2014 11:51:25 -0500 Subject: rt: Make cpu_chill() use hrtimer instead of msleep() Ulrich Obergfell pointed out that cpu_chill() calls msleep() which is woken up by the ksoftirqd running the TIMER softirq. 
But as the cpu_chill() is called from softirq context, it may block the ksoftirqd() from running, in which case, it may never wake up the msleep() causing the deadlock. I checked the vmcore, and irq/74-qla2xxx is stuck in the msleep() call, running on CPU 8. The one ksoftirqd that is stuck, happens to be the one that runs on CPU 8, and it is blocked on a lock held by irq/74-qla2xxx. As that ksoftirqd is the one that will wake up irq/74-qla2xxx, and it happens to be blocked on a lock that irq/74-qla2xxx holds, we have our deadlock. The solution is not to convert the cpu_chill() back to a cpu_relax() as that will re-create a possible live lock that the cpu_chill() fixed earlier, and may also leave this bug open on other softirqs. The fix is to remove the dependency on ksoftirqd from cpu_chill(). That is, instead of calling msleep() that requires ksoftirqd to wake it up, use the hrtimer_nanosleep() code that does the wakeup from hard irq context. |Looks to be the lock of the block softirq. I don't have the core dump |anymore, but from what I could tell the ksoftirqd was blocked on the |block softirq lock, where the block softirq handler did a msleep |(called by the qla2xxx interrupt handler). | |Looking at trigger_softirq() in block/blk-softirq.c, it can do a |smp_callfunction() to another cpu to run the block softirq. If that |happens to be the cpu where the qla2xx irq handler is doing the block |softirq and is in a middle of a msleep(), I believe the ksoftirqd will |try to run the softirq. If it does that, then BOOM, it's deadlocked |because the ksoftirqd will never run the timer softirq either. |I should have also stated that it was only one lock that was involved. |But the lock owner was doing a msleep() that requires a wakeup by |ksoftirqd to continue. If ksoftirqd happens to be blocked on a lock |held by the msleep() caller, then you have your deadlock. | |It's best not to have any softirqs going to sleep requiring another |softirq to wake it up. Note, if we ever require a timer softirq to do a |cpu_chill() it will most definitely hit this deadlock. Cc: stable-rt@vger.kernel.org Found-by: Ulrich Obergfell Signed-off-by: Steven Rostedt [bigeasy: add the 4 | chapters from email] Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/delay.h b/include/linux/delay.h index e23a7c0..37caab3 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -53,7 +53,7 @@ static inline void ssleep(unsigned int seconds) } #ifdef CONFIG_PREEMPT_RT_FULL -# define cpu_chill() msleep(1) +extern void cpu_chill(void); #else # define cpu_chill() cpu_relax() #endif diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 7aa442e..5c26d2c 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1890,6 +1890,21 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); } +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * Sleep for 1 ms in hope whoever holds what we want will let it go. + */ +void cpu_chill(void) +{ + struct timespec tu = { + .tv_nsec = NSEC_PER_MSEC, + }; + + hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); +} +EXPORT_SYMBOL(cpu_chill); +#endif + /* * Functions related to boot-time initialization: */ -- cgit v0.10.2 From bba67230e07214d2de9866d7283e8773a3ddd884 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 19 Feb 2014 11:56:06 +0100 Subject: kernel/hrtimer: be non-freezeable in cpu_chill() Since we replaced msleep() by hrtimer I see now and then (rarely) this: | [....] 
Waiting for /dev to be fully populated... | ===================================== | [ BUG: udevd/229 still has locks held! ] | 3.12.11-rt17 #23 Not tainted | ------------------------------------- | 1 lock held by udevd/229: | #0: (&type->i_mutex_dir_key#2){+.+.+.}, at: lookup_slow+0x28/0x98 | | stack backtrace: | CPU: 0 PID: 229 Comm: udevd Not tainted 3.12.11-rt17 #23 | (unwind_backtrace+0x0/0xf8) from (show_stack+0x10/0x14) | (show_stack+0x10/0x14) from (dump_stack+0x74/0xbc) | (dump_stack+0x74/0xbc) from (do_nanosleep+0x120/0x160) | (do_nanosleep+0x120/0x160) from (hrtimer_nanosleep+0x90/0x110) | (hrtimer_nanosleep+0x90/0x110) from (cpu_chill+0x30/0x38) | (cpu_chill+0x30/0x38) from (dentry_kill+0x158/0x1ec) | (dentry_kill+0x158/0x1ec) from (dput+0x74/0x15c) | (dput+0x74/0x15c) from (lookup_real+0x4c/0x50) | (lookup_real+0x4c/0x50) from (__lookup_hash+0x34/0x44) | (__lookup_hash+0x34/0x44) from (lookup_slow+0x38/0x98) | (lookup_slow+0x38/0x98) from (path_lookupat+0x208/0x7fc) | (path_lookupat+0x208/0x7fc) from (filename_lookup+0x20/0x60) | (filename_lookup+0x20/0x60) from (user_path_at_empty+0x50/0x7c) | (user_path_at_empty+0x50/0x7c) from (user_path_at+0x14/0x1c) | (user_path_at+0x14/0x1c) from (vfs_fstatat+0x48/0x94) | (vfs_fstatat+0x48/0x94) from (SyS_stat64+0x14/0x30) | (SyS_stat64+0x14/0x30) from (ret_fast_syscall+0x0/0x48) For now I see no better way but to disable the freezer the sleep the period. Cc: stable-rt@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 5c26d2c..083815d 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1899,8 +1899,12 @@ void cpu_chill(void) struct timespec tu = { .tv_nsec = NSEC_PER_MSEC, }; + unsigned int freeze_flag = current->flags & PF_NOFREEZE; + current->flags |= PF_NOFREEZE; hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); + if (!freeze_flag) + current->flags &= ~PF_NOFREEZE; } EXPORT_SYMBOL(cpu_chill); #endif -- cgit v0.10.2 From e72903255fc2c93f1b60ecb9e019e768ef9893a1 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 4 Mar 2014 12:28:32 -0500 Subject: cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep We hit another bug that was caused by switching cpu_chill() from msleep() to hrtimer_nanosleep(). This time it is a livelock. The problem is that hrtimer_nanosleep() calls schedule with the state == TASK_INTERRUPTIBLE. But these means that if a signal is pending, the scheduler wont schedule, and will simply change the current task state back to TASK_RUNNING. This nullifies the whole point of cpu_chill() in the first place. That is, if a task is spinning on a try_lock() and it preempted the owner of the lock, if it has a signal pending, it will never give up the CPU to let the owner of the lock run. I made a static function __hrtimer_nanosleep() that takes a fifth parameter "state", which determines the task state of that the nanosleep() will be in. The normal hrtimer_nanosleep() will act the same, but cpu_chill() will call the __hrtimer_nanosleep() directly with the TASK_UNINTERRUPTIBLE state. cpu_chill() only cares that the first sleep happens, and does not care about the state of the restart schedule (in hrtimer_nanosleep_restart). 
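The whole difference is the task state passed in before scheduling; a small sketch using schedule_hrtimeout() purely for illustration (the patch below threads the state through __hrtimer_nanosleep() instead), assuming <linux/hrtimer.h>, <linux/ktime.h> and <linux/sched.h>:

static void chill_once(long state)
{
        ktime_t delay = ktime_set(0, NSEC_PER_MSEC);    /* 1 ms */

        set_current_state(state);
        schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
}

/*
 * chill_once(TASK_INTERRUPTIBLE):   with a signal pending, schedule() puts
 *     the task straight back to TASK_RUNNING and returns without waiting
 *     for the timer - the spinner never yields to the preempted lock owner.
 * chill_once(TASK_UNINTERRUPTIBLE): always sleeps the full millisecond,
 *     which is what cpu_chill() needs.
 */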
Cc: stable-rt@vger.kernel.org Reported-by: Ulrich Obergfell Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 083815d..c19183d 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1769,12 +1769,13 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) } EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); -static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) +static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode, + unsigned long state) { hrtimer_init_sleeper(t, current); do { - set_current_state(TASK_INTERRUPTIBLE); + set_current_state(state); hrtimer_start_expires(&t->timer, mode); if (!hrtimer_active(&t->timer)) t->task = NULL; @@ -1818,7 +1819,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart) HRTIMER_MODE_ABS); hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); - if (do_nanosleep(&t, HRTIMER_MODE_ABS)) + /* cpu_chill() does not care about restart state. */ + if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE)) goto out; rmtp = restart->nanosleep.rmtp; @@ -1835,8 +1837,10 @@ out: return ret; } -long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, - const enum hrtimer_mode mode, const clockid_t clockid) +static long +__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, + const enum hrtimer_mode mode, const clockid_t clockid, + unsigned long state) { struct restart_block *restart; struct hrtimer_sleeper t; @@ -1849,7 +1853,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, hrtimer_init_on_stack(&t.timer, clockid, mode); hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); - if (do_nanosleep(&t, mode)) + if (do_nanosleep(&t, mode, state)) goto out; /* Absolute timers do not update the rmtp value and restart: */ @@ -1876,6 +1880,12 @@ out: return ret; } +long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, + const enum hrtimer_mode mode, const clockid_t clockid) +{ + return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE); +} + SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, struct timespec __user *, rmtp) { @@ -1902,7 +1912,8 @@ void cpu_chill(void) unsigned int freeze_flag = current->flags & PF_NOFREEZE; current->flags |= PF_NOFREEZE; - hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); + __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC, + TASK_UNINTERRUPTIBLE); if (!freeze_flag) current->flags &= ~PF_NOFREEZE; } -- cgit v0.10.2 From e960551addbad2c36f32ed55d84566250fe46b6e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 20 Dec 2012 18:28:26 +0100 Subject: block: Use cpu_chill() for retry loops Retry loops on RT might loop forever when the modifying side was preempted. Steven also observed a live lock when there was a concurrent priority boosting going on. Use cpu_chill() instead of cpu_relax() to let the system make progress. 
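The loops being converted all have the same shape; a sketch with the blk-ioc names from the diff, reduced to the bare pattern (the real functions do the actual icq release while both locks are held):

static void release_icq_sketch(struct io_context *ioc, struct request_queue *q)
{
        unsigned long flags;

retry:
        spin_lock_irqsave(&ioc->lock, flags);
        if (!spin_trylock(q->queue_lock)) {
                /* lock order is queue_lock -> ioc->lock, so back off and retry */
                spin_unlock_irqrestore(&ioc->lock, flags);
                cpu_chill();            /* was cpu_relax(); sleeps ~1ms on RT */
                goto retry;
        }
        /* ... both locks held, do the release ... */
        spin_unlock(q->queue_lock);
        spin_unlock_irqrestore(&ioc->lock, flags);
}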
Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 46cd7bd..899d3e4 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -8,6 +8,7 @@ #include #include /* for max_pfn/max_low_pfn */ #include +#include #include "blk.h" @@ -110,7 +111,7 @@ static void ioc_release_fn(struct work_struct *work) spin_unlock(q->queue_lock); } else { spin_unlock_irqrestore(&ioc->lock, flags); - cpu_relax(); + cpu_chill(); spin_lock_irqsave_nested(&ioc->lock, flags, 1); } } @@ -188,7 +189,7 @@ retry: spin_unlock(icq->q->queue_lock); } else { spin_unlock_irqrestore(&ioc->lock, flags); - cpu_relax(); + cpu_chill(); goto retry; } } -- cgit v0.10.2 From b4e35a44d3c268150f3803121e1d9ca1e5e9f998 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 21:00:34 +0100 Subject: fs: dcache: Use cpu_chill() in trylock loops Retry loops on RT might loop forever when the modifying side was preempted. Use cpu_chill() instead of cpu_relax() to let the system make progress. Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 3f1128b..c2ed9c4 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -34,6 +34,7 @@ #include #include #include +#include #include #include diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 3d9d3f5..b422ad6 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -157,7 +157,7 @@ again: parent = p->d_parent; if (!spin_trylock(&parent->d_lock)) { spin_unlock(&p->d_lock); - cpu_relax(); + cpu_chill(); goto relock; } spin_unlock(&p->d_lock); diff --git a/fs/dcache.c b/fs/dcache.c index 4021e01..ae2a83e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -554,7 +555,7 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure) relock: if (unlock_on_failure) { spin_unlock(&dentry->d_lock); - cpu_relax(); + cpu_chill(); } return dentry; /* try again with same dentry */ } @@ -2391,7 +2392,7 @@ again: if (dentry->d_lockref.count == 1) { if (!spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); - cpu_relax(); + cpu_chill(); goto again; } dentry->d_flags &= ~DCACHE_CANT_MOUNT; diff --git a/fs/namespace.c b/fs/namespace.c index 6ed0fb1..22cbfab 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "pnode.h" #include "internal.h" @@ -317,7 +318,7 @@ int __mnt_want_write(struct vfsmount *m) smp_mb(); while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { preempt_enable(); - cpu_relax(); + cpu_chill(); preempt_disable(); } /* -- cgit v0.10.2 From 75e5aaab8bf4f8a9eaf1e8fd119f8a3bbd74b7a5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 7 Mar 2012 21:10:04 +0100 Subject: net: Use cpu_chill() instead of cpu_relax() Retry loops on RT might loop forever when the modifying side was preempted. Use cpu_chill() instead of cpu_relax() to let the system make progress. Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 88cfbc1..d1705d0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include #include @@ -630,7 +631,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data) if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... 
*/ - cpu_relax(); + cpu_chill(); } } @@ -881,7 +882,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, if (!(status & TP_STATUS_BLK_TMO)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ - cpu_relax(); + cpu_chill(); } } prb_close_block(pkc, pbd, po, status); diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index e8fdb17..5a44c6e 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "rds.h" #include "ib.h" @@ -286,7 +287,7 @@ static inline void wait_clean_list_grace(void) for_each_online_cpu(cpu) { flag = &per_cpu(clean_list_grace, cpu); while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) - cpu_relax(); + cpu_chill(); } } -- cgit v0.10.2 From 846d732491e0131322bd472267f871fba2a4f971 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 24 Jul 2013 15:26:54 +0200 Subject: workqueue: Use normal rcu There is no need for sched_rcu. The undocumented reason why sched_rcu is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by abusing the fact that sched_rcu reader side critical sections are also protected by preempt or irq disabled regions. Signed-off-by: Thomas Gleixner diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 60fee69..385da2f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -129,11 +129,11 @@ enum { * * PL: wq_pool_mutex protected. * - * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. + * PR: wq_pool_mutex protected for writes. RCU protected for reads. * * WQ: wq->mutex protected. * - * WR: wq->mutex protected for writes. Sched-RCU protected for reads. + * WR: wq->mutex protected for writes. RCU protected for reads. * * MD: wq_mayday_lock protected. */ @@ -178,7 +178,7 @@ struct worker_pool { atomic_t nr_running ____cacheline_aligned_in_smp; /* - * Destruction of pool is sched-RCU protected to allow dereferences + * Destruction of pool is RCU protected to allow dereferences * from get_work_pool(). */ struct rcu_head rcu; @@ -207,7 +207,7 @@ struct pool_workqueue { /* * Release of unbound pwq is punted to system_wq. See put_pwq() * and pwq_unbound_release_workfn() for details. pool_workqueue - * itself is also sched-RCU protected so that the first pwq can be + * itself is also RCU protected so that the first pwq can be * determined without grabbing wq->mutex. */ struct work_struct unbound_release_work; @@ -331,14 +331,14 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to, #include #define assert_rcu_or_pool_mutex() \ - rcu_lockdep_assert(rcu_read_lock_sched_held() || \ + rcu_lockdep_assert(rcu_read_lock_held() || \ lockdep_is_held(&wq_pool_mutex), \ - "sched RCU or wq_pool_mutex should be held") + "RCU or wq_pool_mutex should be held") #define assert_rcu_or_wq_mutex(wq) \ - rcu_lockdep_assert(rcu_read_lock_sched_held() || \ + rcu_lockdep_assert(rcu_read_lock_held() || \ lockdep_is_held(&wq->mutex), \ - "sched RCU or wq->mutex should be held") + "RCU or wq->mutex should be held") #ifdef CONFIG_LOCKDEP #define assert_manager_or_pool_lock(pool) \ @@ -360,7 +360,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to, * @pool: iteration cursor * @pi: integer used for iteration * - * This must be called either with wq_pool_mutex held or sched RCU read + * This must be called either with wq_pool_mutex held or RCU read * locked. If the pool needs to be used beyond the locking in effect, the * caller is responsible for guaranteeing that the pool stays online. 
* @@ -393,7 +393,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to, * @pwq: iteration cursor * @wq: the target workqueue * - * This must be called either with wq->mutex held or sched RCU read locked. + * This must be called either with wq->mutex held or RCU read locked. * If the pwq needs to be used beyond the locking in effect, the caller is * responsible for guaranteeing that the pwq stays online. * @@ -541,7 +541,7 @@ static int worker_pool_assign_id(struct worker_pool *pool) * @wq: the target workqueue * @node: the node ID * - * This must be called either with pwq_lock held or sched RCU read locked. + * This must be called either with pwq_lock held or RCU read locked. * If the pwq needs to be used beyond the locking in effect, the caller is * responsible for guaranteeing that the pwq stays online. * @@ -645,8 +645,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) * @work: the work item of interest * * Pools are created and destroyed under wq_pool_mutex, and allows read - * access under sched-RCU read lock. As such, this function should be - * called under wq_pool_mutex or with preemption disabled. + * access under RCU read lock. As such, this function should be + * called under wq_pool_mutex or inside of a rcu_read_lock() region. * * All fields of the returned pool are accessible as long as the above * mentioned locking is in effect. If the returned pool needs to be used @@ -1079,7 +1079,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) { if (pwq) { /* - * As both pwqs and pools are sched-RCU protected, the + * As both pwqs and pools are RCU protected, the * following lock operations are safe. */ spin_lock_irq(&pwq->pool->lock); @@ -1205,6 +1205,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) return 0; + rcu_read_lock(); /* * The queueing is in progress, or it is already queued. Try to * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 
@@ -1243,10 +1244,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, set_work_pool_and_keep_pending(work, pool->id); spin_unlock(&pool->lock); + rcu_read_unlock(); return 1; } spin_unlock(&pool->lock); fail: + rcu_read_unlock(); local_irq_restore(*flags); if (work_is_canceling(work)) return -ENOENT; @@ -1327,6 +1330,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, if (unlikely(wq->flags & __WQ_DRAINING) && WARN_ON_ONCE(!is_chained_work(wq))) return; + + rcu_read_lock(); retry: if (req_cpu == WORK_CPU_UNBOUND) cpu = raw_smp_processor_id(); @@ -1383,10 +1388,8 @@ retry: /* pwq determined, queue */ trace_workqueue_queue_work(req_cpu, pwq, work); - if (WARN_ON(!list_empty(&work->entry))) { - spin_unlock(&pwq->pool->lock); - return; - } + if (WARN_ON(!list_empty(&work->entry))) + goto out; pwq->nr_in_flight[pwq->work_color]++; work_flags = work_color_to_flags(pwq->work_color); @@ -1402,7 +1405,9 @@ retry: insert_work(pwq, work, worklist, work_flags); +out: spin_unlock(&pwq->pool->lock); + rcu_read_unlock(); } /** @@ -2809,14 +2814,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) might_sleep(); - local_irq_disable(); + rcu_read_lock(); pool = get_work_pool(work); if (!pool) { - local_irq_enable(); + rcu_read_unlock(); return false; } - spin_lock(&pool->lock); + spin_lock_irq(&pool->lock); /* see the comment in try_to_grab_pending() with the same code */ pwq = get_work_pwq(work); if (pwq) { @@ -2843,10 +2848,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) else lock_map_acquire_read(&pwq->wq->lockdep_map); lock_map_release(&pwq->wq->lockdep_map); - + rcu_read_unlock(); return true; already_gone: spin_unlock_irq(&pool->lock); + rcu_read_unlock(); return false; } @@ -3169,7 +3175,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, const char *delim = ""; int node, written = 0; - rcu_read_lock_sched(); + get_online_cpus(); + rcu_read_lock(); for_each_node(node) { written += scnprintf(buf + written, PAGE_SIZE - written, "%s%d:%d", delim, node, @@ -3177,7 +3184,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, delim = " "; } written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); - rcu_read_unlock_sched(); + rcu_read_unlock(); + put_online_cpus(); return written; } @@ -3543,7 +3551,7 @@ static void rcu_free_pool(struct rcu_head *rcu) * put_unbound_pool - put a worker_pool * @pool: worker_pool to put * - * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU + * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU * safe manner. get_unbound_pool() calls this function on its failure path * and this function should be able to release pools which went through, * successfully or not, init_worker_pool(). 
@@ -3590,8 +3598,8 @@ static void put_unbound_pool(struct worker_pool *pool) del_timer_sync(&pool->idle_timer); del_timer_sync(&pool->mayday_timer); - /* sched-RCU protected to allow dereferences from get_work_pool() */ - call_rcu_sched(&pool->rcu, rcu_free_pool); + /* RCU protected to allow dereferences from get_work_pool() */ + call_rcu(&pool->rcu, rcu_free_pool); } /** @@ -3704,7 +3712,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work) put_unbound_pool(pool); mutex_unlock(&wq_pool_mutex); - call_rcu_sched(&pwq->rcu, rcu_free_pwq); + call_rcu(&pwq->rcu, rcu_free_pwq); /* * If we're the last pwq going away, @wq is already dead and no one @@ -4417,7 +4425,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) struct pool_workqueue *pwq; bool ret; - rcu_read_lock_sched(); + rcu_read_lock(); + preempt_disable(); if (cpu == WORK_CPU_UNBOUND) cpu = smp_processor_id(); @@ -4428,7 +4437,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); ret = !list_empty(&pwq->delayed_works); - rcu_read_unlock_sched(); + preempt_enable(); + rcu_read_unlock(); return ret; } @@ -4454,16 +4464,15 @@ unsigned int work_busy(struct work_struct *work) if (work_pending(work)) ret |= WORK_BUSY_PENDING; - local_irq_save(flags); + rcu_read_lock() pool = get_work_pool(work); if (pool) { - spin_lock(&pool->lock); + spin_lock_irqsave(&pool->lock, flags); if (find_worker_executing_work(pool, work)) ret |= WORK_BUSY_RUNNING; - spin_unlock(&pool->lock); + spin_unlock_irqrestore(&pool->lock, flags); } - local_irq_restore(flags); - + rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(work_busy); @@ -4916,16 +4925,16 @@ bool freeze_workqueues_busy(void) * nr_active is monotonically decreasing. It's safe * to peek without lock. */ - rcu_read_lock_sched(); + rcu_read_lock(); for_each_pwq(pwq, wq) { WARN_ON_ONCE(pwq->nr_active < 0); if (pwq->nr_active) { busy = true; - rcu_read_unlock_sched(); + rcu_read_unlock(); goto out_unlock; } } - rcu_read_unlock_sched(); + rcu_read_unlock(); } out_unlock: mutex_unlock(&wq_pool_mutex); -- cgit v0.10.2 From f9c6f86f6d440a9495e20302f43530f62ce3e6d6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:42:26 +0200 Subject: Use local irq lock instead of irq disable regions Signed-off-by: Thomas Gleixner diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 385da2f..e44ea7a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "workqueue_internal.h" @@ -323,6 +324,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq); struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); +static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); + static int worker_thread(void *__worker); static void copy_workqueue_attrs(struct workqueue_attrs *to, const struct workqueue_attrs *from); @@ -1082,9 +1085,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) * As both pwqs and pools are RCU protected, the * following lock operations are safe. 
*/ - spin_lock_irq(&pwq->pool->lock); + local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); put_pwq(pwq); - spin_unlock_irq(&pwq->pool->lock); + local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); } } @@ -1186,7 +1189,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, struct worker_pool *pool; struct pool_workqueue *pwq; - local_irq_save(*flags); + local_lock_irqsave(pendingb_lock, *flags); /* try to steal the timer if it exists */ if (is_dwork) { @@ -1250,7 +1253,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, spin_unlock(&pool->lock); fail: rcu_read_unlock(); - local_irq_restore(*flags); + local_unlock_irqrestore(pendingb_lock, *flags); if (work_is_canceling(work)) return -ENOENT; cpu_relax(); @@ -1322,7 +1325,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, * queued or lose PENDING. Grabbing PENDING and queueing should * happen with IRQ disabled. */ - WARN_ON_ONCE(!irqs_disabled()); + WARN_ON_ONCE_NONRT(!irqs_disabled()); debug_work_activate(work); @@ -1427,14 +1430,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, bool ret = false; unsigned long flags; - local_irq_save(flags); + local_lock_irqsave(pendingb_lock,flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_work(cpu, wq, work); ret = true; } - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); return ret; } EXPORT_SYMBOL(queue_work_on); @@ -1501,14 +1504,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, unsigned long flags; /* read the comment in __queue_work() */ - local_irq_save(flags); + local_lock_irqsave(pendingb_lock, flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_delayed_work(cpu, wq, dwork, delay); ret = true; } - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); return ret; } EXPORT_SYMBOL(queue_delayed_work_on); @@ -1543,7 +1546,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, if (likely(ret >= 0)) { __queue_delayed_work(cpu, wq, dwork, delay); - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); } /* -ENOENT from try_to_grab_pending() becomes %true */ @@ -2906,7 +2909,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) /* tell other tasks trying to grab @work to back off */ mark_work_canceling(work); - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); flush_work(work); clear_work_data(work); @@ -2951,10 +2954,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); */ bool flush_delayed_work(struct delayed_work *dwork) { - local_irq_disable(); + local_lock_irq(pendingb_lock); if (del_timer_sync(&dwork->timer)) __queue_work(dwork->cpu, dwork->wq, &dwork->work); - local_irq_enable(); + local_unlock_irq(pendingb_lock); return flush_work(&dwork->work); } EXPORT_SYMBOL(flush_delayed_work); @@ -2989,7 +2992,7 @@ bool cancel_delayed_work(struct delayed_work *dwork) set_work_pool_and_clear_pending(&dwork->work, get_work_pool_id(&dwork->work)); - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); return ret; } EXPORT_SYMBOL(cancel_delayed_work); @@ -4464,7 +4467,7 @@ unsigned int work_busy(struct work_struct *work) if (work_pending(work)) ret |= WORK_BUSY_PENDING; - rcu_read_lock() + rcu_read_lock(); pool = get_work_pool(work); if (pool) { spin_lock_irqsave(&pool->lock, flags); -- cgit v0.10.2 From 12f6d906aa82292dd392f1951d8bfd9ff5a92556 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: 
Mon, 1 Jul 2013 11:02:42 +0200 Subject: workqueue vs ata-piix livelock fixup An Intel i7 system regularly detected rcu_preempt stalls after the kernel was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no longer possible, unless the system was restarted. The kernel message was: INFO: rcu_preempt self-detected stall on CPU { 6} [..] NMI backtrace for cpu 6 CPU 6 Pid: 119, comm: irq/19-ata_piix Not tainted 3.8.13-rt13 #11 Shuttle Inc. SX58/SX58 RIP: 0010:[] [] ip_compute_csum+0x30/0x30 RSP: 0018:ffff880333303cb0 EFLAGS: 00000002 RAX: 0000000000000006 RBX: 00000000000003e9 RCX: 0000000000000034 RDX: 0000000000000000 RSI: ffffffff81aa16d0 RDI: 0000000000000001 RBP: ffff880333303ce8 R08: ffffffff81aa16d0 R09: ffffffff81c1b8cc R10: 0000000000000000 R11: 0000000000000000 R12: 000000000005161f R13: 0000000000000006 R14: ffffffff81aa16d0 R15: 0000000000000002 FS: 0000000000000000(0000) GS:ffff880333300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003c1b2bb420 CR3: 0000000001a0f000 CR4: 00000000000007e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process irq/19-ata_piix (pid: 119, threadinfo ffff88032d88a000, task ffff88032df80000) Stack: ffffffff8124cb32 000000000005161e 00000000000003e9 0000000000001000 0000000000009022 ffffffff81aa16d0 0000000000000002 ffff880333303cf8 ffffffff8124caa9 ffff880333303d08 ffffffff8124cad2 ffff880333303d28 Call Trace: [] ? delay_tsc+0x33/0xe3 [] __delay+0xf/0x11 [] __const_udelay+0x27/0x29 [] native_safe_apic_wait_icr_idle+0x39/0x45 [] __default_send_IPI_dest_field.constprop.0+0x1e/0x58 [] default_send_IPI_mask_sequence_phys+0x49/0x7d [] physflat_send_IPI_all+0x17/0x19 [] arch_trigger_all_cpu_backtrace+0x50/0x79 [] rcu_check_callbacks+0x1cb/0x568 [] ? raise_softirq+0x2e/0x35 [] ? tick_sched_do_timer+0x38/0x38 [] update_process_times+0x44/0x55 [] tick_sched_handle+0x4a/0x59 [] tick_sched_timer+0x3c/0x5b [] __run_hrtimer+0x9b/0x158 [] hrtimer_interrupt+0x172/0x2aa [] smp_apic_timer_interrupt+0x76/0x89 [] apic_timer_interrupt+0x6d/0x80 [] ? __local_lock_irqsave+0x17/0x4a [] try_to_grab_pending+0x42/0x17e [] mod_delayed_work_on+0x32/0x88 [] mod_delayed_work+0x1c/0x1e [] blk_run_queue_async+0x37/0x39 [] flush_end_io+0xf1/0x107 [] blk_finish_request+0x21e/0x264 [] blk_end_bidi_request+0x42/0x60 [] blk_end_request+0x10/0x12 [] scsi_io_completion+0x1bf/0x492 [] ? sd_done+0x298/0x2ef [] scsi_finish_command+0xe9/0xf2 [] scsi_softirq_done+0x106/0x10f [] blk_done_softirq+0x77/0x87 [] do_current_softirqs+0x172/0x2e1 [] ? irq_thread_fn+0x3a/0x3a [] local_bh_enable+0x43/0x72 [] irq_forced_thread_fn+0x46/0x52 [] irq_thread+0x8c/0x17c [] ? irq_thread+0x17c/0x17c [] ? wake_threads_waitq+0x44/0x44 [] kthread+0x8d/0x95 [] ? __kthread_parkme+0x65/0x65 [] ret_from_fork+0x7c/0xb0 [] ? __kthread_parkme+0x65/0x65 The state of softirqd of this CPU at the time of the crash was: ksoftirqd/6 R running task 0 53 2 0x00000000 ffff88032fc39d18 0000000000000046 ffff88033330c4c0 ffff8803303f4710 ffff88032fc39fd8 ffff88032fc39fd8 0000000000000000 0000000000062500 ffff88032df88000 ffff8803303f4710 0000000000000000 ffff88032fc38000 Call Trace: [] ? __queue_work+0x27c/0x27c [] preempt_schedule+0x61/0x76 [] migrate_enable+0xe5/0x1df [] ? __queue_work+0x27c/0x27c [] run_timer_softirq+0x161/0x1d6 [] do_current_softirqs+0x172/0x2e1 [] run_ksoftirqd+0x2d/0x45 [] smpboot_thread_fn+0x2ea/0x308 [] ? test_ti_thread_flag+0xc/0xc [] ? 
test_ti_thread_flag+0xc/0xc [] kthread+0x8d/0x95 [] ? __kthread_parkme+0x65/0x65 [] ret_from_fork+0x7c/0xb0 [] ? __kthread_parkme+0x65/0x65 Apparently, the softirq demon and the ata_piix IRQ handler were waiting for each other to finish ending up in a livelock. After the below patch was applied, the system no longer crashes. Reported-by: Carsten Emde Proposed-by: Thomas Gleixner Tested by: Carsten Emde Signed-off-by: Carsten Emde Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e44ea7a..b35f90e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "workqueue_internal.h" @@ -1256,7 +1257,7 @@ fail: local_unlock_irqrestore(pendingb_lock, *flags); if (work_is_canceling(work)) return -ENOENT; - cpu_relax(); + cpu_chill(); return -EAGAIN; } -- cgit v0.10.2 From 24edfc10c27f9e772d160bc2b88ef6ab2d245766 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jun 2011 19:47:03 +0200 Subject: sched: Distangle worker accounting from rqlock The worker accounting for cpu bound workers is plugged into the core scheduler code and the wakeup code. This is not a hard requirement and can be avoided by keeping track of the state in the workqueue code itself. Keep track of the sleeping state in the worker itself and call the notifier before entering the core scheduler. There might be false positives when the task is woken between that call and actually scheduling, but that's not really different from scheduling and being woken immediately after switching away. There is also no harm from updating nr_running when the task returns from scheduling instead of accounting it in the wakeup code. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Tejun Heo Cc: Jens Axboe Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de Signed-off-by: Thomas Gleixner diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c21721e..fe06ca2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1332,10 +1332,6 @@ static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) { activate_task(rq, p, en_flags); p->on_rq = 1; - - /* if a worker is waking up, notify workqueue */ - if (p->flags & PF_WQ_WORKER) - wq_worker_waking_up(p, cpu_of(rq)); } /* @@ -1573,42 +1569,6 @@ out: } /** - * try_to_wake_up_local - try to wake up a local task with rq lock held - * @p: the thread to be awakened - * - * Put @p on the run-queue if it's not already there. The caller must - * ensure that this_rq() is locked, @p is bound to this_rq() and not - * the current task. - */ -static void try_to_wake_up_local(struct task_struct *p) -{ - struct rq *rq = task_rq(p); - - if (WARN_ON_ONCE(rq != this_rq()) || - WARN_ON_ONCE(p == current)) - return; - - lockdep_assert_held(&rq->lock); - - if (!raw_spin_trylock(&p->pi_lock)) { - raw_spin_unlock(&rq->lock); - raw_spin_lock(&p->pi_lock); - raw_spin_lock(&rq->lock); - } - - if (!(p->state & TASK_NORMAL)) - goto out; - - if (!p->on_rq) - ttwu_activate(rq, p, ENQUEUE_WAKEUP); - - ttwu_do_wakeup(rq, p, 0); - ttwu_stat(p, smp_processor_id(), 0); -out: - raw_spin_unlock(&p->pi_lock); -} - -/** * wake_up_process - Wake up a specific process * @p: The process to be woken up. 
* @@ -2597,21 +2557,6 @@ need_resched: } else { deactivate_task(rq, prev, DEQUEUE_SLEEP); prev->on_rq = 0; - - /* - * If a worker went to sleep, notify and ask workqueue - * whether it wants to wake up a task to maintain - * concurrency. - * Only call wake up if prev isn't blocked on a sleeping - * spin lock. - */ - if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { - struct task_struct *to_wakeup; - - to_wakeup = wq_worker_sleeping(prev, cpu); - if (to_wakeup) - try_to_wake_up_local(to_wakeup); - } } switch_count = &prev->nvcsw; } @@ -2654,6 +2599,14 @@ static inline void sched_submit_work(struct task_struct *tsk) { if (!tsk->state || tsk_is_pi_blocked(tsk)) return; + + /* + * If a worker went to sleep, notify and ask workqueue whether + * it wants to wake up a task to maintain concurrency. + */ + if (tsk->flags & PF_WQ_WORKER) + wq_worker_sleeping(tsk); + /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. @@ -2662,12 +2615,19 @@ static inline void sched_submit_work(struct task_struct *tsk) blk_schedule_flush_plug(tsk); } +static inline void sched_update_worker(struct task_struct *tsk) +{ + if (tsk->flags & PF_WQ_WORKER) + wq_worker_running(tsk); +} + asmlinkage void __sched schedule(void) { struct task_struct *tsk = current; sched_submit_work(tsk); __schedule(); + sched_update_worker(tsk); } EXPORT_SYMBOL(schedule); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b35f90e..9efb7ce 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -808,44 +808,31 @@ static void wake_up_worker(struct worker_pool *pool) } /** - * wq_worker_waking_up - a worker is waking up - * @task: task waking up - * @cpu: CPU @task is waking up to + * wq_worker_running - a worker is running again + * @task: task returning from sleep * - * This function is called during try_to_wake_up() when a worker is - * being awoken. - * - * CONTEXT: - * spin_lock_irq(rq->lock) + * This function is called when a worker returns from schedule() */ -void wq_worker_waking_up(struct task_struct *task, int cpu) +void wq_worker_running(struct task_struct *task) { struct worker *worker = kthread_data(task); - if (!(worker->flags & WORKER_NOT_RUNNING)) { - WARN_ON_ONCE(worker->pool->cpu != cpu); + if (!worker->sleeping) + return; + if (!(worker->flags & WORKER_NOT_RUNNING)) atomic_inc(&worker->pool->nr_running); - } + worker->sleeping = 0; } /** * wq_worker_sleeping - a worker is going to sleep * @task: task going to sleep - * @cpu: CPU in question, must be the current CPU number - * - * This function is called during schedule() when a busy worker is - * going to sleep. Worker on the same cpu can be woken up by - * returning pointer to its task. - * - * CONTEXT: - * spin_lock_irq(rq->lock) - * - * Return: - * Worker task on @cpu to wake up, %NULL if none. + * This function is called from schedule() when a busy worker is + * going to sleep. */ -struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) +void wq_worker_sleeping(struct task_struct *task) { - struct worker *worker = kthread_data(task), *to_wakeup = NULL; + struct worker *next, *worker = kthread_data(task); struct worker_pool *pool; /* @@ -854,14 +841,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) * checking NOT_RUNNING. 
*/ if (worker->flags & WORKER_NOT_RUNNING) - return NULL; + return; pool = worker->pool; - /* this can only happen on the local cpu */ - if (WARN_ON_ONCE(cpu != raw_smp_processor_id())) - return NULL; + if (WARN_ON_ONCE(worker->sleeping)) + return; + worker->sleeping = 1; + spin_lock_irq(&pool->lock); /* * The counterpart of the following dec_and_test, implied mb, * worklist not empty test sequence is in insert_work(). @@ -874,9 +862,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) * lock is safe. */ if (atomic_dec_and_test(&pool->nr_running) && - !list_empty(&pool->worklist)) - to_wakeup = first_worker(pool); - return to_wakeup ? to_wakeup->task : NULL; + !list_empty(&pool->worklist)) { + next = first_worker(pool); + if (next) + wake_up_process(next->task); + } + spin_unlock_irq(&pool->lock); } /** diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index 7e2204d..2bb5b5a 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -41,6 +41,7 @@ struct worker { unsigned long last_active; /* L: last active timestamp */ unsigned int flags; /* X: flags */ int id; /* I: worker id */ + int sleeping; /* None */ /* * Opaque string set with work_set_desc(). Printed out with task @@ -66,7 +67,7 @@ static inline struct worker *current_wq_worker(void) * Scheduler hooks for concurrency managed workqueue. Only to be used from * sched/core.c and workqueue.c. */ -void wq_worker_waking_up(struct task_struct *task, int cpu); -struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu); +void wq_worker_running(struct task_struct *task); +void wq_worker_sleeping(struct task_struct *task); #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ -- cgit v0.10.2 From 0daa7bd247a2dc811a6ff3d211d4150f97a96200 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Apr 2014 19:19:47 -0500 Subject: idr: Use local lock instead of preempt enable/disable We need to protect the per cpu variable and prevent migration. Signed-off-by: Thomas Gleixner diff --git a/include/linux/idr.h b/include/linux/idr.h index 871a213..267527b 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -92,10 +92,14 @@ void idr_init(struct idr *idp); * Each idr_preload() should be matched with an invocation of this * function. See idr_preload() for details. 
*/ +#ifdef CONFIG_PREEMPT_RT_FULL +void idr_preload_end(void); +#else static inline void idr_preload_end(void) { preempt_enable(); } +#endif /** * idr_find - return pointer for given id diff --git a/lib/idr.c b/lib/idr.c index bfe4db4..ae69d32 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -37,6 +37,7 @@ #include #include #include +#include #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) @@ -389,6 +390,36 @@ int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) } EXPORT_SYMBOL(__idr_get_new_above); +#ifdef CONFIG_PREEMPT_RT_FULL +static DEFINE_LOCAL_IRQ_LOCK(idr_lock); + +static inline void idr_preload_lock(void) +{ + local_lock(idr_lock); +} + +static inline void idr_preload_unlock(void) +{ + local_unlock(idr_lock); +} + +void idr_preload_end(void) +{ + idr_preload_unlock(); +} +EXPORT_SYMBOL(idr_preload_end); +#else +static inline void idr_preload_lock(void) +{ + preempt_disable(); +} + +static inline void idr_preload_unlock(void) +{ + preempt_enable(); +} +#endif + /** * idr_preload - preload for idr_alloc() * @gfp_mask: allocation mask to use for preloading @@ -423,7 +454,7 @@ void idr_preload(gfp_t gfp_mask) WARN_ON_ONCE(in_interrupt()); might_sleep_if(gfp_mask & __GFP_WAIT); - preempt_disable(); + idr_preload_lock(); /* * idr_alloc() is likely to succeed w/o full idr_layer buffer and @@ -435,9 +466,9 @@ void idr_preload(gfp_t gfp_mask) while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { struct idr_layer *new; - preempt_enable(); + idr_preload_unlock(); new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); - preempt_disable(); + idr_preload_lock(); if (!new) break; -- cgit v0.10.2 From e2cf20e0e398dde23f3f9624cf04d03dceeeefeb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 21:41:35 +0200 Subject: debugobjects-rt.patch Signed-off-by: Thomas Gleixner diff --git a/lib/debugobjects.c b/lib/debugobjects.c index bf2c8b1..1ac2049 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -308,7 +308,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) struct debug_obj *obj; unsigned long flags; - fill_pool(); +#ifdef CONFIG_PREEMPT_RT_FULL + if (preempt_count() == 0 && !irqs_disabled()) +#endif + fill_pool(); db = get_bucket((unsigned long) addr); -- cgit v0.10.2 From 5336abf3662d36182be51af3582757130cf8bea3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2011 11:03:16 +0200 Subject: jump-label-rt.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index a507907..006627b 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -49,7 +49,8 @@ #include #include -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \ + !defined(CONFIG_PREEMPT_BASE) struct static_key { atomic_t enabled; -- cgit v0.10.2 From 357da1383b14515af9b099ef760adf3c39bc04b8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 12 Jul 2011 15:38:34 +0200 Subject: skbufhead-raw-lock.patch Signed-off-by: Thomas Gleixner diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e9ff3e5..1f48a4e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1930,6 +1930,7 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; + struct sk_buff_head tofree_queue; #ifdef CONFIG_NET_FLOW_LIMIT struct sd_flow_limit __rcu *flow_limit; diff --git a/include/linux/skbuff.h 
b/include/linux/skbuff.h index 9995165..3bb9cf3 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -133,6 +133,7 @@ struct sk_buff_head { __u32 qlen; spinlock_t lock; + raw_spinlock_t raw_lock; }; struct sk_buff; @@ -1073,6 +1074,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) __skb_queue_head_init(list); } +static inline void skb_queue_head_init_raw(struct sk_buff_head *list) +{ + raw_spin_lock_init(&list->raw_lock); + __skb_queue_head_init(list); +} + static inline void skb_queue_head_init_class(struct sk_buff_head *list, struct lock_class_key *class) { diff --git a/net/core/dev.c b/net/core/dev.c index 41ae38b..f49b844 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -196,14 +196,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS - spin_lock(&sd->input_pkt_queue.lock); + raw_spin_lock(&sd->input_pkt_queue.raw_lock); #endif } static inline void rps_unlock(struct softnet_data *sd) { #ifdef CONFIG_RPS - spin_unlock(&sd->input_pkt_queue.lock); + raw_spin_unlock(&sd->input_pkt_queue.raw_lock); #endif } @@ -3706,7 +3706,7 @@ static void flush_backlog(void *arg) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->input_pkt_queue); - kfree_skb(skb); + __skb_queue_tail(&sd->tofree_queue, skb); input_queue_head_incr(sd); } } @@ -3715,10 +3715,13 @@ static void flush_backlog(void *arg) skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->process_queue); - kfree_skb(skb); + __skb_queue_tail(&sd->tofree_queue, skb); input_queue_head_incr(sd); } } + + if (!skb_queue_empty(&sd->tofree_queue)) + raise_softirq_irqoff(NET_RX_SOFTIRQ); } static int napi_gro_complete(struct sk_buff *skb) @@ -4276,10 +4279,17 @@ static void net_rx_action(struct softirq_action *h) struct softnet_data *sd = &__get_cpu_var(softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; + struct sk_buff *skb; void *have; local_irq_disable(); + while ((skb = __skb_dequeue(&sd->tofree_queue))) { + local_irq_enable(); + kfree_skb(skb); + local_irq_disable(); + } + while (!list_empty(&sd->poll_list)) { struct napi_struct *n; int work, weight; @@ -6408,6 +6418,9 @@ static int dev_cpu_callback(struct notifier_block *nfb, netif_rx(skb); input_queue_head_incr(oldsd); } + while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { + kfree_skb(skb); + } return NOTIFY_OK; } @@ -6719,8 +6732,9 @@ static int __init net_dev_init(void) struct softnet_data *sd = &per_cpu(softnet_data, i); memset(sd, 0, sizeof(*sd)); - skb_queue_head_init(&sd->input_pkt_queue); - skb_queue_head_init(&sd->process_queue); + skb_queue_head_init_raw(&sd->input_pkt_queue); + skb_queue_head_init_raw(&sd->process_queue); + skb_queue_head_init_raw(&sd->tofree_queue); sd->completion_queue = NULL; INIT_LIST_HEAD(&sd->poll_list); sd->output_queue = NULL; -- cgit v0.10.2 From 8c57bf0162a60e73faf923f89db7b8f2b4723b2b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2011 14:05:05 +0200 Subject: x86-no-perf-irq-work-rt.patch Signed-off-by: Thomas Gleixner diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c index 1de84e3..3d21f7b 100644 --- a/arch/x86/kernel/irq_work.c +++ b/arch/x86/kernel/irq_work.c @@ -38,6 +38,7 @@ __visible void smp_trace_irq_work_interrupt(struct pt_regs *regs) exiting_irq(); } +#ifndef CONFIG_PREEMPT_RT_FULL void arch_irq_work_raise(void) { #ifdef CONFIG_X86_LOCAL_APIC 
@@ -48,3 +49,4 @@ void arch_irq_work_raise(void) apic_wait_icr_idle(); #endif } +#endif diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 55fcce6..f6e4377 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -119,8 +119,9 @@ static void __irq_work_run(void) if (llist_empty(this_list)) return; +#ifndef CONFIG_PREEMPT_RT_FULL BUG_ON(!irqs_disabled()); - +#endif llnode = llist_del_all(this_list); while (llnode != NULL) { work = llist_entry(llnode, struct irq_work, llnode); @@ -152,7 +153,9 @@ static void __irq_work_run(void) */ void irq_work_run(void) { +#ifndef CONFIG_PREEMPT_RT_FULL BUG_ON(!in_irq()); +#endif __irq_work_run(); } EXPORT_SYMBOL_GPL(irq_work_run); diff --git a/kernel/timer.c b/kernel/timer.c index b4f860c..426d114 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1425,7 +1425,7 @@ void update_process_times(int user_tick) scheduler_tick(); run_local_timers(); rcu_check_callbacks(cpu, user_tick); -#ifdef CONFIG_IRQ_WORK +#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) if (in_irq()) irq_work_run(); #endif @@ -1439,6 +1439,10 @@ static void run_timer_softirq(struct softirq_action *h) { struct tvec_base *base = __this_cpu_read(tvec_bases); +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) + irq_work_run(); +#endif + if (time_after_eq(jiffies, base->timer_jiffies)) __run_timers(base); } -- cgit v0.10.2 From 4693b2db3dc1a48d4959e512f334c1772775440b Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 31 Jan 2014 14:20:31 +0100 Subject: irq_work: allow certain work in hard irq context irq_work is processed in softirq context on -RT because we want to avoid long latencies which might arise from processing lots of perf events. The noHZ-full mode requires its callback to be called from real hardirq context (commit 76c24fb ("nohz: New APIs to re-evaluate the tick on full dynticks CPUs")). If it is called from a thread context we might get wrong results for checks like "is_idle_task(current)". This patch introduces a second list (hirq_work_list) which will be used if irq_work_run() has been invoked from hardirq context and process only work items marked with IRQ_WORK_HARD_IRQ. This patch also removes arch_irq_work_raise() from sparc & powerpc like it is already done for x86. Atleast for powerpc it is somehow superfluous because it is called from the timer interrupt which should invoke update_process_times(). Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index b3b1441..5ac241b 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -423,7 +423,7 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); #endif -#ifdef CONFIG_IRQ_WORK +#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) /* * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... 
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 269af58..dbb51a6 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -43,10 +43,12 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs) set_irq_regs(old_regs); } +#ifndef CONFIG_PREEMPT_RT_FULL void arch_irq_work_raise(void) { set_softint(1 << PIL_DEFERRED_PCR_WORK); } +#endif const struct pcr_ops *pcr_ops; EXPORT_SYMBOL_GPL(pcr_ops); diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 6601702..60c19ee 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -16,6 +16,7 @@ #define IRQ_WORK_BUSY 2UL #define IRQ_WORK_FLAGS 3UL #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ +#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ struct irq_work { unsigned long flags; diff --git a/kernel/irq_work.c b/kernel/irq_work.c index f6e4377..35d21f9 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -20,6 +20,9 @@ static DEFINE_PER_CPU(struct llist_head, irq_work_list); +#ifdef CONFIG_PREEMPT_RT_FULL +static DEFINE_PER_CPU(struct llist_head, hirq_work_list); +#endif static DEFINE_PER_CPU(int, irq_work_raised); /* @@ -48,7 +51,11 @@ static bool irq_work_claim(struct irq_work *work) return true; } +#ifdef CONFIG_PREEMPT_RT_FULL +void arch_irq_work_raise(void) +#else void __weak arch_irq_work_raise(void) +#endif { /* * Lame architectures will get the timer tick callback @@ -70,8 +77,12 @@ void irq_work_queue(struct irq_work *work) /* Queue the entry and raise the IPI if needed. */ preempt_disable(); - llist_add(&work->llnode, &__get_cpu_var(irq_work_list)); - +#ifdef CONFIG_PREEMPT_RT_FULL + if (work->flags & IRQ_WORK_HARD_IRQ) + llist_add(&work->llnode, &__get_cpu_var(hirq_work_list)); + else +#endif + llist_add(&work->llnode, &__get_cpu_var(irq_work_list)); /* * If the work is not "lazy" or the tick is stopped, raise the irq * work interrupt (if supported by the arch), otherwise, just wait @@ -115,7 +126,12 @@ static void __irq_work_run(void) __this_cpu_write(irq_work_raised, 0); barrier(); - this_list = &__get_cpu_var(irq_work_list); +#ifdef CONFIG_PREEMPT_RT_FULL + if (in_irq()) + this_list = &__get_cpu_var(hirq_work_list); + else +#endif + this_list = &__get_cpu_var(irq_work_list); if (llist_empty(this_list)) return; diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1f4299b..3740f28 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -221,6 +221,7 @@ static void nohz_full_kick_work_func(struct irq_work *work) static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { .func = nohz_full_kick_work_func, + .flags = IRQ_WORK_HARD_IRQ, }; /* diff --git a/kernel/timer.c b/kernel/timer.c index 426d114..cc34e42 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1425,7 +1425,7 @@ void update_process_times(int user_tick) scheduler_tick(); run_local_timers(); rcu_check_callbacks(cpu, user_tick); -#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) +#if defined(CONFIG_IRQ_WORK) if (in_irq()) irq_work_run(); #endif -- cgit v0.10.2 From f4e9bd663f7f72f27ff9bbf75e8b9f65ebd1c14e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Sep 2012 14:50:37 +0200 Subject: printk-rt-aware.patch Signed-off-by: Thomas Gleixner diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 184e346..1d8dbf5 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1272,6 +1272,7 @@ static void call_console_drivers(int level, const char *text, size_t len) if 
(!console_drivers) return; + migrate_disable(); for_each_console(con) { if (exclusive_console && con != exclusive_console) continue; @@ -1284,6 +1285,7 @@ static void call_console_drivers(int level, const char *text, size_t len) continue; con->write(con, text, len); } + migrate_enable(); } /* @@ -1343,12 +1345,18 @@ static inline int can_use_console(unsigned int cpu) * interrupts disabled. It should return with 'lockbuf_lock' * released but interrupts still disabled. */ -static int console_trylock_for_printk(unsigned int cpu) +static int console_trylock_for_printk(unsigned int cpu, unsigned long flags) __releases(&logbuf_lock) { int retval = 0, wake = 0; +#ifdef CONFIG_PREEMPT_RT_FULL + int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) && + (preempt_count() <= 1); +#else + int lock = 1; +#endif - if (console_trylock()) { + if (lock && console_trylock()) { retval = 1; /* @@ -1683,8 +1691,15 @@ asmlinkage int vprintk_emit(int facility, int level, * The console_trylock_for_printk() function will release 'logbuf_lock' * regardless of whether it actually gets the console semaphore or not. */ - if (console_trylock_for_printk(this_cpu)) + if (console_trylock_for_printk(this_cpu, flags)) { +#ifndef CONFIG_PREEMPT_RT_FULL + console_unlock(); +#else + raw_local_irq_restore(flags); console_unlock(); + raw_local_irq_save(flags); +#endif + } lockdep_on(); out_restore_irqs: @@ -2026,11 +2041,16 @@ static void console_cont_flush(char *text, size_t size) goto out; len = cont_print_text(text, size); +#ifndef CONFIG_PREEMPT_RT_FULL raw_spin_unlock(&logbuf_lock); stop_critical_timings(); call_console_drivers(cont.level, text, len); start_critical_timings(); local_irq_restore(flags); +#else + raw_spin_unlock_irqrestore(&logbuf_lock, flags); + call_console_drivers(cont.level, text, len); +#endif return; out: raw_spin_unlock_irqrestore(&logbuf_lock, flags); @@ -2113,12 +2133,17 @@ skip: console_idx = log_next(console_idx); console_seq++; console_prev = msg->flags; - raw_spin_unlock(&logbuf_lock); +#ifndef CONFIG_PREEMPT_RT_FULL + raw_spin_unlock(&logbuf_lock); stop_critical_timings(); /* don't trace print latency */ call_console_drivers(level, text, len); start_critical_timings(); local_irq_restore(flags); +#else + raw_spin_unlock_irqrestore(&logbuf_lock, flags); + call_console_drivers(level, text, len); +#endif } console_locked = 0; mutex_release(&console_lock_dep_map, 1, _RET_IP_); -- cgit v0.10.2 From 80e7055ba0962969a2a1c2666b721fdf4704c12e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 21 Mar 2013 19:01:05 +0100 Subject: HACK: printk: drop the logbuf_lock more often The lock is hold with irgs off. The latency drops 500us+ on my arm bugs with a "full" buffer after executing "dmesg" on the shell. 
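The pattern applied below, as a rough sketch (every helper name here is invented; the real code walks the printk log records while holding logbuf_lock in syslog_print_all()):

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(example_lock);

/* Stand-ins for the real log-buffer accessors. */
bool more_records(void);
void copy_one_record(void);
bool records_lost(void);

static int copy_all_records(void)
{
        int attempts = 0;
        int num_msg;

try_again:
        if (++attempts > 10)
                return -EBUSY;          /* kept losing the race, give up */

        num_msg = 0;
        raw_spin_lock_irq(&example_lock);
        while (more_records()) {
                copy_one_record();
                if (++num_msg > 5) {
                        num_msg = 0;
                        /* Bound the irqs-off region: briefly let irqs in. */
                        raw_spin_unlock_irq(&example_lock);
                        raw_spin_lock_irq(&example_lock);
                        if (records_lost()) {   /* a writer overtook us */
                                raw_spin_unlock_irq(&example_lock);
                                goto try_again;
                        }
                }
        }
        raw_spin_unlock_irq(&example_lock);
        return 0;
}

Dropping and immediately re-taking the raw lock every few records is what bounds the irqs-off latency; the retry counter keeps a busy writer from starving the reader forever.
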
Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 1d8dbf5..0a63f7b 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1029,6 +1029,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) { char *text; int len = 0; + int attempts = 0; text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); if (!text) @@ -1040,7 +1041,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) u64 seq; u32 idx; enum log_flags prev; - + int num_msg; +try_again: + attempts++; + if (attempts > 10) { + len = -EBUSY; + goto out; + } + num_msg = 0; if (clear_seq < log_first_seq) { /* messages are gone, move to first available one */ clear_seq = log_first_seq; @@ -1061,6 +1069,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) prev = msg->flags; idx = log_next(idx); seq++; + num_msg++; + if (num_msg > 5) { + num_msg = 0; + raw_spin_unlock_irq(&logbuf_lock); + raw_spin_lock_irq(&logbuf_lock); + if (clear_seq < log_first_seq) + goto try_again; + } } /* move first record forward until length fits into the buffer */ @@ -1074,6 +1090,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) prev = msg->flags; idx = log_next(idx); seq++; + num_msg++; + if (num_msg > 5) { + num_msg = 0; + raw_spin_unlock_irq(&logbuf_lock); + raw_spin_lock_irq(&logbuf_lock); + if (clear_seq < log_first_seq) + goto try_again; + } } /* last message fitting into this dump */ @@ -1115,6 +1139,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) clear_seq = log_next_seq; clear_idx = log_next_idx; } +out: raw_spin_unlock_irq(&logbuf_lock); kfree(text); -- cgit v0.10.2 From d8d65707b6962214b5df1b9baf419cdeb7813923 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Apr 2014 19:19:50 -0500 Subject: Powerpc: Use generic rwsem on RT Signed-off-by: Thomas Gleixner diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 38f3b7e..3cc5598 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT config RWSEM_GENERIC_SPINLOCK bool + default y if PREEMPT_RT_FULL config RWSEM_XCHGADD_ALGORITHM bool - default y + default y if !PREEMPT_RT_FULL config GENERIC_LOCKBREAK bool -- cgit v0.10.2 From f5597e08c92f0dd4f5e605e0fc07520519423888 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:08:34 +0200 Subject: power-disable-highmem-on-rt.patch Signed-off-by: Thomas Gleixner diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3cc5598..a8c537f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -286,7 +286,7 @@ menu "Kernel options" config HIGHMEM bool "High memory support" - depends on PPC32 + depends on PPC32 && !PREEMPT_RT_FULL source kernel/Kconfig.hz source kernel/Kconfig.preempt -- cgit v0.10.2 From 94e6570f778acc4fc9fde7c2e1d2398f61d23510 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:09:28 +0200 Subject: arm-disable-highmem-on-rt.patch Signed-off-by: Thomas Gleixner diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 1ad6fb6..ef77778 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1759,7 +1759,7 @@ config HAVE_ARCH_PFN_VALID config HIGHMEM bool "High Memory Support" - depends on MMU + depends on MMU && !PREEMPT_RT_FULL help The address space of ARM processors is only 4 Gigabytes large and it has to accommodate user address space, kernel address -- cgit v0.10.2 From 4a2c4b3ebff286dcf5474372b5017224b9407a2d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner 
Date: Sat, 1 May 2010 18:29:35 +0200 Subject: ARM: at91: tclib: Default to tclib timer for RT RT is not too happy about the shared timer interrupt in AT91 devices. Default to tclib timer for RT. Signed-off-by: Thomas Gleixner diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 8d29ba8..e9b20e8 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -63,6 +63,7 @@ config ATMEL_PWM config ATMEL_TCLIB bool "Atmel AT32/AT91 Timer/Counter Library" depends on (AVR32 || ARCH_AT91) + default y if PREEMPT_RT_FULL help Select this if you want a library to allocate the Timer/Counter blocks found on many Atmel processors. This facilitates using @@ -95,7 +96,7 @@ config ATMEL_TCB_CLKSRC_BLOCK config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK bool "TC Block use 32 KiHz clock" depends on ATMEL_TCB_CLKSRC - default y + default y if !PREEMPT_RT_FULL help Select this to use 32 KiHz base clock rate as TC block clock source for clock events. -- cgit v0.10.2 From 60a0b05ce59c7459f42f135984808accae045206 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 20 Sep 2013 14:31:54 +0200 Subject: arm/unwind: use a raw_spin_lock Mostly unwind is done with irqs enabled however SLUB may call it with irqs disabled while creating a new SLUB cache. I had system freeze while loading a module which called kmem_cache_create() on init. That means SLUB's __slab_alloc() disabled interrupts and then ->new_slab_objects() ->new_slab() ->setup_object() ->setup_object_debug() ->init_tracking() ->set_track() ->save_stack_trace() ->save_stack_trace_tsk() ->walk_stackframe() ->unwind_frame() ->unwind_find_idx() =>spin_lock_irqsave(&unwind_lock); Cc: stable-rt@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 00df012..bbafc67 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -87,7 +87,7 @@ extern const struct unwind_idx __start_unwind_idx[]; static const struct unwind_idx *__origin_unwind_idx; extern const struct unwind_idx __stop_unwind_idx[]; -static DEFINE_SPINLOCK(unwind_lock); +static DEFINE_RAW_SPINLOCK(unwind_lock); static LIST_HEAD(unwind_tables); /* Convert a prel31 symbol to an absolute address */ @@ -195,7 +195,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr) /* module unwind tables */ struct unwind_table *table; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_for_each_entry(table, &unwind_tables, list) { if (addr >= table->begin_addr && addr < table->end_addr) { @@ -207,7 +207,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr) break; } } - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); } pr_debug("%s: idx = %p\n", __func__, idx); @@ -469,9 +469,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, tab->begin_addr = text_addr; tab->end_addr = text_addr + text_size; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_add_tail(&tab->list, &unwind_tables); - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); return tab; } @@ -483,9 +483,9 @@ void unwind_table_del(struct unwind_table *tab) if (!tab) return; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_del(&tab->list); - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); kfree(tab); } -- cgit v0.10.2 From 
dc401f04a28f7c9959c41189d3290241dde9a19f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 18 Jul 2011 17:10:12 +0200 Subject: mips-disable-highmem-on-rt.patch Signed-off-by: Thomas Gleixner diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f75ab4a..f3981c2 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2078,7 +2078,7 @@ config CPU_R4400_WORKAROUNDS # config HIGHMEM bool "High Memory Support" - depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM + depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL config CPU_SUPPORTS_HIGHMEM bool -- cgit v0.10.2 From 19e27852e58b811fb6d0677609e79ddd30d6ff03 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 6 Oct 2011 10:48:39 -0400 Subject: net: Avoid livelock in net_tx_action() on RT qdisc_lock is taken w/o disabling interrupts or bottom halfs. So code holding a qdisc_lock() can be interrupted and softirqs can run on the return of interrupt in !RT. The spin_trylock() in net_tx_action() makes sure, that the softirq does not deadlock. When the lock can't be acquired q is requeued and the NET_TX softirq is raised. That causes the softirq to run over and over. That works in mainline as do_softirq() has a retry loop limit and leaves the softirq processing in the interrupt return path and schedules ksoftirqd. The task which holds qdisc_lock cannot be preempted, so the lock is released and either ksoftirqd or the next softirq in the return from interrupt path can proceed. Though it's a bit strange to actually run MAX_SOFTIRQ_RESTART (10) loops before it decides to bail out even if it's clear in the first iteration :) On RT all softirq processing is done in a FIFO thread and we don't have a loop limit, so ksoftirqd preempts the lock holder forever and unqueues and requeues until the reset button is hit. Due to the forced threading of ksoftirqd on RT we actually cannot deadlock on qdisc_lock because it's a "sleeping lock". So it's safe to replace the spin_trylock() with a spin_lock(). When contended, ksoftirqd is scheduled out and the lock holder can proceed. [ tglx: Massaged changelog and code comments ] Solved-by: Thomas Gleixner Signed-off-by: Steven Rostedt Tested-by: Carsten Emde Cc: Clark Williams Cc: John Kacur Cc: Luis Claudio R. Goncalves Signed-off-by: Thomas Gleixner diff --git a/net/core/dev.c b/net/core/dev.c index f49b844..aa5a5a1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3277,6 +3277,36 @@ int netif_rx_ni(struct sk_buff *skb) } EXPORT_SYMBOL(netif_rx_ni); +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * RT runs ksoftirqd as a real time thread and the root_lock is a + * "sleeping spinlock". If the trylock fails then we can go into an + * infinite loop when ksoftirqd preempted the task which actually + * holds the lock, because we requeue q and raise NET_TX softirq + * causing ksoftirqd to loop forever. + * + * It's safe to use spin_lock on RT here as softirqs run in thread + * context and cannot deadlock against the thread which is holding + * root_lock. + * + * On !RT the trylock might fail, but there we bail out from the + * softirq loop after 10 attempts which we can't do on RT. 
And the + * task holding root_lock cannot be preempted, so the only downside of + * that trylock is that we need 10 loops to decide that we should have + * given up in the first one :) + */ +static inline int take_root_lock(spinlock_t *lock) +{ + spin_lock(lock); + return 1; +} +#else +static inline int take_root_lock(spinlock_t *lock) +{ + return spin_trylock(lock); +} +#endif + static void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = &__get_cpu_var(softnet_data); @@ -3315,7 +3345,7 @@ static void net_tx_action(struct softirq_action *h) head = head->next_sched; root_lock = qdisc_lock(q); - if (spin_trylock(root_lock)) { + if (take_root_lock(root_lock)) { smp_mb__before_clear_bit(); clear_bit(__QDISC_STATE_SCHED, &q->state); -- cgit v0.10.2 From a21edadb2148d493be02e85f584a5a80a8f506b6 Mon Sep 17 00:00:00 2001 From: Carsten Emde Date: Tue, 19 Jul 2011 13:51:17 +0100 Subject: net: sysrq via icmp There are (probably rare) situations when a system crashed and the system console becomes unresponsive but the network icmp layer still is alive. Wouldn't it be wonderful, if we then could submit a sysreq command via ping? This patch provides this facility. Please consult the updated documentation Documentation/sysrq.txt for details. Signed-off-by: Carsten Emde diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt index 8cb4d78..d458683 100644 --- a/Documentation/sysrq.txt +++ b/Documentation/sysrq.txt @@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - , On other - If you know of the key combos for other architectures, please let me know so I can add them to this section. -On all - write a character to /proc/sysrq-trigger. e.g.: - +On all - write a character to /proc/sysrq-trigger, e.g.: echo t > /proc/sysrq-trigger +On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g. + echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq + Send an ICMP echo request with this pattern plus the particular + SysRq command key. Example: + # ping -c1 -s57 -p0102030468 + will trigger the SysRq-H (help) command. + + * What are the 'command' keys? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'b' - Will immediately reboot the system without syncing or unmounting diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index bf2ec22..5e9ce7f 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -57,6 +57,7 @@ struct netns_ipv4 { int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; + int sysctl_icmp_echo_sysrq; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 5f7d11a..1f0c7e0 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -69,6 +69,7 @@ #include #include #include +#include #include #include #include @@ -776,6 +777,30 @@ static void icmp_redirect(struct sk_buff *skb) } /* + * 32bit and 64bit have different timestamp length, so we check for + * the cookie at offset 20 and verify it is repeated at offset 50 + */ +#define CO_POS0 20 +#define CO_POS1 50 +#define CO_SIZE sizeof(int) +#define ICMP_SYSRQ_SIZE 57 + +/* + * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie + * pattern and if it matches send the next byte as a trigger to sysrq. 
+ */ +static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) +{ + int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); + char *p = skb->data; + + if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && + !memcmp(&cookie, p + CO_POS1, CO_SIZE) && + p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) + handle_sysrq(p[CO_POS0 + CO_SIZE]); +} + +/* * Handle ICMP_ECHO ("ping") requests. * * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo @@ -802,6 +827,11 @@ static void icmp_echo(struct sk_buff *skb) icmp_param.data_len = skb->len; icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); + + if (skb->len == ICMP_SYSRQ_SIZE && + net->ipv4.sysctl_icmp_echo_sysrq) { + icmp_check_sysrq(net, skb); + } } } diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 540279f..99461ed 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -812,6 +812,13 @@ static struct ctl_table ipv4_net_table[] = { .proc_handler = proc_dointvec }, { + .procname = "icmp_echo_sysrq", + .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { .procname = "icmp_ignore_bogus_error_responses", .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, .maxlen = sizeof(int), -- cgit v0.10.2 From b130c032ef30436fde60dcc1df76058630abe379 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 28 Jul 2011 12:42:23 -0500 Subject: kgdb/serial: Short term workaround On 07/27/2011 04:37 PM, Thomas Gleixner wrote: > - KGDB (not yet disabled) is reportedly unusable on -rt right now due > to missing hacks in the console locking which I dropped on purpose. > To work around this in the short term you can use this patch, in addition to the clocksource watchdog patch that Thomas brewed up. Comments are welcome of course. Ultimately the right solution is to change separation between the console and the HW to have a polled mode + work queue so as not to introduce any kind of latency. Thanks, Jason. diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 73a05af..f70ecf1 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -38,6 +38,7 @@ #include #include #include +#include #ifdef CONFIG_SPARC #include #endif @@ -2873,7 +2874,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count) touch_nmi_watchdog(); - if (port->sysrq || oops_in_progress) + if (port->sysrq || oops_in_progress || in_kdb_printk()) locked = spin_trylock_irqsave(&port->lock, flags); else spin_lock_irqsave(&port->lock, flags); diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 7f6fe6e..680ad23 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -115,7 +115,7 @@ extern int kdb_trap_printk; extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); extern __printf(1, 2) int kdb_printf(const char *, ...); typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); - +#define in_kdb_printk() (kdb_trap_printk) extern void kdb_init(int level); /* Access to kdb specific polling devices */ @@ -150,6 +150,7 @@ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } +#define in_kdb_printk() (0) static inline void kdb_init(int level) {} static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, char *help, short minlen) { return 0; } diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 14ff484..399dba6 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -554,7 +554,6 @@ int vkdb_printf(const char *fmt, va_list ap) int linecount; int colcount; int logging, saved_loglevel = 0; - int saved_trap_printk; int got_printf_lock = 0; int retlen = 0; int fnd, len; @@ -565,8 +564,6 @@ int vkdb_printf(const char *fmt, va_list ap) unsigned long uninitialized_var(flags); preempt_disable(); - saved_trap_printk = kdb_trap_printk; - kdb_trap_printk = 0; /* Serialize kdb_printf if multiple cpus try to write at once. * But if any cpu goes recursive in kdb, just print the output, @@ -833,7 +830,6 @@ kdb_print_out: } else { __release(kdb_printf_lock); } - kdb_trap_printk = saved_trap_printk; preempt_enable(); return retlen; } @@ -843,9 +839,11 @@ int kdb_printf(const char *fmt, ...) va_list ap; int r; + kdb_trap_printk++; va_start(ap, fmt); r = vkdb_printf(fmt, ap); va_end(ap); + kdb_trap_printk--; return r; } -- cgit v0.10.2 From 29b03268a7cca25fc60f2b5ec62c85f426f48257 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Sat, 30 Jul 2011 21:55:53 -0500 Subject: add /sys/kernel/realtime entry Add a /sys/kernel entry to indicate that the kernel is a realtime kernel. Clark says that he needs this for udev rules, udev needs to evaluate if its a PREEMPT_RT kernel a few thousand times and parsing uname output is too slow or so. Are there better solutions? Should it exist and return 0 on !-rt? Signed-off-by: Clark Williams Signed-off-by: Peter Zijlstra diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 9659d38..b66ab9e 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -132,6 +132,15 @@ KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_KEXEC */ +#if defined(CONFIG_PREEMPT_RT_FULL) +static ssize_t realtime_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", 1); +} +KERNEL_ATTR_RO(realtime); +#endif + /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -197,6 +206,9 @@ static struct attribute * kernel_attrs[] = { &vmcoreinfo_attr.attr, #endif &rcu_expedited_attr.attr, +#ifdef CONFIG_PREEMPT_RT_FULL + &realtime_attr.attr, +#endif NULL }; -- cgit v0.10.2 From 24a92055470f7d51209baa08bc015564438d0e8a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Jul 2011 10:43:51 +0200 Subject: mm, rt: kmap_atomic scheduling In fact, with migrate_disable() existing one could play games with kmap_atomic. You could save/restore the kmap_atomic slots on context switch (if there are any in use of course), this should be esp easy now that we have a kmap_atomic stack. Something like the below.. it wants replacing all the preempt_disable() stuff with pagefault_disable() && migrate_disable() of course, but then you can flip kmaps around like below. Signed-off-by: Peter Zijlstra [dvhart@linux.intel.com: build fix] Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins [tglx@linutronix.de: Get rid of the per cpu variable and store the idx and the pte content right away in the task struct. Shortens the context switch code. 
] diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 884f98f..6ce0537 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -219,6 +220,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) } EXPORT_SYMBOL_GPL(start_thread); +#ifdef CONFIG_PREEMPT_RT_FULL +static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) +{ + int i; + + /* + * Clear @prev's kmap_atomic mappings + */ + for (i = 0; i < prev_p->kmap_idx; i++) { + int idx = i + KM_TYPE_NR * smp_processor_id(); + pte_t *ptep = kmap_pte - idx; + + kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); + } + /* + * Restore @next_p's kmap_atomic mappings + */ + for (i = 0; i < next_p->kmap_idx; i++) { + int idx = i + KM_TYPE_NR * smp_processor_id(); + + if (!pte_none(next_p->kmap_pte[i])) + set_pte(kmap_pte - idx, next_p->kmap_pte[i]); + } +} +#else +static inline void +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } +#endif + /* * switch_to(x,y) should switch tasks from x to y. @@ -298,6 +328,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); + switch_kmaps(prev_p, next_p); + /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 4500142..7f96844 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -32,6 +32,7 @@ EXPORT_SYMBOL(kunmap); */ void *kmap_atomic_prot(struct page *page, pgprot_t prot) { + pte_t pte = mk_pte(page, prot); unsigned long vaddr; int idx, type; @@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); - set_pte(kmap_pte-idx, mk_pte(page, prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_pte(kmap_pte-idx, pte); arch_flush_lazy_mmu_mode(); return (void *)vaddr; @@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); arch_flush_lazy_mmu_mode(); diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 7b179b4..0c953e3 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free); void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) { + pte_t pte = pfn_pte(pfn, prot); unsigned long vaddr; int idx, type; @@ -64,7 +65,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_pte(kmap_pte - idx, pte); arch_flush_lazy_mmu_mode(); return (void *)vaddr; @@ -110,6 +114,9 @@ iounmap_atomic(void __iomem *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. 
*/ +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); } diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 7fb31da..cee935c 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -85,32 +85,51 @@ static inline void __kunmap_atomic(void *addr) #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) +#ifndef CONFIG_PREEMPT_RT_FULL DECLARE_PER_CPU(int, __kmap_atomic_idx); +#endif static inline int kmap_atomic_idx_push(void) { +#ifndef CONFIG_PREEMPT_RT_FULL int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; -#ifdef CONFIG_DEBUG_HIGHMEM +# ifdef CONFIG_DEBUG_HIGHMEM WARN_ON_ONCE(in_irq() && !irqs_disabled()); BUG_ON(idx > KM_TYPE_NR); -#endif +# endif return idx; +#else + current->kmap_idx++; + BUG_ON(current->kmap_idx > KM_TYPE_NR); + return current->kmap_idx - 1; +#endif } static inline int kmap_atomic_idx(void) { +#ifndef CONFIG_PREEMPT_RT_FULL return __this_cpu_read(__kmap_atomic_idx) - 1; +#else + return current->kmap_idx - 1; +#endif } static inline void kmap_atomic_idx_pop(void) { -#ifdef CONFIG_DEBUG_HIGHMEM +#ifndef CONFIG_PREEMPT_RT_FULL +# ifdef CONFIG_DEBUG_HIGHMEM int idx = __this_cpu_dec_return(__kmap_atomic_idx); BUG_ON(idx < 0); -#else +# else __this_cpu_dec(__kmap_atomic_idx); +# endif +#else + current->kmap_idx--; +# ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(current->kmap_idx < 0); +# endif #endif } diff --git a/include/linux/sched.h b/include/linux/sched.h index b6a1bfe..49afe5b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -23,6 +23,7 @@ struct sched_param { #include #include +#include #include #include #include @@ -1441,6 +1442,12 @@ struct task_struct { struct rcu_head put_rcu; int softirq_nestcnt; #endif +#ifdef CONFIG_PREEMPT_RT_FULL +# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 + int kmap_idx; + pte_t kmap_pte[KM_TYPE_NR]; +# endif +#endif }; #ifdef CONFIG_NUMA_BALANCING diff --git a/mm/highmem.c b/mm/highmem.c index b32b70c..b1c7d43 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -29,10 +29,11 @@ #include #include - +#ifndef CONFIG_PREEMPT_RT_FULL #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) DEFINE_PER_CPU(int, __kmap_atomic_idx); #endif +#endif /* * Virtual_count is not a pure "count". @@ -47,8 +48,9 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx); unsigned long totalhigh_pages __read_mostly; EXPORT_SYMBOL(totalhigh_pages); - +#ifndef CONFIG_PREEMPT_RT_FULL EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); +#endif unsigned int nr_free_highpages (void) { diff --git a/mm/memory.c b/mm/memory.c index 5771e09..0dcdc84 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3745,6 +3745,7 @@ unlock: #ifdef CONFIG_PREEMPT_RT_FULL void pagefault_disable(void) { + migrate_disable(); current->pagefault_disabled++; /* * make sure to have issued the store before a pagefault @@ -3762,6 +3763,7 @@ void pagefault_enable(void) */ barrier(); current->pagefault_disabled--; + migrate_enable(); } EXPORT_SYMBOL(pagefault_enable); #endif -- cgit v0.10.2 From 88552ea8ca29ad1f2c4a961f663f8c362df97fa7 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 17:09:55 +0100 Subject: x86/highmem: add a "already used pte" check This is a copy from kmap_atomic_prot(). 
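
For clarity, the intent of the added check in sketch form (same identifiers as the existing kmap_atomic code; only the WARN_ON is new, mirroring the hunk below):

	/*
	 * Sketch: when a new atomic kmap slot is pushed, its fixmap pte
	 * must still be empty.  A live pte here means a previous mapping
	 * was never torn down (or the RT per-task save/restore misfired),
	 * so warn instead of silently overwriting it.
	 */
	type  = kmap_atomic_idx_push();
	idx   = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	WARN_ON(!pte_none(*(kmap_pte - idx)));	/* slot expected to be unused */
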
Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 0c953e3..62377d6 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -65,6 +65,8 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + WARN_ON(!pte_none(*(kmap_pte - idx))); + #ifdef CONFIG_PREEMPT_RT_FULL current->kmap_pte[type] = pte; #endif -- cgit v0.10.2 From dad3e3263a3b3978630c1703e6dbd0c41ad07bc1 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Mar 2013 21:37:27 +0100 Subject: arm/highmem: flush tlb on unmap The tlb should be flushed on unmap and thus make the mapping entry invalid. This is only done in the non-debug case which does not look right. Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 21b9e1b..3688c8b 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -95,10 +95,10 @@ void __kunmap_atomic(void *kvaddr) __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); - set_top_pte(vaddr, __pte(0)); #else (void) idx; /* to kill a warning */ #endif + set_top_pte(vaddr, __pte(0)); kmap_atomic_idx_pop(); } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { /* this address was obtained through kmap_high_get() */ -- cgit v0.10.2 From 7e0509fe7647c827b37ee4034b80c3d8b399830c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Feb 2013 11:03:11 +0100 Subject: arm-enable-highmem-for-rt.patch Signed-off-by: Thomas Gleixner diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ef77778..1ad6fb6 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1759,7 +1759,7 @@ config HAVE_ARCH_PFN_VALID config HIGHMEM bool "High Memory Support" - depends on MMU && !PREEMPT_RT_FULL + depends on MMU help The address space of ARM processors is only 4 Gigabytes large and it has to accommodate user address space, kernel address diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index c99e259..f3e3d80 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -3,6 +3,13 @@ #include +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); +#else +static inline void +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } +#endif + /* * For v7 SMP cores running a preemptible kernel we may be pre-empted * during a TLB maintenance operation, so execute an inner-shareable dsb @@ -22,6 +29,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ + switch_kmaps(prev, next); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 3688c8b..bd41dd8 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap); void *kmap_atomic(struct page *page) { + pte_t pte = mk_pte(page, kmap_prot); unsigned int idx; unsigned long vaddr; void *kmap; @@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page) * in place, so the contained TLB flush ensures the TLB is updated * with the new mapping. 
*/ - set_top_pte(vaddr, mk_pte(page, kmap_prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_top_pte(vaddr, pte); return (void *)vaddr; } @@ -93,6 +97,9 @@ void __kunmap_atomic(void *kvaddr) if (cache_is_vivt()) __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); #else @@ -110,6 +117,7 @@ EXPORT_SYMBOL(__kunmap_atomic); void *kmap_atomic_pfn(unsigned long pfn) { + pte_t pte = pfn_pte(pfn, kmap_prot); unsigned long vaddr; int idx, type; @@ -121,7 +129,10 @@ void *kmap_atomic_pfn(unsigned long pfn) #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(get_top_pte(vaddr))); #endif - set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_top_pte(vaddr, pte); return (void *)vaddr; } @@ -135,3 +146,29 @@ struct page *kmap_atomic_to_page(const void *ptr) return pte_page(get_top_pte(vaddr)); } + +#if defined CONFIG_PREEMPT_RT_FULL +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) +{ + int i; + + /* + * Clear @prev's kmap_atomic mappings + */ + for (i = 0; i < prev_p->kmap_idx; i++) { + int idx = i + KM_TYPE_NR * smp_processor_id(); + + set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0)); + } + /* + * Restore @next_p's kmap_atomic mappings + */ + for (i = 0; i < next_p->kmap_idx; i++) { + int idx = i + KM_TYPE_NR * smp_processor_id(); + + if (!pte_none(next_p->kmap_pte[i])) + set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), + next_p->kmap_pte[i]); + } +} +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h index cee935c..821d523 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -7,6 +7,7 @@ #include #include #include +#include #include -- cgit v0.10.2 From 27659d80405cf939d803186eae2d91f286cc75d2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Sep 2011 11:57:04 +0200 Subject: ipc/sem: Rework semaphore wakeups Current sysv sems have a weird ass wakeup scheme that involves keeping preemption disabled over a potential O(n^2) loop and busy waiting on that on other CPUs. Kill this and simply wake the task directly from under the sem_lock. 
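
In sketch form, the RT path simply completes the wakeup under sem_lock instead of deferring it to a preemption-disabled flush (simplified from the hunk below):

	/* RT: sem_lock is a sleeping lock here, so waking directly is fine */
	struct task_struct *p = q->sleeper;

	get_task_struct(p);	/* keep the task alive across the wakeup */
	q->status = error;	/* publish the result before waking */
	wake_up_process(p);
	put_task_struct(p);

	/* !RT keeps the old scheme: queue q on a list and drain it later
	 * in wake_up_sem_queue_do() with preemption disabled. */
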
This was discovered by a migrate_disable() debug feature that disallows: spin_lock(); preempt_disable(); spin_unlock() preempt_enable(); Cc: Manfred Spraul Suggested-by: Thomas Gleixner Reported-by: Mike Galbraith Signed-off-by: Peter Zijlstra Cc: Manfred Spraul Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins Signed-off-by: Thomas Gleixner diff --git a/ipc/sem.c b/ipc/sem.c index db9d241..258b45e 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -666,6 +666,13 @@ undo: static void wake_up_sem_queue_prepare(struct list_head *pt, struct sem_queue *q, int error) { +#ifdef CONFIG_PREEMPT_RT_BASE + struct task_struct *p = q->sleeper; + get_task_struct(p); + q->status = error; + wake_up_process(p); + put_task_struct(p); +#else if (list_empty(pt)) { /* * Hold preempt off so that we don't get preempted and have the @@ -677,6 +684,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt, q->pid = error; list_add_tail(&q->list, pt); +#endif } /** @@ -690,6 +698,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt, */ static void wake_up_sem_queue_do(struct list_head *pt) { +#ifndef CONFIG_PREEMPT_RT_BASE struct sem_queue *q, *t; int did_something; @@ -702,6 +711,7 @@ static void wake_up_sem_queue_do(struct list_head *pt) } if (did_something) preempt_enable(); +#endif } static void unlink_queue(struct sem_array *sma, struct sem_queue *q) -- cgit v0.10.2 From 9cb4b7cab799dd807d86e701bc6d6a6185fac4b6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 6 Nov 2011 12:26:18 +0100 Subject: x86-kvm-require-const-tsc-for-rt.patch Signed-off-by: Thomas Gleixner diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 92af83d..e1b7b17 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5493,6 +5493,13 @@ int kvm_arch_init(void *opaque) goto out; } +#ifdef CONFIG_PREEMPT_RT_FULL + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { + printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); + return -EOPNOTSUPP; + } +#endif + r = kvm_mmu_module_init(); if (r) goto out_free_percpu; -- cgit v0.10.2 From f231c6359b8d31d828777e6f00a21a93a62aa318 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 12 Nov 2011 14:00:48 +0100 Subject: scsi-fcoe-rt-aware.patch Signed-off-by: Thomas Gleixner diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 07453bb..b439c20 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1285,7 +1285,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) struct sk_buff *skb; #ifdef CONFIG_SMP struct fcoe_percpu_s *p0; - unsigned targ_cpu = get_cpu(); + unsigned targ_cpu = get_cpu_light(); #endif /* CONFIG_SMP */ FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); @@ -1341,7 +1341,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu) kfree_skb(skb); spin_unlock_bh(&p->fcoe_rx_list.lock); } - put_cpu(); + put_cpu_light(); #else /* * This a non-SMP scenario where the singular Rx thread is @@ -1559,11 +1559,11 @@ err2: static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) { struct fcoe_percpu_s *fps; - int rc; + int rc, cpu = get_cpu_light(); - fps = &get_cpu_var(fcoe_percpu); + fps = &per_cpu(fcoe_percpu, cpu); rc = fcoe_get_paged_crc_eof(skb, tlen, fps); - put_cpu_var(fcoe_percpu); + put_cpu_light(); return rc; } @@ -1761,11 +1761,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, return 0; } - stats = per_cpu_ptr(lport->stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu_light()); stats->InvalidCRCCount++; if (stats->InvalidCRCCount < 5) printk(KERN_WARNING 
"fcoe: dropping frame with CRC error\n"); - put_cpu(); + put_cpu_light(); return -EINVAL; } @@ -1841,13 +1841,13 @@ static void fcoe_recv_frame(struct sk_buff *skb) goto drop; if (!fcoe_filter_frames(lport, fp)) { - put_cpu(); + put_cpu_light(); fc_exch_recv(lport, fp); return; } drop: stats->ErrorFrames++; - put_cpu(); + put_cpu_light(); kfree_skb(skb); } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 203415e..48769c3 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -792,7 +792,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) INIT_LIST_HEAD(&del_list); - stats = per_cpu_ptr(fip->lp->stats, get_cpu()); + stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; @@ -828,7 +828,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) sel_time = fcf->time; } } - put_cpu(); + put_cpu_light(); list_for_each_entry_safe(fcf, next, &del_list, list) { /* Removes fcf from current list */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 5879929..74c7eb0 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -741,10 +741,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, } memset(ep, 0, sizeof(*ep)); - cpu = get_cpu(); + cpu = get_cpu_light(); pool = per_cpu_ptr(mp->pool, cpu); spin_lock_bh(&pool->lock); - put_cpu(); + put_cpu_light(); /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { -- cgit v0.10.2 From 42e35ad7fff723de5dc5d14da7190aa092927b45 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Nov 2011 18:19:27 +0100 Subject: x86: crypto: Reduce preempt disabled regions Restrict the preempt disabled regions to the actual floating point operations and enable preemption for the administrative actions. This is necessary on RT to avoid that kfree and other operations are called with preemption disabled. 
Reported-and-tested-by: Carsten Emde Signed-off-by: Peter Zijlstra Cc: stable-rt@vger.kernel.org Signed-off-by: Thomas Gleixner diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index f80e668..3fbe870 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -252,14 +252,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK); + nbytes & AES_BLOCK_MASK); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - kernel_fpu_end(); return err; } @@ -276,14 +276,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - kernel_fpu_end(); return err; } @@ -300,14 +300,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - kernel_fpu_end(); return err; } @@ -324,14 +324,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - kernel_fpu_end(); return err; } @@ -364,18 +364,20 @@ static int ctr_crypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + kernel_fpu_begin(); aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { + kernel_fpu_begin(); ctr_crypt_final(ctx, &walk); + kernel_fpu_end(); err = blkcipher_walk_done(desc, &walk, 0); } - kernel_fpu_end(); return err; } -- cgit v0.10.2 From 4103c9cde58722d7e97a0c69acecc6d8aab44a1e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 21 Feb 2014 17:24:04 +0100 Subject: crypto: Reduce preempt disabled regions, more algos Don Estabrook reported | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() | kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2462 migrate_enable+0x17b/0x200() | kernel: WARNING: CPU: 3 PID: 865 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() and his backtrace showed some crypto functions which looked fine. 
The problem is the following sequence: glue_xts_crypt_128bit() { blkcipher_walk_virt(); /* normal migrate_disable() */ glue_fpu_begin(); /* get atomic */ while (nbytes) { __glue_xts_crypt_128bit(); blkcipher_walk_done(); /* with nbytes = 0, migrate_enable() * while we are atomic */ }; glue_fpu_end() /* no longer atomic */ } and this is why the counter get out of sync and the warning is printed. The other problem is that we are non-preemptible between glue_fpu_begin() and glue_fpu_end() and the latency grows. To fix this, I shorten the FPU off region and ensure blkcipher_walk_done() is called with preemption enabled. This might hurt the performance because we now enable/disable the FPU state more often but we gain lower latency and the bug is gone. Cc: stable-rt@vger.kernel.org Reported-by: Don Estabrook Signed-off-by: Sebastian Andrzej Siewior diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index c663181..2d48e83 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -60,7 +60,7 @@ static inline void cast5_fpu_end(bool fpu_enabled) static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, bool enc) { - bool fpu_enabled = false; + bool fpu_enabled; struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); const unsigned int bsize = CAST5_BLOCK_SIZE; unsigned int nbytes; @@ -76,7 +76,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, u8 *wsrc = walk->src.virt.addr; u8 *wdst = walk->dst.virt.addr; - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); + fpu_enabled = cast5_fpu_begin(false, nbytes); /* Process multi-block batch */ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { @@ -104,10 +104,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, } while (nbytes >= bsize); done: + cast5_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, walk, nbytes); } - - cast5_fpu_end(fpu_enabled); return err; } @@ -231,7 +230,7 @@ done: static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -240,12 +239,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; while ((nbytes = walk.nbytes)) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); + fpu_enabled = cast5_fpu_begin(false, nbytes); nbytes = __cbc_decrypt(desc, &walk); + cast5_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); } - - cast5_fpu_end(fpu_enabled); return err; } @@ -315,7 +313,7 @@ done: static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -324,13 +322,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); + fpu_enabled = cast5_fpu_begin(false, nbytes); nbytes = __ctr_crypt(desc, &walk); + cast5_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); } - cast5_fpu_end(fpu_enabled); - if (walk.nbytes) { ctr_crypt_final(desc, &walk); err = blkcipher_walk_done(desc, &walk, 0); diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c index 432f1d76..4a2bd21 100644 --- 
a/arch/x86/crypto/glue_helper.c +++ b/arch/x86/crypto/glue_helper.c @@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, void *ctx = crypto_blkcipher_ctx(desc->tfm); const unsigned int bsize = 128 / 8; unsigned int nbytes, i, func_bytes; - bool fpu_enabled = false; + bool fpu_enabled; int err; err = blkcipher_walk_virt(desc, walk); @@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, u8 *wdst = walk->dst.virt.addr; fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - desc, fpu_enabled, nbytes); + desc, false, nbytes); for (i = 0; i < gctx->num_funcs; i++) { func_bytes = bsize * gctx->funcs[i].num_blocks; @@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, } done: + glue_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, walk, nbytes); } - glue_fpu_end(fpu_enabled); return err; } @@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, struct scatterlist *src, unsigned int nbytes) { const unsigned int bsize = 128 / 8; - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, while ((nbytes = walk.nbytes)) { fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - desc, fpu_enabled, nbytes); + desc, false, nbytes); nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); + glue_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); } - glue_fpu_end(fpu_enabled); return err; } EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); @@ -278,7 +278,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, struct scatterlist *src, unsigned int nbytes) { const unsigned int bsize = 128 / 8; - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -287,13 +287,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, while ((nbytes = walk.nbytes) >= bsize) { fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - desc, fpu_enabled, nbytes); + desc, false, nbytes); nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); + glue_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); } - glue_fpu_end(fpu_enabled); - if (walk.nbytes) { glue_ctr_crypt_final_128bit( gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); @@ -348,7 +347,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, void *tweak_ctx, void *crypt_ctx) { const unsigned int bsize = 128 / 8; - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -361,21 +360,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, /* set minimum length to bsize, for tweak_fn */ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - desc, fpu_enabled, + desc, false, nbytes < bsize ? 
bsize : nbytes); - /* calculate first value of T */ tweak_fn(tweak_ctx, walk.iv, walk.iv); + glue_fpu_end(fpu_enabled); while (nbytes) { + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, + desc, false, nbytes); nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); + glue_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); nbytes = walk.nbytes; } - - glue_fpu_end(fpu_enabled); - return err; } EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); -- cgit v0.10.2 From 51c37666addbc8e49703d45c7f078a29d3832e00 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 14 Nov 2011 23:06:09 +0100 Subject: dm: Make rt aware Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has interrupts legitimately enabled here as we cant deadlock against the irq thread due to the "sleeping spinlocks" conversion. Reported-by: Luis Claudio R. Goncalves Cc: stable-rt@vger.kernel.org Signed-off-by: Thomas Gleixner diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a562d5a..c147f06 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1813,14 +1813,14 @@ static void dm_request_fn(struct request_queue *q) if (map_request(ti, clone, md)) goto requeued; - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); spin_lock(q->queue_lock); } goto out; requeued: - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); spin_lock(q->queue_lock); delay_and_out: -- cgit v0.10.2 From a8a70bda781207074ba4f7d9d1866e9d61943cba Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 13 Feb 2013 09:26:05 -0500 Subject: acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t We hit the following bug with 3.6-rt: [ 5.898990] BUG: scheduling while atomic: swapper/3/0/0x00000002 [ 5.898991] no locks held by swapper/3/0. [ 5.898993] Modules linked in: [ 5.898996] Pid: 0, comm: swapper/3 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1 [ 5.898997] Call Trace: [ 5.899011] [] __schedule_bug+0x67/0x90 [ 5.899028] [] __schedule+0x793/0x7a0 [ 5.899032] [] ? debug_rt_mutex_print_deadlock+0x50/0x200 [ 5.899034] [] schedule+0x29/0x70 [ 5.899036] BUG: scheduling while atomic: swapper/7/0/0x00000002 [ 5.899037] no locks held by swapper/7/0. [ 5.899039] [] rt_spin_lock_slowlock+0xe5/0x2f0 [ 5.899040] Modules linked in: [ 5.899041] [ 5.899045] [] ? _raw_spin_unlock_irqrestore+0x38/0x90 [ 5.899046] Pid: 0, comm: swapper/7 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1 [ 5.899047] Call Trace: [ 5.899049] [] rt_spin_lock+0x16/0x40 [ 5.899052] [] __schedule_bug+0x67/0x90 [ 5.899054] [] ? notifier_call_chain+0x80/0x80 [ 5.899056] [] __schedule+0x793/0x7a0 [ 5.899059] [] acpi_os_acquire_lock+0x1f/0x23 [ 5.899062] [] ? debug_rt_mutex_print_deadlock+0x50/0x200 [ 5.899068] [] acpi_write_bit_register+0x33/0xb0 [ 5.899071] [] schedule+0x29/0x70 [ 5.899072] [] ? acpi_read_bit_register+0x33/0x51 [ 5.899074] [] rt_spin_lock_slowlock+0xe5/0x2f0 [ 5.899077] [] acpi_idle_enter_bm+0x8a/0x28e [ 5.899079] [] ? _raw_spin_unlock_irqrestore+0x38/0x90 [ 5.899081] [] ? this_cpu_load+0x1a/0x30 [ 5.899083] [] rt_spin_lock+0x16/0x40 [ 5.899087] [] cpuidle_enter+0x19/0x20 [ 5.899088] [] ? notifier_call_chain+0x80/0x80 [ 5.899090] [] cpuidle_enter_state+0x17/0x50 [ 5.899092] [] acpi_os_acquire_lock+0x1f/0x23 [ 5.899094] [] cpuidle899101] [] ? As the acpi code disables interrupts in acpi_idle_enter_bm, and calls code that grabs the acpi lock, it causes issues as the lock is currently in RT a sleeping lock. 
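
The rule the trace demonstrates: code running with interrupts disabled (as acpi_idle_enter_bm() does) may only take a raw_spinlock_t on RT, because a plain spinlock_t is a sleeping rtmutex there. A minimal illustration (hw_lock is a made-up name):

	static DEFINE_RAW_SPINLOCK(hw_lock);	/* stays a spinning lock on RT */

	local_irq_disable();
	raw_spin_lock(&hw_lock);	/* never sleeps, safe in this context */
	/* ... touch ACPI hardware registers ... */
	raw_spin_unlock(&hw_lock);
	local_irq_enable();

	/* spin_lock() on a spinlock_t here would try to schedule() on RT
	 * and produce exactly the "scheduling while atomic" splat above */
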
The lock was converted from a raw to a sleeping lock due to some previous issues, and tests that showed it didn't seem to matter. Unfortunately, it did matter for one of our boxes. This patch converts the lock back to a raw lock. I've run this code on a few of my own machines, one being my laptop that uses the acpi quite extensively. I've been able to suspend and resume without issues. [ tglx: Made the change exclusive for acpi_gbl_hardware_lock ] Signed-off-by: Steven Rostedt Cc: John Kacur Cc: Clark Williams Link: http://lkml.kernel.org/r/1360765565.23152.5.camel@gandalf.local.home Cc: stable-rt@vger.kernel.org Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 90e846f..411f68d 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -235,7 +235,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending; * interrupt level */ ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */ -ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ +ACPI_EXTERN acpi_raw_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock; /* Mutex for _OSI support */ diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 8d2e866..221c567 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(void) ACPI_BITMASK_ALL_FIXED_STATUS, ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); /* Clear the fixed events in PM1 A/B */ status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, ACPI_BITMASK_ALL_FIXED_STATUS); - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); if (ACPI_FAILURE(status)) goto exit; diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 5ee7a81..4c4b0cc 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -365,7 +365,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value) return_ACPI_STATUS(AE_BAD_PARAMETER); } - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); /* * At this point, we know that the parent register is one of the @@ -426,7 +426,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value) unlock_and_exit: - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 08c3232..2f4ed8b 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void) return_ACPI_STATUS (status); } - status = acpi_os_create_lock (&acpi_gbl_hardware_lock); + status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } @@ -141,7 +141,7 @@ void acpi_ut_mutex_terminate(void) /* Delete the spinlocks */ acpi_os_delete_lock(acpi_gbl_gpe_lock); - acpi_os_delete_lock(acpi_gbl_hardware_lock); + acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); acpi_os_delete_lock(acpi_gbl_reference_count_lock); /* Delete the reader/writer 
lock */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 68534ef..0ef299d 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -72,6 +72,7 @@ #define acpi_cache_t struct kmem_cache #define acpi_spinlock spinlock_t * +#define acpi_raw_spinlock raw_spinlock_t * #define acpi_cpu_flags unsigned long #else /* !__KERNEL__ */ @@ -174,6 +175,19 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) lock ? AE_OK : AE_NO_MEMORY; \ }) +#define acpi_os_create_raw_lock(__handle) \ +({ \ + raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ + \ + if (lock) { \ + *(__handle) = lock; \ + raw_spin_lock_init(*(__handle)); \ + } \ + lock ? AE_OK : AE_NO_MEMORY; \ +}) + +#define acpi_os_delete_raw_lock(__handle) kfree(__handle) + #endif /* __KERNEL__ */ #endif /* __ACLINUX_H__ */ -- cgit v0.10.2 From 744e56f3ee76bcebf0ab9498d0b6a5909358250e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 14 Dec 2011 01:03:49 +0100 Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT We can't deal with the cpumask allocations which happen in atomic context (see arch/x86/kernel/apic/io_apic.c) on RT right now. Signed-off-by: Thomas Gleixner diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7a6b720..083ee96 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -820,7 +820,7 @@ config IOMMU_HELPER config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL - select CPUMASK_OFFSTACK + select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL ---help--- Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. diff --git a/lib/Kconfig b/lib/Kconfig index c742a2d..eee9e09 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -344,6 +344,7 @@ config CHECK_SIGNATURE config CPUMASK_OFFSTACK bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS + depends on !PREEMPT_RT_FULL help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. This is a bit more expensive, but avoids -- cgit v0.10.2 From 0c3c1a13909e924e0db663aa08daae807be9ec75 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 21 Aug 2012 20:38:50 +0200 Subject: random: Make it work on rt Delegate the random insertion to the forced threaded interrupt handler. Store the return IP of the hard interrupt handler in the irq descriptor and feed it into the random generator as a source of entropy. Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/drivers/char/random.c b/drivers/char/random.c index 02172d2..ddcbcad 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -739,18 +739,16 @@ EXPORT_SYMBOL_GPL(add_input_randomness); static DEFINE_PER_CPU(struct fast_pool, irq_randomness); -void add_interrupt_randomness(int irq, int irq_flags) +void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) { struct entropy_store *r; struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); - struct pt_regs *regs = get_irq_regs(); unsigned long now = jiffies; __u32 input[4], cycles = random_get_entropy(); input[0] = cycles ^ jiffies; input[1] = irq; - if (regs) { - __u64 ip = instruction_pointer(regs); + if (ip) { input[2] = ip; input[3] = ip >> 32; } @@ -764,7 +762,11 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_pool->last = now; r = nonblocking_pool.initialized ? 
&input_pool : &nonblocking_pool; +#ifndef CONFIG_PREEMPT_RT_FULL __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); +#else + mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); +#endif /* * If we don't have a valid cycle counter, and we see * back-to-back timer interrupts, then skip giving credit for diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 56fb646..da992bc 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -52,6 +52,7 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; + u64 random_ip; raw_spinlock_t lock; struct cpumask *percpu_enabled; #ifdef CONFIG_SMP diff --git a/include/linux/random.h b/include/linux/random.h index bf9085e..de4894a 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -12,7 +12,7 @@ extern void add_device_randomness(const void *, unsigned int); extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); -extern void add_interrupt_randomness(int irq, int irq_flags); +extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); extern void get_random_bytes(void *buf, int nbytes); extern void get_random_bytes_arch(void *buf, int nbytes); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 131ca17..7f50c55 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -132,6 +132,8 @@ static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action) irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) { + struct pt_regs *regs = get_irq_regs(); + u64 ip = regs ? instruction_pointer(regs) : 0; irqreturn_t retval = IRQ_NONE; unsigned int flags = 0, irq = desc->irq_data.irq; @@ -172,7 +174,11 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) action = action->next; } while (action); - add_interrupt_randomness(irq, flags); +#ifndef CONFIG_PREEMPT_RT_FULL + add_interrupt_randomness(irq, flags, ip); +#else + desc->random_ip = ip; +#endif if (!noirqdebug) note_interrupt(irq, desc, retval); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index b1b37ea..252bf10 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -947,6 +947,12 @@ static int irq_thread(void *data) if (!noirqdebug) note_interrupt(action->irq, desc, action_ret); +#ifdef CONFIG_PREEMPT_RT_FULL + migrate_disable(); + add_interrupt_randomness(action->irq, 0, + desc->random_ip ^ (unsigned long) action); + migrate_enable(); +#endif wake_threads_waitq(desc); } -- cgit v0.10.2 From 0ce2cbb8fc86b080f134f3d976cd87e08ed7ebea Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Feb 2012 12:03:30 +0100 Subject: seqlock: Prevent rt starvation If a low prio writer gets preempted while holding the seqlock write locked, a high prio reader spins forever on RT. To prevent this let the reader grab the spinlock, so it blocks and eventually boosts the writer. This way the writer can proceed and endless spinning is prevented. For seqcount writers we disable preemption over the update code path. Thaanks to Al Viro for distangling some VFS code to make that possible. 
Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 21a2093..142c0f7 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -152,18 +152,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) * Sequence counter only version assumes that callers are using their * own mutexing. */ -static inline void write_seqcount_begin(seqcount_t *s) +static inline void __write_seqcount_begin(seqcount_t *s) { s->sequence++; smp_wmb(); } -static inline void write_seqcount_end(seqcount_t *s) +static inline void write_seqcount_begin(seqcount_t *s) +{ + preempt_disable_rt(); + __write_seqcount_begin(s); +} + +static inline void __write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; } +static inline void write_seqcount_end(seqcount_t *s) +{ + __write_seqcount_end(s); + preempt_enable_rt(); +} + /** * write_seqcount_barrier - invalidate in-progress read-side seq operations * @s: pointer to seqcount_t @@ -204,10 +216,33 @@ typedef struct { /* * Read side functions for starting and finalizing a read side section. */ +#ifndef CONFIG_PREEMPT_RT_FULL static inline unsigned read_seqbegin(const seqlock_t *sl) { return read_seqcount_begin(&sl->seqcount); } +#else +/* + * Starvation safe read side for RT + */ +static inline unsigned read_seqbegin(seqlock_t *sl) +{ + unsigned ret; + +repeat: + ret = ACCESS_ONCE(sl->seqcount.sequence); + if (unlikely(ret & 1)) { + /* + * Take the lock and let the writer proceed (i.e. evtl + * boost it), otherwise we could loop here forever. + */ + spin_lock(&sl->lock); + spin_unlock(&sl->lock); + goto repeat; + } + return ret; +} +#endif static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { @@ -222,36 +257,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __write_seqcount_end(&sl->seqcount); spin_unlock(&sl->lock); } static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock_bh(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __write_seqcount_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock_irq(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __write_seqcount_end(&sl->seqcount); spin_unlock_irq(&sl->lock); } @@ -260,7 +295,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - write_seqcount_begin(&sl->seqcount); + __write_seqcount_begin(&sl->seqcount); return flags; } @@ -270,7 +305,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - write_seqcount_end(&sl->seqcount); + __write_seqcount_end(&sl->seqcount); spin_unlock_irqrestore(&sl->lock, flags); } diff --git a/include/net/dst.h b/include/net/dst.h index 3c4c944..a158a07 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -395,7 +395,7 @@ static inline void dst_confirm(struct dst_entry *dst) 
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, struct sk_buff *skb) { - const struct hh_cache *hh; + struct hh_cache *hh; if (dst->pending_confirm) { unsigned long now = jiffies; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 536501a..a9d84bf 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -333,7 +333,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) } #endif -static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) +static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) { unsigned int seq; int hh_len; @@ -388,7 +388,7 @@ struct neighbour_cb { #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) -static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, +static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, const struct net_device *dev) { unsigned int seq; -- cgit v0.10.2 From e6e541ec2b28242ffd01f8641e18474b386e4473 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sun, 1 Dec 2013 23:03:52 -0500 Subject: seqlock: consolidate spin_lock/unlock waiting with spin_unlock_wait since c2f21ce ("locking: Implement new raw_spinlock") include/linux/spinlock.h includes spin_unlock_wait() to wait for a concurren holder of a lock. this patch just moves over to that API. spin_unlock_wait covers both raw_spinlock_t and spinlock_t so it should be safe here as well. the added rt-variant of read_seqbegin in include/linux/seqlock.h that is being modified, was introduced by patch: seqlock-prevent-rt-starvation.patch behavior should be unchanged. Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 142c0f7..019a936 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -236,8 +236,7 @@ repeat: * Take the lock and let the writer proceed (i.e. evtl * boost it), otherwise we could loop here forever. */ - spin_lock(&sl->lock); - spin_unlock(&sl->lock); + spin_unlock_wait(&sl->lock); goto repeat; } return ret; -- cgit v0.10.2 From 1b001aa7d16589fd95b457c6f7b91de5ad98b0c3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Mar 2012 10:36:57 -0500 Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT Tasks can block on hotplug.lock in pin_current_cpu(), but their state might be != RUNNING. So the mutex wakeup will set the state unconditionally to RUNNING. That might cause spurious unexpected wakeups. We could provide a state preserving mutex_lock() function, but this is semantically backwards. So instead we convert the hotplug.lock() to a spinlock for RT, which has the state preserving semantics already. Signed-off-by: Steven Rostedt Cc: Carsten Emde Cc: John Kacur Cc: Peter Zijlstra Cc: Clark Williams Cc: stable-rt@vger.kernel.org Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com Signed-off-by: Thomas Gleixner diff --git a/kernel/cpu.c b/kernel/cpu.c index 7e79306..a14c07d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -51,7 +51,12 @@ static int cpu_hotplug_disabled; static struct { struct task_struct *active_writer; +#ifdef CONFIG_PREEMPT_RT_FULL + /* Makes the lock keep the task's state */ + spinlock_t lock; +#else struct mutex lock; /* Synchronizes accesses to refcount, */ +#endif /* * Also blocks the new readers during * an ongoing cpu hotplug operation. 
@@ -59,10 +64,22 @@ static struct { int refcount; } cpu_hotplug = { .active_writer = NULL, +#ifdef CONFIG_PREEMPT_RT_FULL + .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock), +#else .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), +#endif .refcount = 0, }; +#ifdef CONFIG_PREEMPT_RT_FULL +# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock) +# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock) +#else +# define hotplug_lock() mutex_lock(&cpu_hotplug.lock) +# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock) +#endif + struct hotplug_pcp { struct task_struct *unplug; int refcount; @@ -92,8 +109,8 @@ retry: return; } preempt_enable(); - mutex_lock(&cpu_hotplug.lock); - mutex_unlock(&cpu_hotplug.lock); + hotplug_lock(); + hotplug_unlock(); preempt_disable(); goto retry; } @@ -165,9 +182,9 @@ void get_online_cpus(void) might_sleep(); if (cpu_hotplug.active_writer == current) return; - mutex_lock(&cpu_hotplug.lock); + hotplug_lock(); cpu_hotplug.refcount++; - mutex_unlock(&cpu_hotplug.lock); + hotplug_unlock(); } EXPORT_SYMBOL_GPL(get_online_cpus); @@ -176,14 +193,14 @@ void put_online_cpus(void) { if (cpu_hotplug.active_writer == current) return; - mutex_lock(&cpu_hotplug.lock); + hotplug_lock(); if (WARN_ON(!cpu_hotplug.refcount)) cpu_hotplug.refcount++; /* try to fix things up */ if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) wake_up_process(cpu_hotplug.active_writer); - mutex_unlock(&cpu_hotplug.lock); + hotplug_unlock(); } EXPORT_SYMBOL_GPL(put_online_cpus); @@ -215,11 +232,11 @@ void cpu_hotplug_begin(void) cpu_hotplug.active_writer = current; for (;;) { - mutex_lock(&cpu_hotplug.lock); + hotplug_lock(); if (likely(!cpu_hotplug.refcount)) break; __set_current_state(TASK_UNINTERRUPTIBLE); - mutex_unlock(&cpu_hotplug.lock); + hotplug_unlock(); schedule(); } } @@ -227,7 +244,7 @@ void cpu_hotplug_begin(void) void cpu_hotplug_done(void) { cpu_hotplug.active_writer = NULL; - mutex_unlock(&cpu_hotplug.lock); + hotplug_unlock(); } /* -- cgit v0.10.2 From aaadc65772ba8636503396a2ee1f90b7072114db Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 16 Jul 2012 08:07:43 +0000 Subject: cpu/rt: Rework cpu down for PREEMPT_RT Bringing a CPU down is a pain with the PREEMPT_RT kernel because tasks can be preempted in many more places than in non-RT. In order to handle per_cpu variables, tasks may be pinned to a CPU for a while, and even sleep. But these tasks need to be off the CPU if that CPU is going down. Several synchronization methods have been tried, but when stressed they failed. This is a new approach. A sync_tsk thread is still created and tasks may still block on a lock when the CPU is going down, but how that works is a bit different. When cpu_down() starts, it will create the sync_tsk and wait on it to inform that current tasks that are pinned on the CPU are no longer pinned. But new tasks that are about to be pinned will still be allowed to do so at this time. Then the notifiers are called. Several notifiers will bring down tasks that will enter these locations. Some of these tasks will take locks of other tasks that are on the CPU. If we don't let those other tasks continue, but make them block until CPU down is done, the tasks that the notifiers are waiting on will never complete as they are waiting for the locks held by the tasks that are blocked. Thus we still let the task pin the CPU until the notifiers are done. After the notifiers run, we then make new tasks entering the pinned CPU sections grab a mutex and wait. 
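
For reference, a pinned CPU section as seen from the caller side looks roughly like this (my_percpu_data and do_work() are illustrative names, not code from this patch):

	/*
	 * Sketch of a pinned section on RT: keep the task on its current
	 * CPU without disabling preemption while it works on per-CPU data.
	 * When this CPU is going down, pin_current_cpu() is where the task
	 * is either migrated away (migrate_me()) or made to wait on the
	 * per-CPU hotplug mutex described above.
	 */
	pin_current_cpu();
	p = this_cpu_ptr(&my_percpu_data);	/* illustrative per-CPU object */
	do_work(p);				/* may block on RT sleeping locks */
	unpin_current_cpu();
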
This mutex is now a per CPU mutex in the hotplug_pcp descriptor. To help things along, a new function in the scheduler code is created called migrate_me(). This function will try to migrate the current task off the CPU this is going down if possible. When the sync_tsk is created, all tasks will then try to migrate off the CPU going down. There are several cases that this wont work, but it helps in most cases. After the notifiers are called and if a task can't migrate off but enters the pin CPU sections, it will be forced to wait on the hotplug_pcp mutex until the CPU down is complete. Then the scheduler will force the migration anyway. Also, I found that THREAD_BOUND need to also be accounted for in the pinned CPU, and the migrate_disable no longer treats them special. This helps fix issues with ksoftirqd and workqueue that unbind on CPU down. Signed-off-by: Steven Rostedt Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index 49afe5b..723aaaf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1808,6 +1808,10 @@ extern void do_set_cpus_allowed(struct task_struct *p, extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); +int migrate_me(void); +void tell_sched_cpu_down_begin(int cpu); +void tell_sched_cpu_down_done(int cpu); + #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) @@ -1820,6 +1824,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, return -EINVAL; return 0; } +static inline int migrate_me(void) { return 0; } +static inline void tell_sched_cpu_down_begin(int cpu) { } +static inline void tell_sched_cpu_down_done(int cpu) { } #endif #ifdef CONFIG_NO_HZ_COMMON diff --git a/kernel/cpu.c b/kernel/cpu.c index a14c07d..0a004131 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -51,12 +51,7 @@ static int cpu_hotplug_disabled; static struct { struct task_struct *active_writer; -#ifdef CONFIG_PREEMPT_RT_FULL - /* Makes the lock keep the task's state */ - spinlock_t lock; -#else struct mutex lock; /* Synchronizes accesses to refcount, */ -#endif /* * Also blocks the new readers during * an ongoing cpu hotplug operation. @@ -64,28 +59,46 @@ static struct { int refcount; } cpu_hotplug = { .active_writer = NULL, -#ifdef CONFIG_PREEMPT_RT_FULL - .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock), -#else .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), -#endif .refcount = 0, }; -#ifdef CONFIG_PREEMPT_RT_FULL -# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock) -# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock) -#else -# define hotplug_lock() mutex_lock(&cpu_hotplug.lock) -# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock) -#endif - +/** + * hotplug_pcp - per cpu hotplug descriptor + * @unplug: set when pin_current_cpu() needs to sync tasks + * @sync_tsk: the task that waits for tasks to finish pinned sections + * @refcount: counter of tasks in pinned sections + * @grab_lock: set when the tasks entering pinned sections should wait + * @synced: notifier for @sync_tsk to tell cpu_down it's finished + * @mutex: the mutex to make tasks wait (used when @grab_lock is true) + * @mutex_init: zero if the mutex hasn't been initialized yet. + * + * Although @unplug and @sync_tsk may point to the same task, the @unplug + * is used as a flag and still exists after @sync_tsk has exited and + * @sync_tsk set to NULL. 
+ */ struct hotplug_pcp { struct task_struct *unplug; + struct task_struct *sync_tsk; int refcount; + int grab_lock; struct completion synced; +#ifdef CONFIG_PREEMPT_RT_FULL + spinlock_t lock; +#else + struct mutex mutex; +#endif + int mutex_init; }; +#ifdef CONFIG_PREEMPT_RT_FULL +# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock) +# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock) +#else +# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) +# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) +#endif + static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); /** @@ -99,18 +112,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); void pin_current_cpu(void) { struct hotplug_pcp *hp; + int force = 0; retry: hp = &__get_cpu_var(hotplug_pcp); - if (!hp->unplug || hp->refcount || preempt_count() > 1 || + if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || hp->unplug == current) { hp->refcount++; return; } - preempt_enable(); - hotplug_lock(); - hotplug_unlock(); + if (hp->grab_lock) { + preempt_enable(); + hotplug_lock(hp); + hotplug_unlock(hp); + } else { + preempt_enable(); + /* + * Try to push this task off of this CPU. + */ + if (!migrate_me()) { + preempt_disable(); + hp = &__get_cpu_var(hotplug_pcp); + if (!hp->grab_lock) { + /* + * Just let it continue it's already pinned + * or about to sleep. + */ + force = 1; + goto retry; + } + preempt_enable(); + } + } preempt_disable(); goto retry; } @@ -131,26 +165,84 @@ void unpin_current_cpu(void) wake_up_process(hp->unplug); } -/* - * FIXME: Is this really correct under all circumstances ? - */ +static void wait_for_pinned_cpus(struct hotplug_pcp *hp) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + while (hp->refcount) { + schedule_preempt_disabled(); + set_current_state(TASK_UNINTERRUPTIBLE); + } +} + static int sync_unplug_thread(void *data) { struct hotplug_pcp *hp = data; preempt_disable(); hp->unplug = current; + wait_for_pinned_cpus(hp); + + /* + * This thread will synchronize the cpu_down() with threads + * that have pinned the CPU. When the pinned CPU count reaches + * zero, we inform the cpu_down code to continue to the next step. + */ set_current_state(TASK_UNINTERRUPTIBLE); - while (hp->refcount) { - schedule_preempt_disabled(); + preempt_enable(); + complete(&hp->synced); + + /* + * If all succeeds, the next step will need tasks to wait till + * the CPU is offline before continuing. To do this, the grab_lock + * is set and tasks going into pin_current_cpu() will block on the + * mutex. But we still need to wait for those that are already in + * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() + * will kick this thread out. + */ + while (!hp->grab_lock && !kthread_should_stop()) { + schedule(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + + /* Make sure grab_lock is seen before we see a stale completion */ + smp_mb(); + + /* + * Now just before cpu_down() enters stop machine, we need to make + * sure all tasks that are in pinned CPU sections are out, and new + * tasks will now grab the lock, keeping them from entering pinned + * CPU sections. 
+ */ + if (!kthread_should_stop()) { + preempt_disable(); + wait_for_pinned_cpus(hp); + preempt_enable(); + complete(&hp->synced); + } + + set_current_state(TASK_UNINTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); set_current_state(TASK_UNINTERRUPTIBLE); } set_current_state(TASK_RUNNING); - preempt_enable(); - complete(&hp->synced); + + /* + * Force this thread off this CPU as it's going down and + * we don't want any more work on this CPU. + */ + current->flags &= ~PF_NO_SETAFFINITY; + do_set_cpus_allowed(current, cpu_present_mask); + migrate_me(); return 0; } +static void __cpu_unplug_sync(struct hotplug_pcp *hp) +{ + wake_up_process(hp->sync_tsk); + wait_for_completion(&hp->synced); +} + /* * Start the sync_unplug_thread on the target cpu and wait for it to * complete. @@ -158,23 +250,83 @@ static int sync_unplug_thread(void *data) static int cpu_unplug_begin(unsigned int cpu) { struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); - struct task_struct *tsk; + int err; + + /* Protected by cpu_hotplug.lock */ + if (!hp->mutex_init) { +#ifdef CONFIG_PREEMPT_RT_FULL + spin_lock_init(&hp->lock); +#else + mutex_init(&hp->mutex); +#endif + hp->mutex_init = 1; + } + + /* Inform the scheduler to migrate tasks off this CPU */ + tell_sched_cpu_down_begin(cpu); init_completion(&hp->synced); - tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); - if (IS_ERR(tsk)) - return (PTR_ERR(tsk)); - kthread_bind(tsk, cpu); - wake_up_process(tsk); - wait_for_completion(&hp->synced); + + hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); + if (IS_ERR(hp->sync_tsk)) { + err = PTR_ERR(hp->sync_tsk); + hp->sync_tsk = NULL; + return err; + } + kthread_bind(hp->sync_tsk, cpu); + + /* + * Wait for tasks to get out of the pinned sections, + * it's still OK if new tasks enter. Some CPU notifiers will + * wait for tasks that are going to enter these sections and + * we must not have them block. + */ + __cpu_unplug_sync(hp); + return 0; } +static void cpu_unplug_sync(unsigned int cpu) +{ + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + + init_completion(&hp->synced); + /* The completion needs to be initialzied before setting grab_lock */ + smp_wmb(); + + /* Grab the mutex before setting grab_lock */ + hotplug_lock(hp); + hp->grab_lock = 1; + + /* + * The CPU notifiers have been completed. + * Wait for tasks to get out of pinned CPU sections and have new + * tasks block until the CPU is completely down. 
+ */ + __cpu_unplug_sync(hp); + + /* All done with the sync thread */ + kthread_stop(hp->sync_tsk); + hp->sync_tsk = NULL; +} + static void cpu_unplug_done(unsigned int cpu) { struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); hp->unplug = NULL; + /* Let all tasks know cpu unplug is finished before cleaning up */ + smp_wmb(); + + if (hp->sync_tsk) + kthread_stop(hp->sync_tsk); + + if (hp->grab_lock) { + hotplug_unlock(hp); + /* protected by cpu_hotplug.lock */ + hp->grab_lock = 0; + } + tell_sched_cpu_down_done(cpu); } void get_online_cpus(void) @@ -182,9 +334,9 @@ void get_online_cpus(void) might_sleep(); if (cpu_hotplug.active_writer == current) return; - hotplug_lock(); + mutex_lock(&cpu_hotplug.lock); cpu_hotplug.refcount++; - hotplug_unlock(); + mutex_unlock(&cpu_hotplug.lock); } EXPORT_SYMBOL_GPL(get_online_cpus); @@ -194,14 +346,13 @@ void put_online_cpus(void) if (cpu_hotplug.active_writer == current) return; - hotplug_lock(); + mutex_lock(&cpu_hotplug.lock); if (WARN_ON(!cpu_hotplug.refcount)) cpu_hotplug.refcount++; /* try to fix things up */ if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) wake_up_process(cpu_hotplug.active_writer); - hotplug_unlock(); - + mutex_unlock(&cpu_hotplug.lock); } EXPORT_SYMBOL_GPL(put_online_cpus); @@ -232,11 +383,11 @@ void cpu_hotplug_begin(void) cpu_hotplug.active_writer = current; for (;;) { - hotplug_lock(); + mutex_lock(&cpu_hotplug.lock); if (likely(!cpu_hotplug.refcount)) break; __set_current_state(TASK_UNINTERRUPTIBLE); - hotplug_unlock(); + mutex_unlock(&cpu_hotplug.lock); schedule(); } } @@ -244,7 +395,7 @@ void cpu_hotplug_begin(void) void cpu_hotplug_done(void) { cpu_hotplug.active_writer = NULL; - hotplug_unlock(); + mutex_unlock(&cpu_hotplug.lock); } /* @@ -442,6 +593,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) } smpboot_park_threads(cpu); + /* Notifiers are done. Don't let any more tasks pin this CPU. */ + cpu_unplug_sync(cpu); + err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); if (err) { /* CPU didn't die: tell everyone. Can't complain. */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fe06ca2..387a4c6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2371,7 +2371,7 @@ void migrate_disable(void) { struct task_struct *p = current; - if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { + if (in_atomic()) { #ifdef CONFIG_SCHED_DEBUG p->migrate_disable_atomic++; #endif @@ -2401,7 +2401,7 @@ void migrate_enable(void) unsigned long flags; struct rq *rq; - if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { + if (in_atomic()) { #ifdef CONFIG_SCHED_DEBUG p->migrate_disable_atomic--; #endif @@ -4440,6 +4440,84 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) cpumask_copy(&p->cpus_allowed, new_mask); } +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); +static DEFINE_MUTEX(sched_down_mutex); +static cpumask_t sched_down_cpumask; + +void tell_sched_cpu_down_begin(int cpu) +{ + mutex_lock(&sched_down_mutex); + cpumask_set_cpu(cpu, &sched_down_cpumask); + mutex_unlock(&sched_down_mutex); +} + +void tell_sched_cpu_down_done(int cpu) +{ + mutex_lock(&sched_down_mutex); + cpumask_clear_cpu(cpu, &sched_down_cpumask); + mutex_unlock(&sched_down_mutex); +} + +/** + * migrate_me - try to move the current task off this cpu + * + * Used by the pin_current_cpu() code to try to get tasks + * to move off the current CPU as it is going down. 
+ * It will only move the task if the task isn't pinned to + * the CPU (with migrate_disable, affinity or NO_SETAFFINITY) + * and the task has to be in a RUNNING state. Otherwise the + * movement of the task will wake it up (change its state + * to running) when the task did not expect it. + * + * Returns 1 if it succeeded in moving the current task + * 0 otherwise. + */ +int migrate_me(void) +{ + struct task_struct *p = current; + struct migration_arg arg; + struct cpumask *cpumask; + struct cpumask *mask; + unsigned long flags; + unsigned int dest_cpu; + struct rq *rq; + + /* + * We can not migrate tasks bounded to a CPU or tasks not + * running. The movement of the task will wake it up. + */ + if (p->flags & PF_NO_SETAFFINITY || p->state) + return 0; + + mutex_lock(&sched_down_mutex); + rq = task_rq_lock(p, &flags); + + cpumask = &__get_cpu_var(sched_cpumasks); + mask = &p->cpus_allowed; + + cpumask_andnot(cpumask, mask, &sched_down_cpumask); + + if (!cpumask_weight(cpumask)) { + /* It's only on this CPU? */ + task_rq_unlock(rq, p, &flags); + mutex_unlock(&sched_down_mutex); + return 0; + } + + dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); + + arg.task = p; + arg.dest_cpu = dest_cpu; + + task_rq_unlock(rq, p, &flags); + + stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); + tlb_migrate_finish(p->mm); + mutex_unlock(&sched_down_mutex); + + return 1; +} + /* * This is how migration works: * -- cgit v0.10.2 From 39f40c5e71529362336bbbcec60655bf678d049e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 5 Dec 2013 09:16:52 -0500 Subject: cpu hotplug: Document why PREEMPT_RT uses a spinlock The patch: cpu: Make hotplug.lock a "sleeping" spinlock on RT Tasks can block on hotplug.lock in pin_current_cpu(), but their state might be != RUNNING. So the mutex wakeup will set the state unconditionally to RUNNING. That might cause spurious unexpected wakeups. We could provide a state preserving mutex_lock() function, but this is semantically backwards. So instead we convert the hotplug.lock() to a spinlock for RT, which has the state preserving semantics already. Fixed a bug where the hotplug lock on PREEMPT_RT can be called after a task set its state to TASK_UNINTERRUPTIBLE and before it called schedule. If the hotplug_lock used a mutex, and there was contention, the current task's state would be turned to TASK_RUNNABLE and the schedule call will not sleep. This caused unexpected results. Although the patch had a description of the change, the code had no comments about it. This causes confusion to those that review the code, and as PREEMPT_RT is held in a quilt queue and not git, it's not as easy to see why a change was made. Even if it was in git, the code should still have a comment for something as subtle as this. Document the rational for using a spinlock on PREEMPT_RT in the hotplug lock code. Reported-by: Nicholas Mc Guire Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/cpu.c b/kernel/cpu.c index 0a004131..f43c8f3 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -84,6 +84,14 @@ struct hotplug_pcp { int grab_lock; struct completion synced; #ifdef CONFIG_PREEMPT_RT_FULL + /* + * Note, on PREEMPT_RT, the hotplug lock must save the state of + * the task, otherwise the mutex will cause the task to fail + * to sleep when required. (Because it's called from migrate_disable()) + * + * The spinlock_t on PREEMPT_RT is a mutex that saves the task's + * state. 
+ */ spinlock_t lock; #else struct mutex mutex; -- cgit v0.10.2 From 454ae47975023ee86def519596e8c29b5255c583 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 7 Jun 2013 22:37:06 +0200 Subject: kernel/cpu: fix cpu down problem if kthread's cpu is going down If kthread is pinned to CPUx and CPUx is going down then we get into trouble: - first the unplug thread is created - it will set itself to hp->unplug. As a result, every task that is going to take a lock, has to leave the CPU. - the CPU_DOWN_PREPARE notifier are started. The worker thread will start a new process for the "high priority worker". Now kthread would like to take a lock but since it can't leave the CPU it will never complete its task. We could fire the unplug thread after the notifier but then the cpu is no longer marked "online" and the unplug thread will run on CPU0 which was fixed before :) So instead the unplug thread is started and kept waiting until the notfier complete their work. Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/cpu.c b/kernel/cpu.c index f43c8f3..3dfdf5d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -83,6 +83,7 @@ struct hotplug_pcp { int refcount; int grab_lock; struct completion synced; + struct completion unplug_wait; #ifdef CONFIG_PREEMPT_RT_FULL /* * Note, on PREEMPT_RT, the hotplug lock must save the state of @@ -186,6 +187,7 @@ static int sync_unplug_thread(void *data) { struct hotplug_pcp *hp = data; + wait_for_completion(&hp->unplug_wait); preempt_disable(); hp->unplug = current; wait_for_pinned_cpus(hp); @@ -251,6 +253,14 @@ static void __cpu_unplug_sync(struct hotplug_pcp *hp) wait_for_completion(&hp->synced); } +static void __cpu_unplug_wait(unsigned int cpu) +{ + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + + complete(&hp->unplug_wait); + wait_for_completion(&hp->synced); +} + /* * Start the sync_unplug_thread on the target cpu and wait for it to * complete. @@ -274,6 +284,7 @@ static int cpu_unplug_begin(unsigned int cpu) tell_sched_cpu_down_begin(cpu); init_completion(&hp->synced); + init_completion(&hp->unplug_wait); hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); if (IS_ERR(hp->sync_tsk)) { @@ -289,8 +300,7 @@ static int cpu_unplug_begin(unsigned int cpu) * wait for tasks that are going to enter these sections and * we must not have them block. */ - __cpu_unplug_sync(hp); - + wake_up_process(hp->sync_tsk); return 0; } @@ -599,6 +609,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) __func__, cpu); goto out_release; } + + __cpu_unplug_wait(cpu); smpboot_park_threads(cpu); /* Notifiers are done. Don't let any more tasks pin this CPU. */ -- cgit v0.10.2 From 0248964081706167e31bd9da7912f3f24462751f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 14 Jun 2013 17:16:35 +0200 Subject: kernel/hotplug: restore original cpu mask oncpu/down If a task which is allowed to run only on CPU X puts CPU Y down then it will be allowed on all CPUs but the on CPU Y after it comes back from kernel. This patch ensures that we don't lose the initial setting unless the CPU the task is running is going down. 
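For reference, the save/restore pattern described above boils down to this (condensed from the _cpu_down() hunks below, error paths trimmed):

    cpumask_var_t cpumask, cpumask_org;

    if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
        return -ENOMEM;
    if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
        free_cpumask_var(cpumask);
        return -ENOMEM;
    }

    /* remember the caller's affinity before touching it */
    cpumask_copy(cpumask_org, tsk_cpus_allowed(current));

    /* move the caller off the dying CPU for the duration of the unplug */
    cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
    set_cpus_allowed_ptr(current, cpumask);
    free_cpumask_var(cpumask);

    /* ... take the CPU down ... */

    /* and restore the original mask on the way out */
    set_cpus_allowed_ptr(current, cpumask_org);
    free_cpumask_var(cpumask_org);
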
Cc: stable-rt@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/cpu.c b/kernel/cpu.c index 3dfdf5d..868bfd4 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -573,6 +573,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) .hcpu = hcpu, }; cpumask_var_t cpumask; + cpumask_var_t cpumask_org; if (num_online_cpus() == 1) return -EBUSY; @@ -583,6 +584,12 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) /* Move the downtaker off the unplug cpu */ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) return -ENOMEM; + if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { + free_cpumask_var(cpumask); + return -ENOMEM; + } + + cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); set_cpus_allowed_ptr(current, cpumask); free_cpumask_var(cpumask); @@ -591,7 +598,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) if (mycpu == cpu) { printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); migrate_enable(); - return -EBUSY; + err = -EBUSY; + goto restore_cpus; } cpu_hotplug_begin(); @@ -650,6 +658,9 @@ out_cancel: cpu_hotplug_done(); if (!err) cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); +restore_cpus: + set_cpus_allowed_ptr(current, cpumask_org); + free_cpumask_var(cpumask_org); return err; } -- cgit v0.10.2 From 6578bd13c3ad979c777eafef6e8efd4e18578e94 Mon Sep 17 00:00:00 2001 From: Tiejun Chen Date: Thu, 7 Nov 2013 10:06:07 +0800 Subject: cpu_down: move migrate_enable() back Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to use migrate_enable()/migrate_disable() to replace that combination of preempt_enable() and preempt_disable(), but actually in !CONFIG_PREEMPT_RT_FULL case, migrate_enable()/migrate_disable() are still equal to preempt_enable()/preempt_disable(). So that followed cpu_hotplug_begin()/cpu_unplug_begin(cpu) would go schedule() to trigger schedule_debug() like this: _cpu_down() | + migrate_disable() = preempt_disable() | + cpu_hotplug_begin() or cpu_unplug_begin() | + schedule() | + __schedule() | + preempt_disable(); | + __schedule_bug() is true! So we should move migrate_enable() as the original scheme. Cc: stable-rt@vger.kernel.org Signed-off-by: Tiejun Chen diff --git a/kernel/cpu.c b/kernel/cpu.c index 868bfd4..ba7416b 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -601,6 +601,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) err = -EBUSY; goto restore_cpus; } + migrate_enable(); cpu_hotplug_begin(); err = cpu_unplug_begin(cpu); @@ -654,7 +655,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) out_release: cpu_unplug_done(cpu); out_cancel: - migrate_enable(); cpu_hotplug_done(); if (!err) cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); -- cgit v0.10.2 From 8957e88c423194bab29d773b06a519a2fd98dfd5 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Fri, 27 Apr 2012 12:48:46 +0200 Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll RT triggers the following: [ 11.307652] [] __might_sleep+0xe7/0x110 [ 11.307663] [] rt_spin_lock+0x24/0x60 [ 11.307670] [] ? rt_spin_lock_slowunlock+0x78/0x90 [ 11.307703] [] qla24xx_intr_handler+0x63/0x2d0 [qla2xxx] [ 11.307736] [] qla2x00_poll+0x67/0x90 [qla2xxx] Function qla2x00_poll does local_irq_save() before calling qla24xx_intr_handler which has a spinlock. Since spinlocks are sleepable on rt, it is not allowed to call them with interrupts disabled. Therefore we use local_irq_save_nort() instead which saves flags without disabling interrupts. 
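The _nort() variants come from the -rt queue and behave roughly as follows (paraphrased for illustration, not part of this diff): on !RT they are the plain local_irq_save()/local_irq_restore(), on RT they only record the flags and leave interrupts enabled, so taking a sleeping spinlock afterwards is legal:

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_save_nort(flags)     local_save_flags(flags)
    # define local_irq_restore_nort(flags)  (void)(flags)
    #else
    # define local_irq_save_nort(flags)     local_irq_save(flags)
    # define local_irq_restore_nort(flags)  local_irq_restore(flags)
    #endif
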
This fix needs to be applied to v3.0-rt, v3.2-rt and v3.4-rt Suggested-by: Thomas Gleixner Signed-off-by: John Kacur Cc: Steven Rostedt Cc: David Sommerseth Link: http://lkml.kernel.org/r/1335523726-10024-1-git-send-email-jkacur@redhat.com Cc: stable-rt@vger.kernel.org Signed-off-by: Thomas Gleixner diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 957088b..f8213e1 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -58,12 +58,12 @@ qla2x00_poll(struct rsp_que *rsp) { unsigned long flags; struct qla_hw_data *ha = rsp->hw; - local_irq_save(flags); + local_irq_save_nort(flags); if (IS_P3P_TYPE(ha)) qla82xx_poll(0, rsp); else ha->isp_ops->intr_handler(0, rsp); - local_irq_restore(flags); + local_irq_restore_nort(flags); } static inline uint8_t * -- cgit v0.10.2 From eafbf4707fed287c3f49a21f9b4fc91c10c409d7 Mon Sep 17 00:00:00 2001 From: Priyanka Jain Date: Thu, 17 May 2012 09:35:11 +0530 Subject: net,RT:REmove preemption disabling in netif_rx() 1)enqueue_to_backlog() (called from netif_rx) should be bind to a particluar CPU. This can be achieved by disabling migration. No need to disable preemption 2)Fixes crash "BUG: scheduling while atomic: ksoftirqd" in case of RT. If preemption is disabled, enqueue_to_backog() is called in atomic context. And if backlog exceeds its count, kfree_skb() is called. But in RT, kfree_skb() might gets scheduled out, so it expects non atomic context. 3)When CONFIG_PREEMPT_RT_FULL is not defined, migrate_enable(), migrate_disable() maps to preempt_enable() and preempt_disable(), so no change in functionality in case of non-RT. -Replace preempt_enable(), preempt_disable() with migrate_enable(), migrate_disable() respectively -Replace get_cpu(), put_cpu() with get_cpu_light(), put_cpu_light() respectively Signed-off-by: Priyanka Jain Acked-by: Rajan Srivastava Cc: Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com Cc: stable-rt@vger.kernel.org Signed-off-by: Thomas Gleixner diff --git a/net/core/dev.c b/net/core/dev.c index aa5a5a1..d539056 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3241,7 +3241,7 @@ int netif_rx(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; - preempt_disable(); + migrate_disable(); rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); @@ -3251,13 +3251,13 @@ int netif_rx(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); - preempt_enable(); + migrate_enable(); } else #endif { unsigned int qtail; - ret = enqueue_to_backlog(skb, get_cpu(), &qtail); - put_cpu(); + ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); + put_cpu_light(); } return ret; } -- cgit v0.10.2 From 5149e361d98c6d2c602f5c040e244af5b6903f8d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 26 Sep 2012 16:21:08 +0200 Subject: net: Another local_irq_disable/kmalloc headache Replace it by a local lock. 
Though that's pretty inefficient :( Signed-off-by: Thomas Gleixner diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 21571dc..da24627 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include @@ -334,6 +335,7 @@ struct netdev_alloc_cache { unsigned int pagecnt_bias; }; static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); +static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) { @@ -342,7 +344,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) int order; unsigned long flags; - local_irq_save(flags); + local_lock_irqsave(netdev_alloc_lock, flags); nc = &__get_cpu_var(netdev_alloc_cache); if (unlikely(!nc->frag.page)) { refill: @@ -376,7 +378,7 @@ recycle: nc->frag.offset += fragsz; nc->pagecnt_bias--; end: - local_irq_restore(flags); + local_unlock_irqrestore(netdev_alloc_lock, flags); return data; } -- cgit v0.10.2 From fa1090b0d09f2f942da496cbbb0c65a3299803d9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 1 Oct 2012 17:12:35 +0100 Subject: net: Use get_cpu_light() in ip_send_unicast_reply() Signed-off-by: Thomas Gleixner diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 3982eab..e9fa68c 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1505,7 +1505,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, if (IS_ERR(rt)) return; - inet = &get_cpu_var(unicast_sock); + get_cpu_light(); + inet = &__get_cpu_var(unicast_sock); inet->tos = arg->tos; sk = &inet->sk; @@ -1529,7 +1530,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, ip_push_pending_frames(sk, &fl4); } - put_cpu_var(unicast_sock); + put_cpu_light(); ip_rt_put(rt); } -- cgit v0.10.2 From fac6c86ab0898c9ce6c50f23a8b207d616bb5eac Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sun, 29 Dec 2013 18:11:54 +0100 Subject: net: ip_send_unicast_reply: add missing local serialization in response to the oops in ip_output.c:ip_send_unicast_reply under high network load with CONFIG_PREEMPT_RT_FULL=y, reported by Sami Pietikainen , this patch adds local serialization in ip_send_unicast_reply. from ip_output.c: /* * Generic function to send a packet as reply to another packet. * Used to send some TCP resets/acks so far. * * Use a fake percpu inet socket to avoid false sharing and contention. */ static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { ... which was added in commit be9f4a44 in linux-stable. The git log, wich introduced the PER_CPU unicast_sock, states: commit be9f4a44e7d41cee50ddb5f038fc2391cbbb4046 Author: Eric Dumazet Date: Thu Jul 19 07:34:03 2012 +0000 ipv4: tcp: remove per net tcp_sock tcp_v4_send_reset() and tcp_v4_send_ack() use a single socket per network namespace. This leads to bad behavior on multiqueue NICS, because many cpus contend for the socket lock and once socket lock is acquired, extra false sharing on various socket fields slow down the operations. To better resist to attacks, we use a percpu socket. Each cpu can run without contention, using appropriate memory (local node) The per-cpu here thus is assuming exclusivity serializing per cpu - so the use of get_cpu_ligh introduced in net-use-cpu-light-in-ip-send-unicast-reply.patch, which droped the preempt_disable in favor of a migrate_disable is probably wrong as this only handles the referencial consistency but not the serialization. 
To evade a preempt_disable here a local lock would be needed. Therapie: * add local lock: * and re-introduce local serialization: Tested on x86 with high network load using the testcase from Sami Pietikainen while : ; do wget -O - ftp://LOCAL_SERVER/empty_file > /dev/null 2>&1; done Link: http://www.spinics.net/lists/linux-rt-users/msg11007.html Cc: stable-rt@vger.kernel.org Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index e9fa68c..8bb3b4a 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -79,6 +79,7 @@ #include #include #include +#include int sysctl_ip_default_ttl __read_mostly = IPDEFTTL; EXPORT_SYMBOL(sysctl_ip_default_ttl); @@ -1468,6 +1469,9 @@ static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = { .uc_ttl = -1, }; +/* serialize concurrent calls on the same CPU to ip_send_unicast_reply */ +static DEFINE_LOCAL_IRQ_LOCK(unicast_lock); + void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, unsigned int len) @@ -1505,8 +1509,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, if (IS_ERR(rt)) return; - get_cpu_light(); - inet = &__get_cpu_var(unicast_sock); + inet = &get_locked_var(unicast_lock, unicast_sock); inet->tos = arg->tos; sk = &inet->sk; @@ -1530,7 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, ip_push_pending_frames(sk, &fl4); } - put_cpu_light(); + put_locked_var(unicast_lock, unicast_sock); ip_rt_put(rt); } -- cgit v0.10.2 From d51ec0a796144e5e51fb32369aceb93350ac6bc8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 11:18:08 +0100 Subject: net: netfilter: Serialize xt_write_recseq sections on RT The netfilter code relies only on the implicit semantics of local_bh_disable() for serializing wt_write_recseq sections. RT breaks that and needs explicit serialization here. Reported-by: Peter LaDow Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index dd49566..7d083af 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -3,6 +3,7 @@ #include +#include #include /** @@ -284,6 +285,8 @@ extern void xt_free_table_info(struct xt_table_info *info); */ DECLARE_PER_CPU(seqcount_t, xt_recseq); +DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); + /** * xt_write_recseq_begin - start of a write section * @@ -298,6 +301,9 @@ static inline unsigned int xt_write_recseq_begin(void) { unsigned int addend; + /* RT protection */ + local_lock(xt_write_lock); + /* * Low order bit of sequence is set if we already * called xt_write_recseq_begin(). 
@@ -328,6 +334,7 @@ static inline void xt_write_recseq_end(unsigned int addend) /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ smp_wmb(); __this_cpu_add(xt_recseq.sequence, addend); + local_unlock(xt_write_lock); } /* diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 593b16e..6bd22aa 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -21,11 +21,17 @@ #include #include #include +#include #include #include #include "nf_internals.h" +#ifdef CONFIG_PREEMPT_RT_BASE +DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); +EXPORT_PER_CPU_SYMBOL(xt_write_lock); +#endif + static DEFINE_MUTEX(afinfo_mutex); const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; -- cgit v0.10.2 From bf76213a581900071fd980e8cad94c46145d6f67 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 15:12:49 +0000 Subject: net: Use local_bh_disable in netif_rx_ni() This code triggers the new WARN in __raise_softirq_irqsoff() though it actually looks at the softirq pending bit and calls into the softirq code, but that fits not well with the context related softirq model of RT. It's correct on mainline though, but going through local_bh_disable/enable here is not going to hurt badly. Signed-off-by: Thomas Gleixner diff --git a/net/core/dev.c b/net/core/dev.c index d539056..ebe863c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3267,11 +3267,9 @@ int netif_rx_ni(struct sk_buff *skb) { int err; - migrate_disable(); + local_bh_disable(); err = netif_rx(skb); - if (local_softirq_pending()) - thread_do_softirq(); - migrate_enable(); + local_bh_enable(); return err; } -- cgit v0.10.2 From f20e74a895887426d75e460b7014f8c1f3874ebe Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Mar 2013 18:06:20 +0100 Subject: net: Add a mutex around devnet_rename_seq On RT write_seqcount_begin() disables preemption and device_rename() allocates memory with GFP_KERNEL and grabs later the sysfs_mutex mutex. Serialize with a mutex and add use the non preemption disabling __write_seqcount_begin(). To avoid writer starvation, let the reader grab the mutex and release it when it detects a writer in progress. This keeps the normal case (no reader on the fly) fast. 
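The reader-side retry described above then looks roughly like this (condensed from the hunk below):

    retry:
        seq = read_seqcount_begin(&devnet_rename_seq);
        /* ... copy dev->name under RCU ... */
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
            /*
             * A rename is in flight: take and drop the writer's mutex
             * so a PI-boosted writer can finish, then try again.
             */
            mutex_lock(&devnet_rename_mutex);
            mutex_unlock(&devnet_rename_mutex);
            goto retry;
        }
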
[ tglx: Instead of replacing the seqcount by a mutex, add the mutex ] Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner diff --git a/net/core/dev.c b/net/core/dev.c index ebe863c..da03d72 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -175,6 +175,7 @@ static unsigned int napi_gen_id; static DEFINE_HASHTABLE(napi_hash, 8); static seqcount_t devnet_rename_seq; +static DEFINE_MUTEX(devnet_rename_mutex); static inline void dev_base_seq_inc(struct net *net) { @@ -826,7 +827,8 @@ retry: strcpy(name, dev->name); rcu_read_unlock(); if (read_seqcount_retry(&devnet_rename_seq, seq)) { - cond_resched(); + mutex_lock(&devnet_rename_mutex); + mutex_unlock(&devnet_rename_mutex); goto retry; } @@ -1092,30 +1094,28 @@ int dev_change_name(struct net_device *dev, const char *newname) if (dev->flags & IFF_UP) return -EBUSY; - write_seqcount_begin(&devnet_rename_seq); + mutex_lock(&devnet_rename_mutex); + __write_seqcount_begin(&devnet_rename_seq); - if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { - write_seqcount_end(&devnet_rename_seq); - return 0; - } + if (strncmp(newname, dev->name, IFNAMSIZ) == 0) + goto outunlock; memcpy(oldname, dev->name, IFNAMSIZ); err = dev_get_valid_name(net, dev, newname); - if (err < 0) { - write_seqcount_end(&devnet_rename_seq); - return err; - } + if (err < 0) + goto outunlock; rollback: ret = device_rename(&dev->dev, dev->name); if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); - write_seqcount_end(&devnet_rename_seq); - return ret; + err = ret; + goto outunlock; } - write_seqcount_end(&devnet_rename_seq); + __write_seqcount_end(&devnet_rename_seq); + mutex_unlock(&devnet_rename_mutex); write_lock_bh(&dev_base_lock); hlist_del_rcu(&dev->name_hlist); @@ -1134,7 +1134,8 @@ rollback: /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; - write_seqcount_begin(&devnet_rename_seq); + mutex_lock(&devnet_rename_mutex); + __write_seqcount_begin(&devnet_rename_seq); memcpy(dev->name, oldname, IFNAMSIZ); goto rollback; } else { @@ -1144,6 +1145,11 @@ rollback: } return err; + +outunlock: + __write_seqcount_end(&devnet_rename_seq); + mutex_unlock(&devnet_rename_mutex); + return err; } /** -- cgit v0.10.2 From 87d753b036f826da29280798315f93f0a69c06fa Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Oct 2012 09:03:24 +0100 Subject: crypto: Convert crypto notifier chain to SRCU The crypto notifier deadlocks on RT. Though this can be a real deadlock on mainline as well due to fifo fair rwsems. The involved parties here are: [ 82.172678] swapper/0 S 0000000000000001 0 1 0 0x00000000 [ 82.172682] ffff88042f18fcf0 0000000000000046 ffff88042f18fc80 ffffffff81491238 [ 82.172685] 0000000000011cc0 0000000000011cc0 ffff88042f18c040 ffff88042f18ffd8 [ 82.172688] 0000000000011cc0 0000000000011cc0 ffff88042f18ffd8 0000000000011cc0 [ 82.172689] Call Trace: [ 82.172697] [] ? _raw_spin_unlock_irqrestore+0x6c/0x7a [ 82.172701] [] schedule+0x64/0x66 [ 82.172704] [] schedule_timeout+0x27/0xd0 [ 82.172708] [] ? unpin_current_cpu+0x1a/0x6c [ 82.172713] [] ? migrate_enable+0x12f/0x141 [ 82.172716] [] wait_for_common+0xbb/0x11f [ 82.172719] [] ? try_to_wake_up+0x182/0x182 [ 82.172722] [] wait_for_completion_interruptible+0x1d/0x2e [ 82.172726] [] crypto_wait_for_test+0x49/0x6b [ 82.172728] [] crypto_register_alg+0x53/0x5a [ 82.172730] [] crypto_register_algs+0x33/0x72 [ 82.172734] [] ? 
aes_init+0x12/0x12 [ 82.172737] [] aesni_init+0x64/0x66 [ 82.172741] [] do_one_initcall+0x7f/0x13b [ 82.172744] [] kernel_init+0x199/0x22c [ 82.172747] [] ? loglevel+0x31/0x31 [ 82.172752] [] kernel_thread_helper+0x4/0x10 [ 82.172755] [] ? retint_restore_args+0x13/0x13 [ 82.172759] [] ? start_kernel+0x3ca/0x3ca [ 82.172761] [] ? gs_change+0x13/0x13 [ 82.174186] cryptomgr_test S 0000000000000001 0 41 2 0x00000000 [ 82.174189] ffff88042c971980 0000000000000046 ffffffff81d74830 0000000000000292 [ 82.174192] 0000000000011cc0 0000000000011cc0 ffff88042c96eb80 ffff88042c971fd8 [ 82.174195] 0000000000011cc0 0000000000011cc0 ffff88042c971fd8 0000000000011cc0 [ 82.174195] Call Trace: [ 82.174198] [] schedule+0x64/0x66 [ 82.174201] [] schedule_timeout+0x27/0xd0 [ 82.174204] [] ? unpin_current_cpu+0x1a/0x6c [ 82.174206] [] ? migrate_enable+0x12f/0x141 [ 82.174209] [] wait_for_common+0xbb/0x11f [ 82.174212] [] ? try_to_wake_up+0x182/0x182 [ 82.174215] [] wait_for_completion_interruptible+0x1d/0x2e [ 82.174218] [] cryptomgr_notify+0x280/0x385 [ 82.174221] [] notifier_call_chain+0x6b/0x98 [ 82.174224] [] ? rt_down_read+0x10/0x12 [ 82.174227] [] __blocking_notifier_call_chain+0x70/0x8d [ 82.174230] [] blocking_notifier_call_chain+0x14/0x16 [ 82.174234] [] crypto_probing_notify+0x24/0x50 [ 82.174236] [] crypto_alg_mod_lookup+0x3e/0x74 [ 82.174238] [] crypto_alloc_base+0x36/0x8f [ 82.174241] [] cryptd_alloc_ablkcipher+0x6e/0xb5 [ 82.174243] [] ? kzalloc.clone.5+0xe/0x10 [ 82.174246] [] ablk_init_common+0x1d/0x38 [ 82.174249] [] ablk_ecb_init+0x15/0x17 [ 82.174251] [] __crypto_alloc_tfm+0xc7/0x114 [ 82.174254] [] ? crypto_lookup_skcipher+0x1f/0xe4 [ 82.174256] [] crypto_alloc_ablkcipher+0x60/0xa5 [ 82.174258] [] alg_test_skcipher+0x24/0x9b [ 82.174261] [] ? finish_task_switch+0x3f/0xfa [ 82.174263] [] alg_test+0x16f/0x1d7 [ 82.174267] [] ? cryptomgr_probe+0xac/0xac [ 82.174269] [] cryptomgr_test+0x2c/0x47 [ 82.174272] [] kthread+0x7e/0x86 [ 82.174275] [] ? finish_task_switch+0xaf/0xfa [ 82.174278] [] kernel_thread_helper+0x4/0x10 [ 82.174281] [] ? retint_restore_args+0x13/0x13 [ 82.174284] [] ? __init_kthread_worker+0x8c/0x8c [ 82.174287] [] ? gs_change+0x13/0x13 [ 82.174329] cryptomgr_probe D 0000000000000002 0 47 2 0x00000000 [ 82.174332] ffff88042c991b70 0000000000000046 ffff88042c991bb0 0000000000000006 [ 82.174335] 0000000000011cc0 0000000000011cc0 ffff88042c98ed00 ffff88042c991fd8 [ 82.174338] 0000000000011cc0 0000000000011cc0 ffff88042c991fd8 0000000000011cc0 [ 82.174338] Call Trace: [ 82.174342] [] schedule+0x64/0x66 [ 82.174344] [] __rt_mutex_slowlock+0x85/0xbe [ 82.174347] [] rt_mutex_slowlock+0xec/0x159 [ 82.174351] [] rt_mutex_fastlock.clone.8+0x29/0x2f [ 82.174353] [] rt_mutex_lock+0x33/0x37 [ 82.174356] [] __rt_down_read+0x50/0x5a [ 82.174358] [] ? rt_down_read+0x10/0x12 [ 82.174360] [] rt_down_read+0x10/0x12 [ 82.174363] [] __blocking_notifier_call_chain+0x58/0x8d [ 82.174366] [] blocking_notifier_call_chain+0x14/0x16 [ 82.174369] [] crypto_probing_notify+0x24/0x50 [ 82.174372] [] crypto_wait_for_test+0x22/0x6b [ 82.174374] [] crypto_register_instance+0xb4/0xc0 [ 82.174377] [] cryptd_create+0x378/0x3b6 [ 82.174379] [] ? __crypto_lookup_template+0x5b/0x63 [ 82.174382] [] cryptomgr_probe+0x45/0xac [ 82.174385] [] ? crypto_alloc_pcomp+0x1b/0x1b [ 82.174388] [] kthread+0x7e/0x86 [ 82.174391] [] ? finish_task_switch+0xaf/0xfa [ 82.174394] [] kernel_thread_helper+0x4/0x10 [ 82.174398] [] ? retint_restore_args+0x13/0x13 [ 82.174401] [] ? __init_kthread_worker+0x8c/0x8c [ 82.174403] [] ? 
gs_change+0x13/0x13 cryptomgr_test spawns the cryptomgr_probe thread from the notifier call. The probe thread fires the same notifier as the test thread and deadlocks on the rwsem on RT. Now this is a potential deadlock in mainline as well, because we have fifo fair rwsems. If another thread blocks with a down_write() on the notifier chain before the probe thread issues the down_read() it will block the probe thread and the whole party is dead locked. Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner diff --git a/crypto/algapi.c b/crypto/algapi.c index 7a1ae87..5013cad 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -684,13 +684,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); int crypto_register_notifier(struct notifier_block *nb) { - return blocking_notifier_chain_register(&crypto_chain, nb); + return srcu_notifier_chain_register(&crypto_chain, nb); } EXPORT_SYMBOL_GPL(crypto_register_notifier); int crypto_unregister_notifier(struct notifier_block *nb) { - return blocking_notifier_chain_unregister(&crypto_chain, nb); + return srcu_notifier_chain_unregister(&crypto_chain, nb); } EXPORT_SYMBOL_GPL(crypto_unregister_notifier); diff --git a/crypto/api.c b/crypto/api.c index a2b39c5..9d68122 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list); DECLARE_RWSEM(crypto_alg_sem); EXPORT_SYMBOL_GPL(crypto_alg_sem); -BLOCKING_NOTIFIER_HEAD(crypto_chain); +SRCU_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); @@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v) { int ok; - ok = blocking_notifier_call_chain(&crypto_chain, val, v); + ok = srcu_notifier_call_chain(&crypto_chain, val, v); if (ok == NOTIFY_DONE) { request_module("cryptomgr"); - ok = blocking_notifier_call_chain(&crypto_chain, val, v); + ok = srcu_notifier_call_chain(&crypto_chain, val, v); } return ok; diff --git a/crypto/internal.h b/crypto/internal.h index bd39bfc..a5db167 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -48,7 +48,7 @@ struct crypto_larval { extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; -extern struct blocking_notifier_head crypto_chain; +extern struct srcu_notifier_head crypto_chain; #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); @@ -142,7 +142,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg) static inline void crypto_notify(unsigned long val, void *v) { - blocking_notifier_call_chain(&crypto_chain, val, v); + srcu_notifier_call_chain(&crypto_chain, val, v); } #endif /* _CRYPTO_INTERNAL_H */ -- cgit v0.10.2 From b5f1b39295098e621866d4d60a10e98948e9ef07 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Mon, 16 Apr 2012 15:01:56 +0800 Subject: lockdep: Selftest: Only do hardirq context test for raw spinlock On -rt there is no softirq context any more and rwlock is sleepable, disable softirq context test and rwlock+irq test. 
Signed-off-by: Yong Zhang Cc: Yong Zhang Link: http://lkml.kernel.org/r/1334559716-18447-3-git-send-email-yong.zhang0@gmail.com Signed-off-by: Thomas Gleixner diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 6dc09d8..0acf354 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -1858,6 +1858,7 @@ void locking_selftest(void) printk(" --------------------------------------------------------------------------\n"); +#ifndef CONFIG_PREEMPT_RT_FULL /* * irq-context testcases: */ @@ -1870,6 +1871,28 @@ void locking_selftest(void) DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); +#else + /* On -rt, we only do hardirq context test for raw spinlock */ + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); + + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); + + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); + + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); +#endif ww_tests(); -- cgit v0.10.2 From 538e5b916d6c0ddfd6abe070ebb2b138b1397ff5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 28 Sep 2012 10:49:42 +0100 Subject: rt: rwsem/rwlock: lockdep annotations rwlocks and rwsems on RT do not allow multiple readers. Annotate the lockdep acquire functions accordingly. 
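Condensed, the read-side pattern after this change is the following (sketch of rt_read_lock()/rt_read_unlock() from the diff below): lockdep only sees the outermost acquisition, and it is recorded with rwlock_acquire(), i.e. as an exclusive lock, because RT readers exclude each other:

    void rt_read_lock(rwlock_t *rwlock)
    {
        if (rt_mutex_owner(&rwlock->lock) != current) {
            rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
            __rt_spin_lock(&rwlock->lock);
            migrate_disable();
        }
        rwlock->read_depth++;
    }

    void rt_read_unlock(rwlock_t *rwlock)
    {
        if (--rwlock->read_depth == 0) {
            rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
            __rt_spin_unlock(&rwlock->lock);
            migrate_enable();
        }
    }
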
Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/kernel/rt.c b/kernel/rt.c index a9925c6..5d17727 100644 --- a/kernel/rt.c +++ b/kernel/rt.c @@ -213,17 +213,16 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock) */ if (rt_mutex_owner(lock) != current) { ret = rt_mutex_trylock(lock); - if (ret) + if (ret) { + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); migrate_disable(); - + } } else if (!rwlock->read_depth) { ret = 0; } - if (ret) { + if (ret) rwlock->read_depth++; - rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); - } return ret; } @@ -241,12 +240,11 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock) { struct rt_mutex *lock = &rwlock->lock; - rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); - /* * recursive read locks succeed when current owns the lock */ if (rt_mutex_owner(lock) != current) { + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); __rt_spin_lock(lock); migrate_disable(); } @@ -266,10 +264,9 @@ EXPORT_SYMBOL(rt_write_unlock); void __lockfunc rt_read_unlock(rwlock_t *rwlock) { - rwlock_release(&rwlock->dep_map, 1, _RET_IP_); - /* Release the lock only when read_depth is down to 0 */ if (--rwlock->read_depth == 0) { + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); __rt_spin_unlock(&rwlock->lock); migrate_enable(); } @@ -319,9 +316,10 @@ EXPORT_SYMBOL(rt_up_write); void rt_up_read(struct rw_semaphore *rwsem) { - rwsem_release(&rwsem->dep_map, 1, _RET_IP_); - if (--rwsem->read_depth == 0) + if (--rwsem->read_depth == 0) { + rwsem_release(&rwsem->dep_map, 1, _RET_IP_); rt_mutex_unlock(&rwsem->lock); + } } EXPORT_SYMBOL(rt_up_read); @@ -360,6 +358,13 @@ void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) } EXPORT_SYMBOL(rt_down_write_nested); +void rt_down_write_nested_lock(struct rw_semaphore *rwsem, + struct lockdep_map *nest) +{ + rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); + rt_mutex_lock(&rwsem->lock); +} + int rt_down_read_trylock(struct rw_semaphore *rwsem) { struct rt_mutex *lock = &rwsem->lock; @@ -370,15 +375,16 @@ int rt_down_read_trylock(struct rw_semaphore *rwsem) * but not when read_depth == 0 which means that the rwsem is * write locked. */ - if (rt_mutex_owner(lock) != current) + if (rt_mutex_owner(lock) != current) { ret = rt_mutex_trylock(&rwsem->lock); - else if (!rwsem->read_depth) + if (ret) + rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); + } else if (!rwsem->read_depth) { ret = 0; + } - if (ret) { + if (ret) rwsem->read_depth++; - rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); - } return ret; } EXPORT_SYMBOL(rt_down_read_trylock); @@ -387,10 +393,10 @@ static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) { struct rt_mutex *lock = &rwsem->lock; - rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); - - if (rt_mutex_owner(lock) != current) + if (rt_mutex_owner(lock) != current) { + rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); rt_mutex_lock(&rwsem->lock); + } rwsem->read_depth++; } -- cgit v0.10.2 From 5ecad1fddb27507bf1e3bf1935bb7e5b2c8d3c42 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Wed, 11 Jul 2012 22:05:21 +0000 Subject: perf: Make swevent hrtimer run in irq instead of softirq Otherwise we get a deadlock like below: [ 1044.042749] BUG: scheduling while atomic: ksoftirqd/21/141/0x00010003 [ 1044.042752] INFO: lockdep is turned off. 
[ 1044.042754] Modules linked in: [ 1044.042757] Pid: 141, comm: ksoftirqd/21 Tainted: G W 3.4.0-rc2-rt3-23676-ga723175-dirty #29 [ 1044.042759] Call Trace: [ 1044.042761] [] __schedule_bug+0x65/0x80 [ 1044.042770] [] __schedule+0x83c/0xa70 [ 1044.042775] [] ? prepare_to_wait+0x32/0xb0 [ 1044.042779] [] schedule+0x2e/0xa0 [ 1044.042782] [] hrtimer_wait_for_timer+0x6d/0xb0 [ 1044.042786] [] ? wake_up_bit+0x40/0x40 [ 1044.042790] [] hrtimer_cancel+0x20/0x40 [ 1044.042794] [] perf_swevent_cancel_hrtimer+0x3c/0x50 [ 1044.042798] [] task_clock_event_stop+0x11/0x40 [ 1044.042802] [] task_clock_event_del+0xe/0x10 [ 1044.042805] [] event_sched_out+0x118/0x1d0 [ 1044.042809] [] group_sched_out+0x29/0x90 [ 1044.042813] [] __perf_event_disable+0x18e/0x200 [ 1044.042817] [] remote_function+0x63/0x70 [ 1044.042821] [] generic_smp_call_function_single_interrupt+0xce/0x120 [ 1044.042826] [] smp_call_function_single_interrupt+0x27/0x40 [ 1044.042831] [] call_function_single_interrupt+0x6c/0x80 [ 1044.042833] [] ? perf_event_overflow+0x20/0x20 [ 1044.042840] [] ? _raw_spin_unlock_irq+0x30/0x70 [ 1044.042844] [] ? _raw_spin_unlock_irq+0x36/0x70 [ 1044.042848] [] run_hrtimer_softirq+0xc2/0x200 [ 1044.042853] [] ? perf_event_overflow+0x20/0x20 [ 1044.042857] [] __do_softirq_common+0xf5/0x3a0 [ 1044.042862] [] __thread_do_softirq+0x15d/0x200 [ 1044.042865] [] run_ksoftirqd+0xfa/0x210 [ 1044.042869] [] ? __thread_do_softirq+0x200/0x200 [ 1044.042873] [] ? __thread_do_softirq+0x200/0x200 [ 1044.042877] [] kthread+0xb6/0xc0 [ 1044.042881] [] ? _raw_spin_unlock_irq+0x3b/0x70 [ 1044.042886] [] kernel_thread_helper+0x4/0x10 [ 1044.042889] [] ? finish_task_switch+0x8c/0x110 [ 1044.042894] [] ? _raw_spin_unlock_irq+0x3b/0x70 [ 1044.042897] [] ? retint_restore_args+0xe/0xe [ 1044.042900] [] ? kthreadd+0x1e0/0x1e0 [ 1044.042902] [] ? gs_change+0xb/0xb Signed-off-by: Yong Zhang Cc: Peter Zijlstra Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1341476476-5666-1-git-send-email-yong.zhang0@gmail.com Signed-off-by: Thomas Gleixner Signed-off-by: Steven Rostedt diff --git a/kernel/events/core.c b/kernel/events/core.c index fea4f6c..420de7f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6029,6 +6029,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swevent_hrtimer; + hwc->hrtimer.irqsafe = 1; /* * Since hrtimers have a fixed rate, we can do a static freq->period -- cgit v0.10.2 From 01fa1a3cc0c571fa1b2d31ac644841457361cf5f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 13 Nov 2011 17:17:09 +0100 Subject: softirq: Check preemption after reenabling interrupts raise_softirq_irqoff() disables interrupts and wakes the softirq daemon, but after reenabling interrupts there is no preemption check, so the execution of the softirq thread might be delayed arbitrarily. In principle we could add that check to local_irq_enable/restore, but that's overkill as the rasie_softirq_irqoff() sections are the only ones which show this behaviour. 
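The per-call-site change is mechanical; the pattern added throughout the diff below is roughly:

    local_irq_save(flags);
    raise_softirq_irqoff(NET_TX_SOFTIRQ);  /* may have woken ksoftirqd */
    local_irq_restore(flags);
    preempt_check_resched_rt();  /* on RT: reschedule now if needed; no-op on !RT */
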
Reported-by: Carsten Emde Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c index 4b8d9b54..15b4f08 100644 --- a/block/blk-iopoll.c +++ b/block/blk-iopoll.c @@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll *iop) list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll)); __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(blk_iopoll_sched); @@ -135,6 +136,7 @@ static void blk_iopoll_softirq(struct softirq_action *h) __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); } /** @@ -204,6 +206,7 @@ static int blk_iopoll_cpu_notify(struct notifier_block *self, &__get_cpu_var(blk_cpu_iopoll)); __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); } return NOTIFY_OK; diff --git a/block/blk-softirq.c b/block/blk-softirq.c index ec9e606..814b3db 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -51,6 +51,7 @@ static void trigger_softirq(void *data) raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } /* @@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action, &__get_cpu_var(blk_cpu_done)); raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); } return NOTIFY_OK; @@ -150,6 +152,7 @@ do_local: goto do_local; local_irq_restore(flags); + preempt_check_resched_rt(); } /** diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 116ac32..7d08aab 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -72,8 +72,10 @@ do { \ #ifndef CONFIG_PREEMPT_RT_BASE # define preempt_enable_no_resched() sched_preempt_enable_no_resched() +# define preempt_check_resched_rt() barrier() #else # define preempt_enable_no_resched() preempt_enable() +# define preempt_check_resched_rt() preempt_check_resched() #endif #define preempt_enable() \ @@ -127,6 +129,7 @@ do { \ #define preempt_disable_notrace() barrier() #define preempt_enable_no_resched_notrace() barrier() #define preempt_enable_notrace() barrier() +#define preempt_check_resched_rt() barrier() #endif /* CONFIG_PREEMPT_COUNT */ diff --git a/net/core/dev.c b/net/core/dev.c index da03d72..ab4df3d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2138,6 +2138,7 @@ static inline void __netif_reschedule(struct Qdisc *q) sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } void __netif_schedule(struct Qdisc *q) @@ -2159,6 +2160,7 @@ void dev_kfree_skb_irq(struct sk_buff *skb) sd->completion_queue = skb; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } } EXPORT_SYMBOL(dev_kfree_skb_irq); @@ -3210,6 +3212,7 @@ enqueue: rps_unlock(sd); local_irq_restore(flags); + preempt_check_resched_rt(); atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); @@ -4112,6 +4115,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) } else #endif local_irq_enable(); + preempt_check_resched_rt(); } static int process_backlog(struct napi_struct *napi, int quota) @@ -4184,6 +4188,7 @@ void __napi_schedule(struct napi_struct *n) local_irq_save(flags); ____napi_schedule(&__get_cpu_var(softnet_data), n); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(__napi_schedule); @@ -6442,6 +6447,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, 
raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->process_queue))) { -- cgit v0.10.2 From 963332a239cb944e79bde8400b4b388fa192df45 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 4 Oct 2012 11:02:04 -0400 Subject: softirq: Init softirq local lock after per cpu section is set up I discovered this bug when booting 3.4-rt on my powerpc box. It crashed with the following report: ------------[ cut here ]------------ kernel BUG at /work/rt/stable-rt.git/kernel/rtmutex_common.h:75! Oops: Exception in kernel mode, sig: 5 [#1] PREEMPT SMP NR_CPUS=64 NUMA PA Semi PWRficient Modules linked in: NIP: c0000000004aa03c LR: c0000000004aa01c CTR: c00000000009b2ac REGS: c00000003e8d7950 TRAP: 0700 Not tainted (3.4.11-test-rt19) MSR: 9000000000029032 CR: 24000082 XER: 20000000 SOFTE: 0 TASK = c00000003e8fdcd0[11] 'ksoftirqd/1' THREAD: c00000003e8d4000 CPU: 1 GPR00: 0000000000000001 c00000003e8d7bd0 c000000000d6cbb0 0000000000000000 GPR04: c00000003e8fdcd0 0000000000000000 0000000024004082 c000000000011454 GPR08: 0000000000000000 0000000080000001 c00000003e8fdcd1 0000000000000000 GPR12: 0000000024000084 c00000000fff0280 ffffffffffffffff 000000003ffffad8 GPR16: ffffffffffffffff 000000000072c798 0000000000000060 0000000000000000 GPR20: 0000000000642741 000000000072c858 000000003ffffaf0 0000000000000417 GPR24: 000000000072dcd0 c00000003e7ff990 0000000000000000 0000000000000001 GPR28: 0000000000000000 c000000000792340 c000000000ccec78 c000000001182338 NIP [c0000000004aa03c] .wakeup_next_waiter+0x44/0xb8 LR [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8 Call Trace: [c00000003e8d7bd0] [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8 (unreliable) [c00000003e8d7c60] [c0000000004a0320] .rt_spin_lock_slowunlock+0x8c/0xe4 [c00000003e8d7ce0] [c0000000004a07cc] .rt_spin_unlock+0x54/0x64 [c00000003e8d7d60] [c0000000000636bc] .__thread_do_softirq+0x130/0x174 [c00000003e8d7df0] [c00000000006379c] .run_ksoftirqd+0x9c/0x1a4 [c00000003e8d7ea0] [c000000000080b68] .kthread+0xa8/0xb4 [c00000003e8d7f90] [c00000000001c2f8] .kernel_thread+0x54/0x70 Instruction dump: 60000000 e86d01c8 38630730 4bff7061 60000000 ebbf0008 7c7c1b78 e81d0040 7fe00278 7c000074 7800d182 68000001 <0b000000> e88d01c8 387d0010 38840738 The rtmutex_common.h:75 is: rt_mutex_top_waiter(struct rt_mutex *lock) { struct rt_mutex_waiter *w; w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter, list_entry); BUG_ON(w->lock != lock); return w; } Where the waiter->lock is corrupted. I saw various other random bugs that all had to with the softirq lock and plist. As plist needs to be initialized before it is used I investigated how this lock is initialized. It's initialized with: void __init softirq_early_init(void) { local_irq_lock_init(local_softirq_lock); } Where: #define local_irq_lock_init(lvar) \ do { \ int __cpu; \ for_each_possible_cpu(__cpu) \ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ } while (0) As the softirq lock is a local_irq_lock, which is a per_cpu lock, the initialization is done to all per_cpu versions of the lock. But lets look at where the softirq_early_init() is called from. In init/main.c: start_kernel() /* * Interrupts are still disabled. 
Do necessary setups, then * enable them */ softirq_early_init(); tick_init(); boot_cpu_init(); page_address_init(); printk(KERN_NOTICE "%s", linux_banner); setup_arch(&command_line); mm_init_owner(&init_mm, &init_task); mm_init_cpumask(&init_mm); setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ One of the first things that is called is the initialization of the softirq lock. But if you look further down, we see the per_cpu areas have not been set up yet. Thus initializing a local_irq_lock() before the per_cpu section is set up, may not work as it is initializing the per cpu locks before the per cpu exists. By moving the softirq_early_init() right after setup_per_cpu_areas(), the kernel boots fine. Signed-off-by: Steven Rostedt Cc: Clark Williams Cc: John Kacur Cc: Carsten Emde Cc: vomlehn@texas.net Link: http://lkml.kernel.org/r/1349362924.6755.18.camel@gandalf.local.home Signed-off-by: Thomas Gleixner diff --git a/init/main.c b/init/main.c index 69e4308..e8087f7 100644 --- a/init/main.c +++ b/init/main.c @@ -499,7 +499,6 @@ asmlinkage void __init start_kernel(void) * Interrupts are still disabled. Do necessary setups, then * enable them */ - softirq_early_init(); boot_cpu_init(); page_address_init(); pr_notice("%s", linux_banner); @@ -509,6 +508,7 @@ asmlinkage void __init start_kernel(void) setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); + softirq_early_init(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ build_all_zonelists(NULL, NULL); -- cgit v0.10.2 From ed7acbfd2447df4b1a7faeceb8fb4139ee6c1306 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Oct 2012 14:30:25 +0100 Subject: softirq: Make serving softirqs a task flag Avoid the percpu softirq_runner pointer magic by using a task flag. 
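[Editor's note, not part of the original commit: the following is a minimal user-space sketch of the idea in this patch. The struct, variable and helper names here are invented for illustration; the real change operates on current->flags in the kernel. The point is that "am I serving a softirq?" becomes a single bit test on the current task instead of a per-CPU runner-pointer comparison that needed preempt_disable()/preempt_enable() around it.]

    /* standalone model of the PF_IN_SOFTIRQ approach; build with: cc -o flag flag.c */
    #include <stdio.h>

    #define PF_IN_SOFTIRQ 0x00000001        /* task is serving softirq */

    struct task {
            unsigned int flags;
    };

    static struct task current_task;        /* stand-in for "current" */

    static int in_serving_softirq(void)
    {
            /* one bit test, no per-CPU pointer and no preemption games */
            return current_task.flags & PF_IN_SOFTIRQ;
    }

    static void do_softirq_common(void (*handler)(void))
    {
            current_task.flags |= PF_IN_SOFTIRQ;
            handler();
            current_task.flags &= ~PF_IN_SOFTIRQ;
    }

    static void dummy_handler(void)
    {
            printf("inside handler: in_serving_softirq() = %d\n", in_serving_softirq());
    }

    int main(void)
    {
            printf("before: %d\n", in_serving_softirq());
            do_softirq_common(dummy_handler);
            printf("after:  %d\n", in_serving_softirq());
            return 0;
    }
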
Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index 723aaaf..76610b6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1663,6 +1663,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, /* * Per process flags */ +#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ diff --git a/kernel/softirq.c b/kernel/softirq.c index 8692908..984942a 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -418,7 +418,6 @@ static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } * On RT we serialize softirq execution with a cpu local lock */ static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); -static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); static void __do_softirq_common(int need_rcu_bh_qs); @@ -473,22 +472,9 @@ void _local_bh_enable(void) } EXPORT_SYMBOL(_local_bh_enable); -/* For tracing */ -int notrace __in_softirq(void) -{ - if (__get_cpu_var(local_softirq_lock).owner == current) - return __get_cpu_var(local_softirq_lock).nestcnt; - return 0; -} - int in_serving_softirq(void) { - int res; - - preempt_disable(); - res = __get_cpu_var(local_softirq_runner) == current; - preempt_enable(); - return res; + return current->flags & PF_IN_SOFTIRQ; } EXPORT_SYMBOL(in_serving_softirq); @@ -506,7 +492,7 @@ static void __do_softirq_common(int need_rcu_bh_qs) /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); - __get_cpu_var(local_softirq_runner) = current; + current->flags |= PF_IN_SOFTIRQ; lockdep_softirq_enter(); @@ -517,7 +503,7 @@ static void __do_softirq_common(int need_rcu_bh_qs) wakeup_softirqd(); lockdep_softirq_exit(); - __get_cpu_var(local_softirq_runner) = NULL; + current->flags &= ~PF_IN_SOFTIRQ; current->softirq_nestcnt--; } -- cgit v0.10.2 From 0f6a61d357fc7af4867359bbde48f9c3cac0f193 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Oct 2012 15:33:53 +0100 Subject: softirq: Split handling function Split out the inner handling function, so RT can reuse it. 
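[Editor's note, not part of the original commit: a rough user-space model of the split performed below. Names and types are made up for the example; the kernel code also handles preempt-count leak detection, tracing and RCU quiescent states, which are omitted here. The shape of the refactor is that per-vector work moves into handle_softirq(), so an RT caller can run exactly one vector, while the outer function only walks the pending bitmask.]

    /* standalone sketch of splitting the softirq dispatch loop; cc -o split split.c */
    #include <stdio.h>

    #define NR_SOFTIRQS 10

    static void (*softirq_action[NR_SOFTIRQS])(void);

    /* inner helper: run exactly one vector, usable on its own by an RT caller */
    static void handle_softirq(unsigned int vec_nr)
    {
            if (softirq_action[vec_nr])
                    softirq_action[vec_nr]();
    }

    /* outer loop: walk the pending bitmask and dispatch each set bit */
    static void handle_pending_softirqs(unsigned int pending)
    {
            unsigned int vec_nr;

            for (vec_nr = 0; pending; vec_nr++, pending >>= 1) {
                    if (pending & 1)
                            handle_softirq(vec_nr);
            }
    }

    static void timer_action(void)  { printf("vector 1 ran\n"); }
    static void net_rx_action(void) { printf("vector 3 ran\n"); }

    int main(void)
    {
            softirq_action[1] = timer_action;
            softirq_action[3] = net_rx_action;
            handle_pending_softirqs((1U << 1) | (1U << 3));  /* bits 1 and 3 pending */
            return 0;
    }
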
Signed-off-by: Thomas Gleixner diff --git a/kernel/softirq.c b/kernel/softirq.c index 984942a..b1cc151 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -142,31 +142,34 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } -static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) +static void handle_softirq(unsigned int vec_nr, int cpu, int need_rcu_bh_qs) { - struct softirq_action *h = softirq_vec; + struct softirq_action *h = softirq_vec + vec_nr; unsigned int prev_count = preempt_count(); - local_irq_enable(); - for (; pending; h++, pending >>= 1) { - unsigned int vec_nr = h - softirq_vec; + kstat_incr_softirqs_this_cpu(vec_nr); + trace_softirq_entry(vec_nr); + h->action(h); + trace_softirq_exit(vec_nr); - if (!(pending & 1)) - continue; + if (unlikely(prev_count != preempt_count())) { + pr_err("softirq %u %s %p preempt count leak: %08x -> %08x\n", + vec_nr, softirq_to_name[vec_nr], h->action, + prev_count, (unsigned int) preempt_count()); + preempt_count() = prev_count; + } + if (need_rcu_bh_qs) + rcu_bh_qs(cpu); +} - kstat_incr_softirqs_this_cpu(vec_nr); - trace_softirq_entry(vec_nr); - h->action(h); - trace_softirq_exit(vec_nr); - if (unlikely(prev_count != preempt_count())) { - pr_err( -"huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n", - vec_nr, softirq_to_name[vec_nr], h->action, - prev_count, (unsigned int) preempt_count()); - preempt_count() = prev_count; - } - if (need_rcu_bh_qs) - rcu_bh_qs(cpu); +static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) +{ + unsigned int vec_nr; + + local_irq_enable(); + for (vec_nr = 0; pending; vec_nr++, pending >>= 1) { + if (pending & 1) + handle_softirq(vec_nr, cpu, need_rcu_bh_qs); } local_irq_disable(); } -- cgit v0.10.2 From 055aaeb0f5e8558b0b74ae6ecf383a3f6c40c007 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 4 Oct 2012 14:20:47 +0100 Subject: softirq: Split softirq locks The 3.x RT series removed the split softirq implementation in favour of pushing softirq processing into the context of the thread which raised it. Though this prevents us from handling the various softirqs at different priorities. Now instead of reintroducing the split softirq threads we split the locks which serialize the softirq processing. If a softirq is raised in context of a thread, then the softirq is noted on a per thread field, if the thread is in a bh disabled region. If the softirq is raised from hard interrupt context, then the bit is set in the flag field of ksoftirqd and ksoftirqd is invoked. When a thread leaves a bh disabled region, then it tries to execute the softirqs which have been raised in its own context. It acquires the per softirq / per cpu lock for the softirq and then checks, whether the softirq is still pending in the per cpu local_softirq_pending() field. If yes, it runs the softirq. If no, then some other task executed it already. This allows for zero config softirq elevation in the context of user space tasks or interrupt threads. 
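[Editor's note, not part of the original commit: a minimal user-space sketch of the per-vector serialization described above. The locks are modeled with pthread mutexes and all names are illustrative; the real patch uses per-CPU local_irq_locks, current->softirqs_raised and local_softirq_pending(), with interrupts disabled around the bookkeeping. The essential pattern is visible: each raised bit is processed under its own lock, and the pending mask is re-checked under that lock in case some other task already ran the vector.]

    /* standalone sketch of per-softirq locking; cc -pthread -o locks locks.c */
    #include <pthread.h>
    #include <stdio.h>

    #define NR_SOFTIRQS 10

    static pthread_mutex_t softirq_locks[NR_SOFTIRQS];
    static unsigned int softirq_pending;    /* models local_softirq_pending() */
    static unsigned int softirqs_raised;    /* models current->softirqs_raised */

    static void do_single_softirq(int which)
    {
            printf("running softirq %d\n", which);
    }

    static void do_current_softirqs(void)
    {
            while (softirqs_raised) {
                    int i = __builtin_ctz(softirqs_raised); /* lowest raised vector */
                    unsigned int mask = 1U << i;

                    softirqs_raised &= ~mask;
                    pthread_mutex_lock(&softirq_locks[i]);
                    /* re-check: someone else may have handled this vector already */
                    if (softirq_pending & mask) {
                            softirq_pending &= ~mask;
                            do_single_softirq(i);
                    }
                    pthread_mutex_unlock(&softirq_locks[i]);
            }
    }

    int main(void)
    {
            int i;

            for (i = 0; i < NR_SOFTIRQS; i++)
                    pthread_mutex_init(&softirq_locks[i], NULL);
            softirq_pending = softirqs_raised = (1U << 2) | (1U << 6);
            do_current_softirqs();
            return 0;
    }
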
Signed-off-by: Thomas Gleixner diff --git a/include/linux/sched.h b/include/linux/sched.h index 76610b6..0035c94 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1441,6 +1441,7 @@ struct task_struct { #ifdef CONFIG_PREEMPT_RT_BASE struct rcu_head put_rcu; int softirq_nestcnt; + unsigned int softirqs_raised; #endif #ifdef CONFIG_PREEMPT_RT_FULL # if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 diff --git a/kernel/softirq.c b/kernel/softirq.c index b1cc151..8d07af5 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -162,6 +162,12 @@ static void handle_softirq(unsigned int vec_nr, int cpu, int need_rcu_bh_qs) rcu_bh_qs(cpu); } +#ifndef CONFIG_PREEMPT_RT_FULL +static inline int ksoftirqd_softirq_pending(void) +{ + return local_softirq_pending(); +} + static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) { unsigned int vec_nr; @@ -174,7 +180,19 @@ static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) local_irq_disable(); } -#ifndef CONFIG_PREEMPT_RT_FULL +static void run_ksoftirqd(unsigned int cpu) +{ + local_irq_disable(); + if (ksoftirqd_softirq_pending()) { + __do_softirq(); + rcu_note_context_switch(cpu); + local_irq_enable(); + cond_resched(); + return; + } + local_irq_enable(); +} + /* * preempt_count and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving @@ -410,6 +428,32 @@ asmlinkage void do_softirq(void) #endif +/* + * This function must run with irqs disabled! + */ +void raise_softirq_irqoff(unsigned int nr) +{ + __raise_softirq_irqoff(nr); + + /* + * If we're in an interrupt or softirq, we're done + * (this also catches softirq-disabled code). We will + * actually run the softirq once we return from + * the irq or softirq. + * + * Otherwise we wake up ksoftirqd to make sure we + * schedule the softirq soon. 
+ */ + if (!in_interrupt()) + wakeup_softirqd(); +} + +void __raise_softirq_irqoff(unsigned int nr) +{ + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); +} + static inline void local_bh_disable_nort(void) { local_bh_disable(); } static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } static void ksoftirqd_set_sched_params(unsigned int cpu) { } @@ -418,20 +462,78 @@ static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } #else /* !PREEMPT_RT_FULL */ /* - * On RT we serialize softirq execution with a cpu local lock + * On RT we serialize softirq execution with a cpu local lock per softirq */ -static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); +static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); + +void __init softirq_early_init(void) +{ + int i; -static void __do_softirq_common(int need_rcu_bh_qs); + for (i = 0; i < NR_SOFTIRQS; i++) + local_irq_lock_init(local_softirq_locks[i]); +} -void __do_softirq(void) +static void lock_softirq(int which) { - __do_softirq_common(0); + __local_lock(&__get_cpu_var(local_softirq_locks[which])); } -void __init softirq_early_init(void) +static void unlock_softirq(int which) +{ + __local_unlock(&__get_cpu_var(local_softirq_locks[which])); +} + +static void do_single_softirq(int which, int need_rcu_bh_qs) { - local_irq_lock_init(local_softirq_lock); + unsigned long old_flags = current->flags; + + current->flags &= ~PF_MEMALLOC; + vtime_account_irq_enter(current); + current->flags |= PF_IN_SOFTIRQ; + lockdep_softirq_enter(); + local_irq_enable(); + handle_softirq(which, smp_processor_id(), need_rcu_bh_qs); + local_irq_disable(); + lockdep_softirq_exit(); + current->flags &= ~PF_IN_SOFTIRQ; + vtime_account_irq_enter(current); + tsk_restore_flags(current, old_flags, PF_MEMALLOC); +} + +/* + * Called with interrupts disabled. Process softirqs which were raised + * in current context (or on behalf of ksoftirqd). + */ +static void do_current_softirqs(int need_rcu_bh_qs) +{ + while (current->softirqs_raised) { + int i = __ffs(current->softirqs_raised); + unsigned int pending, mask = (1U << i); + + current->softirqs_raised &= ~mask; + local_irq_enable(); + + /* + * If the lock is contended, we boost the owner to + * process the softirq or leave the critical section + * now. + */ + lock_softirq(i); + local_irq_disable(); + /* + * Check with the local_softirq_pending() bits, + * whether we need to process this still or if someone + * else took care of it. + */ + pending = local_softirq_pending(); + if (pending & mask) { + set_softirq_pending(pending & ~mask); + do_single_softirq(i, need_rcu_bh_qs); + } + unlock_softirq(i); + WARN_ON(current->softirq_nestcnt != 1); + } } void local_bh_disable(void) @@ -446,17 +548,11 @@ void local_bh_enable(void) if (WARN_ON(current->softirq_nestcnt == 0)) return; - if ((current->softirq_nestcnt == 1) && - local_softirq_pending() && - local_trylock(local_softirq_lock)) { + local_irq_disable(); + if (current->softirq_nestcnt == 1 && current->softirqs_raised) + do_current_softirqs(1); + local_irq_enable(); - local_irq_disable(); - if (local_softirq_pending()) - __do_softirq(); - local_irq_enable(); - local_unlock(local_softirq_lock); - WARN_ON(current->softirq_nestcnt != 1); - } current->softirq_nestcnt--; migrate_enable(); } @@ -481,86 +577,82 @@ int in_serving_softirq(void) } EXPORT_SYMBOL(in_serving_softirq); -/* - * Called with bh and local interrupts disabled. For full RT cpu must - * be pinned. 
- */ -static void __do_softirq_common(int need_rcu_bh_qs) +/* Called with preemption disabled */ +static void run_ksoftirqd(unsigned int cpu) { - u32 pending = local_softirq_pending(); - int cpu = smp_processor_id(); - + local_irq_disable(); current->softirq_nestcnt++; - - /* Reset the pending bitmask before enabling irqs */ - set_softirq_pending(0); - - current->flags |= PF_IN_SOFTIRQ; - - lockdep_softirq_enter(); - - handle_pending_softirqs(pending, cpu, need_rcu_bh_qs); - - pending = local_softirq_pending(); - if (pending) - wakeup_softirqd(); - - lockdep_softirq_exit(); - current->flags &= ~PF_IN_SOFTIRQ; - + do_current_softirqs(1); current->softirq_nestcnt--; + rcu_note_context_switch(cpu); + local_irq_enable(); } -static int __thread_do_softirq(int cpu) +/* + * Called from netif_rx_ni(). Preemption enabled, but migration + * disabled. So the cpu can't go away under us. + */ +void thread_do_softirq(void) { + if (!in_serving_softirq() && current->softirqs_raised) { + current->softirq_nestcnt++; + do_current_softirqs(0); + current->softirq_nestcnt--; + } +} + +void __raise_softirq_irqoff(unsigned int nr) +{ + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); + /* - * Prevent the current cpu from going offline. - * pin_current_cpu() can reenable preemption and block on the - * hotplug mutex. When it returns, the current cpu is - * pinned. It might be the wrong one, but the offline check - * below catches that. + * If we are not in a hard interrupt and inside a bh disabled + * region, we simply raise the flag on current. local_bh_enable() + * will make sure that the softirq is executed. Otherwise we + * delegate it to ksoftirqd. */ - pin_current_cpu(); + if (!in_irq() && current->softirq_nestcnt) + current->softirqs_raised |= (1U << nr); + else if (__this_cpu_read(ksoftirqd)) + __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); +} + +/* + * This function must run with irqs disabled! + */ +void raise_softirq_irqoff(unsigned int nr) +{ + __raise_softirq_irqoff(nr); + /* - * If called from ksoftirqd (cpu >= 0) we need to check - * whether we are on the wrong cpu due to cpu offlining. If - * called via thread_do_softirq() no action required. + * If we're in an hard interrupt we let irq return code deal + * with the wakeup of ksoftirqd. */ - if (cpu >= 0 && cpu_is_offline(cpu)) { - unpin_current_cpu(); - return -1; - } - preempt_enable(); - local_lock(local_softirq_lock); - local_irq_disable(); + if (in_irq()) + return; + /* - * We cannot switch stacks on RT as we want to be able to - * schedule! + * If we are in thread context but outside of a bh disabled + * region, we need to wake ksoftirqd as well. + * + * CHECKME: Some of the places which do that could be wrapped + * into local_bh_disable/enable pairs. Though it's unclear + * whether this is worth the effort. To find those places just + * raise a WARN() if the condition is met. */ - if (local_softirq_pending()) - __do_softirq_common(cpu >= 0); - local_unlock(local_softirq_lock); - unpin_current_cpu(); - preempt_disable(); - local_irq_enable(); - return 0; + if (!current->softirq_nestcnt) + wakeup_softirqd(); } -/* - * Called from netif_rx_ni(). Preemption enabled. 
- */ -void thread_do_softirq(void) +void do_raise_softirq_irqoff(unsigned int nr) { - if (!in_serving_softirq()) { - preempt_disable(); - __thread_do_softirq(-1); - preempt_enable(); - } + raise_softirq_irqoff(nr); } -static int ksoftirqd_do_softirq(int cpu) +static inline int ksoftirqd_softirq_pending(void) { - return __thread_do_softirq(cpu); + return current->softirqs_raised; } static inline void local_bh_disable_nort(void) { } @@ -571,6 +663,10 @@ static inline void ksoftirqd_set_sched_params(unsigned int cpu) struct sched_param param = { .sched_priority = 1 }; sched_setscheduler(current, SCHED_FIFO, ¶m); + /* Take over all pending softirqs when starting */ + local_irq_disable(); + current->softirqs_raised = local_softirq_pending(); + local_irq_enable(); } static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) @@ -619,8 +715,14 @@ static inline void invoke_softirq(void) } else { wakeup_softirqd(); } -#else - wakeup_softirqd(); +#else /* PREEMPT_RT_FULL */ + unsigned long flags; + + local_irq_save(flags); + if (__this_cpu_read(ksoftirqd) && + __this_cpu_read(ksoftirqd)->softirqs_raised) + wakeup_softirqd(); + local_irq_restore(flags); #endif } @@ -658,26 +760,6 @@ void irq_exit(void) trace_hardirq_exit(); /* must be last! */ } -/* - * This function must run with irqs disabled! - */ -inline void raise_softirq_irqoff(unsigned int nr) -{ - __raise_softirq_irqoff(nr); - - /* - * If we're in an interrupt or softirq, we're done - * (this also catches softirq-disabled code). We will - * actually run the softirq once we return from - * the irq or softirq. - * - * Otherwise we wake up ksoftirqd to make sure we - * schedule the softirq soon. - */ - if (!in_interrupt()) - wakeup_softirqd(); -} - void raise_softirq(unsigned int nr) { unsigned long flags; @@ -687,12 +769,6 @@ void raise_softirq(unsigned int nr) local_irq_restore(flags); } -void __raise_softirq_irqoff(unsigned int nr) -{ - trace_softirq_raise(nr); - or_softirq_pending(1UL << nr); -} - void open_softirq(int nr, void (*action)(struct softirq_action *)) { softirq_vec[nr].action = action; @@ -1136,20 +1212,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait); static int ksoftirqd_should_run(unsigned int cpu) { - return local_softirq_pending(); -} - -static void run_ksoftirqd(unsigned int cpu) -{ - local_irq_disable(); - if (local_softirq_pending()) { - __do_softirq(); - rcu_note_context_switch(cpu); - local_irq_enable(); - cond_resched(); - return; - } - local_irq_enable(); + return ksoftirqd_softirq_pending(); } #ifdef CONFIG_HOTPLUG_CPU -- cgit v0.10.2 From 6450b2ba1ab2fa5258174563567d64ba228d2358 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Fri, 17 Jan 2014 20:44:03 +0100 Subject: API cleanup - use local_lock not __local_lock for soft trivial API cleanup - kernel/softirq.c was mimiking local_lock. 
No change of functional behavior Signed-off-by: Nicholas Mc Guire Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/softirq.c b/kernel/softirq.c index 8d07af5..efe1e89 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -476,12 +476,12 @@ void __init softirq_early_init(void) static void lock_softirq(int which) { - __local_lock(&__get_cpu_var(local_softirq_locks[which])); + local_lock(local_softirq_locks[which]); } static void unlock_softirq(int which) { - __local_unlock(&__get_cpu_var(local_softirq_locks[which])); + local_unlock(local_softirq_locks[which]); } static void do_single_softirq(int which, int need_rcu_bh_qs) -- cgit v0.10.2 From 43e0779296459ce13f5eea058833353044a7b151 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 13:46:16 +0000 Subject: softirq: Adapt NOHZ softirq pending check to new RT scheme We can't rely on ksoftirqd anymore and we need to check the tasks which run a particular softirq and if such a task is pi blocked ignore the other pending bits of that task as well. Signed-off-by: Thomas Gleixner diff --git a/kernel/softirq.c b/kernel/softirq.c index efe1e89..4d69177 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -66,46 +66,71 @@ char *softirq_to_name[NR_SOFTIRQS] = { #ifdef CONFIG_NO_HZ_COMMON # ifdef CONFIG_PREEMPT_RT_FULL + +struct softirq_runner { + struct task_struct *runner[NR_SOFTIRQS]; +}; + +static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); + +static inline void softirq_set_runner(unsigned int sirq) +{ + struct softirq_runner *sr = &__get_cpu_var(softirq_runners); + + sr->runner[sirq] = current; +} + +static inline void softirq_clr_runner(unsigned int sirq) +{ + struct softirq_runner *sr = &__get_cpu_var(softirq_runners); + + sr->runner[sirq] = NULL; +} + /* - * On preempt-rt a softirq might be blocked on a lock. There might be - * no other runnable task on this CPU because the lock owner runs on - * some other CPU. So we have to go into idle with the pending bit - * set. Therefor we need to check this otherwise we warn about false - * positives which confuses users and defeats the whole purpose of - * this test. + * On preempt-rt a softirq running context might be blocked on a + * lock. There might be no other runnable task on this CPU because the + * lock owner runs on some other CPU. So we have to go into idle with + * the pending bit set. Therefor we need to check this otherwise we + * warn about false positives which confuses users and defeats the + * whole purpose of this test. * * This code is called with interrupts disabled. */ void softirq_check_pending_idle(void) { static int rate_limit; - u32 warnpending = 0, pending; + struct softirq_runner *sr = &__get_cpu_var(softirq_runners); + u32 warnpending; + int i; if (rate_limit >= 10) return; - pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; - if (pending) { - struct task_struct *tsk; + warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; + for (i = 0; i < NR_SOFTIRQS; i++) { + struct task_struct *tsk = sr->runner[i]; - tsk = __get_cpu_var(ksoftirqd); /* * The wakeup code in rtmutex.c wakes up the task * _before_ it sets pi_blocked_on to NULL under * tsk->pi_lock. So we need to check for both: state * and pi_blocked_on. 
*/ - raw_spin_lock(&tsk->pi_lock); - - if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING)) - warnpending = 1; - - raw_spin_unlock(&tsk->pi_lock); + if (tsk) { + raw_spin_lock(&tsk->pi_lock); + if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { + /* Clear all bits pending in that task */ + warnpending &= ~(tsk->softirqs_raised); + warnpending &= ~(1 << i); + } + raw_spin_unlock(&tsk->pi_lock); + } } if (warnpending) { printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", - pending); + warnpending); rate_limit++; } } @@ -125,6 +150,10 @@ void softirq_check_pending_idle(void) } } # endif + +#else /* !CONFIG_NO_HZ_COMMON */ +static inline void softirq_set_runner(unsigned int sirq) { } +static inline void softirq_clr_runner(unsigned int sirq) { } #endif /* @@ -521,6 +550,7 @@ static void do_current_softirqs(int need_rcu_bh_qs) */ lock_softirq(i); local_irq_disable(); + softirq_set_runner(i); /* * Check with the local_softirq_pending() bits, * whether we need to process this still or if someone @@ -531,6 +561,7 @@ static void do_current_softirqs(int need_rcu_bh_qs) set_softirq_pending(pending & ~mask); do_single_softirq(i, need_rcu_bh_qs); } + softirq_clr_runner(i); unlock_softirq(i); WARN_ON(current->softirq_nestcnt != 1); } @@ -601,7 +632,7 @@ void thread_do_softirq(void) } } -void __raise_softirq_irqoff(unsigned int nr) +static void do_raise_softirq_irqoff(unsigned int nr) { trace_softirq_raise(nr); or_softirq_pending(1UL << nr); @@ -618,12 +649,19 @@ void __raise_softirq_irqoff(unsigned int nr) __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); } +void __raise_softirq_irqoff(unsigned int nr) +{ + do_raise_softirq_irqoff(nr); + if (!in_irq() && !current->softirq_nestcnt) + wakeup_softirqd(); +} + /* * This function must run with irqs disabled! */ void raise_softirq_irqoff(unsigned int nr) { - __raise_softirq_irqoff(nr); + do_raise_softirq_irqoff(nr); /* * If we're in an hard interrupt we let irq return code deal @@ -645,11 +683,6 @@ void raise_softirq_irqoff(unsigned int nr) wakeup_softirqd(); } -void do_raise_softirq_irqoff(unsigned int nr) -{ - raise_softirq_irqoff(nr); -} - static inline int ksoftirqd_softirq_pending(void) { return current->softirqs_raised; -- cgit v0.10.2 From 15041de08dea64a4d2950c14b34acac4db7c91d4 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Fri, 6 Dec 2013 00:42:22 +0100 Subject: softirq: make migrate disable/enable conditioned on softirq_nestcnt transition This patch removes the recursive calls to migrate_disable/enable in local_bh_disable/enable the softirq-local-lock.patch introduces local_bh_disable/enable wich decrements/increments the current->softirq_nestcnt and disable/enables migration as well. as softirq_nestcnt (include/linux/sched.h conditioned on CONFIG_PREEMPT_RT_BASE) already is tracking the nesting level of the recursive calls to local_bh_disable/enable (all in kernel/softirq.c) - no need to do it twice. migrate_disable/enable thus can be conditionsed on softirq_nestcnt making a transition from 0-1 to disable migration and 1-0 to re-enable it. 
No change of functional behavior, this does noticably reduce the observed nesting level of migrate_disable/enable Signed-off-by: Nicholas Mc Guire Reviewed-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/softirq.c b/kernel/softirq.c index 4d69177..15ad603 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -569,8 +569,8 @@ static void do_current_softirqs(int need_rcu_bh_qs) void local_bh_disable(void) { - migrate_disable(); - current->softirq_nestcnt++; + if (++current->softirq_nestcnt == 1) + migrate_disable(); } EXPORT_SYMBOL(local_bh_disable); @@ -584,8 +584,8 @@ void local_bh_enable(void) do_current_softirqs(1); local_irq_enable(); - current->softirq_nestcnt--; - migrate_enable(); + if (--current->softirq_nestcnt == 0) + migrate_enable(); } EXPORT_SYMBOL(local_bh_enable); @@ -597,8 +597,10 @@ EXPORT_SYMBOL(local_bh_enable_ip); void _local_bh_enable(void) { - current->softirq_nestcnt--; - migrate_enable(); + if (WARN_ON(current->softirq_nestcnt == 0)) + return; + if (--current->softirq_nestcnt == 0) + migrate_enable(); } EXPORT_SYMBOL(_local_bh_enable); -- cgit v0.10.2 From 77d4d0606477a4f434f9b1efdac77a2af9d0e880 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 28 Oct 2012 13:26:09 +0000 Subject: rcu: Disable RCU_FAST_NO_HZ on RT This uses a timer_list timer from the irq disabled guts of the idle code. Disable it for now to prevent wreckage. Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org diff --git a/init/Kconfig b/init/Kconfig index a1798e8..4dea595 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -615,7 +615,7 @@ config RCU_FANOUT_EXACT config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" - depends on NO_HZ_COMMON && SMP + depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL default n help This option permits CPUs to enter dynticks-idle state even if -- cgit v0.10.2 From f52ddab2074470fdba8f6d074e829127dbb86b96 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 4 Nov 2013 13:21:10 -0800 Subject: rcu: Eliminate softirq processing from rcutree Running RCU out of softirq is a problem for some workloads that would like to manage RCU core processing independently of other softirq work, for example, setting kthread priority. This commit therefore moves the RCU core work from softirq to a per-CPU/per-flavor SCHED_OTHER kthread named rcuc. The SCHED_OTHER approach avoids the scalability problems that appeared with the earlier attempt to move RCU core processing to from softirq to kthreads. That said, kernels built with RCU_BOOST=y will run the rcuc kthreads at the RCU-boosting priority. Reported-by: Thomas Gleixner Tested-by: Mike Galbraith Signed-off-by: Paul E. McKenney Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rcutree.c b/kernel/rcutree.c index cae59e3..8b08d37 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -55,6 +55,11 @@ #include #include #include +#include +#include +#include +#include +#include "time/tick-internal.h" #include "rcutree.h" #include @@ -145,8 +150,6 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); */ static int rcu_scheduler_fully_active __read_mostly; -#ifdef CONFIG_RCU_BOOST - /* * Control variables for per-CPU and per-rcu_node kthreads. These * handle all flavors of RCU. 
@@ -156,8 +159,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); -#endif /* #ifdef CONFIG_RCU_BOOST */ - static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); static void invoke_rcu_core(void); static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); @@ -2225,16 +2226,14 @@ __rcu_process_callbacks(struct rcu_state *rsp) /* * Do RCU core processing for the current CPU. */ -static void rcu_process_callbacks(struct softirq_action *unused) +static void rcu_process_callbacks(void) { struct rcu_state *rsp; if (cpu_is_offline(smp_processor_id())) return; - trace_rcu_utilization(TPS("Start RCU core")); for_each_rcu_flavor(rsp) __rcu_process_callbacks(rsp); - trace_rcu_utilization(TPS("End RCU core")); } /* @@ -2248,18 +2247,105 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) { if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active))) return; - if (likely(!rsp->boost)) { - rcu_do_batch(rsp, rdp); + rcu_do_batch(rsp, rdp); +} + +static void rcu_wake_cond(struct task_struct *t, int status) +{ + /* + * If the thread is yielding, only wake it when this + * is invoked from idle + */ + if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) + wake_up_process(t); +} + +/* + * Wake up this CPU's rcuc kthread to do RCU core processing. + */ +static void invoke_rcu_core(void) +{ + unsigned long flags; + struct task_struct *t; + + if (!cpu_online(smp_processor_id())) return; + local_irq_save(flags); + __this_cpu_write(rcu_cpu_has_work, 1); + t = __this_cpu_read(rcu_cpu_kthread_task); + if (t != NULL && current != t) + rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); + local_irq_restore(flags); +} + +static void rcu_cpu_kthread_park(unsigned int cpu) +{ + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; +} + +static int rcu_cpu_kthread_should_run(unsigned int cpu) +{ + return __this_cpu_read(rcu_cpu_has_work); +} + +/* + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the + * RCU softirq used in flavors and configurations of RCU that do not + * support RCU priority boosting. + */ +static void rcu_cpu_kthread(unsigned int cpu) +{ + unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status); + char work, *workp = &__get_cpu_var(rcu_cpu_has_work); + int spincnt; + + for (spincnt = 0; spincnt < 10; spincnt++) { + trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); + local_bh_disable(); + *statusp = RCU_KTHREAD_RUNNING; + this_cpu_inc(rcu_cpu_kthread_loops); + local_irq_disable(); + work = *workp; + *workp = 0; + local_irq_enable(); + if (work) + rcu_process_callbacks(); + local_bh_enable(); + if (*workp == 0) { + trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); + *statusp = RCU_KTHREAD_WAITING; + return; + } } - invoke_rcu_callbacks_kthread(); + *statusp = RCU_KTHREAD_YIELDING; + trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); + schedule_timeout_interruptible(2); + trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); + *statusp = RCU_KTHREAD_WAITING; } -static void invoke_rcu_core(void) +static struct smp_hotplug_thread rcu_cpu_thread_spec = { + .store = &rcu_cpu_kthread_task, + .thread_should_run = rcu_cpu_kthread_should_run, + .thread_fn = rcu_cpu_kthread, + .thread_comm = "rcuc/%u", + .setup = rcu_cpu_kthread_setup, + .park = rcu_cpu_kthread_park, +}; + +/* + * Spawn per-CPU RCU core processing kthreads. 
+ */ +static int __init rcu_spawn_core_kthreads(void) { - if (cpu_online(smp_processor_id())) - raise_softirq(RCU_SOFTIRQ); + int cpu; + + for_each_possible_cpu(cpu) + per_cpu(rcu_cpu_has_work, cpu) = 0; + BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); + return 0; } +early_initcall(rcu_spawn_core_kthreads); /* * Handle any core-RCU processing required by a call_rcu() invocation. @@ -3324,7 +3410,6 @@ void __init rcu_init(void) rcu_init_one(&rcu_sched_state, &rcu_sched_data); rcu_init_one(&rcu_bh_state, &rcu_bh_data); __rcu_init_preempt(); - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); /* * We don't need protection against CPU-hotplug here because diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 7d71c06..68ed6a8 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -527,10 +527,9 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, static void __init __rcu_init_preempt(void); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static void invoke_rcu_callbacks_kthread(void); static bool rcu_is_callbacks_kthread(void); +static void rcu_cpu_kthread_setup(unsigned int cpu); #ifdef CONFIG_RCU_BOOST -static void rcu_preempt_do_callbacks(void); static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct rcu_node *rnp); #endif /* #ifdef CONFIG_RCU_BOOST */ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 63e0520..2545f031 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -24,12 +24,6 @@ * Paul E. McKenney */ -#include -#include -#include -#include -#include "time/tick-internal.h" - #define RCU_KTHREAD_PRIO 1 #ifdef CONFIG_RCU_BOOST @@ -656,15 +650,6 @@ static void rcu_preempt_check_callbacks(int cpu) t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; } -#ifdef CONFIG_RCU_BOOST - -static void rcu_preempt_do_callbacks(void) -{ - rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data)); -} - -#endif /* #ifdef CONFIG_RCU_BOOST */ - /* * Queue a preemptible-RCU callback for invocation after a grace period. */ @@ -1126,6 +1111,19 @@ void exit_rcu(void) #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ +/* + * If boosting, set rcuc kthreads to realtime priority. + */ +static void rcu_cpu_kthread_setup(unsigned int cpu) +{ +#ifdef CONFIG_RCU_BOOST + struct sched_param sp; + + sp.sched_priority = RCU_KTHREAD_PRIO; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); +#endif /* #ifdef CONFIG_RCU_BOOST */ +} + #ifdef CONFIG_RCU_BOOST #include "rtmutex_common.h" @@ -1157,16 +1155,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp) #endif /* #else #ifdef CONFIG_RCU_TRACE */ -static void rcu_wake_cond(struct task_struct *t, int status) -{ - /* - * If the thread is yielding, only wake it when this - * is invoked from idle - */ - if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) - wake_up_process(t); -} - /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the @@ -1310,23 +1298,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) } /* - * Wake up the per-CPU kthread to invoke RCU callbacks. 
- */ -static void invoke_rcu_callbacks_kthread(void) -{ - unsigned long flags; - - local_irq_save(flags); - __this_cpu_write(rcu_cpu_has_work, 1); - if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && - current != __this_cpu_read(rcu_cpu_kthread_task)) { - rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), - __this_cpu_read(rcu_cpu_kthread_status)); - } - local_irq_restore(flags); -} - -/* * Is the current CPU running the RCU-callbacks kthread? * Caller must have preemption disabled. */ @@ -1380,67 +1351,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, return 0; } -static void rcu_kthread_do_work(void) -{ - rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); - rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); - rcu_preempt_do_callbacks(); -} - -static void rcu_cpu_kthread_setup(unsigned int cpu) -{ - struct sched_param sp; - - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); -} - -static void rcu_cpu_kthread_park(unsigned int cpu) -{ - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; -} - -static int rcu_cpu_kthread_should_run(unsigned int cpu) -{ - return __get_cpu_var(rcu_cpu_has_work); -} - -/* - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the - * RCU softirq used in flavors and configurations of RCU that do not - * support RCU priority boosting. - */ -static void rcu_cpu_kthread(unsigned int cpu) -{ - unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status); - char work, *workp = &__get_cpu_var(rcu_cpu_has_work); - int spincnt; - - for (spincnt = 0; spincnt < 10; spincnt++) { - trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); - local_bh_disable(); - *statusp = RCU_KTHREAD_RUNNING; - this_cpu_inc(rcu_cpu_kthread_loops); - local_irq_disable(); - work = *workp; - *workp = 0; - local_irq_enable(); - if (work) - rcu_kthread_do_work(); - local_bh_enable(); - if (*workp == 0) { - trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); - *statusp = RCU_KTHREAD_WAITING; - return; - } - } - *statusp = RCU_KTHREAD_YIELDING; - trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); - schedule_timeout_interruptible(2); - trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); - *statusp = RCU_KTHREAD_WAITING; -} - /* * Set the per-rcu_node kthread's affinity to cover all CPUs that are * served by the rcu_node in question. The CPU hotplug lock is still @@ -1474,27 +1384,14 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) free_cpumask_var(cm); } -static struct smp_hotplug_thread rcu_cpu_thread_spec = { - .store = &rcu_cpu_kthread_task, - .thread_should_run = rcu_cpu_kthread_should_run, - .thread_fn = rcu_cpu_kthread, - .thread_comm = "rcuc/%u", - .setup = rcu_cpu_kthread_setup, - .park = rcu_cpu_kthread_park, -}; - /* * Spawn all kthreads -- called as soon as the scheduler is running. 
*/ static int __init rcu_spawn_kthreads(void) { struct rcu_node *rnp; - int cpu; rcu_scheduler_fully_active = 1; - for_each_possible_cpu(cpu) - per_cpu(rcu_cpu_has_work, cpu) = 0; - BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); rnp = rcu_get_root(rcu_state); (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); if (NUM_RCU_NODES > 1) { @@ -1522,11 +1419,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) raw_spin_unlock_irqrestore(&rnp->lock, flags); } -static void invoke_rcu_callbacks_kthread(void) -{ - WARN_ON_ONCE(1); -} - static bool rcu_is_callbacks_kthread(void) { return false; -- cgit v0.10.2 From 1f87e4573dea08f083d9d530d9aada7f1e43dcda Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 21 Mar 2014 20:19:05 +0100 Subject: rcu: make RCU_BOOST default on RT Since it is no longer invoked from the softirq people run into OOM more often if the priority of the RCU thread is too low. Making boosting default on RT should help in those case and it can be switched off if someone knows better. Cc: stable-rt@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior diff --git a/init/Kconfig b/init/Kconfig index 4dea595..f6aeea6 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -642,7 +642,7 @@ config TREE_RCU_TRACE config RCU_BOOST bool "Enable RCU priority boosting" depends on RT_MUTEXES && PREEMPT_RCU - default n + default y if PREEMPT_RT_FULL help This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. -- cgit v0.10.2 From 3c281858d6c784a13901215f900d94e1f917349a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Oct 2012 18:50:54 +0100 Subject: sched: Add support for lazy preemption It has become an obsession to mitigate the determinism vs. throughput loss of RT. Looking at the mainline semantics of preemption points gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER tasks. One major issue is the wakeup of tasks which are right away preempting the waking task while the waking task holds a lock on which the woken task will block right after having preempted the wakee. In mainline this is prevented due to the implicit preemption disable of spin/rw_lock held regions. On RT this is not possible due to the fully preemptible nature of sleeping spinlocks. Though for a SCHED_OTHER task preempting another SCHED_OTHER task this is really not a correctness issue. RT folks are concerned about SCHED_FIFO/RR tasks preemption and not about the purely fairness driven SCHED_OTHER preemption latencies. So I introduced a lazy preemption mechanism which only applies to SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside of the existing preempt_count each tasks sports now a preempt_lazy_count which is manipulated on lock acquiry and release. This is slightly incorrect as for lazyness reasons I coupled this on migrate_disable/enable so some other mechanisms get the same treatment (e.g. get_cpu_light). Now on the scheduler side instead of setting NEED_RESCHED this sets NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and therefor allows to exit the waking task the lock held region before the woken task preempts. That also works better for cross CPU wakeups as the other side can stay in the adaptive spinning loop. For RT class preemption there is no change. This simply sets NEED_RESCHED and forgoes the lazy preemption counter. 
Initial test do not expose any observable latency increasement, but history shows that I've been proven wrong before :) The lazy preemption mode is per default on, but with CONFIG_SCHED_DEBUG enabled it can be disabled via: # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features and reenabled via # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features The test results so far are very machine and workload dependent, but there is a clear trend that it enhances the non RT workload performance. Signed-off-by: Thomas Gleixner diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index cb2b03c..90209e6 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -58,6 +58,7 @@ struct trace_entry { int pid; unsigned short migrate_disable; unsigned short padding; + unsigned char preempt_lazy_count; }; #define FTRACE_MAX_EVENT \ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7d08aab..c153cf2 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -23,15 +23,38 @@ #define preempt_count() (current_thread_info()->preempt_count) +#ifdef CONFIG_PREEMPT_LAZY +#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) +#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) +#define inc_preempt_lazy_count() add_preempt_lazy_count(1) +#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) +#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) +#else +#define add_preempt_lazy_count(val) do { } while (0) +#define sub_preempt_lazy_count(val) do { } while (0) +#define inc_preempt_lazy_count() do { } while (0) +#define dec_preempt_lazy_count() do { } while (0) +#define preempt_lazy_count() (0) +#endif + #ifdef CONFIG_PREEMPT asmlinkage void preempt_schedule(void); +# ifdef CONFIG_PREEMPT_LAZY #define preempt_check_resched() \ do { \ - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ + if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \ + test_thread_flag(TIF_NEED_RESCHED_LAZY))) \ preempt_schedule(); \ } while (0) +# else +#define preempt_check_resched() \ +do { \ + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ + preempt_schedule(); \ +} while (0) +# endif #ifdef CONFIG_CONTEXT_TRACKING @@ -64,6 +87,12 @@ do { \ barrier(); \ } while (0) +#define preempt_lazy_disable() \ +do { \ + inc_preempt_lazy_count(); \ + barrier(); \ +} while (0) + #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ @@ -85,6 +114,13 @@ do { \ preempt_check_resched(); \ } while (0) +#define preempt_lazy_enable() \ +do { \ + dec_preempt_lazy_count(); \ + barrier(); \ + preempt_check_resched(); \ +} while (0) + /* For debugging and tracer internals only! 
*/ #define add_preempt_count_notrace(val) \ do { preempt_count() += (val); } while (0) diff --git a/include/linux/sched.h b/include/linux/sched.h index 0035c94..625a41f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2451,6 +2451,52 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } +#ifdef CONFIG_PREEMPT_LAZY +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) +{ + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) +{ + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); +} + +static inline int need_resched_lazy(void) +{ + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +} + +static inline int need_resched_now(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} + +static inline int need_resched(void) +{ + return test_thread_flag(TIF_NEED_RESCHED) || + test_thread_flag(TIF_NEED_RESCHED_LAZY); +} +#else +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } +static inline int need_resched_lazy(void) { return 0; } + +static inline int need_resched_now(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} + +static inline int need_resched(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} +#endif + static inline int restart_syscall(void) { set_tsk_thread_flag(current, TIF_SIGPENDING); @@ -2482,11 +2528,6 @@ static inline int signal_pending_state(long state, struct task_struct *p) return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } -static inline int need_resched(void) -{ - return unlikely(test_thread_flag(TIF_NEED_RESCHED)); -} - static inline bool __task_is_stopped_or_traced(struct task_struct *task) { if (task->state & (__TASK_STOPPED | __TASK_TRACED)) diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index f8a2982..11dbe26 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -6,6 +6,12 @@ config PREEMPT_RT_BASE bool select PREEMPT +config HAVE_PREEMPT_LAZY + bool + +config PREEMPT_LAZY + def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL + choice prompt "Preemption Model" default PREEMPT_NONE diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 387a4c6..01aec90 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -540,6 +540,37 @@ void resched_task(struct task_struct *p) smp_send_reschedule(cpu); } +#ifdef CONFIG_PREEMPT_LAZY +void resched_task_lazy(struct task_struct *p) +{ + int cpu; + + if (!sched_feat(PREEMPT_LAZY)) { + resched_task(p); + return; + } + + assert_raw_spin_locked(&task_rq(p)->lock); + + if (test_tsk_need_resched(p)) + return; + + if (test_tsk_need_resched_lazy(p)) + return; + + set_tsk_need_resched_lazy(p); + + cpu = task_cpu(p); + if (cpu == smp_processor_id()) + return; + + /* NEED_RESCHED_LAZY must be visible before we test polling */ + smp_mb(); + if (!tsk_is_polling(p)) + smp_send_reschedule(cpu); +} +#endif + void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); @@ -704,6 +735,17 @@ void resched_task(struct task_struct *p) assert_raw_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); } +#ifdef CONFIG_PREEMPT_LAZY +void resched_task_lazy(struct task_struct *p) +{ + if (!sched_feat(PREEMPT_LAZY)) { + resched_task(p); + return; + } + assert_raw_spin_locked(&task_rq(p)->lock); + set_tsk_need_resched_lazy(p); +} +#endif #endif /* 
CONFIG_SMP */ #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ @@ -1736,6 +1778,9 @@ void sched_fork(struct task_struct *p) /* Want to start with kernel preemption disabled. */ task_thread_info(p)->preempt_count = 1; #endif +#ifdef CONFIG_HAVE_PREEMPT_LAZY + task_thread_info(p)->preempt_lazy_count = 0; +#endif #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); #endif @@ -2388,6 +2433,7 @@ void migrate_disable(void) } preempt_disable(); + preempt_lazy_disable(); pin_current_cpu(); p->migrate_disable = 1; preempt_enable(); @@ -2442,6 +2488,7 @@ void migrate_enable(void) unpin_current_cpu(); preempt_enable(); + preempt_lazy_enable(); } EXPORT_SYMBOL(migrate_enable); #else @@ -2569,6 +2616,7 @@ need_resched: put_prev_task(rq, prev); next = pick_next_task(rq); clear_tsk_need_resched(prev); + clear_tsk_need_resched_lazy(prev); rq->skip_clock_update = 0; if (likely(prev != next)) { @@ -2673,6 +2721,14 @@ asmlinkage void __sched notrace preempt_schedule(void) if (likely(!preemptible())) return; +#ifdef CONFIG_PREEMPT_LAZY + /* + * Check for lazy preemption + */ + if (current_thread_info()->preempt_lazy_count && + !test_thread_flag(TIF_NEED_RESCHED)) + return; +#endif do { add_preempt_count_notrace(PREEMPT_ACTIVE); /* @@ -4417,7 +4473,9 @@ void init_idle(struct task_struct *idle, int cpu) /* Set the preempt count _outside_ the spinlocks! */ task_thread_info(idle)->preempt_count = 0; - +#ifdef CONFIG_HAVE_PREEMPT_LAZY + task_thread_info(idle)->preempt_lazy_count = 0; +#endif /* * The idle tasks have their own, simple scheduling class: */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 790e2fc..0af1448 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1902,7 +1902,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { - resched_task(rq_of(cfs_rq)->curr); + resched_task_lazy(rq_of(cfs_rq)->curr); /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. @@ -1926,7 +1926,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) - resched_task(rq_of(cfs_rq)->curr); + resched_task_lazy(rq_of(cfs_rq)->curr); } static void @@ -2047,7 +2047,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. */ if (queued) { - resched_task(rq_of(cfs_rq)->curr); + resched_task_lazy(rq_of(cfs_rq)->curr); return; } /* @@ -2237,7 +2237,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) - resched_task(rq_of(cfs_rq)->curr); + resched_task_lazy(rq_of(cfs_rq)->curr); } static __always_inline @@ -2837,7 +2837,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) - resched_task(p); + resched_task_lazy(p); return; } @@ -3704,7 +3704,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: - resched_task(curr); + resched_task_lazy(curr); /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved @@ -5979,7 +5979,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. 
*/ swap(curr->vruntime, se->vruntime); - resched_task(rq->curr); + resched_task_lazy(rq->curr); } se->vruntime -= cfs_rq->min_vruntime; @@ -6004,7 +6004,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) - resched_task(rq->curr); + resched_task_lazy(rq->curr); } else check_preempt_curr(rq, p, 0); } diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 938274c..4594051 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -58,6 +58,9 @@ SCHED_FEAT(NONTASK_POWER, true) SCHED_FEAT(TTWU_QUEUE, true) #else SCHED_FEAT(TTWU_QUEUE, false) +# ifdef CONFIG_PREEMPT_LAZY +SCHED_FEAT(PREEMPT_LAZY, true) +# endif #endif SCHED_FEAT(FORCE_SD_OVERLAP, false) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ca61374..2843303 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1046,6 +1046,15 @@ extern void init_sched_fair_class(void); extern void resched_task(struct task_struct *p); extern void resched_cpu(int cpu); +#ifdef CONFIG_PREEMPT_LAZY +extern void resched_task_lazy(struct task_struct *tsk); +#else +static inline void resched_task_lazy(struct task_struct *tsk) +{ + resched_task(tsk); +} +#endif + extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 402fcc6..f9401ed 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1509,6 +1509,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; + entry->preempt_lazy_count = preempt_lazy_count(); entry->pid = (tsk) ? tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT @@ -1518,7 +1519,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, #endif ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | - (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); + (need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | + (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0); entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; } @@ -2410,15 +2412,17 @@ get_total_entries(struct trace_buffer *buf, static void print_lat_help_header(struct seq_file *m) { - seq_puts(m, "# _------=> CPU# \n"); - seq_puts(m, "# / _-----=> irqs-off \n"); - seq_puts(m, "# | / _----=> need-resched \n"); - seq_puts(m, "# || / _---=> hardirq/softirq \n"); - seq_puts(m, "# ||| / _--=> preempt-depth \n"); - seq_puts(m, "# |||| / _--=> migrate-disable\n"); - seq_puts(m, "# ||||| / delay \n"); - seq_puts(m, "# cmd pid |||||| time | caller \n"); - seq_puts(m, "# \\ / ||||| \\ | / \n"); + seq_puts(m, "# _--------=> CPU# \n"); + seq_puts(m, "# / _-------=> irqs-off \n"); + seq_puts(m, "# | / _------=> need-resched \n"); + seq_puts(m, "# || / _-----=> need-resched_lazy \n"); + seq_puts(m, "# ||| / _----=> hardirq/softirq \n"); + seq_puts(m, "# |||| / _---=> preempt-depth \n"); + seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n"); + seq_puts(m, "# |||||| / _-=> migrate-disable \n"); + seq_puts(m, "# ||||||| / delay \n"); + seq_puts(m, "# cmd pid |||||||| time | caller \n"); + seq_puts(m, "# \\ / |||||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) @@ -2442,13 +2446,16 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) { print_event_info(buf, m); - seq_puts(m, "# _-----=> irqs-off\n"); - seq_puts(m, "# / _----=> need-resched\n"); - seq_puts(m, "# | / _---=> hardirq/softirq\n"); - seq_puts(m, "# || / _--=> preempt-depth\n"); - seq_puts(m, "# ||| / delay\n"); - seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); - seq_puts(m, "# | | | |||| | |\n"); + seq_puts(m, "# _-------=> irqs-off \n"); + seq_puts(m, "# / _------=> need-resched \n"); + seq_puts(m, "# |/ _-----=> need-resched_lazy \n"); + seq_puts(m, "# ||/ _----=> hardirq/softirq \n"); + seq_puts(m, "# |||/ _---=> preempt-depth \n"); + seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n"); + seq_puts(m, "# ||||| / _-=> migrate-disable \n"); + seq_puts(m, "# |||||| / delay\n"); + seq_puts(m, "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n"); + seq_puts(m, "# | | | |||||| | |\n"); } void diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 10c86fb..109291a 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -117,6 +117,7 @@ struct kretprobe_trace_entry_head { * NEED_RESCHED - reschedule is requested * HARDIRQ - inside an interrupt handler * SOFTIRQ - inside a softirq handler + * NEED_RESCHED_LAZY - lazy reschedule is requested */ enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, @@ -124,6 +125,7 @@ enum trace_flag_type { TRACE_FLAG_NEED_RESCHED = 0x04, TRACE_FLAG_HARDIRQ = 0x08, TRACE_FLAG_SOFTIRQ = 0x10, + TRACE_FLAG_NEED_RESCHED_LAZY = 0x20, }; #define TRACE_BUF_SIZE 1024 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index b54b3c8..46b6467 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -606,6 +606,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) { char hardsoft_irq; char need_resched; + char need_resched_lazy; char irqs_off; int hardirq; int softirq; @@ -620,14 +621,17 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) '.'; need_resched = (entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'; + need_resched_lazy = + (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; hardsoft_irq = (hardirq && softirq) ? 'H' : hardirq ? 'h' : softirq ? 
's' : '.'; - if (!trace_seq_printf(s, "%c%c%c", - irqs_off, need_resched, hardsoft_irq)) + if (!trace_seq_printf(s, "%c%c%c%c", + irqs_off, need_resched, need_resched_lazy, + hardsoft_irq)) return 0; if (entry->preempt_count) @@ -635,6 +639,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) else ret = trace_seq_putc(s, '.'); + if (entry->preempt_lazy_count) + ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count); + else + ret = trace_seq_putc(s, '.'); + if (entry->migrate_disable) ret = trace_seq_printf(s, "%x", entry->migrate_disable); else -- cgit v0.10.2 From 607abbfb4d7b1052e7d35fb9aa7fd24ab79182ec Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Nov 2012 11:03:47 +0100 Subject: x86-preempt-lazy.patch Signed-off-by: Thomas Gleixner diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 083ee96..edbb857 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -21,6 +21,7 @@ config X86_64 ### Arch settings config X86 def_bool y + select HAVE_PREEMPT_LAZY select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS select HAVE_AOUT if X86_32 select HAVE_UNSTABLE_SCHED_CLOCK diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 2781119..f08e527 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -30,6 +30,8 @@ struct thread_info { __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_lazy_count; /* 0 => lazy preemptable, + <0 => BUG */ mm_segment_t addr_limit; struct restart_block restart_block; void __user *sysenter_return; @@ -81,6 +83,7 @@ struct thread_info { #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ +#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ #define TIF_UPROBE 12 /* breakpointed or singlestepping */ @@ -105,6 +108,7 @@ struct thread_info { #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) #define _TIF_UPROBE (1 << TIF_UPROBE) @@ -154,6 +158,8 @@ struct thread_info { #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) + #define PREEMPT_ACTIVE 0x10000000 #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 2861082..a36d9cf 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -33,6 +33,7 @@ void common(void) { OFFSET(TI_status, thread_info, status); OFFSET(TI_addr_limit, thread_info, addr_limit); OFFSET(TI_preempt_count, thread_info, preempt_count); + OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); BLANK(); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 15a569a..e491bfd 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -364,14 +364,22 @@ ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? 
jnz restore_all -need_resched: movl TI_flags(%ebp), %ecx # need_resched set ? testb $_TIF_NEED_RESCHED, %cl + jnz 1f + + cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? + jnz restore_all + testl $_TIF_NEED_RESCHED_LAZY, %ecx jz restore_all - testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? + +1: testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? jz restore_all call preempt_schedule_irq - jmp need_resched + movl TI_flags(%ebp), %ecx # need_resched set ? + testl $_TIF_NEED_RESCHED_MASK, %ecx + jnz 1b + jmp restore_all END(resume_kernel) #endif CFI_ENDPROC @@ -607,7 +615,7 @@ ENDPROC(system_call) ALIGN RING0_PTREGS_FRAME # can't unwind into user space anyway work_pending: - testb $_TIF_NEED_RESCHED, %cl + testl $_TIF_NEED_RESCHED_MASK, %ecx jz work_notifysig work_resched: call schedule @@ -620,7 +628,7 @@ work_resched: andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? jz restore_all - testb $_TIF_NEED_RESCHED, %cl + testl $_TIF_NEED_RESCHED_MASK, %ecx jnz work_resched work_notifysig: # deal with pending signals and diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 0db3eeb..abca4f4 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -658,8 +658,8 @@ sysret_check: /* Handle reschedules */ /* edx: work, edi: workmask */ sysret_careful: - bt $TIF_NEED_RESCHED,%edx - jnc sysret_signal + testl $_TIF_NEED_RESCHED_MASK,%edx + jz sysret_signal TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi @@ -771,8 +771,8 @@ GLOBAL(int_with_check) /* First do a reschedule test. */ /* edx: work, edi: workmask */ int_careful: - bt $TIF_NEED_RESCHED,%edx - jnc int_very_careful + testl $_TIF_NEED_RESCHED_MASK,%edx + jz int_very_careful TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi @@ -1071,8 +1071,8 @@ bad_iret: /* edi: workmask, edx: work */ retint_careful: CFI_RESTORE_STATE - bt $TIF_NEED_RESCHED,%edx - jnc retint_signal + testl $_TIF_NEED_RESCHED_MASK,%edx + jz retint_signal TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi @@ -1105,9 +1105,15 @@ retint_signal: ENTRY(retint_kernel) cmpl $0,TI_preempt_count(%rcx) jnz retint_restore_args - bt $TIF_NEED_RESCHED,TI_flags(%rcx) + bt $TIF_NEED_RESCHED,TI_flags(%rcx) + jc 1f + + cmpl $0,TI_preempt_lazy_count(%rcx) + jnz retint_restore_args + bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) jnc retint_restore_args - bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ + +1: bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? 
*/ jnc retint_restore_args call preempt_schedule_irq jmp exit_intr @@ -1531,7 +1537,7 @@ paranoid_userspace: movq %rsp,%rdi /* &pt_regs */ call sync_regs movq %rax,%rsp /* switch stack for scheduling */ - testl $_TIF_NEED_RESCHED,%ebx + testl $_TIF_NEED_RESCHED_MASK,%ebx jnz paranoid_schedule movl %ebx,%edx /* arg3: thread flags */ TRACE_IRQS_ON -- cgit v0.10.2 From 256180b4c4d6a638dee6275efc46d00922895bee Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 31 Oct 2012 12:04:11 +0100 Subject: arm-preempt-lazy-support.patch Signed-off-by: Thomas Gleixner diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 1ad6fb6..2ec9220 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -51,6 +51,7 @@ config ARM select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND select HAVE_OPROFILE if (HAVE_PERF_EVENTS) select HAVE_PERF_EVENTS + select HAVE_PREEMPT_LAZY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index df5e13d..33cb511 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -60,6 +60,7 @@ struct arm_restart_block { struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => bug */ + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ @@ -159,6 +160,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 1 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ +#define TIF_NEED_RESCHED_LAZY 3 #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_TRACEPOINT 10 @@ -171,6 +173,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index ded0417..12e46dd 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -54,6 +54,7 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index ec3e5cf..8c5e809 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -205,11 +205,18 @@ __irq_svc: #ifdef CONFIG_PREEMPT get_thread_info tsk ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if preempt count != 0 + bne 1f @ return from exeption + ldr r0, [tsk, #TI_FLAGS] @ get flags + tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set + blne svc_preempt @ preempt! 
+ + ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count + teq r8, #0 @ if preempt lazy count != 0 movne r0, #0 @ force flags to 0 - tst r0, #_TIF_NEED_RESCHED + tst r0, #_TIF_NEED_RESCHED_LAZY blne svc_preempt +1: #endif svc_exit r5, irq = 1 @ return from exception @@ -224,6 +231,8 @@ svc_preempt: 1: bl preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS tst r0, #_TIF_NEED_RESCHED + bne 1b + tst r0, #_TIF_NEED_RESCHED_LAZY moveq pc, r8 @ go again b 1b #endif diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index ab33042..d1b0bfd 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -589,7 +589,8 @@ asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { do { - if (likely(thread_flags & _TIF_NEED_RESCHED)) { + if (likely(thread_flags & (_TIF_NEED_RESCHED | + _TIF_NEED_RESCHED_LAZY))) { schedule(); } else { if (unlikely(!user_mode(regs))) -- cgit v0.10.2 From 8bc7383013cbb9be3e39d3cc9660d171ef6abe87 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 1 Nov 2012 10:14:11 +0100 Subject: powerpc-preempt-lazy-support.patch Signed-off-by: Thomas Gleixner diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a8c537f..9ca41f7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -132,6 +132,7 @@ config PPC select GENERIC_CLOCKEVENTS select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER + select HAVE_PREEMPT_LAZY select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA select CLONE_BACKWARDS diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index ba7b197..f50711f 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -43,6 +43,8 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_lazy_count; /* 0 => preemptable, + <0 => BUG */ struct restart_block restart_block; unsigned long local_flags; /* private flags for thread */ @@ -90,8 +92,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling - TIF_NEED_RESCHED */ +#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ #define TIF_32BIT 4 /* 32 bit binary */ #define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */ #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ @@ -107,6 +108,8 @@ static inline struct thread_info *current_thread_info(void) #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation for stack store? 
*/ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ +#define TIF_POLLING_NRFLAG 18 /* true if poll_idle() is polling + TIF_NEED_RESCHED */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) -- cgit v0.10.2 From: Sebastian Andrzej Siewior Date: Thu, 23 Jan 2014 14:45:59 +0100 Subject: leds: trigger: disable CPU trigger on -RT as it triggers: |CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141 |[] (unwind_backtrace+0x0/0xf8) from [] (show_stack+0x1c/0x20) |[] (show_stack+0x1c/0x20) from [] (dump_stack+0x20/0x2c) |[] (dump_stack+0x20/0x2c) from [] (__might_sleep+0x13c/0x170) |[] (__might_sleep+0x13c/0x170) from [] (__rt_spin_lock+0x28/0x38) |[] (__rt_spin_lock+0x28/0x38) from [] (rt_read_lock+0x68/0x7c) |[] (rt_read_lock+0x68/0x7c) from [] (led_trigger_event+0x2c/0x5c) |[] (led_trigger_event+0x2c/0x5c) from [] (ledtrig_cpu+0x54/0x5c) |[] (ledtrig_cpu+0x54/0x5c) from [] (arch_cpu_idle_exit+0x18/0x1c) |[] (arch_cpu_idle_exit+0x18/0x1c) from [] (cpu_startup_entry+0xa8/0x234) |[] (cpu_startup_entry+0xa8/0x234) from [] (rest_init+0xb8/0xe0) |[] (rest_init+0xb8/0xe0) from [] (start_kernel+0x2c4/0x380) Cc: stable-rt@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig index 49794b4..3d7245d 100644 --- a/drivers/leds/trigger/Kconfig +++ b/drivers/leds/trigger/Kconfig @@ -61,7 +61,7 @@ config LEDS_TRIGGER_BACKLIGHT config LEDS_TRIGGER_CPU bool "LED CPU Trigger" - depends on LEDS_TRIGGERS + depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE help This allows LEDs to be controlled by active CPUs. This shows the active CPUs across an array of LEDs so you can see which -- cgit v0.10.2 From a6d0d44b2a2a6229d097637663c61af647ac15a0 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 21 Mar 2013 11:35:49 +0100 Subject: i2c/omap: drop the lock hard irq context The lock is taken while reading two registers. On RT the first lock is taken in hard irq where it might sleep and in the threaded irq. The threaded irq runs in oneshot mode so the hard irq does not run until the thread completes, so there is no reason to grab the lock. Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 9967a6f..2f66478 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -879,15 +879,12 @@ omap_i2c_isr(int irq, void *dev_id) u16 mask; u16 stat; - spin_lock(&dev->lock); - mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); + mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); if (stat & mask) ret = IRQ_WAKE_THREAD; - spin_unlock(&dev->lock); - return ret; } -- cgit v0.10.2 From 994d8525ac0c2bab7de232e8aaf01517db3f500f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 9 Jan 2013 12:11:12 +0100 Subject: mmci: Remove bogus local_irq_save() On !RT interrupt runs with interrupts disabled. On RT it's in a thread, so no need to disable interrupts at all.
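The same property drives this change and the i2c-omap one above: with forced interrupt threading, and always on PREEMPT_RT, the handler runs in task context, so disabling interrupts around its body adds latency without protecting anything. Below is a minimal sketch of a handler written with that in mind; the device, register offsets and all foo_* names are hypothetical and not taken from either driver.

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device private data. */
struct foo_dev {
	void __iomem *base;
};

#define FOO_STATUS	0x00	/* hypothetical register offsets */
#define FOO_ACK		0x04

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;
	u32 status;

	/*
	 * No local_irq_save() here: on !RT a primary handler already runs
	 * with interrupts disabled, and on RT it runs in an irq kthread,
	 * where disabling interrupts would not protect anything and would
	 * only hurt latency.
	 */
	status = readl(foo->base + FOO_STATUS);
	if (!status)
		return IRQ_NONE;

	writel(status, foo->base + FOO_ACK);
	return IRQ_HANDLED;
}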
Signed-off-by: Thomas Gleixner diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index c3785ed..a0f63c9 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1023,15 +1023,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) struct sg_mapping_iter *sg_miter = &host->sg_miter; struct variant_data *variant = host->variant; void __iomem *base = host->base; - unsigned long flags; u32 status; status = readl(base + MMCISTATUS); dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); - local_irq_save(flags); - do { unsigned int remain, len; char *buffer; @@ -1071,8 +1068,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) sg_miter_stop(sg_miter); - local_irq_restore(flags); - /* * If we have less than the fifo 'half-full' threshold to transfer, * trigger a PIO interrupt as soon as any data is available. -- cgit v0.10.2 From 67fdd2bc55ab736723abaf7887d82ff9ecaac1d0 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 4 Oct 2013 11:42:34 +0200 Subject: net: iwlwifi: request only a threaded handler for interrupts On RT the trans_pcie->irq_lock lock is converted into a sleeping lock and can't be used in primary irq handler. The lock is used in mutliple places which means turning it into a raw lock could increase the latency of the system. For now both handlers are moved into the thread. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Clark Williams diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 7bdaf06..62aac3b 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1394,6 +1394,20 @@ static const struct iwl_trans_ops trans_ops_pcie = { .set_bits_mask = iwl_trans_pcie_set_bits_mask, }; +#ifdef CONFIG_PREEMPT_RT_BASE +static irqreturn_t iwl_rt_irq_handler(int irq, void *dev_id) +{ + irqreturn_t ret; + + local_bh_disable(); + ret = iwl_pcie_isr_ict(irq, dev_id); + local_bh_enable(); + if (ret == IRQ_WAKE_THREAD) + ret = iwl_pcie_irq_handler(irq, dev_id); + return ret; +} +#endif + struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg *cfg) @@ -1512,9 +1526,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (iwl_pcie_alloc_ict(trans)) goto out_free_cmd_pool; +#ifdef CONFIG_PREEMPT_RT_BASE + err = request_threaded_irq(pdev->irq, NULL, iwl_rt_irq_handler, + IRQF_SHARED | IRQF_ONESHOT, DRV_NAME, trans); +#else err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict, iwl_pcie_irq_handler, IRQF_SHARED, DRV_NAME, trans); +#endif if (err) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; -- cgit v0.10.2 From cf7a739aaa7f0f97b263bb5def1b9075496239c2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 11 Oct 2013 17:14:31 +0200 Subject: drm: remove preempt_disable() from drm_calc_vbltimestamp_from_scanoutpos() Luis captured the following: | BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 | in_atomic(): 1, irqs_disabled(): 0, pid: 517, name: Xorg | 2 locks held by Xorg/517: | #0: | ( | &dev->vbl_lock | ){......} | , at: | [] drm_vblank_get+0x30/0x2b0 [drm] | #1: | ( | &dev->vblank_time_lock | ){......} | , at: | [] drm_vblank_get+0xb1/0x2b0 [drm] | Preemption disabled at: | [] i915_get_vblank_timestamp+0x45/0xa0 [i915] | CPU: 3 PID: 517 Comm: Xorg Not tainted 3.10.10-rt7+ #5 | Call Trace: | [] dump_stack+0x19/0x1b | [] __might_sleep+0xff/0x170 | [] rt_spin_lock+0x24/0x60 | [] 
i915_read32+0x27/0x170 [i915] | [] i915_pipe_enabled+0x31/0x40 [i915] | [] i915_get_crtc_scanoutpos+0x3e/0x1b0 [i915] | [] drm_calc_vbltimestamp_from_scanoutpos+0xf4/0x430 [drm] | [] i915_get_vblank_timestamp+0x45/0xa0 [i915] | [] drm_get_last_vbltimestamp+0x48/0x70 [drm] | [] drm_vblank_get+0x185/0x2b0 [drm] | [] drm_wait_vblank+0x83/0x5d0 [drm] | [] drm_ioctl+0x552/0x6a0 [drm] | [] do_vfs_ioctl+0x325/0x5b0 | [] SyS_ioctl+0x81/0xa0 | [] tracesys+0xdd/0xe2 After a longer thread it was decided to drop the preempt_disable()/ enable() invocations which were meant for -RT and Mario Kleiner looks for a replacement. Cc: stable-rt@vger.kernel.org Reported-By: Luis Claudio R. Goncalves Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index f92da0a..434ea84 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -628,11 +628,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, * code gets preempted or delayed for some reason. */ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { - /* Disable preemption to make it very likely to - * succeed in the first iteration even on PREEMPT_RT kernel. - */ - preempt_disable(); - /* Get system timestamp before query. */ stime = ktime_get(); @@ -644,8 +639,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, if (!drm_timestamp_monotonic) mono_time_offset = ktime_get_monotonic_offset(); - preempt_enable(); - /* Return as no-op if scanout query unsupported or failed. */ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", -- cgit v0.10.2 From 9ce2a885969c0be97269486639ec7859feaab224 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 9 Apr 2014 19:20:12 -0500 Subject: gpu/i915: don't open code these things The opencode part is gone in 1f83fee0 ("drm/i915: clear up wedged transitions") the owner check is still there. Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b00b32c..aaeac32 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4792,7 +4792,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) +#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ -- cgit v0.10.2 From a74cafc6b224aa0f18bb5f89355d557db1877c13 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 25 Apr 2013 18:12:52 +0200 Subject: drm/i915: drop trace_i915_gem_ring_dispatch on rt This tracepoint is responsible for: |[<814cc358>] __schedule_bug+0x4d/0x59 |[<814d24cc>] __schedule+0x88c/0x930 |[<814d3b90>] ? _raw_spin_unlock_irqrestore+0x40/0x50 |[<814d3b95>] ? _raw_spin_unlock_irqrestore+0x45/0x50 |[<810b57b5>] ? task_blocks_on_rt_mutex+0x1f5/0x250 |[<814d27d9>] schedule+0x29/0x70 |[<814d3423>] rt_spin_lock_slowlock+0x15b/0x278 |[<814d3786>] rt_spin_lock+0x26/0x30 |[] gen6_gt_force_wake_get+0x29/0x60 [i915] |[] gen6_ring_get_irq+0x5f/0x100 [i915] |[] ftrace_raw_event_i915_gem_ring_dispatch+0xe3/0x100 [i915] |[] i915_gem_do_execbuffer.isra.13+0xbd3/0x1430 [i915] |[<810f8943>] ? trace_buffer_unlock_commit+0x43/0x60 |[<8113e8d2>] ? ftrace_raw_event_kmem_alloc+0xd2/0x180 |[<8101d063>] ? 
native_sched_clock+0x13/0x80 |[] i915_gem_execbuffer2+0x99/0x280 [i915] |[] drm_ioctl+0x4c3/0x570 [drm] |[<8101d0d9>] ? sched_clock+0x9/0x10 |[] ? i915_gem_execbuffer+0x480/0x480 [i915] |[<810f1c18>] ? rb_commit+0x68/0xa0 |[<810f1c6c>] ? ring_buffer_unlock_commit+0x1c/0xa0 |[<81197467>] do_vfs_ioctl+0x97/0x540 |[<81021318>] ? ftrace_raw_event_sys_enter+0xd8/0x130 |[<811979a1>] sys_ioctl+0x91/0xb0 |[<814db931>] tracesys+0xe1/0xe6 Chris Wilson does not like to move i915_trace_irq_get() out of the macro |No. This enables the IRQ, as well as making a number of |very expensively serialised read, unconditionally. so it is gone now on RT. Cc: stable-rt@vger.kernel.org Reported-by: Joakim Hernberg Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index bf34577..979a6ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1129,7 +1129,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } +#ifndef CONFIG_PREEMPT_RT_BASE trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); +#endif i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring); i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); -- cgit v0.10.2 From cfafda348c23b0535a730e13391c3ee855633d1a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 28 Oct 2013 12:19:57 +0100 Subject: wait.h: include atomic.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit | CC init/main.o |In file included from include/linux/mmzone.h:9:0, | from include/linux/gfp.h:4, | from include/linux/kmod.h:22, | from include/linux/module.h:13, | from init/main.c:15: |include/linux/wait.h: In function ‘wait_on_atomic_t’: |include/linux/wait.h:982:2: error: implicit declaration of function ‘atomic_read’ [-Werror=implicit-function-declaration] | if (atomic_read(val) == 0) | ^ Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/wait.h b/include/linux/wait.h index a67fc16..68f7245 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -7,6 +7,7 @@ #include #include #include +#include typedef struct __wait_queue wait_queue_t; typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); -- cgit v0.10.2 From f9a772b4f1393d5be7d4010889c5d4ae50216fa1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 12 Dec 2011 12:29:04 +0100 Subject: wait-simple: Simple waitqueue implementation wait_queue is a swiss army knife and in most of the cases the complexity is not needed. For RT waitqueues are a constant source of trouble as we can't convert the head lock to a raw spinlock due to fancy and long lasting callbacks. Provide a slim version, which allows RT to replace wait queues. This should go mainline as well, as it lowers memory consumption and runtime overhead. Signed-off-by: Thomas Gleixner smp_mb() added by Steven Rostedt to fix a race condition with swait wakeups vs adding items to the list. 
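To make the intended use of the new API concrete, here is a minimal waiter/waker sketch built on the primitives added below; the foo_* names and the data_ready condition are hypothetical, only the swait calls come from the patch.

#include <linux/wait-simple.h>
#include <linux/kthread.h>

static DEFINE_SWAIT_HEAD(foo_wait);	/* head type added by this patch */
static bool foo_data_ready;		/* hypothetical wait condition */

static int foo_consumer(void *unused)
{
	while (!kthread_should_stop()) {
		/* Sleeps until the condition is true; re-checked on every wakeup. */
		swait_event_interruptible(foo_wait, foo_data_ready);
		if (foo_data_ready) {
			foo_data_ready = false;
			/* ... consume the data ... */
		}
	}
	return 0;
}

static void foo_producer(void)
{
	foo_data_ready = true;
	/* As with wake_up(), update the condition before waking. */
	swait_wake(&foo_wait);
}

The rework later in this series turns swait_wake() into shorthand for __swait_wake(head, TASK_NORMAL, 1), so call sites like the one above keep working unchanged.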
diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h new file mode 100644 index 0000000..a9f2469 --- /dev/null +++ b/include/linux/wait-simple.h @@ -0,0 +1,235 @@ +#ifndef _LINUX_WAIT_SIMPLE_H +#define _LINUX_WAIT_SIMPLE_H + +#include +#include + +#include + +struct swaiter { + struct task_struct *task; + struct list_head node; +}; + +#define DEFINE_SWAITER(name) \ + struct swaiter name = { \ + .task = current, \ + .node = LIST_HEAD_INIT((name).node), \ + } + +struct swait_head { + raw_spinlock_t lock; + struct list_head list; +}; + +#define DEFINE_SWAIT_HEAD(name) \ + struct swait_head name = { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .list = LIST_HEAD_INIT((name).list), \ + } + +extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); + +#define init_swait_head(swh) \ + do { \ + static struct lock_class_key __key; \ + \ + __init_swait_head((swh), &__key); \ + } while (0) + +/* + * Waiter functions + */ +static inline bool swaiter_enqueued(struct swaiter *w) +{ + return w->task != NULL; +} + +extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); +extern void swait_finish(struct swait_head *head, struct swaiter *w); + +/* + * Adds w to head->list. Must be called with head->lock locked. + */ +static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) +{ + list_add(&w->node, &head->list); + /* We can't let the condition leak before the setting of head */ + smp_mb(); +} + +/* + * Removes w from head->list. Must be called with head->lock locked. + */ +static inline void __swait_dequeue(struct swaiter *w) +{ + list_del_init(&w->node); +} + +/* + * Check whether a head has waiters enqueued + */ +static inline bool swait_head_has_waiters(struct swait_head *h) +{ + /* Make sure the condition is visible before checking list_empty() */ + smp_mb(); + return !list_empty(&h->list); +} + +/* + * Wakeup functions + */ +extern int __swait_wake(struct swait_head *head, unsigned int state); + +static inline int swait_wake(struct swait_head *head) +{ + return swait_head_has_waiters(head) ? + __swait_wake(head, TASK_NORMAL) : 0; +} + +static inline int swait_wake_interruptible(struct swait_head *head) +{ + return swait_head_has_waiters(head) ? + __swait_wake(head, TASK_INTERRUPTIBLE) : 0; +} + +/* + * Event API + */ + +#define __swait_event(wq, condition) \ +do { \ + DEFINE_SWAITER(__wait); \ + \ + for (;;) { \ + swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ + if (condition) \ + break; \ + schedule(); \ + } \ + swait_finish(&wq, &__wait); \ +} while (0) + +/** + * swait_event - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the + * @condition evaluates to true. The @condition is checked each time + * the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. 
+ */ +#define swait_event(wq, condition) \ +do { \ + if (condition) \ + break; \ + __swait_event(wq, condition); \ +} while (0) + +#define __swait_event_interruptible(wq, condition, ret) \ +do { \ + DEFINE_SWAITER(__wait); \ + \ + for (;;) { \ + swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (signal_pending(current)) { \ + ret = -ERESTARTSYS; \ + break; \ + } \ + schedule(); \ + } \ + swait_finish(&wq, &__wait); \ +} while (0) + +#define __swait_event_interruptible_timeout(wq, condition, ret) \ +do { \ + DEFINE_SWAITER(__wait); \ + \ + for (;;) { \ + swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (signal_pending(current)) { \ + ret = -ERESTARTSYS; \ + break; \ + } \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + } \ + swait_finish(&wq, &__wait); \ +} while (0) + +/** + * swait_event_interruptible - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true. The @condition is checked each time + * the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + */ +#define swait_event_interruptible(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __swait_event_interruptible(wq, condition, __ret); \ + __ret; \ +}) + +#define swait_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + int __ret = timeout; \ + if (!(condition)) \ + __swait_event_interruptible_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#define __swait_event_timeout(wq, condition, ret) \ +do { \ + DEFINE_SWAITER(__wait); \ + \ + for (;;) { \ + swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ + if (condition) \ + break; \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + } \ + swait_finish(&wq, &__wait); \ +} while (0) + +/** + * swait_event_timeout - sleep until a condition gets true or a timeout elapses + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the + * @condition evaluates to true. The @condition is checked each time + * the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function returns 0 if the @timeout elapsed, and the remaining + * jiffies if the condition evaluated to true before the timeout elapsed. 
+ */ +#define swait_event_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __swait_event_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#endif diff --git a/kernel/Makefile b/kernel/Makefile index 7fc87d5..b3ff0a8 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o \ kthread.o wait.o sys_ni.o posix-cpu-timers.o \ hrtimer.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o cred.o reboot.o \ - async.o range.o groups.o lglock.o smpboot.o + async.o range.o groups.o lglock.o smpboot.o wait-simple.o ifdef CONFIG_FUNCTION_TRACER # Do not trace debug files and internal ftrace files diff --git a/kernel/wait-simple.c b/kernel/wait-simple.c new file mode 100644 index 0000000..040d714 --- /dev/null +++ b/kernel/wait-simple.c @@ -0,0 +1,68 @@ +/* + * Simple waitqueues without fancy flags and callbacks + * + * (C) 2011 Thomas Gleixner + * + * Based on kernel/wait.c + * + * For licencing details see kernel-base/COPYING + */ +#include +#include +#include +#include + +void __init_swait_head(struct swait_head *head, struct lock_class_key *key) +{ + raw_spin_lock_init(&head->lock); + lockdep_set_class(&head->lock, key); + INIT_LIST_HEAD(&head->list); +} +EXPORT_SYMBOL(__init_swait_head); + +void swait_prepare(struct swait_head *head, struct swaiter *w, int state) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&head->lock, flags); + w->task = current; + if (list_empty(&w->node)) + __swait_enqueue(head, w); + set_current_state(state); + raw_spin_unlock_irqrestore(&head->lock, flags); +} +EXPORT_SYMBOL(swait_prepare); + +void swait_finish(struct swait_head *head, struct swaiter *w) +{ + unsigned long flags; + + __set_current_state(TASK_RUNNING); + if (w->task) { + raw_spin_lock_irqsave(&head->lock, flags); + __swait_dequeue(w); + raw_spin_unlock_irqrestore(&head->lock, flags); + } +} +EXPORT_SYMBOL(swait_finish); + +int __swait_wake(struct swait_head *head, unsigned int state) +{ + struct swaiter *curr, *next; + unsigned long flags; + int woken = 0; + + raw_spin_lock_irqsave(&head->lock, flags); + + list_for_each_entry_safe(curr, next, &head->list, node) { + if (wake_up_state(curr->task, state)) { + __swait_dequeue(curr); + curr->task = NULL; + woken++; + } + } + + raw_spin_unlock_irqrestore(&head->lock, flags); + return woken; +} +EXPORT_SYMBOL(__swait_wake); -- cgit v0.10.2 From 775d07a4a438be108c547220ba0222c129aaf0bc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 10 Jan 2013 11:47:35 +0100 Subject: wait-simple: Rework for use with completions Signed-off-by: Thomas Gleixner diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h index a9f2469..4efba4d 100644 --- a/include/linux/wait-simple.h +++ b/include/linux/wait-simple.h @@ -22,12 +22,14 @@ struct swait_head { struct list_head list; }; -#define DEFINE_SWAIT_HEAD(name) \ - struct swait_head name = { \ +#define SWAIT_HEAD_INITIALIZER(name) { \ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ .list = LIST_HEAD_INIT((name).list), \ } +#define DEFINE_SWAIT_HEAD(name) \ + struct swait_head name = SWAIT_HEAD_INITIALIZER(name) + extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); #define init_swait_head(swh) \ @@ -40,63 +42,25 @@ extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); /* * Waiter functions */ -static inline bool swaiter_enqueued(struct swaiter *w) -{ - return w->task != NULL; -} - +extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w); extern 
void swait_prepare(struct swait_head *head, struct swaiter *w, int state); +extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); extern void swait_finish(struct swait_head *head, struct swaiter *w); /* - * Adds w to head->list. Must be called with head->lock locked. - */ -static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) -{ - list_add(&w->node, &head->list); - /* We can't let the condition leak before the setting of head */ - smp_mb(); -} - -/* - * Removes w from head->list. Must be called with head->lock locked. - */ -static inline void __swait_dequeue(struct swaiter *w) -{ - list_del_init(&w->node); -} - -/* - * Check whether a head has waiters enqueued - */ -static inline bool swait_head_has_waiters(struct swait_head *h) -{ - /* Make sure the condition is visible before checking list_empty() */ - smp_mb(); - return !list_empty(&h->list); -} - -/* * Wakeup functions */ -extern int __swait_wake(struct swait_head *head, unsigned int state); - -static inline int swait_wake(struct swait_head *head) -{ - return swait_head_has_waiters(head) ? - __swait_wake(head, TASK_NORMAL) : 0; -} +extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num); +extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num); -static inline int swait_wake_interruptible(struct swait_head *head) -{ - return swait_head_has_waiters(head) ? - __swait_wake(head, TASK_INTERRUPTIBLE) : 0; -} +#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1) +#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1) +#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0) +#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0) /* * Event API */ - #define __swait_event(wq, condition) \ do { \ DEFINE_SWAITER(__wait); \ diff --git a/kernel/wait-simple.c b/kernel/wait-simple.c index 040d714..2c85626 100644 --- a/kernel/wait-simple.c +++ b/kernel/wait-simple.c @@ -12,6 +12,28 @@ #include #include +/* Adds w to head->list. Must be called with head->lock locked. */ +static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) +{ + list_add(&w->node, &head->list); + /* We can't let the condition leak before the setting of head */ + smp_mb(); +} + +/* Removes w from head->list. Must be called with head->lock locked. 
*/ +static inline void __swait_dequeue(struct swaiter *w) +{ + list_del_init(&w->node); +} + +/* Check whether a head has waiters enqueued */ +static inline bool swait_head_has_waiters(struct swait_head *h) +{ + /* Make sure the condition is visible before checking list_empty() */ + smp_mb(); + return !list_empty(&h->list); +} + void __init_swait_head(struct swait_head *head, struct lock_class_key *key) { raw_spin_lock_init(&head->lock); @@ -20,19 +42,31 @@ void __init_swait_head(struct swait_head *head, struct lock_class_key *key) } EXPORT_SYMBOL(__init_swait_head); +void swait_prepare_locked(struct swait_head *head, struct swaiter *w) +{ + w->task = current; + if (list_empty(&w->node)) + __swait_enqueue(head, w); +} + void swait_prepare(struct swait_head *head, struct swaiter *w, int state) { unsigned long flags; raw_spin_lock_irqsave(&head->lock, flags); - w->task = current; - if (list_empty(&w->node)) - __swait_enqueue(head, w); - set_current_state(state); + swait_prepare_locked(head, w); + __set_current_state(state); raw_spin_unlock_irqrestore(&head->lock, flags); } EXPORT_SYMBOL(swait_prepare); +void swait_finish_locked(struct swait_head *head, struct swaiter *w) +{ + __set_current_state(TASK_RUNNING); + if (w->task) + __swait_dequeue(w); +} + void swait_finish(struct swait_head *head, struct swaiter *w) { unsigned long flags; @@ -46,22 +80,43 @@ void swait_finish(struct swait_head *head, struct swaiter *w) } EXPORT_SYMBOL(swait_finish); -int __swait_wake(struct swait_head *head, unsigned int state) +unsigned int +__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num) { struct swaiter *curr, *next; - unsigned long flags; int woken = 0; - raw_spin_lock_irqsave(&head->lock, flags); - list_for_each_entry_safe(curr, next, &head->list, node) { if (wake_up_state(curr->task, state)) { __swait_dequeue(curr); + /* + * The waiting task can free the waiter as + * soon as curr->task = NULL is written, + * without taking any locks. A memory barrier + * is required here to prevent the following + * store to curr->task from getting ahead of + * the dequeue operation. + */ + smp_wmb(); curr->task = NULL; - woken++; + if (++woken == num) + break; } } + return woken; +} + +unsigned int +__swait_wake(struct swait_head *head, unsigned int state, unsigned int num) +{ + unsigned long flags; + int woken; + if (!swait_head_has_waiters(head)) + return 0; + + raw_spin_lock_irqsave(&head->lock, flags); + woken = __swait_wake_locked(head, state, num); raw_spin_unlock_irqrestore(&head->lock, flags); return woken; } -- cgit v0.10.2 From 17bcc46e39ba8e8d060f15bfd24a1d07dc30e895 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Tue, 27 Aug 2013 14:20:26 -0400 Subject: simple-wait: rename and export the equivalent of waitqueue_active() The function "swait_head_has_waiters()" was internalized into wait-simple.c but it parallels the waitqueue_active of normal waitqueue support. Given that there are over 150 waitqueue_active users in drivers/ fs/ kernel/ and the like, lets make it globally visible, and rename it to parallel the waitqueue_active accordingly. We'll need to do this if we expect to expand its usage beyond RT. 
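As a usage illustration, the newly exported helper supports the same idioms waitqueue_active() is used for, e.g. asserting that nothing is left waiting before an object is torn down. A short sketch follows; the foo_* names are hypothetical, only swaitqueue_active() and struct swait_head come from the series.

#include <linux/wait-simple.h>
#include <linux/slab.h>
#include <linux/bug.h>

struct foo_dev {
	struct swait_head wq;	/* initialized elsewhere with init_swait_head() */
	/* ... */
};

static void foo_free(struct foo_dev *foo)
{
	/*
	 * Same idiom as the waitqueue_active() checks in drivers/ and fs/:
	 * make sure no waiter is still enqueued before the memory goes away.
	 * swaitqueue_active() already contains the smp_mb() that orders
	 * prior stores against the list_empty() check.
	 */
	BUG_ON(swaitqueue_active(&foo->wq));
	kfree(foo);
}

The f_fs.c hunk converted later in the series uses exactly this idiom on a completion's internal wait head.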
Signed-off-by: Paul Gortmaker Signed-off-by: Sebastian Andrzej Siewior diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h index 4efba4d..f86bca2 100644 --- a/include/linux/wait-simple.h +++ b/include/linux/wait-simple.h @@ -47,6 +47,14 @@ extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state) extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); extern void swait_finish(struct swait_head *head, struct swaiter *w); +/* Check whether a head has waiters enqueued */ +static inline bool swaitqueue_active(struct swait_head *h) +{ + /* Make sure the condition is visible before checking list_empty() */ + smp_mb(); + return !list_empty(&h->list); +} + /* * Wakeup functions */ diff --git a/kernel/wait-simple.c b/kernel/wait-simple.c index 2c85626..7dfa86d 100644 --- a/kernel/wait-simple.c +++ b/kernel/wait-simple.c @@ -26,14 +26,6 @@ static inline void __swait_dequeue(struct swaiter *w) list_del_init(&w->node); } -/* Check whether a head has waiters enqueued */ -static inline bool swait_head_has_waiters(struct swait_head *h) -{ - /* Make sure the condition is visible before checking list_empty() */ - smp_mb(); - return !list_empty(&h->list); -} - void __init_swait_head(struct swait_head *head, struct lock_class_key *key) { raw_spin_lock_init(&head->lock); @@ -112,7 +104,7 @@ __swait_wake(struct swait_head *head, unsigned int state, unsigned int num) unsigned long flags; int woken; - if (!swait_head_has_waiters(head)) + if (!swaitqueue_active(head)) return 0; raw_spin_lock_irqsave(&head->lock, flags); -- cgit v0.10.2 From e67c1fd6a2cea58184eff3d96209895b6440eeb3 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 8 Apr 2013 16:09:57 +0200 Subject: kernel/treercu: use a simple waitqueue Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 8b08d37..507fab1 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -1488,7 +1488,7 @@ static int __noreturn rcu_gp_kthread(void *arg) /* Handle grace-period start. */ for (;;) { - wait_event_interruptible(rsp->gp_wq, + swait_event_interruptible(rsp->gp_wq, rsp->gp_flags & RCU_GP_FLAG_INIT); if ((rsp->gp_flags & RCU_GP_FLAG_INIT) && @@ -1507,7 +1507,7 @@ static int __noreturn rcu_gp_kthread(void *arg) } for (;;) { rsp->jiffies_force_qs = jiffies + j; - ret = wait_event_interruptible_timeout(rsp->gp_wq, + ret = swait_event_interruptible_timeout(rsp->gp_wq, (rsp->gp_flags & RCU_GP_FLAG_FQS) || (!ACCESS_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)), @@ -1545,7 +1545,7 @@ static void rsp_wakeup(struct irq_work *work) struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work); /* Wake up rcu_gp_kthread() to start the grace period. */ - wake_up(&rsp->gp_wq); + swait_wake(&rsp->gp_wq); } /* @@ -1619,7 +1619,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) { WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); - wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ + swait_wake(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ } /* @@ -2189,7 +2189,8 @@ static void force_quiescent_state(struct rcu_state *rsp) } rsp->gp_flags |= RCU_GP_FLAG_FQS; raw_spin_unlock_irqrestore(&rnp_old->lock, flags); - wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ + /* Memory barrier implied by wake_up() path. 
*/ + swait_wake(&rsp->gp_wq); } /* @@ -3314,7 +3315,7 @@ static void __init rcu_init_one(struct rcu_state *rsp, } rsp->rda = rda; - init_waitqueue_head(&rsp->gp_wq); + init_swait_head(&rsp->gp_wq); init_irq_work(&rsp->wakeup_work, rsp_wakeup); rnp = rsp->level[rcu_num_lvls - 1]; for_each_possible_cpu(i) { diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 68ed6a8..976b8a8 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -403,7 +403,7 @@ struct rcu_state { unsigned long gpnum; /* Current gp number. */ unsigned long completed; /* # of last completed gp. */ struct task_struct *gp_kthread; /* Task for grace periods. */ - wait_queue_head_t gp_wq; /* Where GP task waits. */ + struct swait_head gp_wq; /* Where GP task waits. */ int gp_flags; /* Commands for GP task. */ /* End of fields guarded by root rcu_node's lock. */ -- cgit v0.10.2 From cb5ced0afe0828bf02cc47276db21a7071ea0c22 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 31 Jul 2013 19:00:35 +0200 Subject: rcu-more-swait-conversions.patch Signed-off-by: Thomas Gleixner Merged Steven's static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) { - swait_wake(&rnp->nocb_gp_wq[rnp->completed & 0x1]); + wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); } Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 976b8a8..1df8d9e 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -28,6 +28,7 @@ #include #include #include +#include /* * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and @@ -200,7 +201,7 @@ struct rcu_node { /* This can happen due to race conditions. */ #endif /* #ifdef CONFIG_RCU_BOOST */ #ifdef CONFIG_RCU_NOCB_CPU - wait_queue_head_t nocb_gp_wq[2]; + struct swait_head nocb_gp_wq[2]; /* Place for rcu_nocb_kthread() to wait GP. */ #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ int need_future_gp[2]; @@ -333,7 +334,7 @@ struct rcu_data { atomic_long_t nocb_q_count_lazy; /* (approximate). */ int nocb_p_count; /* # CBs being invoked by kthread */ int nocb_p_count_lazy; /* (approximate). */ - wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ + struct swait_head nocb_wq; /* For nocb kthreads to sleep on. */ struct task_struct *nocb_kthread; #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 2545f031..c849bd4 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -1959,7 +1959,7 @@ static int rcu_nocb_needs_gp(struct rcu_state *rsp) */ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) { - wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); + swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); } /* @@ -1977,8 +1977,8 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) static void rcu_init_one_nocb(struct rcu_node *rnp) { - init_waitqueue_head(&rnp->nocb_gp_wq[0]); - init_waitqueue_head(&rnp->nocb_gp_wq[1]); + init_swait_head(&rnp->nocb_gp_wq[0]); + init_swait_head(&rnp->nocb_gp_wq[1]); } /* Is the specified CPU a no-CPUs CPU? */ @@ -2018,7 +2018,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, return; len = atomic_long_read(&rdp->nocb_q_count); if (old_rhpp == &rdp->nocb_head) { - wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */ + swait_wake(&rdp->nocb_wq); /* ... only if queue was empty ... */ rdp->qlen_last_fqs_check = 0; } else if (len > rdp->qlen_last_fqs_check + qhimark) { wake_up_process(t); /* ... or if many callbacks queued. 
*/ @@ -2108,7 +2108,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) */ trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); for (;;) { - wait_event_interruptible( + swait_event_interruptible( rnp->nocb_gp_wq[c & 0x1], (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); if (likely(d)) @@ -2136,7 +2136,7 @@ static int rcu_nocb_kthread(void *arg) for (;;) { /* If not polling, wait for next batch of callbacks. */ if (!rcu_nocb_poll) - wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); + swait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); list = ACCESS_ONCE(rdp->nocb_head); if (!list) { schedule_timeout_interruptible(1); @@ -2186,7 +2186,7 @@ static int rcu_nocb_kthread(void *arg) static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) { rdp->nocb_tail = &rdp->nocb_head; - init_waitqueue_head(&rdp->nocb_wq); + init_swait_head(&rdp->nocb_wq); } /* Create a kthread for each RCU flavor for each no-CBs CPU. */ -- cgit v0.10.2 From f296f2763578bd6aaeeb01fd4c3729bd673f803c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 11 Jan 2013 11:23:51 +0100 Subject: completion: Use simple wait queues Completions have no long lasting callbacks and therefor do not need the complex waitqueue variant. Use simple waitqueues which reduces the contention on the waitqueue lock. Signed-off-by: Thomas Gleixner diff --git a/include/linux/completion.h b/include/linux/completion.h index 3cd574d..eb2d4ac 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -8,7 +8,7 @@ * See kernel/sched/core.c for details. */ -#include +#include /* * struct completion - structure used to maintain state for a "completion" @@ -24,11 +24,11 @@ */ struct completion { unsigned int done; - wait_queue_head_t wait; + struct swait_head wait; }; #define COMPLETION_INITIALIZER(work) \ - { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + { 0, SWAIT_HEAD_INITIALIZER((work).wait) } #define COMPLETION_INITIALIZER_ONSTACK(work) \ ({ init_completion(&work); work; }) @@ -73,7 +73,7 @@ struct completion { static inline void init_completion(struct completion *x) { x->done = 0; - init_waitqueue_head(&x->wait); + init_swait_head(&x->wait); } extern void wait_for_completion(struct completion *); diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 06f28be..d115f62 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -26,6 +26,7 @@ #include #include +#include struct vm_area_struct; struct mm_struct; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 01aec90..8749d20 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2424,7 +2424,10 @@ void migrate_disable(void) } #ifdef CONFIG_SCHED_DEBUG - WARN_ON_ONCE(p->migrate_disable_atomic); + if (unlikely(p->migrate_disable_atomic)) { + tracing_off(); + WARN_ON_ONCE(1); + } #endif if (p->migrate_disable) { @@ -2455,7 +2458,10 @@ void migrate_enable(void) } #ifdef CONFIG_SCHED_DEBUG - WARN_ON_ONCE(p->migrate_disable_atomic); + if (unlikely(p->migrate_disable_atomic)) { + tracing_off(); + WARN_ON_ONCE(1); + } #endif WARN_ON_ONCE(p->migrate_disable <= 0); @@ -2913,10 +2919,10 @@ void complete(struct completion *x) { unsigned long flags; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); x->done++; - __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); - spin_unlock_irqrestore(&x->wait.lock, flags); + __swait_wake_locked(&x->wait, TASK_NORMAL, 1); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete); @@ -2933,10 +2939,10 @@ void 
complete_all(struct completion *x) { unsigned long flags; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); x->done += UINT_MAX/2; - __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); - spin_unlock_irqrestore(&x->wait.lock, flags); + __swait_wake_locked(&x->wait, TASK_NORMAL, 0); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete_all); @@ -2945,20 +2951,20 @@ do_wait_for_common(struct completion *x, long (*action)(long), long timeout, int state) { if (!x->done) { - DECLARE_WAITQUEUE(wait, current); + DEFINE_SWAITER(wait); - __add_wait_queue_tail_exclusive(&x->wait, &wait); + swait_prepare_locked(&x->wait, &wait); do { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; break; } __set_current_state(state); - spin_unlock_irq(&x->wait.lock); + raw_spin_unlock_irq(&x->wait.lock); timeout = action(timeout); - spin_lock_irq(&x->wait.lock); + raw_spin_lock_irq(&x->wait.lock); } while (!x->done && timeout); - __remove_wait_queue(&x->wait, &wait); + swait_finish_locked(&x->wait, &wait); if (!x->done) return timeout; } @@ -2972,9 +2978,9 @@ __wait_for_common(struct completion *x, { might_sleep(); - spin_lock_irq(&x->wait.lock); + raw_spin_lock_irq(&x->wait.lock); timeout = do_wait_for_common(x, action, timeout, state); - spin_unlock_irq(&x->wait.lock); + raw_spin_unlock_irq(&x->wait.lock); return timeout; } @@ -3150,12 +3156,12 @@ bool try_wait_for_completion(struct completion *x) unsigned long flags; int ret = 1; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; else x->done--; - spin_unlock_irqrestore(&x->wait.lock, flags); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); return ret; } EXPORT_SYMBOL(try_wait_for_completion); @@ -3173,10 +3179,10 @@ bool completion_done(struct completion *x) unsigned long flags; int ret = 1; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; - spin_unlock_irqrestore(&x->wait.lock, flags); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); return ret; } EXPORT_SYMBOL(completion_done); -- cgit v0.10.2 From 1273b9566225d091a44f46630266f08808b62f87 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 28 Oct 2013 11:50:06 +0100 Subject: a few open coded completions Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index bdfe637..0fbb1ce 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -714,7 +714,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, while (!ctx->done.done && msecs--) udelay(1000); } else { - wait_event_interruptible(ctx->done.wait, + swait_event_interruptible(ctx->done.wait, ctx->done.done); } break; diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 44cf775..fd2fe19 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -1282,7 +1282,7 @@ static void ffs_data_put(struct ffs_data *ffs) pr_info("%s(): freeing\n", __func__); ffs_data_clear(ffs); BUG_ON(waitqueue_active(&ffs->ev.waitq) || - waitqueue_active(&ffs->ep0req_completion.wait)); + swaitqueue_active(&ffs->ep0req_completion.wait)); kfree(ffs->dev_name); kfree(ffs); } diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index b94c049..1033ecc 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -340,7 +340,7 @@ ep_io (struct ep_data *epdata, 
void *buf, unsigned len) spin_unlock_irq (&epdata->dev->lock); if (likely (value == 0)) { - value = wait_event_interruptible (done.wait, done.done); + value = swait_event_interruptible (done.wait, done.done); if (value != 0) { spin_lock_irq (&epdata->dev->lock); if (likely (epdata->ep != NULL)) { @@ -349,7 +349,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) usb_ep_dequeue (epdata->ep, epdata->req); spin_unlock_irq (&epdata->dev->lock); - wait_event (done.wait, done.done); + swait_event (done.wait, done.done); if (epdata->status == -ECONNRESET) epdata->status = -EINTR; } else { -- cgit v0.10.2 From 66fc1a4436cb3a79d597c14dcb8ce25bdbd6214e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 26 Jun 2013 15:28:11 -0400 Subject: rt,ntp: Move call to schedule_delayed_work() to helper thread The ntp code for notify_cmos_timer() is called from a hard interrupt context. schedule_delayed_work() under PREEMPT_RT_FULL calls spinlocks that have been converted to mutexes, thus calling schedule_delayed_work() from interrupt is not safe. Add a helper thread that does the call to schedule_delayed_work and wake up that thread instead of calling schedule_delayed_work() directly. This is only for CONFIG_PREEMPT_RT_FULL, otherwise the code still calls schedule_delayed_work() directly in irq context. Note: There's a few places in the kernel that do this. Perhaps the RT code should have a dedicated thread that does the checks. Just register a notifier on boot up for your check and wake up the thread when needed. This will be a todo. Signed-off-by: Steven Rostedt diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index af8d1d4..d6132cd 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -517,10 +518,49 @@ static void sync_cmos_clock(struct work_struct *work) schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); } +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * RT can not call schedule_delayed_work from real interrupt context. + * Need to make a thread to do the real work. 
+ */ +static struct task_struct *cmos_delay_thread; +static bool do_cmos_delay; + +static int run_cmos_delay(void *ignore) +{ + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if (do_cmos_delay) { + do_cmos_delay = false; + schedule_delayed_work(&sync_cmos_work, 0); + } + schedule(); + } + __set_current_state(TASK_RUNNING); + return 0; +} + +void ntp_notify_cmos_timer(void) +{ + do_cmos_delay = true; + /* Make visible before waking up process */ + smp_wmb(); + wake_up_process(cmos_delay_thread); +} + +static __init int create_cmos_delay_thread(void) +{ + cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd"); + BUG_ON(!cmos_delay_thread); + return 0; +} +early_initcall(create_cmos_delay_thread); +#else void ntp_notify_cmos_timer(void) { schedule_delayed_work(&sync_cmos_work, 0); } +#endif /* CONFIG_PREEMPT_RT_FULL */ #else void ntp_notify_cmos_timer(void) { } -- cgit v0.10.2 From 799512e8ddf8d5f4659681cca27e643ee0762d28 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 29 Aug 2013 11:48:57 +0200 Subject: md: disable bcache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It uses anon semaphores |drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’: |drivers/md/bcache/request.c:1007:2: error: implicit declaration of function ‘up_read_non_owner’ [-Werror=implicit-function-declaration] | up_read_non_owner(&dc->writeback_lock); | ^ |drivers/md/bcache/request.c: In function ‘request_write’: |drivers/md/bcache/request.c:1033:2: error: implicit declaration of function ‘down_read_non_owner’ [-Werror=implicit-function-declaration] | down_read_non_owner(&dc->writeback_lock); | ^ either we get rid of those or we have to introduce them… Signed-off-by: Sebastian Andrzej Siewior diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index f950c9d..5eb76dd 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -1,6 +1,7 @@ config BCACHE tristate "Block device as cache" + depends on !PREEMPT_RT_FULL ---help--- Allows a block device to be used as cache for other devices; uses a btree for indexing and the layout is optimized for SSDs. -- cgit v0.10.2 From 07c8b57b111585a617b2b456497fc9b33c00743c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 8 Jul 2011 20:25:16 +0200 Subject: localversion.patch Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org diff --git a/localversion-rt b/localversion-rt new file mode 100644 index 0000000..c5b71f9 --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ +-rt25 -- cgit v0.10.2
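The kcmosdelayd pattern from the rt,ntp patch above generalizes: any path that must not call a sleeping API from hard interrupt context can defer the call to a dedicated kthread and only set a flag plus wake_up_process() from the interrupt. A minimal sketch of that shape follows; all foo_* names are hypothetical and not taken from the patch.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/err.h>

static struct task_struct *foo_helper;
static bool foo_pending;

static void foo_do_sleeping_work(void)
{
	/* e.g. schedule_delayed_work() or a mutex-protected update */
}

static int foo_helper_fn(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (foo_pending) {
			__set_current_state(TASK_RUNNING);
			foo_pending = false;
			foo_do_sleeping_work();
			continue;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Safe to call from hard interrupt context. */
static void foo_kick(void)
{
	foo_pending = true;
	smp_wmb();	/* make the flag visible before the helper is woken */
	wake_up_process(foo_helper);
}

static int __init foo_helper_init(void)
{
	foo_helper = kthread_run(foo_helper_fn, NULL, "foo_helper");
	if (IS_ERR(foo_helper))
		return PTR_ERR(foo_helper);
	return 0;
}
early_initcall(foo_helper_init);

As the commit message itself notes, several places in the kernel need this; a single shared helper thread with a notifier list registered at boot would avoid spawning one kthread per caller.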