-rw-r--r--   arch/i386/boot/boot.h      |  24
-rw-r--r--   arch/i386/boot/cpucheck.c  |   3
-rw-r--r--   arch/i386/boot/edd.c       |   6
-rw-r--r--   arch/i386/boot/tty.c       |  14
-rw-r--r--   arch/i386/boot/video-vga.c |  17
-rw-r--r--   drivers/md/Kconfig         |   2
-rw-r--r--   kernel/sched.c             |   8
-rw-r--r--   kernel/sched_fair.c        |  35
-rw-r--r--   kernel/sched_rt.c          |  11

9 files changed, 62 insertions, 58 deletions
diff --git a/arch/i386/boot/boot.h b/arch/i386/boot/boot.h
index dec70c9..20bab94 100644
--- a/arch/i386/boot/boot.h
+++ b/arch/i386/boot/boot.h
@@ -87,7 +87,7 @@ static inline void set_fs(u16 seg)
 static inline u16 fs(void)
 {
 	u16 seg;
-	asm("movw %%fs,%0" : "=rm" (seg));
+	asm volatile("movw %%fs,%0" : "=rm" (seg));
 	return seg;
 }
 
@@ -98,7 +98,7 @@ static inline void set_gs(u16 seg)
 static inline u16 gs(void)
 {
 	u16 seg;
-	asm("movw %%gs,%0" : "=rm" (seg));
+	asm volatile("movw %%gs,%0" : "=rm" (seg));
 	return seg;
 }
 
@@ -107,19 +107,19 @@ typedef unsigned int addr_t;
 static inline u8 rdfs8(addr_t addr)
 {
 	u8 v;
-	asm("movb %%fs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
+	asm volatile("movb %%fs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
 	return v;
 }
 
 static inline u16 rdfs16(addr_t addr)
 {
 	u16 v;
-	asm("movw %%fs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
+	asm volatile("movw %%fs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
 	return v;
 }
 
 static inline u32 rdfs32(addr_t addr)
 {
 	u32 v;
-	asm("movl %%fs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
+	asm volatile("movl %%fs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
 	return v;
 }
 
@@ -139,19 +139,19 @@ static inline void wrfs32(u32 v, addr_t addr)
 static inline u8 rdgs8(addr_t addr)
 {
 	u8 v;
-	asm("movb %%gs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
+	asm volatile("movb %%gs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
 	return v;
 }
 
 static inline u16 rdgs16(addr_t addr)
 {
 	u16 v;
-	asm("movw %%gs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
+	asm volatile("movw %%gs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
 	return v;
 }
 
 static inline u32 rdgs32(addr_t addr)
 {
 	u32 v;
-	asm("movl %%gs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
+	asm volatile("movl %%gs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
 	return v;
 }
 
@@ -180,15 +180,15 @@ static inline int memcmp(const void *s1, const void *s2, size_t len)
 static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
 {
 	u8 diff;
-	asm("fs; repe; cmpsb; setnz %0"
-	    : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+	asm volatile("fs; repe; cmpsb; setnz %0"
+		     : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
 	return diff;
 }
 static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
 {
 	u8 diff;
-	asm("gs; repe; cmpsb; setnz %0"
-	    : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+	asm volatile("gs; repe; cmpsb; setnz %0"
+		     : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
 	return diff;
 }
diff --git a/arch/i386/boot/cpucheck.c b/arch/i386/boot/cpucheck.c
index 991e8ce..e655a89 100644
--- a/arch/i386/boot/cpucheck.c
+++ b/arch/i386/boot/cpucheck.c
@@ -96,7 +96,8 @@ static int has_fpu(void)
 		asm volatile("movl %0,%%cr0" : : "r" (cr0));
 	}
 
-	asm("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));
+	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+		     : "+m" (fsw), "+m" (fcw));
 
 	return fsw == 0 && (fcw & 0x103f) == 0x003f;
 }
diff --git a/arch/i386/boot/edd.c b/arch/i386/boot/edd.c
index 82b5c84..bd138e4 100644
--- a/arch/i386/boot/edd.c
+++ b/arch/i386/boot/edd.c
@@ -30,9 +30,9 @@ static int read_mbr(u8 devno, void *buf)
 	cx = 0x0001;		/* Sector 0-0-1 */
 	dx = devno;
 	bx = (size_t)buf;
-	asm("pushfl; stc; int $0x13; setc %%al; popfl"
-	    : "+a" (ax), "+c" (cx), "+d" (dx), "+b" (bx)
-	    : : "esi", "edi", "memory");
+	asm volatile("pushfl; stc; int $0x13; setc %%al; popfl"
+		     : "+a" (ax), "+c" (cx), "+d" (dx), "+b" (bx)
+		     : : "esi", "edi", "memory");
 
 	return -(u8)ax;		/* 0 or -1 */
 }
diff --git a/arch/i386/boot/tty.c b/arch/i386/boot/tty.c
index 9c668aa..f3f14bd 100644
--- a/arch/i386/boot/tty.c
+++ b/arch/i386/boot/tty.c
@@ -54,9 +54,9 @@ static u8 gettime(void)
 	u16 ax = 0x0200;
 	u16 cx, dx;
 
-	asm("int $0x1a"
-	    : "+a" (ax), "=c" (cx), "=d" (dx)
-	    : : "ebx", "esi", "edi");
+	asm volatile("int $0x1a"
+		     : "+a" (ax), "=c" (cx), "=d" (dx)
+		     : : "ebx", "esi", "edi");
 
 	return dx >> 8;
 }
@@ -67,7 +67,7 @@ static u8 gettime(void)
 int getchar(void)
 {
 	u16 ax = 0;
-	asm("int $0x16" : "+a" (ax));
+	asm volatile("int $0x16" : "+a" (ax));
 
 	return ax & 0xff;
 }
@@ -75,9 +75,9 @@ int getchar(void)
 static int kbd_pending(void)
 {
 	u8 pending;
-	asm("int $0x16; setnz %0"
-	    : "=rm" (pending)
-	    : "a" (0x0100));
+	asm volatile("int $0x16; setnz %0"
+		     : "=rm" (pending)
+		     : "a" (0x0100));
 	return pending;
 }
diff --git a/arch/i386/boot/video-vga.c b/arch/i386/boot/video-vga.c
index 700d09a..aef02f9 100644
--- a/arch/i386/boot/video-vga.c
+++ b/arch/i386/boot/video-vga.c
@@ -47,16 +47,16 @@ static u8 vga_set_basic_mode(void)
 
 #ifdef CONFIG_VIDEO_400_HACK
 	if (adapter >= ADAPTER_VGA) {
-		asm(INT10
-		    : : "a" (0x1202), "b" (0x0030)
-		    : "ecx", "edx", "esi", "edi");
+		asm volatile(INT10
+			     : : "a" (0x1202), "b" (0x0030)
+			     : "ecx", "edx", "esi", "edi");
 	}
 #endif
 
 	ax = 0x0f00;
-	asm(INT10
-	    : "+a" (ax)
-	    : : "ebx", "ecx", "edx", "esi", "edi");
+	asm volatile(INT10
+		     : "+a" (ax)
+		     : : "ebx", "ecx", "edx", "esi", "edi");
 
 	mode = (u8)ax;
 
@@ -73,9 +73,10 @@ static u8 vga_set_basic_mode(void)
 		mode = 3;
 
 	/* Set the mode */
+	ax = mode;
 	asm volatile(INT10
-		     : : "a" (mode)
-		     : "ebx", "ecx", "edx", "esi", "edi");
+		     : "+a" (ax)
+		     : : "ebx", "ecx", "edx", "esi", "edi");
 	do_restore = 1;
 	return mode;
 }
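Why the blanket asm -> asm volatile conversion in the boot code matters: without volatile, GCC may treat an asm statement with output operands as a pure function of its inputs, so it can merge two identical reads, hoist one out of a loop, or delete one whose result looks unused. Segment registers are changed behind the compiler's back by set_fs()/set_gs(), so every read must actually execute where it is written. A minimal standalone sketch of the difference (x86 plus GCC assumed; the function names here are illustrative, not from the patch):

#include <stdio.h>

/* May be CSE'd, reordered, or dropped if "seg" looks unused: */
static inline unsigned short read_fs_optimizable(void)
{
	unsigned short seg;
	asm("movw %%fs,%0" : "=rm" (seg));
	return seg;
}

/* Pinned: emitted exactly once per call site, in program order: */
static inline unsigned short read_fs_pinned(void)
{
	unsigned short seg;
	asm volatile("movw %%fs,%0" : "=rm" (seg));
	return seg;
}

int main(void)
{
	printf("fs = %#hx / %#hx\n", read_fs_optimizable(), read_fs_pinned());
	return 0;
}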
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 531d4d1..34a8c60 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -263,7 +263,7 @@ config DM_MULTIPATH_EMC
 
 config DM_MULTIPATH_RDAC
 	tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
 	---help---
 	  Multipath support for LSI/Engenio RDAC.
diff --git a/kernel/sched.c b/kernel/sched.c
index 96e9b82..6798328 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2180,12 +2180,6 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	if (task_running(rq, p))
 		return 0;
 
-	/*
-	 * Aggressive migration if too many balance attempts have failed:
-	 */
-	if (sd->nr_balance_failed > sd->cache_nice_tries)
-		return 1;
-
 	return 1;
 }
 
@@ -4923,7 +4917,7 @@ static inline void sched_init_granularity(void)
 	if (sysctl_sched_granularity > gran_limit)
 		sysctl_sched_granularity = gran_limit;
 
-	sysctl_sched_runtime_limit = sysctl_sched_granularity * 8;
+	sysctl_sched_runtime_limit = sysctl_sched_granularity * 5;
 	sysctl_sched_wakeup_granularity = sysctl_sched_granularity / 2;
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fedbb51..4d6b7e2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -19,7 +19,7 @@
 
 /*
  * Preemption granularity:
- * (default: 2 msec, units: nanoseconds)
+ * (default: 10 msec, units: nanoseconds)
  *
  * NOTE: this granularity value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS will typically be somewhat
@@ -31,18 +31,17 @@
  * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
  * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
  */
-unsigned int sysctl_sched_granularity __read_mostly = 2000000000ULL/HZ;
+unsigned int sysctl_sched_granularity __read_mostly = 10000000UL;
 
 /*
  * SCHED_BATCH wake-up granularity.
- * (default: 10 msec, units: nanoseconds)
+ * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
-							10000000000ULL/HZ;
+unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
 
 /*
 * SCHED_OTHER wake-up granularity.
@@ -52,12 +51,12 @@ unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000000ULL/HZ;
+unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
 
 unsigned int sysctl_sched_stat_granularity __read_mostly;
 
 /*
- * Initialized in sched_init_granularity():
+ * Initialized in sched_init_granularity() [to 5 times the base granularity]:
 */
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
@@ -304,9 +303,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
 	if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-		delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
-		delta = calc_delta_mine(delta, curr->load.weight, lw);
-		delta = min((u64)delta, cfs_rq->sleeper_bonus);
+		delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
+		delta = min(delta, (unsigned long)(
+			(long)sysctl_sched_runtime_limit - curr->wait_runtime));
 		cfs_rq->sleeper_bonus -= delta;
 		delta_mine -= delta;
 	}
@@ -494,6 +493,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	unsigned long load = cfs_rq->load.weight, delta_fair;
 	long prev_runtime;
 
+	/*
+	 * Do not boost sleepers if there's too much bonus 'in flight'
+	 * already:
+	 */
+	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+		return;
+
 	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
 		load = rq_of(cfs_rq)->cpu_load[2];
 
@@ -513,16 +519,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 	delta_fair = se->wait_runtime - prev_runtime;
 
 	/*
 	 * Track the amount of bonus we've given to sleepers:
 	 */
 	cfs_rq->sleeper_bonus += delta_fair;
-	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
-		cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
-
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1044,7 +1047,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	 * -granularity/2, so initialize the task with that:
 	 */
 	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
-		p->se.wait_runtime = -(sysctl_sched_granularity / 2);
+		p->se.wait_runtime = -((long)sysctl_sched_granularity / 2);
 
 	__enqueue_entity(cfs_rq, se);
 }
@@ -1057,7 +1060,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 */
 static void set_curr_task_fair(struct rq *rq)
 {
-	struct sched_entity *se = &rq->curr.se;
+	struct sched_entity *se = &rq->curr->se;
 
 	for_each_sched_entity(se)
 		set_next_entity(cfs_rq_of(se), se);
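A units check on the granularity constants above: the old initializers were scaled by HZ at compile time, so the effective defaults depended on the kernel's tick rate, while the new literals are fixed nanosecond values that match the updated comments. A standalone verification, assuming HZ=1000 (a common configuration of that era; the macro here stands in for the kernel config value):

#include <stdio.h>

#define HZ 1000	/* assumed config value, not part of the patch */

int main(void)
{
	unsigned int old_gran  = 2000000000ULL / HZ;  /* HZ-dependent */
	unsigned int new_gran  = 10000000UL;          /* fixed 10 ms  */
	unsigned int old_batch = 10000000000ULL / HZ; /* HZ-dependent */
	unsigned int new_batch = 25000000UL;          /* fixed 25 ms  */

	printf("granularity:  old %u ns (%u ms), new %u ns (%u ms)\n",
	       old_gran, old_gran / 1000000, new_gran, new_gran / 1000000);
	printf("batch wakeup: old %u ns (%u ms), new %u ns (%u ms)\n",
	       old_batch, old_batch / 1000000, new_batch, new_batch / 1000000);
	return 0;
}

With HZ=1000 this prints 2 ms and 10 ms for the old defaults, exactly what the pre-patch comments claimed; at HZ=100 the same expressions silently became 20 ms and 100 ms, which the fixed literals avoid.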
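The __update_curr() hunk also changes which quantity the sleeper bonus is paid back from: the charge is now capped both by delta_mine (what the current task was actually accounted) and by the task's remaining headroom below sysctl_sched_runtime_limit, instead of being derived from delta_exec. A toy walk-through of the new clamping (all numbers made up, units nanoseconds):

#include <stdio.h>

typedef unsigned long long u64;

static u64 min_u64(u64 a, u64 b) { return a < b ? a : b; }

int main(void)
{
	u64  sleeper_bonus = 12000000;  /* 12 ms of bonus in flight     */
	u64  delta_mine    =  3000000;  /*  3 ms accounted this update  */
	long runtime_limit = 50000000;  /* 50 ms = 5x 10 ms granularity */
	long wait_runtime  = 45000000;  /* current task's wait_runtime  */

	u64 delta = min_u64(delta_mine, sleeper_bonus);
	delta = min_u64(delta, (u64)(runtime_limit - wait_runtime));

	sleeper_bonus -= delta;
	delta_mine    -= delta;

	/* Charges 3 ms: bounded by delta_mine, within the 5 ms headroom. */
	printf("charged %llu ns, bonus left %llu ns, delta_mine %llu ns\n",
	       delta, sleeper_bonus, delta_mine);
	return 0;
}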
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index dcdcad6..4b87476 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -207,10 +207,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 		return;
 
 	p->time_slice = static_prio_timeslice(p->static_prio);
-	set_tsk_need_resched(p);
 
-	/* put it at the end of the queue: */
-	requeue_task_rt(rq, p);
+	/*
+	 * Requeue to the end of queue if we are not the only element
+	 * on the queue:
+	 */
+	if (p->run_list.prev != p->run_list.next) {
+		requeue_task_rt(rq, p);
+		set_tsk_need_resched(p);
+	}
 }
 
 static struct sched_class rt_sched_class __read_mostly = {
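The new p->run_list.prev != p->run_list.next test in task_tick_rt() is the standard "is there more than one entry?" check for a circular doubly-linked list: when a node is the only entry, both of its links point at the list head, so they compare equal. A standalone mirror of the structure (the kernel's real list_head lives in <linux/list.h>; this re-implementation is only for illustration):

#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

static void list_init(struct list_head *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

int main(void)
{
	struct list_head queue, a, b;

	list_init(&queue);
	list_add_tail(&a, &queue);
	printf("one task:  requeue? %d\n", a.prev != a.next);  /* 0: skip    */
	list_add_tail(&b, &queue);
	printf("two tasks: requeue? %d\n", a.prev != a.next);  /* 1: requeue */
	return 0;
}

Skipping the requeue (and the resched flag) when the task is alone on its priority queue avoids a pointless reschedule of the only runnable RT task at that priority.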