Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |  31
-rw-r--r--  lib/Makefile         |   3
-rw-r--r--  lib/debugobjects.c   | 890
-rw-r--r--  lib/devres.c         |   6
-rw-r--r--  lib/div64.c          |  35
-rw-r--r--  lib/find_next_bit.c  |  22
-rw-r--r--  lib/idr.c            |  12
-rw-r--r--  lib/inflate.c        |   3
-rw-r--r--  lib/iomap.c          |   2
-rw-r--r--  lib/klist.c          | 235
-rw-r--r--  lib/kobject.c        |  44
-rw-r--r--  lib/kobject_uevent.c |  10
-rw-r--r--  lib/percpu_counter.c |   1
-rw-r--r--  lib/proportions.c    |  38
-rw-r--r--  lib/ratelimit.c      |  51
-rw-r--r--  lib/string.c         |  27
-rw-r--r--  lib/swiotlb.c        | 149
17 files changed, 1306 insertions, 253 deletions
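The bulk of this commit is the new lib/debugobjects.c. For orientation, here is a minimal sketch of how a subsystem could hook its objects into the API introduced below; the descriptor layout, entry points (struct debug_obj_descr, debug_object_init(), debug_object_activate(), debug_object_deactivate(), debug_object_free()) and the fixup callback signature come straight from this patch, while my_obj and all my_* names are hypothetical.

#include <linux/debugobjects.h>

struct my_obj {
	unsigned long state;
};

static struct debug_obj_descr my_obj_debug_descr;

/*
 * Called by the core when debug_object_init() finds the object already
 * ODEBUG_STATE_ACTIVE: deactivate it so the re-init can proceed.
 * Returning 1 counts the repair in debug_objects_fixups.
 */
static int my_obj_fixup_init(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(addr, &my_obj_debug_descr);
		debug_object_init(addr, &my_obj_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr my_obj_debug_descr = {
	.name		= "my_obj",
	.fixup_init	= my_obj_fixup_init,
};

void my_obj_setup(struct my_obj *obj)
{
	debug_object_init(obj, &my_obj_debug_descr);	/* NONE -> INIT */
	obj->state = 0;
}

void my_obj_start(struct my_obj *obj)
{
	debug_object_activate(obj, &my_obj_debug_descr);	/* INIT -> ACTIVE */
}

void my_obj_stop(struct my_obj *obj)
{
	debug_object_deactivate(obj, &my_obj_debug_descr);	/* ACTIVE -> INACTIVE */
}

void my_obj_teardown(struct my_obj *obj)
{
	debug_object_free(obj, &my_obj_debug_descr);	/* drop the tracker entry */
}

For stack-allocated instances the caller would use debug_object_init_on_stack() instead, and with CONFIG_DEBUG_OBJECTS_FREE the hook debug_check_no_obj_freed() catches objects freed while still active, routing them through the descriptor's fixup_free callback (not wired up in this sketch).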
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 754cc00..d2099f4 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -194,6 +194,37 @@ config TIMER_STATS (it defaults to deactivated on bootup and will only be activated if some application like powertop activates it explicitly). +config DEBUG_OBJECTS + bool "Debug object operations" + depends on DEBUG_KERNEL + help + If you say Y here, additional code will be inserted into the + kernel to track the life time of various objects and validate + the operations on those objects. + +config DEBUG_OBJECTS_SELFTEST + bool "Debug objects selftest" + depends on DEBUG_OBJECTS + help + This enables the selftest of the object debug code. + +config DEBUG_OBJECTS_FREE + bool "Debug objects in freed memory" + depends on DEBUG_OBJECTS + help + This enables checks whether a k/v free operation frees an area + which contains an object which has not been deactivated + properly. This can make kmalloc/kfree-intensive workloads + much slower. + +config DEBUG_OBJECTS_TIMERS + bool "Debug timer objects" + depends on DEBUG_OBJECTS + help + If you say Y here, additional code will be inserted into the + timer routines to track the life time of timer objects and + validate the timer operations. + config DEBUG_SLAB bool "Debug slab memory allocations" depends on DEBUG_KERNEL && SLAB diff --git a/lib/Makefile b/lib/Makefile index 2d7001b..74b0cfb 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ - proportions.o prio_heap.o + proportions.o prio_heap.o ratelimit.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -36,6 +36,7 @@ obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_PLIST) += plist.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o +obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o ifneq ($(CONFIG_HAVE_DEC_LOCK),y) lib-y += dec_and_lock.o diff --git a/lib/debugobjects.c b/lib/debugobjects.c new file mode 100644 index 0000000..a76a5e1 --- /dev/null +++ b/lib/debugobjects.c @@ -0,0 +1,890 @@ +/* + * Generic infrastructure for lifetime debugging of objects. 
+ * + * Started by Thomas Gleixner + * + * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de> + * + * For licencing details see kernel-base/COPYING + */ +#include <linux/debugobjects.h> +#include <linux/interrupt.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/hash.h> + +#define ODEBUG_HASH_BITS 14 +#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) + +#define ODEBUG_POOL_SIZE 512 +#define ODEBUG_POOL_MIN_LEVEL 256 + +#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT +#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) +#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) + +struct debug_bucket { + struct hlist_head list; + spinlock_t lock; +}; + +static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; + +static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE]; + +static DEFINE_SPINLOCK(pool_lock); + +static HLIST_HEAD(obj_pool); + +static int obj_pool_min_free = ODEBUG_POOL_SIZE; +static int obj_pool_free = ODEBUG_POOL_SIZE; +static int obj_pool_used; +static int obj_pool_max_used; +static struct kmem_cache *obj_cache; + +static int debug_objects_maxchain __read_mostly; +static int debug_objects_fixups __read_mostly; +static int debug_objects_warnings __read_mostly; +static int debug_objects_enabled __read_mostly; +static struct debug_obj_descr *descr_test __read_mostly; + +static int __init enable_object_debug(char *str) +{ + debug_objects_enabled = 1; + return 0; +} +early_param("debug_objects", enable_object_debug); + +static const char *obj_states[ODEBUG_STATE_MAX] = { + [ODEBUG_STATE_NONE] = "none", + [ODEBUG_STATE_INIT] = "initialized", + [ODEBUG_STATE_INACTIVE] = "inactive", + [ODEBUG_STATE_ACTIVE] = "active", + [ODEBUG_STATE_DESTROYED] = "destroyed", + [ODEBUG_STATE_NOTAVAILABLE] = "not available", +}; + +static int fill_pool(void) +{ + gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; + struct debug_obj *new; + + if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) + return obj_pool_free; + + if (unlikely(!obj_cache)) + return obj_pool_free; + + while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { + + new = kmem_cache_zalloc(obj_cache, gfp); + if (!new) + return obj_pool_free; + + spin_lock(&pool_lock); + hlist_add_head(&new->node, &obj_pool); + obj_pool_free++; + spin_unlock(&pool_lock); + } + return obj_pool_free; +} + +/* + * Lookup an object in the hash bucket. + */ +static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) +{ + struct hlist_node *node; + struct debug_obj *obj; + int cnt = 0; + + hlist_for_each_entry(obj, node, &b->list, node) { + cnt++; + if (obj->object == addr) + return obj; + } + if (cnt > debug_objects_maxchain) + debug_objects_maxchain = cnt; + + return NULL; +} + +/* + * Allocate a new object. If the pool is empty and no refill possible, + * switch off the debugger. 
+ */ +static struct debug_obj * +alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) +{ + struct debug_obj *obj = NULL; + int retry = 0; + +repeat: + spin_lock(&pool_lock); + if (obj_pool.first) { + obj = hlist_entry(obj_pool.first, typeof(*obj), node); + + obj->object = addr; + obj->descr = descr; + obj->state = ODEBUG_STATE_NONE; + hlist_del(&obj->node); + + hlist_add_head(&obj->node, &b->list); + + obj_pool_used++; + if (obj_pool_used > obj_pool_max_used) + obj_pool_max_used = obj_pool_used; + + obj_pool_free--; + if (obj_pool_free < obj_pool_min_free) + obj_pool_min_free = obj_pool_free; + } + spin_unlock(&pool_lock); + + if (fill_pool() && !obj && !retry++) + goto repeat; + + return obj; +} + +/* + * Put the object back into the pool or give it back to kmem_cache: + */ +static void free_object(struct debug_obj *obj) +{ + unsigned long idx = (unsigned long)(obj - obj_static_pool); + + if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { + spin_lock(&pool_lock); + hlist_add_head(&obj->node, &obj_pool); + obj_pool_free++; + obj_pool_used--; + spin_unlock(&pool_lock); + } else { + spin_lock(&pool_lock); + obj_pool_used--; + spin_unlock(&pool_lock); + kmem_cache_free(obj_cache, obj); + } +} + +/* + * We run out of memory. That means we probably have tons of objects + * allocated. + */ +static void debug_objects_oom(void) +{ + struct debug_bucket *db = obj_hash; + struct hlist_node *node, *tmp; + struct debug_obj *obj; + unsigned long flags; + int i; + + printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); + + for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { + spin_lock_irqsave(&db->lock, flags); + hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { + hlist_del(&obj->node); + free_object(obj); + } + spin_unlock_irqrestore(&db->lock, flags); + } +} + +/* + * We use the pfn of the address for the hash. That way we can check + * for freed objects simply by checking the affected bucket. + */ +static struct debug_bucket *get_bucket(unsigned long addr) +{ + unsigned long hash; + + hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS); + return &obj_hash[hash]; +} + +static void debug_print_object(struct debug_obj *obj, char *msg) +{ + static int limit; + + if (limit < 5 && obj->descr != descr_test) { + limit++; + printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, + obj_states[obj->state], obj->descr->name); + WARN_ON(1); + } + debug_objects_warnings++; +} + +/* + * Try to repair the damage, so we have a better chance to get useful + * debug output. 
+ */ +static void +debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), + void * addr, enum debug_obj_state state) +{ + if (fixup) + debug_objects_fixups += fixup(addr, state); +} + +static void debug_object_is_on_stack(void *addr, int onstack) +{ + void *stack = current->stack; + int is_on_stack; + static int limit; + + if (limit > 4) + return; + + is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE)); + + if (is_on_stack == onstack) + return; + + limit++; + if (is_on_stack) + printk(KERN_WARNING + "ODEBUG: object is on stack, but not annotated\n"); + else + printk(KERN_WARNING + "ODEBUG: object is not on stack, but annotated\n"); + WARN_ON(1); +} + +static void +__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) { + obj = alloc_object(addr, db, descr); + if (!obj) { + debug_objects_enabled = 0; + spin_unlock_irqrestore(&db->lock, flags); + debug_objects_oom(); + return; + } + debug_object_is_on_stack(addr, onstack); + } + + switch (obj->state) { + case ODEBUG_STATE_NONE: + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_INIT; + break; + + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "init"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_init, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "init"); + break; + default: + break; + } + + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_init - debug checks when an object is initialized + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_init(void *addr, struct debug_obj_descr *descr) +{ + if (!debug_objects_enabled) + return; + + __debug_object_init(addr, descr, 0); +} + +/** + * debug_object_init_on_stack - debug checks when an object on stack is + * initialized + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) +{ + if (!debug_objects_enabled) + return; + + __debug_object_init(addr, descr, 1); +} + +/** + * debug_object_activate - debug checks when an object is activated + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_activate(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (obj) { + switch (obj->state) { + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_ACTIVE; + break; + + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "activate"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_activate, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "activate"); + break; + default: + break; + } + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + spin_unlock_irqrestore(&db->lock, flags); + /* + * This happens when a static object is 
activated. We + * let the type specific code decide whether this is + * true or not. + */ + debug_object_fixup(descr->fixup_activate, addr, + ODEBUG_STATE_NOTAVAILABLE); +} + +/** + * debug_object_deactivate - debug checks when an object is deactivated + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) +{ + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (obj) { + switch (obj->state) { + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + case ODEBUG_STATE_ACTIVE: + obj->state = ODEBUG_STATE_INACTIVE; + break; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "deactivate"); + break; + default: + break; + } + } else { + struct debug_obj o = { .object = addr, + .state = ODEBUG_STATE_NOTAVAILABLE, + .descr = descr }; + + debug_print_object(&o, "deactivate"); + } + + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_destroy - debug checks when an object is destroyed + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_destroy(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) + goto out_unlock; + + switch (obj->state) { + case ODEBUG_STATE_NONE: + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_DESTROYED; + break; + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "destroy"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_destroy, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "destroy"); + break; + default: + break; + } +out_unlock: + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_free - debug checks when an object is freed + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_free(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) + goto out_unlock; + + switch (obj->state) { + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "free"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_free, addr, state); + return; + default: + hlist_del(&obj->node); + free_object(obj); + break; + } +out_unlock: + spin_unlock_irqrestore(&db->lock, flags); +} + +#ifdef CONFIG_DEBUG_OBJECTS_FREE +static void __debug_check_no_obj_freed(const void *address, unsigned long size) +{ + unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; + struct hlist_node *node, *tmp; + struct debug_obj_descr *descr; + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + int cnt; + + saddr = (unsigned long) address; + eaddr = saddr + size; + paddr = saddr & ODEBUG_CHUNK_MASK; + 
chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); + chunks >>= ODEBUG_CHUNK_SHIFT; + + for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { + db = get_bucket(paddr); + +repeat: + cnt = 0; + spin_lock_irqsave(&db->lock, flags); + hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { + cnt++; + oaddr = (unsigned long) obj->object; + if (oaddr < saddr || oaddr >= eaddr) + continue; + + switch (obj->state) { + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "free"); + descr = obj->descr; + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_free, + (void *) oaddr, state); + goto repeat; + default: + hlist_del(&obj->node); + free_object(obj); + break; + } + } + spin_unlock_irqrestore(&db->lock, flags); + if (cnt > debug_objects_maxchain) + debug_objects_maxchain = cnt; + } +} + +void debug_check_no_obj_freed(const void *address, unsigned long size) +{ + if (debug_objects_enabled) + __debug_check_no_obj_freed(address, size); +} +#endif + +#ifdef CONFIG_DEBUG_FS + +static int debug_stats_show(struct seq_file *m, void *v) +{ + seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); + seq_printf(m, "warnings :%d\n", debug_objects_warnings); + seq_printf(m, "fixups :%d\n", debug_objects_fixups); + seq_printf(m, "pool_free :%d\n", obj_pool_free); + seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); + seq_printf(m, "pool_used :%d\n", obj_pool_used); + seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); + return 0; +} + +static int debug_stats_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, debug_stats_show, NULL); +} + +static const struct file_operations debug_stats_fops = { + .open = debug_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init debug_objects_init_debugfs(void) +{ + struct dentry *dbgdir, *dbgstats; + + if (!debug_objects_enabled) + return 0; + + dbgdir = debugfs_create_dir("debug_objects", NULL); + if (!dbgdir) + return -ENOMEM; + + dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, + &debug_stats_fops); + if (!dbgstats) + goto err; + + return 0; + +err: + debugfs_remove(dbgdir); + + return -ENOMEM; +} +__initcall(debug_objects_init_debugfs); + +#else +static inline void debug_objects_init_debugfs(void) { } +#endif + +#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST + +/* Random data structure for the self test */ +struct self_test { + unsigned long dummy1[6]; + int static_init; + unsigned long dummy2[3]; +}; + +static __initdata struct debug_obj_descr descr_type_test; + +/* + * fixup_init is called when: + * - an active object is initialized + */ +static int __init fixup_init(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_init(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +/* + * fixup_activate is called when: + * - an active object is activated + * - an unknown object is activated (might be a statically initialized object) + */ +static int __init fixup_activate(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_NOTAVAILABLE: + if (obj->static_init == 1) { + debug_object_init(obj, &descr_type_test); + debug_object_activate(obj, &descr_type_test); + /* + * Real code should return 0 here ! This is + * not a fixup of some bad behaviour. 
We + * merily call the debug_init function to keep + * track of the object. + */ + return 1; + } else { + /* Real code needs to emit a warning here */ + } + return 0; + + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_activate(obj, &descr_type_test); + return 1; + + default: + return 0; + } +} + +/* + * fixup_destroy is called when: + * - an active object is destroyed + */ +static int __init fixup_destroy(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_destroy(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +/* + * fixup_free is called when: + * - an active object is freed + */ +static int __init fixup_free(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_free(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +static int +check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) +{ + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + int res = -EINVAL; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj && state != ODEBUG_STATE_NONE) { + printk(KERN_ERR "ODEBUG: selftest object not found\n"); + WARN_ON(1); + goto out; + } + if (obj && obj->state != state) { + printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", + obj->state, state); + WARN_ON(1); + goto out; + } + if (fixups != debug_objects_fixups) { + printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", + fixups, debug_objects_fixups); + WARN_ON(1); + goto out; + } + if (warnings != debug_objects_warnings) { + printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", + warnings, debug_objects_warnings); + WARN_ON(1); + goto out; + } + res = 0; +out: + spin_unlock_irqrestore(&db->lock, flags); + if (res) + debug_objects_enabled = 0; + return res; +} + +static __initdata struct debug_obj_descr descr_type_test = { + .name = "selftest", + .fixup_init = fixup_init, + .fixup_activate = fixup_activate, + .fixup_destroy = fixup_destroy, + .fixup_free = fixup_free, +}; + +static __initdata struct self_test obj = { .static_init = 0 }; + +static void __init debug_objects_selftest(void) +{ + int fixups, oldfixups, warnings, oldwarnings; + unsigned long flags; + + local_irq_save(flags); + + fixups = oldfixups = debug_objects_fixups; + warnings = oldwarnings = debug_objects_warnings; + descr_test = &descr_type_test; + + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings)) + goto out; + debug_object_deactivate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings)) + goto out; + debug_object_destroy(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings)) + goto out; + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, 
ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_deactivate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_free(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) + goto out; + + obj.static_init = 1; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings)) + goto out; + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings)) + goto out; + debug_object_free(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) + goto out; + +#ifdef CONFIG_DEBUG_OBJECTS_FREE + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) + goto out; + __debug_check_no_obj_freed(&obj, sizeof(obj)); + if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings)) + goto out; +#endif + printk(KERN_INFO "ODEBUG: selftest passed\n"); + +out: + debug_objects_fixups = oldfixups; + debug_objects_warnings = oldwarnings; + descr_test = NULL; + + local_irq_restore(flags); +} +#else +static inline void debug_objects_selftest(void) { } +#endif + +/* + * Called during early boot to initialize the hash buckets and link + * the static object pool objects into the poll list. After this call + * the object tracker is fully operational. + */ +void __init debug_objects_early_init(void) +{ + int i; + + for (i = 0; i < ODEBUG_HASH_SIZE; i++) + spin_lock_init(&obj_hash[i].lock); + + for (i = 0; i < ODEBUG_POOL_SIZE; i++) + hlist_add_head(&obj_static_pool[i].node, &obj_pool); +} + +/* + * Called after the kmem_caches are functional to setup a dedicated + * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag + * prevents that the debug code is called on kmem_cache_free() for the + * debug tracker objects to avoid recursive calls. + */ +void __init debug_objects_mem_init(void) +{ + if (!debug_objects_enabled) + return; + + obj_cache = kmem_cache_create("debug_objects_cache", + sizeof (struct debug_obj), 0, + SLAB_DEBUG_OBJECTS, NULL); + + if (!obj_cache) + debug_objects_enabled = 0; + else + debug_objects_selftest(); +} diff --git a/lib/devres.c b/lib/devres.c index edc27a5..72c8909 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -2,7 +2,7 @@ #include <linux/io.h> #include <linux/module.h> -static void devm_ioremap_release(struct device *dev, void *res) +void devm_ioremap_release(struct device *dev, void *res) { iounmap(*(void __iomem **)res); } @@ -20,7 +20,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data) * * Managed ioremap(). Map is automatically unmapped on driver detach. */ -void __iomem *devm_ioremap(struct device *dev, unsigned long offset, +void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, unsigned long size) { void __iomem **ptr, *addr; @@ -49,7 +49,7 @@ EXPORT_SYMBOL(devm_ioremap); * Managed ioremap_nocache(). Map is automatically unmapped on driver * detach. 
*/ -void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset, +void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, unsigned long size) { void __iomem **ptr, *addr; diff --git a/lib/div64.c b/lib/div64.c index b71cf93..bb5bd0c 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -16,9 +16,8 @@ * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. */ -#include <linux/types.h> #include <linux/module.h> -#include <asm/div64.h> +#include <linux/math64.h> /* Not needed on 64bit architectures */ #if BITS_PER_LONG == 32 @@ -58,10 +57,31 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) EXPORT_SYMBOL(__div64_32); +#ifndef div_s64_rem +s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) +{ + u64 quotient; + + if (dividend < 0) { + quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder); + *remainder = -*remainder; + if (divisor > 0) + quotient = -quotient; + } else { + quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder); + if (divisor < 0) + quotient = -quotient; + } + return quotient; +} +EXPORT_SYMBOL(div_s64_rem); +#endif + /* 64bit divisor, dividend and result. dynamic precision */ -uint64_t div64_64(uint64_t dividend, uint64_t divisor) +#ifndef div64_u64 +u64 div64_u64(u64 dividend, u64 divisor) { - uint32_t high, d; + u32 high, d; high = divisor >> 32; if (high) { @@ -72,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor) } else d = divisor; - do_div(dividend, d); - - return dividend; + return div_u64(dividend, d); } -EXPORT_SYMBOL(div64_64); +EXPORT_SYMBOL(div64_u64); +#endif #endif /* BITS_PER_LONG == 32 */ diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index d3f5784..24c59de 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -20,8 +20,8 @@ /* * Find the next set bit in a memory region. */ -unsigned long __find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); @@ -58,14 +58,14 @@ found_first: found_middle: return result + __ffs(tmp); } -EXPORT_SYMBOL(__find_next_bit); +EXPORT_SYMBOL(find_next_bit); /* * This implementation of find_{first,next}_zero_bit was stolen from * Linus' asm-alpha/bitops.h. */ -unsigned long __find_next_zero_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) +unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); @@ -102,15 +102,14 @@ found_first: found_middle: return result + ffz(tmp); } -EXPORT_SYMBOL(__find_next_zero_bit); +EXPORT_SYMBOL(find_next_zero_bit); #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ #ifdef CONFIG_GENERIC_FIND_FIRST_BIT /* * Find the first set bit in a memory region. */ -unsigned long __find_first_bit(const unsigned long *addr, - unsigned long size) +unsigned long find_first_bit(const unsigned long *addr, unsigned long size) { const unsigned long *p = addr; unsigned long result = 0; @@ -131,13 +130,12 @@ unsigned long __find_first_bit(const unsigned long *addr, found: return result + __ffs(tmp); } -EXPORT_SYMBOL(__find_first_bit); +EXPORT_SYMBOL(find_first_bit); /* * Find the first cleared bit in a memory region. 
*/ -unsigned long __find_first_zero_bit(const unsigned long *addr, - unsigned long size) +unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) { const unsigned long *p = addr; unsigned long result = 0; @@ -158,7 +156,7 @@ unsigned long __find_first_zero_bit(const unsigned long *addr, found: return result + ffz(tmp); } -EXPORT_SYMBOL(__find_first_zero_bit); +EXPORT_SYMBOL(find_first_zero_bit); #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifdef __BIG_ENDIAN @@ -385,8 +385,8 @@ void idr_remove(struct idr *idp, int id) while (idp->id_free_cnt >= IDR_FREE_MAX) { p = alloc_layer(idp); kmem_cache_free(idr_layer_cache, p); - return; } + return; } EXPORT_SYMBOL(idr_remove); @@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) memset(idr_layer, 0, sizeof(struct idr_layer)); } -static int init_id_cache(void) +void __init idr_init_cache(void) { - if (!idr_layer_cache) - idr_layer_cache = kmem_cache_create("idr_layer_cache", - sizeof(struct idr_layer), 0, 0, idr_cache_ctor); - return 0; + idr_layer_cache = kmem_cache_create("idr_layer_cache", + sizeof(struct idr_layer), 0, SLAB_PANIC, + idr_cache_ctor); } /** @@ -602,7 +601,6 @@ static int init_id_cache(void) */ void idr_init(struct idr *idp) { - init_id_cache(); memset(idp, 0, sizeof(struct idr)); spin_lock_init(&idp->lock); } diff --git a/lib/inflate.c b/lib/inflate.c index 845f91d..9762294 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -811,6 +811,9 @@ DEBG("<dyn"); ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */ #endif + if (ll == NULL) + return 1; + /* make local bit buffer */ b = bb; k = bk; diff --git a/lib/iomap.c b/lib/iomap.c index dd6ca48..37a3ea4 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap); void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) { resource_size_t start = pci_resource_start(dev, bar); - unsigned long len = pci_resource_len(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); unsigned long flags = pci_resource_flags(dev, bar); if (!len || !start) diff --git a/lib/klist.c b/lib/klist.c index 120bd17..cca37f9 100644 --- a/lib/klist.c +++ b/lib/klist.c @@ -1,38 +1,37 @@ /* - * klist.c - Routines for manipulating klists. + * klist.c - Routines for manipulating klists. * + * Copyright (C) 2005 Patrick Mochel * - * This klist interface provides a couple of structures that wrap around - * struct list_head to provide explicit list "head" (struct klist) and - * list "node" (struct klist_node) objects. For struct klist, a spinlock - * is included that protects access to the actual list itself. struct - * klist_node provides a pointer to the klist that owns it and a kref - * reference count that indicates the number of current users of that node - * in the list. + * This file is released under the GPL v2. * - * The entire point is to provide an interface for iterating over a list - * that is safe and allows for modification of the list during the - * iteration (e.g. insertion and removal), including modification of the - * current node on the list. + * This klist interface provides a couple of structures that wrap around + * struct list_head to provide explicit list "head" (struct klist) and list + * "node" (struct klist_node) objects. For struct klist, a spinlock is + * included that protects access to the actual list itself. 
struct + * klist_node provides a pointer to the klist that owns it and a kref + * reference count that indicates the number of current users of that node + * in the list. * - * It works using a 3rd object type - struct klist_iter - that is declared - * and initialized before an iteration. klist_next() is used to acquire the - * next element in the list. It returns NULL if there are no more items. - * Internally, that routine takes the klist's lock, decrements the reference - * count of the previous klist_node and increments the count of the next - * klist_node. It then drops the lock and returns. + * The entire point is to provide an interface for iterating over a list + * that is safe and allows for modification of the list during the + * iteration (e.g. insertion and removal), including modification of the + * current node on the list. * - * There are primitives for adding and removing nodes to/from a klist. - * When deleting, klist_del() will simply decrement the reference count. - * Only when the count goes to 0 is the node removed from the list. - * klist_remove() will try to delete the node from the list and block - * until it is actually removed. This is useful for objects (like devices) - * that have been removed from the system and must be freed (but must wait - * until all accessors have finished). + * It works using a 3rd object type - struct klist_iter - that is declared + * and initialized before an iteration. klist_next() is used to acquire the + * next element in the list. It returns NULL if there are no more items. + * Internally, that routine takes the klist's lock, decrements the + * reference count of the previous klist_node and increments the count of + * the next klist_node. It then drops the lock and returns. * - * Copyright (C) 2005 Patrick Mochel - * - * This file is released under the GPL v2. + * There are primitives for adding and removing nodes to/from a klist. + * When deleting, klist_del() will simply decrement the reference count. + * Only when the count goes to 0 is the node removed from the list. + * klist_remove() will try to delete the node from the list and block until + * it is actually removed. This is useful for objects (like devices) that + * have been removed from the system and must be freed (but must wait until + * all accessors have finished). */ #include <linux/klist.h> @@ -40,10 +39,10 @@ /** - * klist_init - Initialize a klist structure. - * @k: The klist we're initializing. - * @get: The get function for the embedding object (NULL if none) - * @put: The put function for the embedding object (NULL if none) + * klist_init - Initialize a klist structure. + * @k: The klist we're initializing. + * @get: The get function for the embedding object (NULL if none) + * @put: The put function for the embedding object (NULL if none) * * Initialises the klist structure. If the klist_node structures are * going to be embedded in refcounted objects (necessary for safe @@ -51,8 +50,7 @@ * functions that take and release references on the embedding * objects. 
*/ - -void klist_init(struct klist * k, void (*get)(struct klist_node *), +void klist_init(struct klist *k, void (*get)(struct klist_node *), void (*put)(struct klist_node *)) { INIT_LIST_HEAD(&k->k_list); @@ -60,26 +58,23 @@ void klist_init(struct klist * k, void (*get)(struct klist_node *), k->get = get; k->put = put; } - EXPORT_SYMBOL_GPL(klist_init); - -static void add_head(struct klist * k, struct klist_node * n) +static void add_head(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } -static void add_tail(struct klist * k, struct klist_node * n) +static void add_tail(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add_tail(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } - -static void klist_node_init(struct klist * k, struct klist_node * n) +static void klist_node_init(struct klist *k, struct klist_node *n) { INIT_LIST_HEAD(&n->n_node); init_completion(&n->n_removed); @@ -89,60 +84,83 @@ static void klist_node_init(struct klist * k, struct klist_node * n) k->get(n); } - /** - * klist_add_head - Initialize a klist_node and add it to front. - * @n: node we're adding. - * @k: klist it's going on. + * klist_add_head - Initialize a klist_node and add it to front. + * @n: node we're adding. + * @k: klist it's going on. */ - -void klist_add_head(struct klist_node * n, struct klist * k) +void klist_add_head(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_head(k, n); } - EXPORT_SYMBOL_GPL(klist_add_head); - /** - * klist_add_tail - Initialize a klist_node and add it to back. - * @n: node we're adding. - * @k: klist it's going on. + * klist_add_tail - Initialize a klist_node and add it to back. + * @n: node we're adding. + * @k: klist it's going on. */ - -void klist_add_tail(struct klist_node * n, struct klist * k) +void klist_add_tail(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_tail(k, n); } - EXPORT_SYMBOL_GPL(klist_add_tail); +/** + * klist_add_after - Init a klist_node and add it after an existing node + * @n: node we're adding. + * @pos: node to put @n after + */ +void klist_add_after(struct klist_node *n, struct klist_node *pos) +{ + struct klist *k = pos->n_klist; + + klist_node_init(k, n); + spin_lock(&k->k_lock); + list_add(&n->n_node, &pos->n_node); + spin_unlock(&k->k_lock); +} +EXPORT_SYMBOL_GPL(klist_add_after); + +/** + * klist_add_before - Init a klist_node and add it before an existing node + * @n: node we're adding. + * @pos: node to put @n after + */ +void klist_add_before(struct klist_node *n, struct klist_node *pos) +{ + struct klist *k = pos->n_klist; + + klist_node_init(k, n); + spin_lock(&k->k_lock); + list_add_tail(&n->n_node, &pos->n_node); + spin_unlock(&k->k_lock); +} +EXPORT_SYMBOL_GPL(klist_add_before); -static void klist_release(struct kref * kref) +static void klist_release(struct kref *kref) { - struct klist_node * n = container_of(kref, struct klist_node, n_ref); + struct klist_node *n = container_of(kref, struct klist_node, n_ref); list_del(&n->n_node); complete(&n->n_removed); n->n_klist = NULL; } -static int klist_dec_and_del(struct klist_node * n) +static int klist_dec_and_del(struct klist_node *n) { return kref_put(&n->n_ref, klist_release); } - /** - * klist_del - Decrement the reference count of node and try to remove. - * @n: node we're deleting. + * klist_del - Decrement the reference count of node and try to remove. + * @n: node we're deleting. 
*/ - -void klist_del(struct klist_node * n) +void klist_del(struct klist_node *n) { - struct klist * k = n->n_klist; + struct klist *k = n->n_klist; void (*put)(struct klist_node *) = k->put; spin_lock(&k->k_lock); @@ -152,48 +170,40 @@ void klist_del(struct klist_node * n) if (put) put(n); } - EXPORT_SYMBOL_GPL(klist_del); - /** - * klist_remove - Decrement the refcount of node and wait for it to go away. - * @n: node we're removing. + * klist_remove - Decrement the refcount of node and wait for it to go away. + * @n: node we're removing. */ - -void klist_remove(struct klist_node * n) +void klist_remove(struct klist_node *n) { klist_del(n); wait_for_completion(&n->n_removed); } - EXPORT_SYMBOL_GPL(klist_remove); - /** - * klist_node_attached - Say whether a node is bound to a list or not. - * @n: Node that we're testing. + * klist_node_attached - Say whether a node is bound to a list or not. + * @n: Node that we're testing. */ - -int klist_node_attached(struct klist_node * n) +int klist_node_attached(struct klist_node *n) { return (n->n_klist != NULL); } - EXPORT_SYMBOL_GPL(klist_node_attached); - /** - * klist_iter_init_node - Initialize a klist_iter structure. - * @k: klist we're iterating. - * @i: klist_iter we're filling. - * @n: node to start with. + * klist_iter_init_node - Initialize a klist_iter structure. + * @k: klist we're iterating. + * @i: klist_iter we're filling. + * @n: node to start with. * - * Similar to klist_iter_init(), but starts the action off with @n, - * instead of with the list head. + * Similar to klist_iter_init(), but starts the action off with @n, + * instead of with the list head. */ - -void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n) +void klist_iter_init_node(struct klist *k, struct klist_iter *i, + struct klist_node *n) { i->i_klist = k; i->i_head = &k->k_list; @@ -201,66 +211,56 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_ if (n) kref_get(&n->n_ref); } - EXPORT_SYMBOL_GPL(klist_iter_init_node); - /** - * klist_iter_init - Iniitalize a klist_iter structure. - * @k: klist we're iterating. - * @i: klist_iter structure we're filling. + * klist_iter_init - Iniitalize a klist_iter structure. + * @k: klist we're iterating. + * @i: klist_iter structure we're filling. * - * Similar to klist_iter_init_node(), but start with the list head. + * Similar to klist_iter_init_node(), but start with the list head. */ - -void klist_iter_init(struct klist * k, struct klist_iter * i) +void klist_iter_init(struct klist *k, struct klist_iter *i) { klist_iter_init_node(k, i, NULL); } - EXPORT_SYMBOL_GPL(klist_iter_init); - /** - * klist_iter_exit - Finish a list iteration. - * @i: Iterator structure. + * klist_iter_exit - Finish a list iteration. + * @i: Iterator structure. * - * Must be called when done iterating over list, as it decrements the - * refcount of the current node. Necessary in case iteration exited before - * the end of the list was reached, and always good form. + * Must be called when done iterating over list, as it decrements the + * refcount of the current node. Necessary in case iteration exited before + * the end of the list was reached, and always good form. 
*/ - -void klist_iter_exit(struct klist_iter * i) +void klist_iter_exit(struct klist_iter *i) { if (i->i_cur) { klist_del(i->i_cur); i->i_cur = NULL; } } - EXPORT_SYMBOL_GPL(klist_iter_exit); - -static struct klist_node * to_klist_node(struct list_head * n) +static struct klist_node *to_klist_node(struct list_head *n) { return container_of(n, struct klist_node, n_node); } - /** - * klist_next - Ante up next node in list. - * @i: Iterator structure. + * klist_next - Ante up next node in list. + * @i: Iterator structure. * - * First grab list lock. Decrement the reference count of the previous - * node, if there was one. Grab the next node, increment its reference - * count, drop the lock, and return that next node. + * First grab list lock. Decrement the reference count of the previous + * node, if there was one. Grab the next node, increment its reference + * count, drop the lock, and return that next node. */ - -struct klist_node * klist_next(struct klist_iter * i) +struct klist_node *klist_next(struct klist_iter *i) { - struct list_head * next; - struct klist_node * lnode = i->i_cur; - struct klist_node * knode = NULL; + struct list_head *next; + struct klist_node *lnode = i->i_cur; + struct klist_node *knode = NULL; void (*put)(struct klist_node *) = i->i_klist->put; spin_lock(&i->i_klist->k_lock); @@ -281,7 +281,4 @@ struct klist_node * klist_next(struct klist_iter * i) put(lnode); return knode; } - EXPORT_SYMBOL_GPL(klist_next); - - diff --git a/lib/kobject.c b/lib/kobject.c index 2c64903..718e510 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -90,7 +90,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length) } pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj), - kobj, __FUNCTION__, path); + kobj, __func__, path); } /** @@ -181,7 +181,7 @@ static int kobject_add_internal(struct kobject *kobj) } pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n", - kobject_name(kobj), kobj, __FUNCTION__, + kobject_name(kobj), kobj, __func__, parent ? kobject_name(parent) : "<NULL>", kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>"); @@ -196,10 +196,10 @@ static int kobject_add_internal(struct kobject *kobj) printk(KERN_ERR "%s failed for %s with " "-EEXIST, don't try to register things with " "the same name in the same directory.\n", - __FUNCTION__, kobject_name(kobj)); + __func__, kobject_name(kobj)); else printk(KERN_ERR "%s failed for %s (%d)\n", - __FUNCTION__, kobject_name(kobj), error); + __func__, kobject_name(kobj), error); dump_stack(); } else kobj->state_in_sysfs = 1; @@ -216,21 +216,12 @@ static int kobject_add_internal(struct kobject *kobj) static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs) { - va_list aq; - char *name; - - va_copy(aq, vargs); - name = kvasprintf(GFP_KERNEL, fmt, vargs); - va_end(aq); - - if (!name) - return -ENOMEM; - /* Free the old name, if necessary. */ kfree(kobj->name); - /* Now, set the new name */ - kobj->name = name; + kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); + if (!kobj->name) + return -ENOMEM; return 0; } @@ -246,12 +237,12 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, */ int kobject_set_name(struct kobject *kobj, const char *fmt, ...) 
{ - va_list args; + va_list vargs; int retval; - va_start(args, fmt); - retval = kobject_set_name_vargs(kobj, fmt, args); - va_end(args); + va_start(vargs, fmt); + retval = kobject_set_name_vargs(kobj, fmt, vargs); + va_end(vargs); return retval; } @@ -301,12 +292,9 @@ EXPORT_SYMBOL(kobject_init); static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, const char *fmt, va_list vargs) { - va_list aq; int retval; - va_copy(aq, vargs); - retval = kobject_set_name_vargs(kobj, fmt, aq); - va_end(aq); + retval = kobject_set_name_vargs(kobj, fmt, vargs); if (retval) { printk(KERN_ERR "kobject: can not set name properly!\n"); return retval; @@ -540,7 +528,7 @@ static void kobject_cleanup(struct kobject *kobj) const char *name = kobj->name; pr_debug("kobject: '%s' (%p): %s\n", - kobject_name(kobj), kobj, __FUNCTION__); + kobject_name(kobj), kobj, __func__); if (t && !t->release) pr_debug("kobject: '%s' (%p): does not have a release() " @@ -600,7 +588,7 @@ void kobject_put(struct kobject *kobj) static void dynamic_kobj_release(struct kobject *kobj) { - pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__); + pr_debug("kobject: (%p): %s\n", kobj, __func__); kfree(kobj); } @@ -657,7 +645,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent) retval = kobject_add(kobj, parent, "%s", name); if (retval) { printk(KERN_WARNING "%s: kobject_add error: %d\n", - __FUNCTION__, retval); + __func__, retval); kobject_put(kobj); kobj = NULL; } @@ -765,7 +753,7 @@ static void kset_release(struct kobject *kobj) { struct kset *kset = container_of(kobj, struct kset, kobj); pr_debug("kobject: '%s' (%p): %s\n", - kobject_name(kobj), kobj, __FUNCTION__); + kobject_name(kobj), kobj, __func__); kfree(kset); } diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 9fb6b86..2fa545a 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -101,7 +101,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, int retval = 0; pr_debug("kobject: '%s' (%p): %s\n", - kobject_name(kobj), kobj, __FUNCTION__); + kobject_name(kobj), kobj, __func__); /* search the kset we belong to */ top_kobj = kobj; @@ -111,7 +111,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (!top_kobj->kset) { pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " "without kset!\n", kobject_name(kobj), kobj, - __FUNCTION__); + __func__); return -EINVAL; } @@ -123,7 +123,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (!uevent_ops->filter(kset, kobj)) { pr_debug("kobject: '%s' (%p): %s: filter function " "caused the event to drop!\n", - kobject_name(kobj), kobj, __FUNCTION__); + kobject_name(kobj), kobj, __func__); return 0; } @@ -135,7 +135,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (!subsystem) { pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " "event to drop!\n", kobject_name(kobj), kobj, - __FUNCTION__); + __func__); return 0; } @@ -177,7 +177,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, if (retval) { pr_debug("kobject: '%s' (%p): %s: uevent() returned " "%d\n", kobject_name(kobj), kobj, - __FUNCTION__, retval); + __func__, retval); goto exit; } } diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 393a0e9..1191744 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -102,6 +102,7 @@ void percpu_counter_destroy(struct percpu_counter *fbc) return; free_percpu(fbc->counters); + fbc->counters = NULL; #ifdef 
CONFIG_HOTPLUG_CPU mutex_lock(&percpu_counters_lock); list_del(&fbc->list); diff --git a/lib/proportions.c b/lib/proportions.c index 9508d9a..4f387a6 100644 --- a/lib/proportions.c +++ b/lib/proportions.c @@ -73,12 +73,6 @@ #include <linux/proportions.h> #include <linux/rcupdate.h> -/* - * Limit the time part in order to ensure there are some bits left for the - * cycle counter. - */ -#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4) - int prop_descriptor_init(struct prop_descriptor *pd, int shift) { int err; @@ -268,6 +262,38 @@ void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) } /* + * identical to __prop_inc_percpu, except that it limits this pl's fraction to + * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded. + */ +void __prop_inc_percpu_max(struct prop_descriptor *pd, + struct prop_local_percpu *pl, long frac) +{ + struct prop_global *pg = prop_get_global(pd); + + prop_norm_percpu(pg, pl); + + if (unlikely(frac != PROP_FRAC_BASE)) { + unsigned long period_2 = 1UL << (pg->shift - 1); + unsigned long counter_mask = period_2 - 1; + unsigned long global_count; + long numerator, denominator; + + numerator = percpu_counter_read_positive(&pl->events); + global_count = percpu_counter_read(&pg->events); + denominator = period_2 + (global_count & counter_mask); + + if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT)) + goto out_put; + } + + percpu_counter_add(&pl->events, 1); + percpu_counter_add(&pg->events, 1); + +out_put: + prop_put_global(pd, pg); +} + +/* * Obtain a fraction of this proportion * * p_{j} = x_{j} / (period/2 + t % period/2) diff --git a/lib/ratelimit.c b/lib/ratelimit.c new file mode 100644 index 0000000..485e304 --- /dev/null +++ b/lib/ratelimit.c @@ -0,0 +1,51 @@ +/* + * ratelimit.c - Do something with rate limit. + * + * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> + * + * This file is released under the GPLv2. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/jiffies.h> +#include <linux/module.h> + +/* + * __ratelimit - rate limiting + * @ratelimit_jiffies: minimum time in jiffies between two callbacks + * @ratelimit_burst: number of callbacks we do before ratelimiting + * + * This enforces a rate limit: not more than @ratelimit_burst callbacks + * in every ratelimit_jiffies + */ +int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) +{ + static DEFINE_SPINLOCK(ratelimit_lock); + static unsigned toks = 10 * 5 * HZ; + static unsigned long last_msg; + static int missed; + unsigned long flags; + unsigned long now = jiffies; + + spin_lock_irqsave(&ratelimit_lock, flags); + toks += now - last_msg; + last_msg = now; + if (toks > (ratelimit_burst * ratelimit_jiffies)) + toks = ratelimit_burst * ratelimit_jiffies; + if (toks >= ratelimit_jiffies) { + int lost = missed; + + missed = 0; + toks -= ratelimit_jiffies; + spin_unlock_irqrestore(&ratelimit_lock, flags); + if (lost) + printk(KERN_WARNING "%s: %d messages suppressed\n", + __func__, lost); + return 1; + } + missed++; + spin_unlock_irqrestore(&ratelimit_lock, flags); + return 0; +} +EXPORT_SYMBOL(__ratelimit); diff --git a/lib/string.c b/lib/string.c index 5efafed..b19b87a 100644 --- a/lib/string.c +++ b/lib/string.c @@ -493,6 +493,33 @@ char *strsep(char **s, const char *ct) EXPORT_SYMBOL(strsep); #endif +/** + * sysfs_streq - return true if strings are equal, modulo trailing newline + * @s1: one string + * @s2: another string + * + * This routine returns true iff two strings are equal, treating both + * NUL and newline-then-NUL as equivalent string terminations. It's + * geared for use with sysfs input strings, which generally terminate + * with newlines but are compared against values without newlines. + */ +bool sysfs_streq(const char *s1, const char *s2) +{ + while (*s1 && *s1 == *s2) { + s1++; + s2++; + } + + if (*s1 == *s2) + return true; + if (!*s1 && *s2 == '\n' && !s2[1]) + return true; + if (*s1 == '\n' && !s1[1] && !*s2) + return true; + return false; +} +EXPORT_SYMBOL(sysfs_streq); + #ifndef __HAVE_ARCH_MEMSET /** * memset - Fill a region of memory with the given value diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 0259228..d568894 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -31,6 +31,7 @@ #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/iommu-helper.h> #define OFFSET(val,align) ((unsigned long) \ ( (val) & ( (align) - 1))) @@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr) return (addr & ~mask) != 0; } -static inline unsigned int is_span_boundary(unsigned int index, - unsigned int nslots, - unsigned long offset_slots, - unsigned long max_slots) -{ - unsigned long offset = (offset_slots + index) & (max_slots - 1); - return offset + nslots > max_slots; -} - /* * Allocates bounce buffer and returns its kernel virtual address. */ @@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir) * request and allocate a buffer from that IO TLB pool. 
*/ spin_lock_irqsave(&io_tlb_lock, flags); - { - index = ALIGN(io_tlb_index, stride); - if (index >= io_tlb_nslabs) - index = 0; - wrap = index; - - do { - while (is_span_boundary(index, nslots, offset_slots, - max_slots)) { - index += stride; - if (index >= io_tlb_nslabs) - index = 0; - if (index == wrap) - goto not_found; - } - - /* - * If we find a slot that indicates we have 'nslots' - * number of contiguous buffers, we allocate the - * buffers from that slot and mark the entries as '0' - * indicating unavailable. - */ - if (io_tlb_list[index] >= nslots) { - int count = 0; - - for (i = index; i < (int) (index + nslots); i++) - io_tlb_list[i] = 0; - for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) - io_tlb_list[i] = ++count; - dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); - - /* - * Update the indices to avoid searching in - * the next round. - */ - io_tlb_index = ((index + nslots) < io_tlb_nslabs - ? (index + nslots) : 0); - - goto found; - } + index = ALIGN(io_tlb_index, stride); + if (index >= io_tlb_nslabs) + index = 0; + wrap = index; + + do { + while (iommu_is_span_boundary(index, nslots, offset_slots, + max_slots)) { index += stride; if (index >= io_tlb_nslabs) index = 0; - } while (index != wrap); + if (index == wrap) + goto not_found; + } - not_found: - spin_unlock_irqrestore(&io_tlb_lock, flags); - return NULL; - } - found: + /* + * If we find a slot that indicates we have 'nslots' number of + * contiguous buffers, we allocate the buffers from that slot + * and mark the entries as '0' indicating unavailable. + */ + if (io_tlb_list[index] >= nslots) { + int count = 0; + + for (i = index; i < (int) (index + nslots); i++) + io_tlb_list[i] = 0; + for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) + io_tlb_list[i] = ++count; + dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); + + /* + * Update the indices to avoid searching in the next + * round. + */ + io_tlb_index = ((index + nslots) < io_tlb_nslabs + ? (index + nslots) : 0); + + goto found; + } + index += stride; + if (index >= io_tlb_nslabs) + index = 0; + } while (index != wrap); + +not_found: + spin_unlock_irqrestore(&io_tlb_lock, flags); + return NULL; +found: spin_unlock_irqrestore(&io_tlb_lock, flags); /* @@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ dma_addr_t -swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) +swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, + int dir, struct dma_attrs *attrs) { dma_addr_t dev_addr = virt_to_bus(ptr); void *map; @@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) return dev_addr; } +EXPORT_SYMBOL(swiotlb_map_single_attrs); + +dma_addr_t +swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) +{ + return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); +} /* * Unmap a single streaming mode DMA translation. The dma_addr and size must @@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) * whatever the device wrote there. 
*/ void -swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, - int dir) +swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, struct dma_attrs *attrs) { char *dma_addr = bus_to_virt(dev_addr); @@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, else if (dir == DMA_FROM_DEVICE) dma_mark_clean(dma_addr, size); } +EXPORT_SYMBOL(swiotlb_unmap_single_attrs); +void +swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, + int dir) +{ + return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); +} /* * Make physical memory consistent for a single streaming mode DMA translation * after a transfer. @@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, SYNC_FOR_DEVICE); } +void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int, + struct dma_attrs *); /* * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the above swiotlb_map_single @@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, * same here. */ int -swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir) +swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, + int dir, struct dma_attrs *attrs) { struct scatterlist *sg; void *addr; @@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, /* Don't panic here, we expect map_sg users to do proper error handling. */ swiotlb_full(hwdev, sg->length, dir, 0); - swiotlb_unmap_sg(hwdev, sgl, i, dir); + swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, + attrs); sgl[0].dma_length = 0; return 0; } @@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, } return nelems; } +EXPORT_SYMBOL(swiotlb_map_sg_attrs); + +int +swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, + int dir) +{ + return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); +} /* * Unmap a set of streaming mode DMA translations. Again, cpu read rules * concerning calls here are the same as for swiotlb_unmap_single() above. */ void -swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir) +swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, + int nelems, int dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); } } +EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); + +void +swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, + int dir) +{ + return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); +} /* * Make physical memory consistent for a set of streaming mode DMA translations |
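Among the math64 helpers, div_s64_rem() truncates toward zero and leaves the remainder with the sign of the dividend; that falls out of negating both the quotient and the remainder when the dividend is negative. A quick sketch of the resulting convention:

	s32 rem;
	s64 q;

	q = div_s64_rem(-7, 2, &rem);	/* q == -3, rem == -1 */
	q = div_s64_rem(7, -2, &rem);	/* q == -3, rem ==  1 */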
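The new klist_add_after()/klist_add_before() primitives look up the owning list through pos->n_klist, so the reference node must already be linked; they exist for callers that need positional insertion rather than head/tail. A minimal sketch:

	struct klist list;
	struct klist_node n1, n2;

	klist_init(&list, NULL, NULL);	/* no get/put refcount hooks */
	klist_add_head(&n1, &list);
	klist_add_after(&n2, &n1);	/* resulting order: n1, n2 */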
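The extracted __ratelimit() takes the window length in jiffies plus a burst count and returns nonzero when the caller may proceed. A hypothetical call site (the device name is invented):

	/* At most 10 of these warnings per 5 second window. */
	if (__ratelimit(5 * HZ, 10))
		printk(KERN_WARNING "mydev0: rx queue overrun\n");

Note that toks, last_msg and missed are static inside __ratelimit(), so in this version every call site shares a single token bucket; heavy use at one site eats the budget of the others.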
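sysfs_streq() lets sysfs store() handlers compare raw input, which typically arrives with one trailing newline, against fixed tokens without copying or trimming. Expected results, traced from the implementation above:

	sysfs_streq("enable",     "enable");	/* true */
	sysfs_streq("enable\n",   "enable");	/* true: "\n" + NUL ends the string */
	sysfs_streq("enable",     "enable\n");	/* true: the check is symmetric */
	sysfs_streq("enable\n\n", "enable");	/* false: only one newline is forgiven */
	sysfs_streq("enab",       "enable");	/* false */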