author		James Morris <jmorris@namei.org>	2009-08-05 22:55:03 (GMT)
committer	James Morris <jmorris@namei.org>	2009-08-05 22:55:03 (GMT)
commit		012a5299a29672039f42944a37984558393ef769 (patch)
tree		de0815c67cf4156c32c8b552cd7448387cc391b0 /lib
parent		da34d4248bd2013ee64ce51e63ec0ebd1f32b46c (diff)
parent		90bc1a658a53f8832ee799685703977a450e5af9 (diff)
download	linux-fsl-qoriq-012a5299a29672039f42944a37984558393ef769.tar.xz
Merge branch 'master' into next
Diffstat (limited to 'lib')
-rw-r--r--	lib/Makefile		2
-rw-r--r--	lib/atomic64.c		11
-rw-r--r--	lib/dynamic_debug.c	2
-rw-r--r--	lib/flex_array.c	267
-rw-r--r--	lib/scatterlist.c	16
5 files changed, 292 insertions, 6 deletions
diff --git a/lib/Makefile b/lib/Makefile
index b6d1857..2e78277 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o
+	 is_single_threaded.o plist.o decompress.o flex_array.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index c5e7255..8bee16e 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -13,6 +13,7 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <asm/atomic.h>
 
 /*
@@ -52,6 +53,7 @@ long long atomic64_read(const atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_read);
 
 void atomic64_set(atomic64_t *v, long long i)
 {
@@ -62,6 +64,7 @@ void atomic64_set(atomic64_t *v, long long i)
 	v->counter = i;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
@@ -72,6 +75,7 @@ void atomic64_add(long long a, atomic64_t *v)
 	v->counter += a;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
@@ -84,6 +88,7 @@ long long atomic64_add_return(long long a, atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_add_return);
 
 void atomic64_sub(long long a, atomic64_t *v)
 {
@@ -94,6 +99,7 @@ void atomic64_sub(long long a, atomic64_t *v)
 	v->counter -= a;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
@@ -106,6 +112,7 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_sub_return);
 
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
@@ -120,6 +127,7 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_dec_if_positive);
 
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
@@ -134,6 +142,7 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_cmpxchg);
 
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
@@ -147,6 +156,7 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_xchg);
 
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
@@ -162,6 +172,7 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(atomic64_add_unless);
 
 static int init_atomic64_lock(void)
 {
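The atomic64.c hunks above add EXPORT_SYMBOL() to every op in the generic spinlock-backed atomic64 implementation (used by 32-bit architectures without native 64-bit atomics), making them callable from loadable modules. A minimal sketch of module code that now links against these exports — the counter name and its uses are hypothetical, not part of this commit:

#include <linux/module.h>
#include <asm/atomic.h>

/* hypothetical 64-bit statistics counter kept in a module */
static atomic64_t total_bytes = ATOMIC64_INIT(0);

static void account(long long nbytes)
{
	/* on 32-bit, this resolves to the exported lib/atomic64.c op */
	atomic64_add(nbytes, &total_bytes);
}

static long long snapshot_and_reset(void)
{
	/* atomically fetch the running total and restart from zero */
	return atomic64_xchg(&total_bytes, 0);
}

Before this change, building such a module against the generic implementation failed at load time with unresolved symbols, even though the same code linked fine when built in.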
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 833139c..e22c148 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -164,7 +164,7 @@ static void ddebug_change(const struct ddebug_query *query,
 
 		if (!newflags)
 			dt->num_enabled--;
-		else if (!dp-flags)
+		else if (!dp->flags)
 			dt->num_enabled++;
 		dp->flags = newflags;
 		if (newflags) {
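The dynamic_debug.c hunk is a one-character fix: `dp-flags` was missing the `>` of the member access. Because `!` binds tighter than binary `-`, the old expression parsed as `(!dp) - flags` — valid integer arithmetic, presumably against the function's `flags` argument that happened to be in scope — so it compiled silently but never tested `dp->flags`, and num_enabled could be adjusted at the wrong times. A standalone illustration of the trap (all names here are hypothetical):

#include <stdio.h>

struct ddebug { unsigned int flags; };

int main(void)
{
	struct ddebug d = { .flags = 0x4 };
	struct ddebug *dp = &d;
	unsigned int flags = 0;	/* any in-scope variable named "flags" */

	/* typo version: (!dp) - flags == 0 - 0 == 0, so the branch is
	 * skipped even though dp->flags is nonzero */
	if (!dp-flags)
		printf("typo: taken only based on the local 'flags'\n");

	/* intended test: actually inspects the member */
	if (!dp->flags)
		printf("intended: dp->flags == 0\n");
	else
		printf("intended: dp->flags != 0\n");
	return 0;
}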
diff --git a/lib/flex_array.c b/lib/flex_array.c
new file mode 100644
index 0000000..08f1636
--- /dev/null
+++ b/lib/flex_array.c
@@ -0,0 +1,267 @@
+/*
+ * Flexible array managed in PAGE_SIZE parts
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2009
+ *
+ * Author: Dave Hansen <dave@linux.vnet.ibm.com>
+ */
+
+#include <linux/flex_array.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+
+struct flex_array_part {
+	char elements[FLEX_ARRAY_PART_SIZE];
+};
+
+static inline int __elements_per_part(int element_size)
+{
+	return FLEX_ARRAY_PART_SIZE / element_size;
+}
+
+static inline int bytes_left_in_base(void)
+{
+	int element_offset = offsetof(struct flex_array, parts);
+	int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
+	return bytes_left;
+}
+
+static inline int nr_base_part_ptrs(void)
+{
+	return bytes_left_in_base() / sizeof(struct flex_array_part *);
+}
+
+/*
+ * If a user requests an allocation which is small
+ * enough, we may simply use the space in the
+ * flex_array->parts[] array to store the user
+ * data.
+ */
+static inline int elements_fit_in_base(struct flex_array *fa)
+{
+	int data_size = fa->element_size * fa->total_nr_elements;
+	if (data_size <= bytes_left_in_base())
+		return 1;
+	return 0;
+}
+
+/**
+ * flex_array_alloc - allocate a new flexible array
+ * @element_size:	the size of individual elements in the array
+ * @total:		total number of elements that this should hold
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * @total is used to size internal structures.  If the user ever
+ * accesses any array indexes >=@total, it will produce errors.
+ *
+ * The maximum number of elements is defined as: the number of
+ * elements that can be stored in a page times the number of
+ * page pointers that we can fit in the base structure, or (using
+ * integer math):
+ *
+ *	(PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
+ *
+ * Here's a table showing example capacities.  Note that the maximum
+ * index that the get/put() functions can handle is just nr_objects-1.
+ * This basically means that you get 4MB of storage on 32-bit and 2MB
+ * on 64-bit.
+ *
+ *
+ *  Element size | Objects | Objects |
+ *  PAGE_SIZE=4k |  32-bit |  64-bit |
+ * ---------------------------------|
+ *    1 bytes    | 4186112 | 2093056 |
+ *    2 bytes    | 2093056 | 1046528 |
+ *    3 bytes    | 1395030 |  697515 |
+ *    4 bytes    | 1046528 |  523264 |
+ *   32 bytes    |  130816 |   65408 |
+ *   33 bytes    |  126728 |   63364 |
+ * 2048 bytes    |    2044 |    1022 |
+ * 2049 bytes    |    1022 |     511 |
+ *      void *   | 1046528 |  261632 |
+ *
+ * Since 64-bit pointers are twice the size, we lose half the
+ * capacity in the base structure.  Also note that no effort is made
+ * to efficiently pack objects across page boundaries.
+ */
+struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+{
+	struct flex_array *ret;
+	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+
+	/* max_size will end up 0 if element_size > PAGE_SIZE */
+	if (total > max_size)
+		return NULL;
+	ret = kzalloc(sizeof(struct flex_array), flags);
+	if (!ret)
+		return NULL;
+	ret->element_size = element_size;
+	ret->total_nr_elements = total;
+	return ret;
+}
+
+static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+{
+	return element_nr / __elements_per_part(fa->element_size);
+}
+
+/**
+ * flex_array_free_parts - just free the second-level pages
+ * @fa:	the flex array whose second-level pages should be freed
+ *
+ * This is to be used in cases where the base 'struct flex_array'
+ * has been statically allocated and should not be freed.
+ */
+void flex_array_free_parts(struct flex_array *fa)
+{
+	int part_nr;
+	int max_part = nr_base_part_ptrs();
+
+	if (elements_fit_in_base(fa))
+		return;
+	for (part_nr = 0; part_nr < max_part; part_nr++)
+		kfree(fa->parts[part_nr]);
+}
+
+void flex_array_free(struct flex_array *fa)
+{
+	flex_array_free_parts(fa);
+	kfree(fa);
+}
+
+static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+{
+	return element_nr % __elements_per_part(fa->element_size);
+}
+
+static int index_inside_part(struct flex_array *fa, int element_nr)
+{
+	int part_offset = fa_index_inside_part(fa, element_nr);
+	return part_offset * fa->element_size;
+}
+
+static struct flex_array_part *
+__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
+{
+	struct flex_array_part *part = fa->parts[part_nr];
+	if (!part) {
+		/*
+		 * This leaves the part pages uninitialized
+		 * and with potentially random data, just
+		 * as if the user had kmalloc()'d the whole.
+		 * __GFP_ZERO can be used to zero it.
+		 */
+		part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+		if (!part)
+			return NULL;
+		fa->parts[part_nr] = part;
+	}
+	return part;
+}
+
+/**
+ * flex_array_put - copy data into the array at @element_nr
+ * @src:	address of data to copy into the array
+ * @element_nr:	index of the position in which to insert
+ *		the new element.
+ *
+ * Note that this *copies* the contents of @src into
+ * the array.  If you are trying to store an array of
+ * pointers, make sure to pass in &ptr instead of ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else
+		part = __fa_get_part(fa, part_nr, flags);
+	if (!part)
+		return -ENOMEM;
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memcpy(dst, src, fa->element_size);
+	return 0;
+}
+
+/**
+ * flex_array_prealloc - guarantee that array space exists
+ * @start:	index of first array element for which space is allocated
+ * @end:	index of last (inclusive) element for which space is allocated
+ *
+ * This will guarantee that no future calls to flex_array_put()
+ * will allocate memory.  It can be used if you are expecting to
+ * be holding a lock or in some atomic context while writing
+ * data into the array.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+{
+	int start_part;
+	int end_part;
+	int part_nr;
+	struct flex_array_part *part;
+
+	if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		return 0;
+	start_part = fa_element_to_part_nr(fa, start);
+	end_part = fa_element_to_part_nr(fa, end);
+	for (part_nr = start_part; part_nr <= end_part; part_nr++) {
+		part = __fa_get_part(fa, part_nr, flags);
+		if (!part)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * flex_array_get - pull data back out of the array
+ * @element_nr:	index of the element to fetch from the array
+ *
+ * Returns a pointer to the data at index @element_nr.  Note
+ * that this is a copy of the data that was passed in.  If you
+ * are using this to store pointers, you'll get back &ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+void *flex_array_get(struct flex_array *fa, int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+
+	if (element_nr >= fa->total_nr_elements)
+		return NULL;
+	if (!fa->parts[part_nr])
+		return NULL;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else
+		part = fa->parts[part_nr];
+	return &part->elements[index_inside_part(fa, element_nr)];
+}
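Taken together, flex_array gives a fixed maximum number of fixed-size elements stored in lazily allocated page-sized parts, avoiding large contiguous allocations. A minimal usage sketch, assuming a caller that can sleep and provides its own locking; the struct foo payload, element count, and slot index are illustrative, not from the commit:

#include <linux/errno.h>
#include <linux/flex_array.h>
#include <linux/gfp.h>

struct foo {		/* hypothetical 8-byte element */
	int a;
	int b;
};

static int foo_store_and_fetch(void)
{
	struct flex_array *fa;
	struct foo tmp = { .a = 1, .b = 2 };
	struct foo *out;
	int err;

	/* room for 1000 elements; part pages are allocated on demand */
	fa = flex_array_alloc(sizeof(struct foo), 1000, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	/* copies tmp into slot 42, kmalloc()ing that part page if needed */
	err = flex_array_put(fa, 42, &tmp, GFP_KERNEL);
	if (err)
		goto out_free;

	/* returns a pointer into the array's own storage, not a new copy */
	out = flex_array_get(fa, 42);
	if (!out) {
		err = -EINVAL;
		goto out_free;
	}
	err = 0;
out_free:
	flex_array_free(fa);
	return err;
}

Callers that must write under a lock or in atomic context can call flex_array_prealloc() for the index range up front, after which flex_array_put() is guaranteed not to allocate.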
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a295e40..0d475d8 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -314,6 +314,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
 	miter->__sg = sgl;
 	miter->__nents = nents;
 	miter->__offset = 0;
+	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
 	miter->__flags = flags;
 }
 EXPORT_SYMBOL(sg_miter_start);
@@ -394,6 +395,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 	if (miter->addr) {
 		miter->__offset += miter->consumed;
 
+		if (miter->__flags & SG_MITER_TO_SG)
+			flush_kernel_dcache_page(miter->page);
+
 		if (miter->__flags & SG_MITER_ATOMIC) {
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
@@ -426,8 +430,14 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 	unsigned int offset = 0;
 	struct sg_mapping_iter miter;
 	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+
+	if (to_buffer)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
 
-	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+	sg_miter_start(&miter, sgl, nents, sg_flags);
 
 	local_irq_save(flags);
 
@@ -438,10 +448,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
 		if (to_buffer)
 			memcpy(buf + offset, miter.addr, len);
-		else {
+		else
 			memcpy(miter.addr, buf + offset, len);
-			flush_kernel_dcache_page(miter.page);
-		}
 
 		offset += len;
 	}
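The scatterlist change makes every sg_mapping_iter user declare a data direction (SG_MITER_TO_SG or SG_MITER_FROM_SG) at sg_miter_start() time, so sg_miter_stop() can flush the kernel dcache for exactly the pages the iteration wrote — previously only sg_copy_buffer()'s copy-to-sg path flushed, inline. A sketch of a caller writing into an sg list under the new convention; the function name and source buffer are hypothetical:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static size_t my_fill_sg(struct scatterlist *sgl, unsigned int nents,
			 const u8 *src, size_t buflen)
{
	struct sg_mapping_iter miter;
	size_t offset = 0;

	/*
	 * We write INTO the sg list, so declare SG_MITER_TO_SG;
	 * sg_miter_stop() then takes care of the dcache flush for
	 * each dirtied page.
	 */
	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);

	while (sg_miter_next(&miter) && offset < buflen) {
		size_t len = min(miter.length, buflen - offset);

		memcpy(miter.addr, src + offset, len);
		offset += len;
	}
	sg_miter_stop(&miter);
	return offset;
}

Without SG_MITER_ATOMIC the iterator uses a sleeping kmap(), so a caller like this must not hold a spinlock; sg_copy_buffer() itself keeps SG_MITER_ATOMIC and merely adds the direction bit.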