From 7ba5c48b5faf6fbd16e61263dfd4a380e3902569 Mon Sep 17 00:00:00 2001
From: Zhao Qiang
Date: Tue, 30 Sep 2014 10:25:25 +0800
Subject: rheap: move rheap.c from arch/powerpc/lib/ to lib/

The QE needs to use the rheap, so move it to the common lib/ directory.

Signed-off-by: Zhao Qiang
---
upstream link: http://patchwork.ozlabs.org/patch/393170/
It is still under discussion upstream.

Change-Id: Ied2765d6e0eb3b7ade0fef02cfe226c8a8566c5f
Reviewed-on: http://git.am.freescale.net:8181/16841
Tested-by: Review Code-CDREVIEW
Reviewed-by: Xiaobo Xie
Reviewed-by: Zhengxiong Jin

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d20dc2b..18b658e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -1046,7 +1046,4 @@ config PPC_CLOCK
 	default n
 	select HAVE_CLK
 
-config PPC_LIB_RHEAP
-	bool
-
 source "arch/powerpc/kvm/Kconfig"

diff --git a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
index 2af2bdc..5071a2c 100644
--- a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
+++ b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
@@ -26,7 +26,7 @@
 #ifndef __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
 #define __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
 
-#include <asm/rheap.h>
+#include <linux/fsl/rheap.h>
 #include <linux/spinlock.h>
 
 /*
@@ -37,7 +37,7 @@ struct mpc85xx_cache_sram {
 	phys_addr_t base_phys;
 	void *base_virt;
 	unsigned int size;
-	rh_info_t *rh;
+	struct _rh_info *rh;
 	spinlock_t lock;
 };

diff --git a/arch/powerpc/include/asm/rheap.h b/arch/powerpc/include/asm/rheap.h
deleted file mode 100644
index 1723817..0000000
--- a/arch/powerpc/include/asm/rheap.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * include/asm-ppc/rheap.h
- *
- * Header file for the implementation of a remote heap.
- *
- * Author: Pantelis Antoniou
- *
- * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */ - -#ifndef __ASM_PPC_RHEAP_H__ -#define __ASM_PPC_RHEAP_H__ - -#include - -typedef struct _rh_block { - struct list_head list; - unsigned long start; - int size; - const char *owner; -} rh_block_t; - -typedef struct _rh_info { - unsigned int alignment; - int max_blocks; - int empty_slots; - rh_block_t *block; - struct list_head empty_list; - struct list_head free_list; - struct list_head taken_list; - unsigned int flags; -} rh_info_t; - -#define RHIF_STATIC_INFO 0x1 -#define RHIF_STATIC_BLOCK 0x2 - -typedef struct _rh_stats { - unsigned long start; - int size; - const char *owner; -} rh_stats_t; - -#define RHGS_FREE 0 -#define RHGS_TAKEN 1 - -/* Create a remote heap dynamically */ -extern rh_info_t *rh_create(unsigned int alignment); - -/* Destroy a remote heap, created by rh_create() */ -extern void rh_destroy(rh_info_t * info); - -/* Initialize in place a remote info block */ -extern void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks, - rh_block_t * block); - -/* Attach a free region to manage */ -extern int rh_attach_region(rh_info_t * info, unsigned long start, int size); - -/* Detach a free region */ -extern unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size); - -/* Allocate the given size from the remote heap (with alignment) */ -extern unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, - const char *owner); - -/* Allocate the given size from the remote heap */ -extern unsigned long rh_alloc(rh_info_t * info, int size, const char *owner); - -/* Allocate the given size from the given address */ -extern unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, - const char *owner); - -/* Free the allocated area */ -extern int rh_free(rh_info_t * info, unsigned long start); - -/* Get stats for debugging purposes */ -extern int rh_get_stats(rh_info_t * info, int what, int max_stats, - rh_stats_t * stats); - -/* Simple dump of remote heap info */ -extern void rh_dump(rh_info_t * info); - -/* Set owner of taken block */ -extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner); - -#endif /* __ASM_PPC_RHEAP_H__ */ diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 4504332..c7b4e2f 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -26,8 +26,6 @@ obj-$(CONFIG_SMP) += locks.o obj-$(CONFIG_ALTIVEC) += vmx-helper.o endif -obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o - obj-y += code-patching.o obj-y += feature-fixups.o obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c deleted file mode 100644 index a1060a8..0000000 --- a/arch/powerpc/lib/rheap.c +++ /dev/null @@ -1,747 +0,0 @@ -/* - * A Remote Heap. Remote means that we don't touch the memory that the - * heap points to. Normal heap implementations use the memory they manage - * to place their list. We cannot do that because the memory we manage may - * have special properties, for example it is uncachable or of different - * endianess. - * - * Author: Pantelis Antoniou - * - * 2004 (c) INTRACOM S.A. Greece. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ -#include -#include -#include -#include -#include -#include -#include - -#include - -/* - * Fixup a list_head, needed when copying lists. If the pointers fall - * between s and e, apply the delta. 
This assumes that - * sizeof(struct list_head *) == sizeof(unsigned long *). - */ -static inline void fixup(unsigned long s, unsigned long e, int d, - struct list_head *l) -{ - unsigned long *pp; - - pp = (unsigned long *)&l->next; - if (*pp >= s && *pp < e) - *pp += d; - - pp = (unsigned long *)&l->prev; - if (*pp >= s && *pp < e) - *pp += d; -} - -/* Grow the allocated blocks */ -static int grow(rh_info_t * info, int max_blocks) -{ - rh_block_t *block, *blk; - int i, new_blocks; - int delta; - unsigned long blks, blke; - - if (max_blocks <= info->max_blocks) - return -EINVAL; - - new_blocks = max_blocks - info->max_blocks; - - block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC); - if (block == NULL) - return -ENOMEM; - - if (info->max_blocks > 0) { - - /* copy old block area */ - memcpy(block, info->block, - sizeof(rh_block_t) * info->max_blocks); - - delta = (char *)block - (char *)info->block; - - /* and fixup list pointers */ - blks = (unsigned long)info->block; - blke = (unsigned long)(info->block + info->max_blocks); - - for (i = 0, blk = block; i < info->max_blocks; i++, blk++) - fixup(blks, blke, delta, &blk->list); - - fixup(blks, blke, delta, &info->empty_list); - fixup(blks, blke, delta, &info->free_list); - fixup(blks, blke, delta, &info->taken_list); - - /* free the old allocated memory */ - if ((info->flags & RHIF_STATIC_BLOCK) == 0) - kfree(info->block); - } - - info->block = block; - info->empty_slots += new_blocks; - info->max_blocks = max_blocks; - info->flags &= ~RHIF_STATIC_BLOCK; - - /* add all new blocks to the free list */ - blk = block + info->max_blocks - new_blocks; - for (i = 0; i < new_blocks; i++, blk++) - list_add(&blk->list, &info->empty_list); - - return 0; -} - -/* - * Assure at least the required amount of empty slots. If this function - * causes a grow in the block area then all pointers kept to the block - * area are invalid! - */ -static int assure_empty(rh_info_t * info, int slots) -{ - int max_blocks; - - /* This function is not meant to be used to grow uncontrollably */ - if (slots >= 4) - return -EINVAL; - - /* Enough space */ - if (info->empty_slots >= slots) - return 0; - - /* Next 16 sized block */ - max_blocks = ((info->max_blocks + slots) + 15) & ~15; - - return grow(info, max_blocks); -} - -static rh_block_t *get_slot(rh_info_t * info) -{ - rh_block_t *blk; - - /* If no more free slots, and failure to extend. 
*/ - /* XXX: You should have called assure_empty before */ - if (info->empty_slots == 0) { - printk(KERN_ERR "rh: out of slots; crash is imminent.\n"); - return NULL; - } - - /* Get empty slot to use */ - blk = list_entry(info->empty_list.next, rh_block_t, list); - list_del_init(&blk->list); - info->empty_slots--; - - /* Initialize */ - blk->start = 0; - blk->size = 0; - blk->owner = NULL; - - return blk; -} - -static inline void release_slot(rh_info_t * info, rh_block_t * blk) -{ - list_add(&blk->list, &info->empty_list); - info->empty_slots++; -} - -static void attach_free_block(rh_info_t * info, rh_block_t * blkn) -{ - rh_block_t *blk; - rh_block_t *before; - rh_block_t *after; - rh_block_t *next; - int size; - unsigned long s, e, bs, be; - struct list_head *l; - - /* We assume that they are aligned properly */ - size = blkn->size; - s = blkn->start; - e = s + size; - - /* Find the blocks immediately before and after the given one - * (if any) */ - before = NULL; - after = NULL; - next = NULL; - - list_for_each(l, &info->free_list) { - blk = list_entry(l, rh_block_t, list); - - bs = blk->start; - be = bs + blk->size; - - if (next == NULL && s >= bs) - next = blk; - - if (be == s) - before = blk; - - if (e == bs) - after = blk; - - /* If both are not null, break now */ - if (before != NULL && after != NULL) - break; - } - - /* Now check if they are really adjacent */ - if (before && s != (before->start + before->size)) - before = NULL; - - if (after && e != after->start) - after = NULL; - - /* No coalescing; list insert and return */ - if (before == NULL && after == NULL) { - - if (next != NULL) - list_add(&blkn->list, &next->list); - else - list_add(&blkn->list, &info->free_list); - - return; - } - - /* We don't need it anymore */ - release_slot(info, blkn); - - /* Grow the before block */ - if (before != NULL && after == NULL) { - before->size += size; - return; - } - - /* Grow the after block backwards */ - if (before == NULL && after != NULL) { - after->start -= size; - after->size += size; - return; - } - - /* Grow the before block, and release the after block */ - before->size += size + after->size; - list_del(&after->list); - release_slot(info, after); -} - -static void attach_taken_block(rh_info_t * info, rh_block_t * blkn) -{ - rh_block_t *blk; - struct list_head *l; - - /* Find the block immediately before the given one (if any) */ - list_for_each(l, &info->taken_list) { - blk = list_entry(l, rh_block_t, list); - if (blk->start > blkn->start) { - list_add_tail(&blkn->list, &blk->list); - return; - } - } - - list_add_tail(&blkn->list, &info->taken_list); -} - -/* - * Create a remote heap dynamically. Note that no memory for the blocks - * are allocated. It will upon the first allocation - */ -rh_info_t *rh_create(unsigned int alignment) -{ - rh_info_t *info; - - /* Alignment must be a power of two */ - if ((alignment & (alignment - 1)) != 0) - return ERR_PTR(-EINVAL); - - info = kmalloc(sizeof(*info), GFP_ATOMIC); - if (info == NULL) - return ERR_PTR(-ENOMEM); - - info->alignment = alignment; - - /* Initially everything as empty */ - info->block = NULL; - info->max_blocks = 0; - info->empty_slots = 0; - info->flags = 0; - - INIT_LIST_HEAD(&info->empty_list); - INIT_LIST_HEAD(&info->free_list); - INIT_LIST_HEAD(&info->taken_list); - - return info; -} -EXPORT_SYMBOL_GPL(rh_create); - -/* - * Destroy a dynamically created remote heap. 
Deallocate only if the areas - * are not static - */ -void rh_destroy(rh_info_t * info) -{ - if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL) - kfree(info->block); - - if ((info->flags & RHIF_STATIC_INFO) == 0) - kfree(info); -} -EXPORT_SYMBOL_GPL(rh_destroy); - -/* - * Initialize in place a remote heap info block. This is needed to support - * operation very early in the startup of the kernel, when it is not yet safe - * to call kmalloc. - */ -void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks, - rh_block_t * block) -{ - int i; - rh_block_t *blk; - - /* Alignment must be a power of two */ - if ((alignment & (alignment - 1)) != 0) - return; - - info->alignment = alignment; - - /* Initially everything as empty */ - info->block = block; - info->max_blocks = max_blocks; - info->empty_slots = max_blocks; - info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK; - - INIT_LIST_HEAD(&info->empty_list); - INIT_LIST_HEAD(&info->free_list); - INIT_LIST_HEAD(&info->taken_list); - - /* Add all new blocks to the free list */ - for (i = 0, blk = block; i < max_blocks; i++, blk++) - list_add(&blk->list, &info->empty_list); -} -EXPORT_SYMBOL_GPL(rh_init); - -/* Attach a free memory region, coalesces regions if adjuscent */ -int rh_attach_region(rh_info_t * info, unsigned long start, int size) -{ - rh_block_t *blk; - unsigned long s, e, m; - int r; - - /* The region must be aligned */ - s = start; - e = s + size; - m = info->alignment - 1; - - /* Round start up */ - s = (s + m) & ~m; - - /* Round end down */ - e = e & ~m; - - if (IS_ERR_VALUE(e) || (e < s)) - return -ERANGE; - - /* Take final values */ - start = s; - size = e - s; - - /* Grow the blocks, if needed */ - r = assure_empty(info, 1); - if (r < 0) - return r; - - blk = get_slot(info); - blk->start = start; - blk->size = size; - blk->owner = NULL; - - attach_free_block(info, blk); - - return 0; -} -EXPORT_SYMBOL_GPL(rh_attach_region); - -/* Detatch given address range, splits free block if needed. */ -unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size) -{ - struct list_head *l; - rh_block_t *blk, *newblk; - unsigned long s, e, m, bs, be; - - /* Validate size */ - if (size <= 0) - return (unsigned long) -EINVAL; - - /* The region must be aligned */ - s = start; - e = s + size; - m = info->alignment - 1; - - /* Round start up */ - s = (s + m) & ~m; - - /* Round end down */ - e = e & ~m; - - if (assure_empty(info, 1) < 0) - return (unsigned long) -ENOMEM; - - blk = NULL; - list_for_each(l, &info->free_list) { - blk = list_entry(l, rh_block_t, list); - /* The range must lie entirely inside one free block */ - bs = blk->start; - be = blk->start + blk->size; - if (s >= bs && e <= be) - break; - blk = NULL; - } - - if (blk == NULL) - return (unsigned long) -ENOMEM; - - /* Perfect fit */ - if (bs == s && be == e) { - /* Delete from free list, release slot */ - list_del(&blk->list); - release_slot(info, blk); - return s; - } - - /* blk still in free list, with updated start and/or size */ - if (bs == s || be == e) { - if (bs == s) - blk->start += size; - blk->size -= size; - - } else { - /* The front free fragment */ - blk->size = s - bs; - - /* the back free fragment */ - newblk = get_slot(info); - newblk->start = e; - newblk->size = be - e; - - list_add(&newblk->list, &blk->list); - } - - return s; -} -EXPORT_SYMBOL_GPL(rh_detach_region); - -/* Allocate a block of memory at the specified alignment. 
The value returned - * is an offset into the buffer initialized by rh_init(), or a negative number - * if there is an error. - */ -unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner) -{ - struct list_head *l; - rh_block_t *blk; - rh_block_t *newblk; - unsigned long start, sp_size; - - /* Validate size, and alignment must be power of two */ - if (size <= 0 || (alignment & (alignment - 1)) != 0) - return (unsigned long) -EINVAL; - - /* Align to configured alignment */ - size = (size + (info->alignment - 1)) & ~(info->alignment - 1); - - if (assure_empty(info, 2) < 0) - return (unsigned long) -ENOMEM; - - blk = NULL; - list_for_each(l, &info->free_list) { - blk = list_entry(l, rh_block_t, list); - if (size <= blk->size) { - start = (blk->start + alignment - 1) & ~(alignment - 1); - if (start + size <= blk->start + blk->size) - break; - } - blk = NULL; - } - - if (blk == NULL) - return (unsigned long) -ENOMEM; - - /* Just fits */ - if (blk->size == size) { - /* Move from free list to taken list */ - list_del(&blk->list); - newblk = blk; - } else { - /* Fragment caused, split if needed */ - /* Create block for fragment in the beginning */ - sp_size = start - blk->start; - if (sp_size) { - rh_block_t *spblk; - - spblk = get_slot(info); - spblk->start = blk->start; - spblk->size = sp_size; - /* add before the blk */ - list_add(&spblk->list, blk->list.prev); - } - newblk = get_slot(info); - newblk->start = start; - newblk->size = size; - - /* blk still in free list, with updated start and size - * for fragment in the end */ - blk->start = start + size; - blk->size -= sp_size + size; - /* No fragment in the end, remove blk */ - if (blk->size == 0) { - list_del(&blk->list); - release_slot(info, blk); - } - } - - newblk->owner = owner; - attach_taken_block(info, newblk); - - return start; -} -EXPORT_SYMBOL_GPL(rh_alloc_align); - -/* Allocate a block of memory at the default alignment. The value returned is - * an offset into the buffer initialized by rh_init(), or a negative number if - * there is an error. - */ -unsigned long rh_alloc(rh_info_t * info, int size, const char *owner) -{ - return rh_alloc_align(info, size, info->alignment, owner); -} -EXPORT_SYMBOL_GPL(rh_alloc); - -/* Allocate a block of memory at the given offset, rounded up to the default - * alignment. The value returned is an offset into the buffer initialized by - * rh_init(), or a negative number if there is an error. 
- */ -unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner) -{ - struct list_head *l; - rh_block_t *blk, *newblk1, *newblk2; - unsigned long s, e, m, bs = 0, be = 0; - - /* Validate size */ - if (size <= 0) - return (unsigned long) -EINVAL; - - /* The region must be aligned */ - s = start; - e = s + size; - m = info->alignment - 1; - - /* Round start up */ - s = (s + m) & ~m; - - /* Round end down */ - e = e & ~m; - - if (assure_empty(info, 2) < 0) - return (unsigned long) -ENOMEM; - - blk = NULL; - list_for_each(l, &info->free_list) { - blk = list_entry(l, rh_block_t, list); - /* The range must lie entirely inside one free block */ - bs = blk->start; - be = blk->start + blk->size; - if (s >= bs && e <= be) - break; - blk = NULL; - } - - if (blk == NULL) - return (unsigned long) -ENOMEM; - - /* Perfect fit */ - if (bs == s && be == e) { - /* Move from free list to taken list */ - list_del(&blk->list); - blk->owner = owner; - - start = blk->start; - attach_taken_block(info, blk); - - return start; - - } - - /* blk still in free list, with updated start and/or size */ - if (bs == s || be == e) { - if (bs == s) - blk->start += size; - blk->size -= size; - - } else { - /* The front free fragment */ - blk->size = s - bs; - - /* The back free fragment */ - newblk2 = get_slot(info); - newblk2->start = e; - newblk2->size = be - e; - - list_add(&newblk2->list, &blk->list); - } - - newblk1 = get_slot(info); - newblk1->start = s; - newblk1->size = e - s; - newblk1->owner = owner; - - start = newblk1->start; - attach_taken_block(info, newblk1); - - return start; -} -EXPORT_SYMBOL_GPL(rh_alloc_fixed); - -/* Deallocate the memory previously allocated by one of the rh_alloc functions. - * The return value is the size of the deallocated block, or a negative number - * if there is an error. 
- */ -int rh_free(rh_info_t * info, unsigned long start) -{ - rh_block_t *blk, *blk2; - struct list_head *l; - int size; - - /* Linear search for block */ - blk = NULL; - list_for_each(l, &info->taken_list) { - blk2 = list_entry(l, rh_block_t, list); - if (start < blk2->start) - break; - blk = blk2; - } - - if (blk == NULL || start > (blk->start + blk->size)) - return -EINVAL; - - /* Remove from taken list */ - list_del(&blk->list); - - /* Get size of freed block */ - size = blk->size; - attach_free_block(info, blk); - - return size; -} -EXPORT_SYMBOL_GPL(rh_free); - -int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats) -{ - rh_block_t *blk; - struct list_head *l; - struct list_head *h; - int nr; - - switch (what) { - - case RHGS_FREE: - h = &info->free_list; - break; - - case RHGS_TAKEN: - h = &info->taken_list; - break; - - default: - return -EINVAL; - } - - /* Linear search for block */ - nr = 0; - list_for_each(l, h) { - blk = list_entry(l, rh_block_t, list); - if (stats != NULL && nr < max_stats) { - stats->start = blk->start; - stats->size = blk->size; - stats->owner = blk->owner; - stats++; - } - nr++; - } - - return nr; -} -EXPORT_SYMBOL_GPL(rh_get_stats); - -int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner) -{ - rh_block_t *blk, *blk2; - struct list_head *l; - int size; - - /* Linear search for block */ - blk = NULL; - list_for_each(l, &info->taken_list) { - blk2 = list_entry(l, rh_block_t, list); - if (start < blk2->start) - break; - blk = blk2; - } - - if (blk == NULL || start > (blk->start + blk->size)) - return -EINVAL; - - blk->owner = owner; - size = blk->size; - - return size; -} -EXPORT_SYMBOL_GPL(rh_set_owner); - -void rh_dump(rh_info_t * info) -{ - static rh_stats_t st[32]; /* XXX maximum 32 blocks */ - int maxnr; - int i, nr; - - maxnr = ARRAY_SIZE(st); - - printk(KERN_INFO - "info @0x%p (%d slots empty / %d max)\n", - info, info->empty_slots, info->max_blocks); - - printk(KERN_INFO " Free:\n"); - nr = rh_get_stats(info, RHGS_FREE, maxnr, st); - if (nr > maxnr) - nr = maxnr; - for (i = 0; i < nr; i++) - printk(KERN_INFO - " 0x%lx-0x%lx (%u)\n", - st[i].start, st[i].start + st[i].size, - st[i].size); - printk(KERN_INFO "\n"); - - printk(KERN_INFO " Taken:\n"); - nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st); - if (nr > maxnr) - nr = maxnr; - for (i = 0; i < nr; i++) - printk(KERN_INFO - " 0x%lx-0x%lx (%u) %s\n", - st[i].start, st[i].start + st[i].size, - st[i].size, st[i].owner != NULL ? st[i].owner : ""); - printk(KERN_INFO "\n"); -} -EXPORT_SYMBOL_GPL(rh_dump); - -void rh_dump_blk(rh_info_t * info, rh_block_t * blk) -{ - printk(KERN_INFO - "blk @0x%p: 0x%lx-0x%lx (%u)\n", - blk, blk->start, blk->start + blk->size, blk->size); -} -EXPORT_SYMBOL_GPL(rh_dump_blk); - diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index d6c7506..0ea1aee 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -254,7 +254,7 @@ config PPC4xx_GPIO config PPC4xx_OCM bool "PPC4xx On Chip Memory (OCM) support" depends on 4xx - select PPC_LIB_RHEAP + select LIB_RHEAP help Enable OCM support for PowerPC 4xx platforms with on chip memory, OCM provides the fast place for memory access to improve performance. 
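For readers new to this allocator, here is a minimal usage sketch of the
API being moved (not part of the patch; the device, sizes and alignments
below are invented for illustration):

/* Hypothetical consumer of the relocated remote heap API. */
#include <linux/err.h>
#include <linux/fsl/rheap.h>

#define MY_SRAM_SIZE	0x4000	/* illustrative: 16 KiB of device memory */

static struct _rh_info *my_sram_heap;

static int my_sram_setup(void)
{
	/* Minimum alignment for all allocations: 8 bytes. */
	my_sram_heap = rh_create(8);
	if (IS_ERR(my_sram_heap))
		return PTR_ERR(my_sram_heap);

	/*
	 * Hand the window to the heap as offsets 0..MY_SRAM_SIZE. The
	 * heap is "remote": it never touches this range itself, all of
	 * its bookkeeping lives in normal kmalloc()ed memory.
	 */
	return rh_attach_region(my_sram_heap, 0, MY_SRAM_SIZE);
}

static unsigned long my_sram_alloc(int size)
{
	/* Returns an offset into the region, or a negative errno cast
	 * to unsigned long on failure. */
	return rh_alloc_align(my_sram_heap, size, 32, "my_driver");
}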
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index ae9fdb51..ab4777a 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -19,7 +19,7 @@ if PPC32
 
 config FSL_85XX_CACHE_SRAM
 	bool "Freescale l2cache sram support"
-	select PPC_LIB_RHEAP
+	select LIB_RHEAP
 	help
 	  When selected, this option enables cache-sram support
 	  for memory allocation on P1/P2 QorIQ platforms.

diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index d09ae32f..9c38a8d 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -282,7 +282,7 @@ config CPM2
 	bool "Enable support for the CPM2 (Communications Processor Module)"
 	depends on (FSL_SOC_BOOKE && PPC32) || 8260
 	select CPM
-	select PPC_LIB_RHEAP
+	select LIB_RHEAP
 	select PPC_PCI_CHOICE
 	select ARCH_REQUIRE_GPIOLIB
 	help

diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 1211e3b..1d7281d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -33,7 +33,7 @@ config PPC_8xx
 	bool "Freescale 8xx"
 	select FSL_SOC
 	select 8xx
-	select PPC_LIB_RHEAP
+	select LIB_RHEAP
 
 config 40x
 	bool "AMCC 40x"

diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 5e6ff38..c6f5762 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -38,7 +38,7 @@
 #include
 #include
 #include
-#include <asm/rheap.h>
+#include <linux/fsl/rheap.h>
 #include
 #include

diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 8dc1e24..5a63d35 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -41,7 +41,7 @@
 #include
 #include
 #include
-#include <asm/rheap.h>
+#include <linux/fsl/rheap.h>
 #include
 #include

diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 4dd5341..0a98dca 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -26,7 +26,7 @@
 #include
 #include
 
-#include <asm/rheap.h>
+#include <linux/fsl/rheap.h>
 
 #include
 #include
@@ -65,8 +65,8 @@ void __init udbg_init_cpm(void)
 #endif
 
 static spinlock_t cpm_muram_lock;
-static rh_block_t cpm_boot_muram_rh_block[16];
-static rh_info_t cpm_muram_info;
+static struct _rh_block cpm_boot_muram_rh_block[16];
+static struct _rh_info cpm_muram_info;
 static u8 __iomem *muram_vbase;
 static phys_addr_t muram_pbase;

diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c
index 1b15f93..6ae237a 100644
--- a/arch/powerpc/sysdev/ppc4xx_ocm.c
+++ b/arch/powerpc/sysdev/ppc4xx_ocm.c
@@ -26,7 +26,7 @@
 #include
 #include
 #include
-#include <asm/rheap.h>
+#include <linux/fsl/rheap.h>
 #include
 #include
 #include
@@ -49,7 +49,7 @@ struct ocm_region {
 	int memtotal;
 	int memfree;
 
-	rh_info_t *rh;
+	struct _rh_info *rh;
 	struct list_head list;
 };

diff --git a/drivers/dma/bestcomm/Kconfig b/drivers/dma/bestcomm/Kconfig
index 29e4270..9bb1bf8 100644
--- a/drivers/dma/bestcomm/Kconfig
+++ b/drivers/dma/bestcomm/Kconfig
@@ -6,7 +6,7 @@ config PPC_BESTCOMM
 	tristate "Bestcomm DMA engine support"
 	depends on PPC_MPC52xx
 	default n
-	select PPC_LIB_RHEAP
+	select LIB_RHEAP
 	help
 	  BestComm is the name of the communication coprocessor found
 	  on the Freescale MPC5200 family of processor. Its usage is
 	  optional for some drivers (like ATA), but required for others
 	  (like FEC).
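The cpm_common.c hunk above also shows the early-boot pattern this
allocator supports; a condensed sketch of that pattern (names are
illustrative, not part of the patch):

/* rh_init() keeps all bookkeeping in caller-provided static storage,
 * so it is usable before the slab allocator is up. This mirrors the
 * cpm_boot_muram_rh_block[16]/cpm_muram_info pair above. */
#include <linux/kernel.h>
#include <linux/fsl/rheap.h>

static struct _rh_block my_boot_blocks[16];
static struct _rh_info my_muram_info;

static void my_muram_init(int muram_size)
{
	/* rh_init() sets RHIF_STATIC_INFO | RHIF_STATIC_BLOCK, so a
	 * later rh_destroy() will not kfree() these static arrays. */
	rh_init(&my_muram_info, 1, ARRAY_SIZE(my_boot_blocks),
		my_boot_blocks);
	rh_attach_region(&my_muram_info, 0, muram_size);
}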
diff --git a/drivers/soc/qe/Kconfig b/drivers/soc/qe/Kconfig
index 5c2b9d8..49118e1 100644
--- a/drivers/soc/qe/Kconfig
+++ b/drivers/soc/qe/Kconfig
@@ -5,7 +5,7 @@
 
 config QUICC_ENGINE
 	bool "Freescale QUICC Engine (QE) Support"
 	depends on FSL_SOC && (PPC32 || PPC64)
-	select PPC_LIB_RHEAP
+	select LIB_RHEAP
 	select CRC32
 	---help---
 	  The QUICC Engine (QE) is a new generation of communications

diff --git a/drivers/soc/qe/qe.c b/drivers/soc/qe/qe.c
index 25f0e0d..e0926f5 100644
--- a/drivers/soc/qe/qe.c
+++ b/drivers/soc/qe/qe.c
@@ -36,7 +36,7 @@
 #include
 #include
 #include
-#include <asm/rheap.h>
+#include <linux/fsl/rheap.h>
 
 static void qe_snums_init(void);
 static int qe_sdma_init(void);

diff --git a/drivers/soc/qe/qe_common.c b/drivers/soc/qe/qe_common.c
index c82ddcc..8e93b88 100644
--- a/drivers/soc/qe/qe_common.c
+++ b/drivers/soc/qe/qe_common.c
@@ -26,12 +26,12 @@
 #include
 #include
 
-#include <asm/rheap.h>
+#include <linux/fsl/rheap.h>
 #include
 
 static spinlock_t qe_muram_lock;
-static rh_block_t qe_boot_muram_rh_block[16];
-static rh_info_t qe_muram_info;
+static struct _rh_block qe_boot_muram_rh_block[16];
+static struct _rh_info qe_muram_info;
 static u8 __iomem *muram_vbase;
 static phys_addr_t muram_pbase;

diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 2a46153..4ce7204 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1972,7 +1972,7 @@ config FB_FSL_DIU
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
-	select PPC_LIB_RHEAP
+	select LIB_RHEAP
 	---help---
 	  Framebuffer driver for the Freescale SoC DIU

diff --git a/include/linux/fsl/bestcomm/sram.h b/include/linux/fsl/bestcomm/sram.h
index b6d6689..8555dab 100644
--- a/include/linux/fsl/bestcomm/sram.h
+++ b/include/linux/fsl/bestcomm/sram.h
@@ -12,7 +12,7 @@
 #ifndef __BESTCOMM_SRAM_H__
 #define __BESTCOMM_SRAM_H__
 
-#include <asm/rheap.h>
+#include <linux/fsl/rheap.h>
 #include
 #include
 
@@ -25,7 +25,7 @@ struct bcom_sram {
 	phys_addr_t base_phys;
 	void *base_virt;
 	unsigned int size;
-	rh_info_t *rh;
+	struct _rh_info *rh;
 	spinlock_t lock;
 };
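One subtlety for callers of the header that follows: the rh_alloc*
functions return an offset as unsigned long, with errors encoded as
negative errno values cast into that type, so results must be checked
with IS_ERR_VALUE() rather than against zero (offset 0 is a valid
allocation). A sketch, assuming a driver-mapped base as in
cpm_common.c/qe_common.c above:

/* Illustrative only; my_vbase stands in for a driver's ioremap()ed
 * base address. */
#include <linux/err.h>
#include <linux/fsl/rheap.h>

static u8 __iomem *my_vbase;	/* hypothetical mapped base */

static void __iomem *my_alloc_mapped(struct _rh_info *info, int size)
{
	unsigned long offset = rh_alloc(info, size, "my_driver");

	if (IS_ERR_VALUE(offset))
		return NULL;

	return my_vbase + offset;
}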
diff --git a/include/linux/fsl/rheap.h b/include/linux/fsl/rheap.h
new file mode 100644
index 0000000..88149da
--- /dev/null
+++ b/include/linux/fsl/rheap.h
@@ -0,0 +1,93 @@
+/*
+ * include/linux/fsl/rheap.h
+ *
+ * Header file for the implementation of a remote heap.
+ *
+ * Author: Pantelis Antoniou
+ *
+ * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __ASM_PPC_RHEAP_H__
+#define __ASM_PPC_RHEAP_H__
+
+#include <linux/list.h>
+
+struct _rh_block {
+	struct list_head list;
+	unsigned long start;
+	int size;
+	const char *owner;
+};
+
+struct _rh_info {
+	unsigned int alignment;
+	int max_blocks;
+	int empty_slots;
+	struct _rh_block *block;
+	struct list_head empty_list;
+	struct list_head free_list;
+	struct list_head taken_list;
+	unsigned int flags;
+};
+
+#define RHIF_STATIC_INFO	0x1
+#define RHIF_STATIC_BLOCK	0x2
+
+struct _rh_stats {
+	unsigned long start;
+	int size;
+	const char *owner;
+};
+
+#define RHGS_FREE	0
+#define RHGS_TAKEN	1
+
+/* Create a remote heap dynamically */
+extern struct _rh_info *rh_create(unsigned int alignment);
+
+/* Destroy a remote heap, created by rh_create() */
+extern void rh_destroy(struct _rh_info *info);
+
+/* Initialize in place a remote info block */
+extern void rh_init(struct _rh_info *info, unsigned int alignment,
+		    int max_blocks, struct _rh_block *block);
+
+/* Attach a free region to manage */
+extern int rh_attach_region(struct _rh_info *info, unsigned long start,
+			    int size);
+
+/* Detach a free region */
+extern unsigned long rh_detach_region(struct _rh_info *info,
+				      unsigned long start, int size);
+
+/* Allocate the given size from the remote heap (with alignment) */
+extern unsigned long rh_alloc_align(struct _rh_info *info, int size,
+				    int alignment, const char *owner);
+
+/* Allocate the given size from the remote heap */
+extern unsigned long rh_alloc(struct _rh_info *info, int size,
+			      const char *owner);
+
+/* Allocate the given size from the given address */
+extern unsigned long rh_alloc_fixed(struct _rh_info *info, unsigned long start,
+				    int size, const char *owner);
+
+/* Free the allocated area */
+extern int rh_free(struct _rh_info *info, unsigned long start);
+
+/* Get stats for debugging purposes */
+extern int rh_get_stats(struct _rh_info *info, int what, int max_stats,
+			struct _rh_stats *stats);
+
+/* Simple dump of remote heap info */
+extern void rh_dump(struct _rh_info *info);
+
+/* Set owner of taken block */
+extern int rh_set_owner(struct _rh_info *info, unsigned long start,
+			const char *owner);
+
+#endif /* __ASM_PPC_RHEAP_H__ */

diff --git a/lib/Kconfig b/lib/Kconfig
index 4490089..4ffcc86 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -365,6 +365,9 @@ config CPU_RMAP
 	bool
 	depends on SMP
 
+config LIB_RHEAP
+	bool
+
 config DQL
 	bool

diff --git a/lib/Makefile b/lib/Makefile
index 6e23a0f..8523e9d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -194,3 +194,5 @@ quiet_cmd_build_OID_registry = GEN $@
 clean-files += oid_registry_data.c
 
 obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+
+obj-$(CONFIG_LIB_RHEAP) += rheap.o

diff --git a/lib/rheap.c b/lib/rheap.c
new file mode 100644
index 0000000..241cc0a
--- /dev/null
+++ b/lib/rheap.c
@@ -0,0 +1,747 @@
+/*
+ * A Remote Heap. Remote means that we don't touch the memory that the
+ * heap points to. Normal heap implementations use the memory they manage
+ * to place their list. We cannot do that because the memory we manage may
+ * have special properties, for example it is uncachable or of different
+ * endianness.
+ *
+ * Author: Pantelis Antoniou
+ *
+ * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/fsl/rheap.h>
+
+/*
+ * Fixup a list_head, needed when copying lists.
If the pointers fall + * between s and e, apply the delta. This assumes that + * sizeof(struct list_head *) == sizeof(unsigned long *). + */ +static inline void fixup(unsigned long s, unsigned long e, int d, + struct list_head *l) +{ + unsigned long *pp; + + pp = (unsigned long *)&l->next; + if (*pp >= s && *pp < e) + *pp += d; + + pp = (unsigned long *)&l->prev; + if (*pp >= s && *pp < e) + *pp += d; +} + +/* Grow the allocated blocks */ +static int grow(struct _rh_info *info, int max_blocks) +{ + struct _rh_block *block, *blk; + int i, new_blocks; + int delta; + unsigned long blks, blke; + + if (max_blocks <= info->max_blocks) + return -EINVAL; + + new_blocks = max_blocks - info->max_blocks; + + block = kmalloc(sizeof(struct _rh_block) * max_blocks, GFP_ATOMIC); + if (block == NULL) + return -ENOMEM; + + if (info->max_blocks > 0) { + + /* copy old block area */ + memcpy(block, info->block, + sizeof(struct _rh_block) * info->max_blocks); + + delta = (char *)block - (char *)info->block; + + /* and fixup list pointers */ + blks = (unsigned long)info->block; + blke = (unsigned long)(info->block + info->max_blocks); + + for (i = 0, blk = block; i < info->max_blocks; i++, blk++) + fixup(blks, blke, delta, &blk->list); + + fixup(blks, blke, delta, &info->empty_list); + fixup(blks, blke, delta, &info->free_list); + fixup(blks, blke, delta, &info->taken_list); + + /* free the old allocated memory */ + if ((info->flags & RHIF_STATIC_BLOCK) == 0) + kfree(info->block); + } + + info->block = block; + info->empty_slots += new_blocks; + info->max_blocks = max_blocks; + info->flags &= ~RHIF_STATIC_BLOCK; + + /* add all new blocks to the free list */ + blk = block + info->max_blocks - new_blocks; + for (i = 0; i < new_blocks; i++, blk++) + list_add(&blk->list, &info->empty_list); + + return 0; +} + +/* + * Assure at least the required amount of empty slots. If this function + * causes a grow in the block area then all pointers kept to the block + * area are invalid! + */ +static int assure_empty(struct _rh_info *info, int slots) +{ + int max_blocks; + + /* This function is not meant to be used to grow uncontrollably */ + if (slots >= 4) + return -EINVAL; + + /* Enough space */ + if (info->empty_slots >= slots) + return 0; + + /* Next 16 sized block */ + max_blocks = ((info->max_blocks + slots) + 15) & ~15; + + return grow(info, max_blocks); +} + +static struct _rh_block *get_slot(struct _rh_info *info) +{ + struct _rh_block *blk; + + /* If no more free slots, and failure to extend. 
*/ + /* XXX: You should have called assure_empty before */ + if (info->empty_slots == 0) { + pr_err("rh: out of slots; crash is imminent.\n"); + return NULL; + } + + /* Get empty slot to use */ + blk = list_entry(info->empty_list.next, struct _rh_block, list); + list_del_init(&blk->list); + info->empty_slots--; + + /* Initialize */ + blk->start = 0; + blk->size = 0; + blk->owner = NULL; + + return blk; +} + +static inline void release_slot(struct _rh_info *info, struct _rh_block *blk) +{ + list_add(&blk->list, &info->empty_list); + info->empty_slots++; +} + +static void attach_free_block(struct _rh_info *info, struct _rh_block *blkn) +{ + struct _rh_block *blk; + struct _rh_block *before; + struct _rh_block *after; + struct _rh_block *next; + int size; + unsigned long s, e, bs, be; + struct list_head *l; + + /* We assume that they are aligned properly */ + size = blkn->size; + s = blkn->start; + e = s + size; + + /* Find the blocks immediately before and after the given one + * (if any) */ + before = NULL; + after = NULL; + next = NULL; + + list_for_each(l, &info->free_list) { + blk = list_entry(l, struct _rh_block, list); + + bs = blk->start; + be = bs + blk->size; + + if (next == NULL && s >= bs) + next = blk; + + if (be == s) + before = blk; + + if (e == bs) + after = blk; + + /* If both are not null, break now */ + if (before != NULL && after != NULL) + break; + } + + /* Now check if they are really adjacent */ + if (before && s != (before->start + before->size)) + before = NULL; + + if (after && e != after->start) + after = NULL; + + /* No coalescing; list insert and return */ + if (before == NULL && after == NULL) { + + if (next != NULL) + list_add(&blkn->list, &next->list); + else + list_add(&blkn->list, &info->free_list); + + return; + } + + /* We don't need it anymore */ + release_slot(info, blkn); + + /* Grow the before block */ + if (before != NULL && after == NULL) { + before->size += size; + return; + } + + /* Grow the after block backwards */ + if (before == NULL && after != NULL) { + after->start -= size; + after->size += size; + return; + } + + /* Grow the before block, and release the after block */ + before->size += size + after->size; + list_del(&after->list); + release_slot(info, after); +} + +static void attach_taken_block(struct _rh_info *info, struct _rh_block *blkn) +{ + struct _rh_block *blk; + struct list_head *l; + + /* Find the block immediately before the given one (if any) */ + list_for_each(l, &info->taken_list) { + blk = list_entry(l, struct _rh_block, list); + if (blk->start > blkn->start) { + list_add_tail(&blkn->list, &blk->list); + return; + } + } + + list_add_tail(&blkn->list, &info->taken_list); +} + +/* + * Create a remote heap dynamically. Note that no memory for the blocks + * are allocated. It will upon the first allocation + */ +struct _rh_info *rh_create(unsigned int alignment) +{ + struct _rh_info *info; + + /* Alignment must be a power of two */ + if ((alignment & (alignment - 1)) != 0) + return ERR_PTR(-EINVAL); + + info = kmalloc(sizeof(*info), GFP_ATOMIC); + if (info == NULL) + return ERR_PTR(-ENOMEM); + + info->alignment = alignment; + + /* Initially everything as empty */ + info->block = NULL; + info->max_blocks = 0; + info->empty_slots = 0; + info->flags = 0; + + INIT_LIST_HEAD(&info->empty_list); + INIT_LIST_HEAD(&info->free_list); + INIT_LIST_HEAD(&info->taken_list); + + return info; +} +EXPORT_SYMBOL_GPL(rh_create); + +/* + * Destroy a dynamically created remote heap. 
Deallocate only if the areas + * are not static + */ +void rh_destroy(struct _rh_info *info) +{ + if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL) + kfree(info->block); + + if ((info->flags & RHIF_STATIC_INFO) == 0) + kfree(info); +} +EXPORT_SYMBOL_GPL(rh_destroy); + +/* + * Initialize in place a remote heap info block. This is needed to support + * operation very early in the startup of the kernel, when it is not yet safe + * to call kmalloc. + */ +void rh_init(struct _rh_info *info, unsigned int alignment, int max_blocks, + struct _rh_block *block) +{ + int i; + struct _rh_block *blk; + + /* Alignment must be a power of two */ + if ((alignment & (alignment - 1)) != 0) + return; + + info->alignment = alignment; + + /* Initially everything as empty */ + info->block = block; + info->max_blocks = max_blocks; + info->empty_slots = max_blocks; + info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK; + + INIT_LIST_HEAD(&info->empty_list); + INIT_LIST_HEAD(&info->free_list); + INIT_LIST_HEAD(&info->taken_list); + + /* Add all new blocks to the free list */ + for (i = 0, blk = block; i < max_blocks; i++, blk++) + list_add(&blk->list, &info->empty_list); +} +EXPORT_SYMBOL_GPL(rh_init); + +/* Attach a free memory region, coalesces regions if adjuscent */ +int rh_attach_region(struct _rh_info *info, unsigned long start, int size) +{ + struct _rh_block *blk; + unsigned long s, e, m; + int r; + + /* The region must be aligned */ + s = start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + if (IS_ERR_VALUE(e) || (e < s)) + return -ERANGE; + + /* Take final values */ + start = s; + size = e - s; + + /* Grow the blocks, if needed */ + r = assure_empty(info, 1); + if (r < 0) + return r; + + blk = get_slot(info); + blk->start = start; + blk->size = size; + blk->owner = NULL; + + attach_free_block(info, blk); + + return 0; +} +EXPORT_SYMBOL_GPL(rh_attach_region); + +/* Detatch given address range, splits free block if needed. */ +unsigned long rh_detach_region(struct _rh_info *info, unsigned long start, + int size) +{ + struct list_head *l; + struct _rh_block *blk, *newblk; + unsigned long s, e, m, bs, be; + + /* Validate size */ + if (size <= 0) + return (unsigned long) -EINVAL; + + /* The region must be aligned */ + s = start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + if (assure_empty(info, 1) < 0) + return (unsigned long) -ENOMEM; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, struct _rh_block, list); + /* The range must lie entirely inside one free block */ + bs = blk->start; + be = blk->start + blk->size; + if (s >= bs && e <= be) + break; + blk = NULL; + } + + if (blk == NULL) + return (unsigned long) -ENOMEM; + + /* Perfect fit */ + if (bs == s && be == e) { + /* Delete from free list, release slot */ + list_del(&blk->list); + release_slot(info, blk); + return s; + } + + /* blk still in free list, with updated start and/or size */ + if (bs == s || be == e) { + if (bs == s) + blk->start += size; + blk->size -= size; + + } else { + /* The front free fragment */ + blk->size = s - bs; + + /* the back free fragment */ + newblk = get_slot(info); + newblk->start = e; + newblk->size = be - e; + + list_add(&newblk->list, &blk->list); + } + + return s; +} +EXPORT_SYMBOL_GPL(rh_detach_region); + +/* Allocate a block of memory at the specified alignment. 
The value returned + * is an offset into the buffer initialized by rh_init(), or a negative number + * if there is an error. + */ +unsigned long rh_alloc_align(struct _rh_info *info, int size, + int alignment, const char *owner) +{ + struct list_head *l; + struct _rh_block *blk; + struct _rh_block *newblk; + unsigned long start, sp_size; + + /* Validate size, and alignment must be power of two */ + if (size <= 0 || (alignment & (alignment - 1)) != 0) + return (unsigned long) -EINVAL; + + /* Align to configured alignment */ + size = (size + (info->alignment - 1)) & ~(info->alignment - 1); + + if (assure_empty(info, 2) < 0) + return (unsigned long) -ENOMEM; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, struct _rh_block, list); + if (size <= blk->size) { + start = (blk->start + alignment - 1) & ~(alignment - 1); + if (start + size <= blk->start + blk->size) + break; + } + blk = NULL; + } + + if (blk == NULL) + return (unsigned long) -ENOMEM; + + /* Just fits */ + if (blk->size == size) { + /* Move from free list to taken list */ + list_del(&blk->list); + newblk = blk; + } else { + /* Fragment caused, split if needed */ + /* Create block for fragment in the beginning */ + sp_size = start - blk->start; + if (sp_size) { + struct _rh_block *spblk; + + spblk = get_slot(info); + spblk->start = blk->start; + spblk->size = sp_size; + /* add before the blk */ + list_add(&spblk->list, blk->list.prev); + } + newblk = get_slot(info); + newblk->start = start; + newblk->size = size; + + /* blk still in free list, with updated start and size + * for fragment in the end */ + blk->start = start + size; + blk->size -= sp_size + size; + /* No fragment in the end, remove blk */ + if (blk->size == 0) { + list_del(&blk->list); + release_slot(info, blk); + } + } + + newblk->owner = owner; + attach_taken_block(info, newblk); + + return start; +} +EXPORT_SYMBOL_GPL(rh_alloc_align); + +/* Allocate a block of memory at the default alignment. The value returned is + * an offset into the buffer initialized by rh_init(), or a negative number if + * there is an error. + */ +unsigned long rh_alloc(struct _rh_info *info, int size, const char *owner) +{ + return rh_alloc_align(info, size, info->alignment, owner); +} +EXPORT_SYMBOL_GPL(rh_alloc); + +/* Allocate a block of memory at the given offset, rounded up to the default + * alignment. The value returned is an offset into the buffer initialized by + * rh_init(), or a negative number if there is an error. 
+ */ +unsigned long rh_alloc_fixed(struct _rh_info *info, unsigned long start, + int size, const char *owner) +{ + struct list_head *l; + struct _rh_block *blk, *newblk1, *newblk2; + unsigned long s, e, m, bs = 0, be = 0; + + /* Validate size */ + if (size <= 0) + return (unsigned long) -EINVAL; + + /* The region must be aligned */ + s = start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + if (assure_empty(info, 2) < 0) + return (unsigned long) -ENOMEM; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, struct _rh_block, list); + /* The range must lie entirely inside one free block */ + bs = blk->start; + be = blk->start + blk->size; + if (s >= bs && e <= be) + break; + blk = NULL; + } + + if (blk == NULL) + return (unsigned long) -ENOMEM; + + /* Perfect fit */ + if (bs == s && be == e) { + /* Move from free list to taken list */ + list_del(&blk->list); + blk->owner = owner; + + start = blk->start; + attach_taken_block(info, blk); + + return start; + + } + + /* blk still in free list, with updated start and/or size */ + if (bs == s || be == e) { + if (bs == s) + blk->start += size; + blk->size -= size; + + } else { + /* The front free fragment */ + blk->size = s - bs; + + /* The back free fragment */ + newblk2 = get_slot(info); + newblk2->start = e; + newblk2->size = be - e; + + list_add(&newblk2->list, &blk->list); + } + + newblk1 = get_slot(info); + newblk1->start = s; + newblk1->size = e - s; + newblk1->owner = owner; + + start = newblk1->start; + attach_taken_block(info, newblk1); + + return start; +} +EXPORT_SYMBOL_GPL(rh_alloc_fixed); + +/* Deallocate the memory previously allocated by one of the rh_alloc functions. + * The return value is the size of the deallocated block, or a negative number + * if there is an error. 
+ */ +int rh_free(struct _rh_info *info, unsigned long start) +{ + struct _rh_block *blk, *blk2; + struct list_head *l; + int size; + + /* Linear search for block */ + blk = NULL; + list_for_each(l, &info->taken_list) { + blk2 = list_entry(l, struct _rh_block, list); + if (start < blk2->start) + break; + blk = blk2; + } + + if (blk == NULL || start > (blk->start + blk->size)) + return -EINVAL; + + /* Remove from taken list */ + list_del(&blk->list); + + /* Get size of freed block */ + size = blk->size; + attach_free_block(info, blk); + + return size; +} +EXPORT_SYMBOL_GPL(rh_free); + +int rh_get_stats(struct _rh_info *info, int what, int max_stats, + struct _rh_stats *stats) +{ + struct _rh_block *blk; + struct list_head *l; + struct list_head *h; + int nr; + + switch (what) { + + case RHGS_FREE: + h = &info->free_list; + break; + + case RHGS_TAKEN: + h = &info->taken_list; + break; + + default: + return -EINVAL; + } + + /* Linear search for block */ + nr = 0; + list_for_each(l, h) { + blk = list_entry(l, struct _rh_block, list); + if (stats != NULL && nr < max_stats) { + stats->start = blk->start; + stats->size = blk->size; + stats->owner = blk->owner; + stats++; + } + nr++; + } + + return nr; +} +EXPORT_SYMBOL_GPL(rh_get_stats); + +int rh_set_owner(struct _rh_info *info, unsigned long start, const char *owner) +{ + struct _rh_block *blk, *blk2; + struct list_head *l; + int size; + + /* Linear search for block */ + blk = NULL; + list_for_each(l, &info->taken_list) { + blk2 = list_entry(l, struct _rh_block, list); + if (start < blk2->start) + break; + blk = blk2; + } + + if (blk == NULL || start > (blk->start + blk->size)) + return -EINVAL; + + blk->owner = owner; + size = blk->size; + + return size; +} +EXPORT_SYMBOL_GPL(rh_set_owner); + +void rh_dump(struct _rh_info *info) +{ + static struct _rh_stats st[32]; /* XXX maximum 32 blocks */ + int maxnr; + int i, nr; + + maxnr = ARRAY_SIZE(st); + + pr_info("info @0x%p (%d slots empty / %d max)\n", + info, info->empty_slots, info->max_blocks); + + pr_info(" Free:\n"); + nr = rh_get_stats(info, RHGS_FREE, maxnr, st); + if (nr > maxnr) + nr = maxnr; + for (i = 0; i < nr; i++) + pr_info(" 0x%lx-0x%lx (%u)\n", + st[i].start, st[i].start + st[i].size, + st[i].size); + pr_info("\n"); + + pr_info(" Taken:\n"); + nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st); + if (nr > maxnr) + nr = maxnr; + for (i = 0; i < nr; i++) + pr_info(" 0x%lx-0x%lx (%u) %s\n", + st[i].start, st[i].start + st[i].size, + st[i].size, st[i].owner != NULL ? st[i].owner : ""); + pr_info("\n"); +} +EXPORT_SYMBOL_GPL(rh_dump); + +void rh_dump_blk(struct _rh_info *info, struct _rh_block *blk) +{ + pr_info("blk @0x%p: 0x%lx-0x%lx (%u)\n", + blk, blk->start, blk->start + blk->size, blk->size); +} +EXPORT_SYMBOL_GPL(rh_dump_blk); + -- cgit v0.10.2
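As an aside for reviewers, the rounding idiom used throughout rheap.c
(s = (s + m) & ~m to round a start up, e &= ~m to round an end down,
with m = alignment - 1) relies on the alignment being a power of two,
which rh_create()/rh_init() enforce. A tiny standalone illustration,
not part of the patch:

/* Power-of-two rounding as used by rh_attach_region() and friends. */
#include <stdio.h>

int main(void)
{
	unsigned long alignment = 16;	/* must be a power of two */
	unsigned long m = alignment - 1;
	unsigned long s = 0x1003, e = 0x2fff;

	s = (s + m) & ~m;	/* round start up:  0x1003 -> 0x1010 */
	e = e & ~m;		/* round end down:  0x2fff -> 0x2ff0 */

	printf("s=0x%lx e=0x%lx usable=%lu bytes\n", s, e, e - s);
	return 0;
}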