author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-11-18 18:20:05 (GMT)
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-11-21 16:59:25 (GMT)
commit    6c0c42cdfd73fb161417403d8d077cb136e10bbf (patch)
tree      343de0cb98df07295bc3e03eee083012ac12bae7 /fs/ubifs/super.c
parent    39ce81ce7168aa7226fb9f182c3a2b57060d0905 (diff)
download  linux-6c0c42cdfd73fb161417403d8d077cb136e10bbf.tar.xz
UBIFS: do not allocate too much
Bulk-read allocates 128KiB or more using kmalloc. The allocation starts failing often when memory gets fragmented. UBIFS still works fine in that case, because it falls back to the standard (non-optimized) read method. This patch teaches bulk-read to allocate exactly the amount of memory it needs, instead of allocating 128KiB every time.

This patch is also a preparation for a further fix where we'll have a pre-allocated bulk-read buffer as well. For example, the @bu object is now prepared in 'ubifs_bulk_read()', so later we can pass either a pre-allocated or a freshly allocated buffer to 'ubifs_do_bulk_read()', or teach 'ubifs_do_bulk_read()' not to allocate 'bu->buf' if it is already there.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
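As a rough illustration of the follow-up idea described above (not the actual later patch), here is a minimal sketch assuming UBIFS's 'struct bu_info' keeps the buffer pointer in 'buf' and its size in 'buf_len'; the function name and the surrounding logic are hypothetical:

/*
 * Sketch only: allocate bu->buf inside the bulk-read path only when the
 * caller did not pass a pre-allocated buffer. Field names follow
 * struct bu_info; everything else here is assumed for illustration.
 */
static int do_bulk_read_sketch(struct ubifs_info *c, struct bu_info *bu)
{
	int allocated = 0;

	if (!bu->buf) {
		/* No pre-allocated buffer - allocate just what this read needs */
		bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
		if (!bu->buf)
			/* Caller falls back to the non-optimized read path */
			return -ENOMEM;
		allocated = 1;
	}

	/* ... perform the bulk-read using bu->buf ... */

	if (allocated)
		kfree(bu->buf);
	return 0;
}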
Diffstat (limited to 'fs/ubifs/super.c')
-rw-r--r--  fs/ubifs/super.c  12
1 file changed, 6 insertions, 6 deletions
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index ea493e6..1d51156 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -569,16 +569,16 @@ static int init_constants_early(struct ubifs_info *c)
 	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
 
 	/* Buffer size for bulk-reads */
-	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
-	if (c->bulk_read_buf_size > c->leb_size)
-		c->bulk_read_buf_size = c->leb_size;
-	if (c->bulk_read_buf_size > UBIFS_KMALLOC_OK) {
+	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+	if (c->max_bu_buf_len > c->leb_size)
+		c->max_bu_buf_len = c->leb_size;
+	if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
 		/* Check if we can kmalloc that much */
-		void *try = kmalloc(c->bulk_read_buf_size,
+		void *try = kmalloc(c->max_bu_buf_len,
 				    GFP_KERNEL | __GFP_NOWARN);
 		kfree(try);
 		if (!try)
-			c->bulk_read_buf_size = UBIFS_KMALLOC_OK;
+			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
 	}
 	return 0;
 }