author      Chris Mason <chris.mason@oracle.com>   2008-01-08 20:46:30 (GMT)
committer   Chris Mason <chris.mason@oracle.com>   2008-09-25 15:03:59 (GMT)
commit      dc17ff8f11d129db9e83ab7244769e4eae05e14d (patch)
tree        622e70100d6082e371a6ca62b02fd57e0c37f8dc /fs/btrfs/ordered-data.c
parent      e4204dedbbaa3a614605cb83cc0ac5161af6b4e6 (diff)
Btrfs: Add data=ordered support
This forces file data extents down the disk along with the metadata that references them. The current implementation is fairly simple, and just writes out all of the dirty pages in an inode before the commit.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
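For context, a minimal sketch of the ordering constraint this commit is after, for a single inode: push the inode's dirty data pages and wait for that IO before the metadata that references those extents is committed. The helper below is hypothetical and is not part of this patch; in this series the actual flush is driven from the transaction commit path, not from this file.

#include <linux/fs.h>

/*
 * Hypothetical illustration only: enforce data=ordered for one inode by
 * writing out its dirty pages and waiting for the IO to finish before the
 * transaction commits the metadata that points at those extents.
 */
static int flush_one_ordered_inode(struct inode *inode)
{
	int ret;

	ret = filemap_fdatawrite(inode->i_mapping);
	if (ret)
		return ret;
	return filemap_fdatawait(inode->i_mapping);
}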
Diffstat (limited to 'fs/btrfs/ordered-data.c')
-rw-r--r--   fs/btrfs/ordered-data.c   221
1 file changed, 221 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
new file mode 100644
index 0000000..411aba8
--- /dev/null
+++ b/fs/btrfs/ordered-data.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2007 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include "ctree.h"
+#include "transaction.h"
+#include "btrfs_inode.h"
+
+struct tree_entry {
+ u64 root_objectid;
+ u64 objectid;
+ struct rb_node rb_node;
+};
+
+/*
+ * returns > 0 if the passed (root_objectid, objectid) key is greater than
+ * entry, < 0 if it is smaller and 0 if they are equal
+ */
+static int comp_entry(struct tree_entry *entry, u64 root_objectid,
+ u64 objectid)
+{
+ if (root_objectid < entry->root_objectid)
+ return -1;
+ if (root_objectid > entry->root_objectid)
+ return 1;
+ if (objectid < entry->objectid)
+ return -1;
+ if (objectid > entry->objectid)
+ return 1;
+ return 0;
+}
+
+static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid,
+ u64 objectid, struct rb_node *node)
+{
+ struct rb_node ** p = &root->rb_node;
+ struct rb_node * parent = NULL;
+ struct tree_entry *entry;
+ int comp;
+
+ while(*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct tree_entry, rb_node);
+
+ comp = comp_entry(entry, root_objectid, objectid);
+ if (comp < 0)
+ p = &(*p)->rb_left;
+ else if (comp > 0)
+ p = &(*p)->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node(node, parent, p);
+ rb_insert_color(node, root);
+ return NULL;
+}
+
+static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid,
+ u64 objectid, struct rb_node **prev_ret)
+{
+ struct rb_node * n = root->rb_node;
+ struct rb_node *prev = NULL;
+ struct tree_entry *entry;
+ struct tree_entry *prev_entry = NULL;
+ int comp;
+
+ while(n) {
+ entry = rb_entry(n, struct tree_entry, rb_node);
+ prev = n;
+ prev_entry = entry;
+ comp = comp_entry(entry, root_objectid, objectid);
+
+ if (comp < 0)
+ n = n->rb_left;
+ else if (comp > 0)
+ n = n->rb_right;
+ else
+ return n;
+ }
+ if (!prev_ret)
+ return NULL;
+
+ while(prev && comp_entry(prev_entry, root_objectid, objectid) >= 0) {
+ prev = rb_next(prev);
+ prev_entry = rb_entry(prev, struct tree_entry, rb_node);
+ }
+ *prev_ret = prev;
+ return NULL;
+}
+
+static inline struct rb_node *tree_search(struct rb_root *root,
+ u64 root_objectid, u64 objectid)
+{
+ struct rb_node *prev;
+ struct rb_node *ret;
+ ret = __tree_search(root, root_objectid, objectid, &prev);
+ if (!ret)
+ return prev;
+ return ret;
+}
+
+int btrfs_add_ordered_inode(struct inode *inode)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ u64 root_objectid = root->root_key.objectid;
+ u64 transid = root->fs_info->running_transaction->transid;
+ struct tree_entry *entry;
+ struct rb_node *node;
+ struct btrfs_ordered_inode_tree *tree;
+
+ if (transid <= BTRFS_I(inode)->ordered_trans)
+ return 0;
+
+ tree = &root->fs_info->running_transaction->ordered_inode_tree;
+
+ read_lock(&tree->lock);
+ node = __tree_search(&tree->tree, root_objectid, inode->i_ino, NULL);
+ read_unlock(&tree->lock);
+ if (node) {
+ return 0;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_NOFS);
+ if (!entry)
+ return -ENOMEM;
+
+ write_lock(&tree->lock);
+ entry->objectid = inode->i_ino;
+ entry->root_objectid = root_objectid;
+
+ node = tree_insert(&tree->tree, root_objectid,
+ inode->i_ino, &entry->rb_node);
+
+ BTRFS_I(inode)->ordered_trans = transid;
+
+ write_unlock(&tree->lock);
+ if (node)
+ kfree(entry);
+ return 0;
+}
+
+int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree,
+ u64 *root_objectid, u64 *objectid)
+{
+ struct tree_entry *entry;
+ struct rb_node *node;
+
+ write_lock(&tree->lock);
+ node = tree_search(&tree->tree, *root_objectid, *objectid);
+ if (!node) {
+ write_unlock(&tree->lock);
+ return 0;
+ }
+ entry = rb_entry(node, struct tree_entry, rb_node);
+
+ while(comp_entry(entry, *root_objectid, *objectid) >= 0) {
+ node = rb_next(node);
+ if (!node)
+ break;
+ entry = rb_entry(node, struct tree_entry, rb_node);
+ }
+ if (!node) {
+ write_unlock(&tree->lock);
+ return 0;
+ }
+
+ *root_objectid = entry->root_objectid;
+ *objectid = entry->objectid;
+ write_unlock(&tree->lock);
+ return 1;
+}
+
+int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree,
+ u64 *root_objectid, u64 *objectid)
+{
+ struct tree_entry *entry;
+ struct rb_node *node;
+
+ write_lock(&tree->lock);
+ node = tree_search(&tree->tree, *root_objectid, *objectid);
+ if (!node) {
+ write_unlock(&tree->lock);
+ return 0;
+ }
+
+ entry = rb_entry(node, struct tree_entry, rb_node);
+ while(comp_entry(entry, *root_objectid, *objectid) >= 0) {
+ node = rb_next(node);
+ if (!node)
+ break;
+ entry = rb_entry(node, struct tree_entry, rb_node);
+ }
+ if (!node) {
+ write_unlock(&tree->lock);
+ return 0;
+ }
+
+ *root_objectid = entry->root_objectid;
+ *objectid = entry->objectid;
+ rb_erase(node, &tree->tree);
+ write_unlock(&tree->lock);
+ kfree(entry);
+ return 1;
+}
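A sketch of how a commit-time caller might drain this tree; the wrapper name is hypothetical and the inode lookup and writeback are only indicated in a comment, since that logic lives outside this file. Starting from (0, 0), each successful call hands back a key strictly greater than the one passed in and erases that entry, so the loop visits every recorded (root, inode) pair exactly once.

/*
 * Hypothetical drain loop, assuming the declarations added by this patch
 * (struct btrfs_ordered_inode_tree and btrfs_find_del_first_ordered_inode)
 * are in scope.
 */
static void drain_ordered_inodes(struct btrfs_ordered_inode_tree *tree)
{
	u64 root_objectid = 0;
	u64 objectid = 0;

	while (btrfs_find_del_first_ordered_inode(tree, &root_objectid,
						  &objectid)) {
		/*
		 * (root_objectid, objectid) names an inode that had ordered
		 * data; a real caller would resolve it to a struct inode and
		 * write out its dirty pages before the commit proceeds.
		 */
	}
}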