-rw-r--r--  Documentation/filesystems/ext3.txt |   7
-rw-r--r--  fs/ext3/super.c                    |  43
-rw-r--r--  fs/isofs/inode.c                   |  16
-rw-r--r--  fs/jbd/commit.c                    |   2
-rw-r--r--  fs/jbd/journal.c                   |  18
-rw-r--r--  fs/reiserfs/bitmap.c               |  22
-rw-r--r--  fs/reiserfs/dir.c                  |   7
-rw-r--r--  fs/reiserfs/fix_node.c             |  26
-rw-r--r--  fs/reiserfs/inode.c                | 114
-rw-r--r--  fs/reiserfs/ioctl.c                |   7
-rw-r--r--  fs/reiserfs/journal.c              | 104
-rw-r--r--  fs/reiserfs/lock.c                 |  43
-rw-r--r--  fs/reiserfs/namei.c                |  24
-rw-r--r--  fs/reiserfs/prints.c               |   5
-rw-r--r--  fs/reiserfs/reiserfs.h             |  36
-rw-r--r--  fs/reiserfs/resize.c               |  10
-rw-r--r--  fs/reiserfs/stree.c                |  74
-rw-r--r--  fs/reiserfs/super.c                |  75
-rw-r--r--  fs/reiserfs/xattr.c                |  46
-rw-r--r--  fs/reiserfs/xattr_acl.c            |  16
-rw-r--r--  fs/udf/super.c                     | 342
-rw-r--r--  include/linux/jbd.h                |  17
22 files changed, 609 insertions, 445 deletions
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 293855e..7ed0d17 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -26,11 +26,12 @@ journal=inum When a journal already exists, this option is ignored.
Otherwise, it specifies the number of the inode which
will represent the ext3 file system's journal file.
+journal_path=path
journal_dev=devnum When the external journal device's major/minor numbers
- have changed, this option allows the user to specify
+ have changed, these options allow the user to specify
the new journal location. The journal device is
- identified through its new major/minor numbers encoded
- in devnum.
+ identified through either its new major/minor numbers
+ encoded in devnum, or via a path to the device.
norecovery Don't load the journal on mounting. Note that this forces
noload mount of inconsistent filesystem, which can lead to
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index c47f147..c50c761 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -27,6 +27,7 @@
#include <linux/seq_file.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
+#include <linux/namei.h>
#include <asm/uaccess.h>
@@ -819,6 +820,7 @@ enum {
Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
+ Opt_journal_path,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
@@ -860,6 +862,7 @@ static const match_table_t tokens = {
{Opt_journal_update, "journal=update"},
{Opt_journal_inum, "journal=%u"},
{Opt_journal_dev, "journal_dev=%u"},
+ {Opt_journal_path, "journal_path=%s"},
{Opt_abort, "abort"},
{Opt_data_journal, "data=journal"},
{Opt_data_ordered, "data=ordered"},
@@ -975,6 +978,11 @@ static int parse_options (char *options, struct super_block *sb,
int option;
kuid_t uid;
kgid_t gid;
+ char *journal_path;
+ struct inode *journal_inode;
+ struct path path;
+ int error;
+
#ifdef CONFIG_QUOTA
int qfmt;
#endif
@@ -1129,6 +1137,41 @@ static int parse_options (char *options, struct super_block *sb,
return 0;
*journal_devnum = option;
break;
+ case Opt_journal_path:
+ if (is_remount) {
+ ext3_msg(sb, KERN_ERR, "error: cannot specify "
+ "journal on remount");
+ return 0;
+ }
+
+ journal_path = match_strdup(&args[0]);
+ if (!journal_path) {
+ ext3_msg(sb, KERN_ERR, "error: could not dup "
+ "journal device string");
+ return 0;
+ }
+
+ error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
+ if (error) {
+ ext3_msg(sb, KERN_ERR, "error: could not find "
+ "journal device path: error %d", error);
+ kfree(journal_path);
+ return 0;
+ }
+
+ journal_inode = path.dentry->d_inode;
+ if (!S_ISBLK(journal_inode->i_mode)) {
+ ext3_msg(sb, KERN_ERR, "error: journal path %s "
+ "is not a block device", journal_path);
+ path_put(&path);
+ kfree(journal_path);
+ return 0;
+ }
+
+ *journal_devnum = new_encode_dev(journal_inode->i_rdev);
+ path_put(&path);
+ kfree(journal_path);
+ break;
case Opt_noload:
set_opt (sbi->s_mount_opt, NOLOAD);
break;
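
The Opt_journal_path case above follows the usual kernel pattern for turning a device path into the same encoded devnum that journal_dev=%u supplies. A minimal sketch of that pattern, using only the calls visible in the hunk (kern_path, S_ISBLK, new_encode_dev, path_put); the helper name is hypothetical, the patch itself open-codes this inside parse_options():

	/* Sketch only: resolve a block-device path to an encoded device number. */
	static int path_to_journal_devnum(const char *name, unsigned int *devnum)
	{
		struct path path;
		struct inode *inode;
		int err;

		err = kern_path(name, LOOKUP_FOLLOW, &path);	/* follow symlinks */
		if (err)
			return err;

		inode = path.dentry->d_inode;
		if (!S_ISBLK(inode->i_mode)) {			/* must be a block device */
			path_put(&path);
			return -EINVAL;
		}

		*devnum = new_encode_dev(inode->i_rdev);	/* same encoding as journal_dev=%u */
		path_put(&path);
		return 0;
	}

This is why the hunk also adds #include <linux/namei.h> to fs/ext3/super.c.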
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index c348d6d..e5d408a 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -117,8 +117,8 @@ static void destroy_inodecache(void)
static int isofs_remount(struct super_block *sb, int *flags, char *data)
{
- /* we probably want a lot more here */
- *flags |= MS_RDONLY;
+ if (!(*flags & MS_RDONLY))
+ return -EROFS;
return 0;
}
@@ -763,15 +763,6 @@ root_found:
*/
s->s_maxbytes = 0x80000000000LL;
- /*
- * The CDROM is read-only, has no nodes (devices) on it, and since
- * all of the files appear to be owned by root, we really do not want
- * to allow suid. (suid or devices will not show up unless we have
- * Rock Ridge extensions)
- */
-
- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
-
/* Set this for reference. Its not currently used except on write
which we don't have .. */
@@ -1530,6 +1521,9 @@ struct inode *isofs_iget(struct super_block *sb,
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
+ /* We don't support read-write mounts */
+ if (!(flags & MS_RDONLY))
+ return ERR_PTR(-EACCES);
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 11bb11f..bb217dc 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -340,13 +340,13 @@ void journal_commit_transaction(journal_t *journal)
J_ASSERT(journal->j_committing_transaction == NULL);
commit_transaction = journal->j_running_transaction;
- J_ASSERT(commit_transaction->t_state == T_RUNNING);
trace_jbd_start_commit(journal, commit_transaction);
jbd_debug(1, "JBD: starting commit of transaction %d\n",
commit_transaction->t_tid);
spin_lock(&journal->j_state_lock);
+ J_ASSERT(commit_transaction->t_state == T_RUNNING);
commit_transaction->t_state = T_LOCKED;
trace_jbd_commit_locking(journal, commit_transaction);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 6510d63..2d04f9a 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -90,6 +90,24 @@ static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
static const char *journal_dev_name(journal_t *journal, char *buffer);
+#ifdef CONFIG_JBD_DEBUG
+void __jbd_debug(int level, const char *file, const char *func,
+ unsigned int line, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (level > journal_enable_debug)
+ return;
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(__jbd_debug);
+#endif
+
/*
* Helper function used to manage commit timeouts
*/
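
__jbd_debug() uses the kernel's struct va_format and the %pV printk extension to forward a variable argument list through a single printk() call. The matching caller side lives in include/linux/jbd.h (that hunk is not shown in this excerpt); given the signature above, a plausible wrapper macro looks like:

	/* Hypothetical caller-side wrapper matching the __jbd_debug() signature above. */
	#ifdef CONFIG_JBD_DEBUG
	#define jbd_debug(n, fmt, a...) \
		__jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
	#else
	#define jbd_debug(n, fmt, a...)		/* compiled out */
	#endif
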
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index a98b774..dc9a682 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -423,8 +423,11 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);
journal_mark_dirty(th, s, sbh);
- if (for_unformatted)
+ if (for_unformatted) {
+ int depth = reiserfs_write_unlock_nested(s);
dquot_free_block_nodirty(inode, 1);
+ reiserfs_write_lock_nested(s, depth);
+ }
}
void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1128,6 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
int passno = 0;
int nr_allocated = 0;
+ int depth;
determine_prealloc_size(hint);
if (!hint->formatted_node) {
@@ -1137,10 +1141,13 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
"reiserquota: allocating %d blocks id=%u",
amount_needed, hint->inode->i_uid);
#endif
+ depth = reiserfs_write_unlock_nested(s);
quota_ret =
dquot_alloc_block_nodirty(hint->inode, amount_needed);
- if (quota_ret) /* Quota exceeded? */
+ if (quota_ret) { /* Quota exceeded? */
+ reiserfs_write_lock_nested(s, depth);
return QUOTA_EXCEEDED;
+ }
if (hint->preallocate && hint->prealloc_size) {
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(s, REISERFS_DEBUG_CODE,
@@ -1153,6 +1160,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
hint->preallocate = hint->prealloc_size = 0;
}
/* for unformatted nodes, force large allocations */
+ reiserfs_write_lock_nested(s, depth);
}
do {
@@ -1181,9 +1189,11 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
hint->inode->i_uid);
#endif
/* Free not allocated blocks */
+ depth = reiserfs_write_unlock_nested(s);
dquot_free_block_nodirty(hint->inode,
amount_needed + hint->prealloc_size -
nr_allocated);
+ reiserfs_write_lock_nested(s, depth);
}
while (nr_allocated--)
reiserfs_free_block(hint->th, hint->inode,
@@ -1214,10 +1224,13 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
REISERFS_I(hint->inode)->i_prealloc_count,
hint->inode->i_uid);
#endif
+
+ depth = reiserfs_write_unlock_nested(s);
dquot_free_block_nodirty(hint->inode, amount_needed +
hint->prealloc_size - nr_allocated -
REISERFS_I(hint->inode)->
i_prealloc_count);
+ reiserfs_write_lock_nested(s, depth);
}
return CARRY_ON;
@@ -1340,10 +1353,11 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
"reading failed", __func__, block);
else {
if (buffer_locked(bh)) {
+ int depth;
PROC_INFO_INC(sb, scan_bitmap.wait);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
__wait_on_buffer(bh);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
}
BUG_ON(!buffer_uptodate(bh));
BUG_ON(atomic_read(&bh->b_count) == 0);
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 03e4ca5..1fd2051 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -71,6 +71,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
char small_buf[32]; /* avoid kmalloc if we can */
struct reiserfs_dir_entry de;
int ret = 0;
+ int depth;
reiserfs_write_lock(inode->i_sb);
@@ -181,17 +182,17 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
* Since filldir might sleep, we can release
* the write lock here for other waiters
*/
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
if (!dir_emit
(ctx, local_buf, d_reclen, d_ino,
DT_UNKNOWN)) {
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (local_buf != small_buf) {
kfree(local_buf);
}
goto end;
}
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (local_buf != small_buf) {
kfree(local_buf);
}
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 430e065..dc4d415 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -1022,9 +1022,9 @@ static int get_far_parent(struct tree_balance *tb,
if (buffer_locked(*pcom_father)) {
/* Release the write lock while the buffer is busy */
- reiserfs_write_unlock(tb->tb_sb);
+ int depth = reiserfs_write_unlock_nested(tb->tb_sb);
__wait_on_buffer(*pcom_father);
- reiserfs_write_lock(tb->tb_sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (FILESYSTEM_CHANGED_TB(tb)) {
brelse(*pcom_father);
return REPEAT_SEARCH;
@@ -1929,9 +1929,9 @@ static int get_direct_parent(struct tree_balance *tb, int h)
return REPEAT_SEARCH;
if (buffer_locked(bh)) {
- reiserfs_write_unlock(tb->tb_sb);
+ int depth = reiserfs_write_unlock_nested(tb->tb_sb);
__wait_on_buffer(bh);
- reiserfs_write_lock(tb->tb_sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -1952,6 +1952,7 @@ static int get_neighbors(struct tree_balance *tb, int h)
unsigned long son_number;
struct super_block *sb = tb->tb_sb;
struct buffer_head *bh;
+ int depth;
PROC_INFO_INC(sb, get_neighbors[h]);
@@ -1969,9 +1970,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
FL[h]);
son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(tb->tb_sb);
bh = sb_bread(sb, son_number);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2009,9 +2010,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
child_position =
(bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(tb->tb_sb);
bh = sb_bread(sb, son_number);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2272,6 +2273,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
}
if (locked) {
+ int depth;
#ifdef CONFIG_REISERFS_CHECK
repeat_counter++;
if ((repeat_counter % 10000) == 0) {
@@ -2286,9 +2288,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
REPEAT_SEARCH : CARRY_ON;
}
#endif
- reiserfs_write_unlock(tb->tb_sb);
+ depth = reiserfs_write_unlock_nested(tb->tb_sb);
__wait_on_buffer(locked);
- reiserfs_write_lock(tb->tb_sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -2359,9 +2361,9 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
/* if it possible in indirect_to_direct conversion */
if (buffer_locked(tbS0)) {
- reiserfs_write_unlock(tb->tb_sb);
+ int depth = reiserfs_write_unlock_nested(tb->tb_sb);
__wait_on_buffer(tbS0);
- reiserfs_write_lock(tb->tb_sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 0048cc1..ad62bdb 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -30,7 +30,6 @@ void reiserfs_evict_inode(struct inode *inode)
JOURNAL_PER_BALANCE_CNT * 2 +
2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
struct reiserfs_transaction_handle th;
- int depth;
int err;
if (!inode->i_nlink && !is_bad_inode(inode))
@@ -40,12 +39,13 @@ void reiserfs_evict_inode(struct inode *inode)
if (inode->i_nlink)
goto no_delete;
- depth = reiserfs_write_lock_once(inode->i_sb);
-
/* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
+
reiserfs_delete_xattrs(inode);
+ reiserfs_write_lock(inode->i_sb);
+
if (journal_begin(&th, inode->i_sb, jbegin_count))
goto out;
reiserfs_update_inode_transaction(inode);
@@ -57,8 +57,11 @@ void reiserfs_evict_inode(struct inode *inode)
/* Do quota update inside a transaction for journaled quotas. We must do that
* after delete_object so that quota updates go into the same transaction as
* stat data deletion */
- if (!err)
+ if (!err) {
+ int depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_inode(inode);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
+ }
if (journal_end(&th, inode->i_sb, jbegin_count))
goto out;
@@ -72,12 +75,12 @@ void reiserfs_evict_inode(struct inode *inode)
/* all items of file are deleted, so we can remove "save" link */
remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything
* about an error here */
+out:
+ reiserfs_write_unlock(inode->i_sb);
} else {
/* no object items are in the tree */
;
}
- out:
- reiserfs_write_unlock_once(inode->i_sb, depth);
clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
dquot_drop(inode);
inode->i_blocks = 0;
@@ -610,7 +613,6 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
__le32 *item;
int done;
int fs_gen;
- int lock_depth;
struct reiserfs_transaction_handle *th = NULL;
/* space reserved in transaction batch:
. 3 balancings in direct->indirect conversion
@@ -626,11 +628,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
loff_t new_offset =
(((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
version = get_inode_item_key_version(inode);
if (!file_capable(inode, block)) {
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
return -EFBIG;
}
@@ -642,7 +644,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
/* find number of block-th logical block of the file */
ret = _get_block_create_0(inode, block, bh_result,
create | GET_BLOCK_READ_DIRECT);
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
return ret;
}
/*
@@ -760,7 +762,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (!dangle && th)
retval = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
/* the item was found, so new blocks were not added to the file
** there is no need to make sure the inode is updated with this
@@ -1011,11 +1013,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
* long time. reschedule if needed and also release the write
* lock for others.
*/
- if (need_resched()) {
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
- schedule();
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
- }
+ reiserfs_cond_resched(inode->i_sb);
retval = search_for_position_by_key(inode->i_sb, &key, &path);
if (retval == IO_ERROR) {
@@ -1050,7 +1048,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
retval = err;
}
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
reiserfs_check_path(&path);
return retval;
}
@@ -1509,14 +1507,15 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
{
struct inode *inode;
struct reiserfs_iget_args args;
+ int depth;
args.objectid = key->on_disk_key.k_objectid;
args.dirid = key->on_disk_key.k_dir_id;
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
inode = iget5_locked(s, key->on_disk_key.k_objectid,
reiserfs_find_actor, reiserfs_init_locked_inode,
(void *)(&args));
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -1772,7 +1771,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
struct inode *inode,
struct reiserfs_security_handle *security)
{
- struct super_block *sb;
+ struct super_block *sb = dir->i_sb;
struct reiserfs_iget_args args;
INITIALIZE_PATH(path_to_key);
struct cpu_key key;
@@ -1780,12 +1779,13 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
struct stat_data sd;
int retval;
int err;
+ int depth;
BUG_ON(!th->t_trans_id);
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(sb);
err = dquot_alloc_inode(inode);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(sb, depth);
if (err)
goto out_end_trans;
if (!dir->i_nlink) {
@@ -1793,8 +1793,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
goto out_bad_inode;
}
- sb = dir->i_sb;
-
/* item head of new item */
ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
@@ -1812,10 +1810,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
err = insert_inode_locked4(inode, args.objectid,
reiserfs_find_actor, &args);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (err) {
err = -EINVAL;
goto out_bad_inode;
@@ -1941,7 +1939,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
}
if (reiserfs_posixacl(inode->i_sb)) {
+ reiserfs_write_unlock(inode->i_sb);
retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
+ reiserfs_write_lock(inode->i_sb);
if (retval) {
err = retval;
reiserfs_check_path(&path_to_key);
@@ -1956,7 +1956,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
inode->i_flags |= S_PRIVATE;
if (security->name) {
+ reiserfs_write_unlock(inode->i_sb);
retval = reiserfs_security_write(th, inode, security);
+ reiserfs_write_lock(inode->i_sb);
if (retval) {
err = retval;
reiserfs_check_path(&path_to_key);
@@ -1982,14 +1984,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
INODE_PKEY(inode)->k_objectid = 0;
/* Quota change must be inside a transaction for journaling */
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_inode(inode);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
out_end_trans:
journal_end(th, th->t_super, th->t_blocks_allocated);
- reiserfs_write_unlock(inode->i_sb);
/* Drop can be outside and it needs more credits so it's better to have it outside */
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_drop(inode);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
@@ -2103,9 +2107,8 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
int error;
struct buffer_head *bh = NULL;
int err2;
- int lock_depth;
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
if (inode->i_size > 0) {
error = grab_tail_page(inode, &page, &bh);
@@ -2174,7 +2177,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
page_cache_release(page);
}
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
return 0;
out:
@@ -2183,7 +2186,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
page_cache_release(page);
}
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
return error;
}
@@ -2648,10 +2651,11 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
struct inode *inode = page->mapping->host;
int ret;
int old_ref = 0;
+ int depth;
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
fix_tail_page_for_writing(page);
if (reiserfs_transaction_running(inode->i_sb)) {
@@ -2708,7 +2712,6 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
int update_sd = 0;
struct reiserfs_transaction_handle *th;
unsigned start;
- int lock_depth = 0;
bool locked = false;
if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
@@ -2737,7 +2740,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
*/
if (pos + copied > inode->i_size) {
struct reiserfs_transaction_handle myth;
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
locked = true;
/* If the file have grown beyond the border where it
can have a tail, unmark it as needing a tail
@@ -2768,7 +2771,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
}
if (th) {
if (!locked) {
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
locked = true;
}
if (!update_sd)
@@ -2780,7 +2783,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
out:
if (locked)
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
unlock_page(page);
page_cache_release(page);
@@ -2790,7 +2793,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
return ret == 0 ? copied : ret;
journal_error:
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
locked = false;
if (th) {
if (!update_sd)
@@ -2808,10 +2811,11 @@ int reiserfs_commit_write(struct file *f, struct page *page,
int ret = 0;
int update_sd = 0;
struct reiserfs_transaction_handle *th = NULL;
+ int depth;
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (reiserfs_transaction_running(inode->i_sb)) {
th = current->journal_info;
@@ -3110,7 +3114,6 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
unsigned int ia_valid;
- int depth;
int error;
error = inode_change_ok(inode, attr);
@@ -3122,13 +3125,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
if (is_quota_modification(inode, attr))
dquot_initialize(inode);
- depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
*/
if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
attr->ia_size > MAX_NON_LFS) {
+ reiserfs_write_unlock(inode->i_sb);
error = -EFBIG;
goto out;
}
@@ -3150,8 +3154,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
error = err;
}
- if (error)
+ if (error) {
+ reiserfs_write_unlock(inode->i_sb);
goto out;
+ }
/*
* file size is changed, ctime and mtime are
* to be updated
@@ -3159,6 +3165,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
}
}
+ reiserfs_write_unlock(inode->i_sb);
if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
@@ -3183,14 +3190,16 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
return error;
/* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */
+ reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
if (error)
goto out;
- reiserfs_write_unlock_once(inode->i_sb, depth);
error = dquot_transfer(inode, attr);
- depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
if (error) {
journal_end(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
goto out;
}
@@ -3202,17 +3211,11 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_gid = attr->ia_gid;
mark_inode_dirty(inode);
error = journal_end(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
if (error)
goto out;
}
- /*
- * Relax the lock here, as it might truncate the
- * inode pages and wait for inode pages locks.
- * To release such page lock, the owner needs the
- * reiserfs lock
- */
- reiserfs_write_unlock_once(inode->i_sb, depth);
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
error = inode_newsize_ok(inode, attr->ia_size);
@@ -3226,16 +3229,13 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
setattr_copy(inode, attr);
mark_inode_dirty(inode);
}
- depth = reiserfs_write_lock_once(inode->i_sb);
if (!error && reiserfs_posixacl(inode->i_sb)) {
if (attr->ia_valid & ATTR_MODE)
error = reiserfs_acl_chmod(inode);
}
- out:
- reiserfs_write_unlock_once(inode->i_sb, depth);
-
+out:
return error;
}
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 15cb5fe..946ccbf 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -167,7 +167,6 @@ int reiserfs_commit_write(struct file *f, struct page *page,
int reiserfs_unpack(struct inode *inode, struct file *filp)
{
int retval = 0;
- int depth;
int index;
struct page *page;
struct address_space *mapping;
@@ -183,11 +182,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
return 0;
}
- depth = reiserfs_write_lock_once(inode->i_sb);
-
/* we need to make sure nobody is changing the file size beneath us */
reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
+
write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */
if (write_from == 0) {
@@ -221,6 +220,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
out:
mutex_unlock(&inode->i_mutex);
- reiserfs_write_unlock_once(inode->i_sb, depth);
+ reiserfs_write_unlock(inode->i_sb);
return retval;
}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 742fdd4..73feacc4 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -947,9 +947,11 @@ static int reiserfs_async_progress_wait(struct super_block *s)
struct reiserfs_journal *j = SB_JOURNAL(s);
if (atomic_read(&j->j_async_throttle)) {
- reiserfs_write_unlock(s);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(s);
congestion_wait(BLK_RW_ASYNC, HZ / 10);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
return 0;
@@ -972,6 +974,7 @@ static int flush_commit_list(struct super_block *s,
struct reiserfs_journal *journal = SB_JOURNAL(s);
int retval = 0;
int write_len;
+ int depth;
reiserfs_check_lock_depth(s, "flush_commit_list");
@@ -1018,12 +1021,12 @@ static int flush_commit_list(struct super_block *s,
* We might sleep in numerous places inside
* write_ordered_buffers. Relax the write lock.
*/
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_bh_list);
if (ret < 0 && retval == 0)
retval = ret;
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
BUG_ON(!list_empty(&jl->j_bh_list));
/*
@@ -1043,9 +1046,9 @@ static int flush_commit_list(struct super_block *s,
tbh = journal_find_get_block(s, bn);
if (tbh) {
if (buffer_dirty(tbh)) {
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
ll_rw_block(WRITE, 1, &tbh);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
put_bh(tbh) ;
}
@@ -1057,17 +1060,17 @@ static int flush_commit_list(struct super_block *s,
(jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn);
- reiserfs_write_unlock(s);
- wait_on_buffer(tbh);
- reiserfs_write_lock(s);
+ depth = reiserfs_write_unlock_nested(s);
+ __wait_on_buffer(tbh);
+ reiserfs_write_lock_nested(s, depth);
// since we're using ll_rw_blk above, it might have skipped over
// a locked buffer. Double check here
//
/* redundant, sync_dirty_buffer() checks */
if (buffer_dirty(tbh)) {
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
sync_dirty_buffer(tbh);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
@@ -1091,12 +1094,12 @@ static int flush_commit_list(struct super_block *s,
if (buffer_dirty(jl->j_commit_bh))
BUG();
mark_buffer_dirty(jl->j_commit_bh) ;
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
if (reiserfs_barrier_flush(s))
__sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
else
sync_dirty_buffer(jl->j_commit_bh);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
/* If there was a write error in the journal - we can't commit this
@@ -1228,15 +1231,16 @@ static int _update_journal_header_block(struct super_block *sb,
{
struct reiserfs_journal_header *jh;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ int depth;
if (reiserfs_is_journal_aborted(journal))
return -EIO;
if (trans_id >= journal->j_last_flush_trans_id) {
if (buffer_locked((journal->j_header_bh))) {
- reiserfs_write_unlock(sb);
- wait_on_buffer((journal->j_header_bh));
- reiserfs_write_lock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
+ __wait_on_buffer(journal->j_header_bh);
+ reiserfs_write_lock_nested(sb, depth);
if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(sb, "journal-699",
@@ -1254,14 +1258,14 @@ static int _update_journal_header_block(struct super_block *sb,
jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
set_buffer_dirty(journal->j_header_bh);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
if (reiserfs_barrier_flush(sb))
__sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
else
sync_dirty_buffer(journal->j_header_bh);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
if (!buffer_uptodate(journal->j_header_bh)) {
reiserfs_warning(sb, "journal-837",
"IO error during journal replay");
@@ -1341,6 +1345,7 @@ static int flush_journal_list(struct super_block *s,
unsigned long j_len_saved = jl->j_len;
struct reiserfs_journal *journal = SB_JOURNAL(s);
int err = 0;
+ int depth;
BUG_ON(j_len_saved <= 0);
@@ -1495,9 +1500,9 @@ static int flush_journal_list(struct super_block *s,
"cn->bh is NULL");
}
- reiserfs_write_unlock(s);
- wait_on_buffer(cn->bh);
- reiserfs_write_lock(s);
+ depth = reiserfs_write_unlock_nested(s);
+ __wait_on_buffer(cn->bh);
+ reiserfs_write_lock_nested(s, depth);
if (!cn->bh) {
reiserfs_panic(s, "journal-1012",
@@ -1974,6 +1979,7 @@ static int journal_compare_desc_commit(struct super_block *sb,
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
+** NOTE: only called during fs mount
*/
static int journal_transaction_is_valid(struct super_block *sb,
struct buffer_head *d_bh,
@@ -2073,8 +2079,9 @@ static void brelse_array(struct buffer_head **heads, int num)
/*
** given the start, and values for the oldest acceptable transactions,
-** this either reads in a replays a transaction, or returns because the transaction
-** is invalid, or too old.
+** this either reads in a replays a transaction, or returns because the
+** transaction is invalid, or too old.
+** NOTE: only called during fs mount
*/
static int journal_read_transaction(struct super_block *sb,
unsigned long cur_dblock,
@@ -2208,10 +2215,7 @@ static int journal_read_transaction(struct super_block *sb,
ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
- reiserfs_write_unlock(sb);
wait_on_buffer(log_blocks[i]);
- reiserfs_write_lock(sb);
-
if (!buffer_uptodate(log_blocks[i])) {
reiserfs_warning(sb, "journal-1212",
"REPLAY FAILURE fsck required! "
@@ -2318,12 +2322,13 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
/*
** read and replay the log
-** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
-** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast.
-**
-** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
-**
+** on a clean unmount, the journal header's next unflushed pointer will
+** be to an invalid transaction. This tests that before finding all the
+** transactions in the log, which makes normal mount times fast.
+** After a crash, this starts with the next unflushed transaction, and
+** replays until it finds one too old, or invalid.
** On exit, it sets things up so the first transaction will work correctly.
+** NOTE: only called during fs mount
*/
static int journal_read(struct super_block *sb)
{
@@ -2501,14 +2506,18 @@ static int journal_read(struct super_block *sb)
"replayed %d transactions in %lu seconds\n",
replay_count, get_seconds() - start);
}
+ /* needed to satisfy the locking in _update_journal_header_block */
+ reiserfs_write_lock(sb);
if (!bdev_read_only(sb->s_bdev) &&
_update_journal_header_block(sb, journal->j_start,
journal->j_last_flush_trans_id)) {
+ reiserfs_write_unlock(sb);
/* replay failed, caller must call free_journal_ram and abort
** the mount
*/
return -1;
}
+ reiserfs_write_unlock(sb);
return 0;
}
@@ -2828,13 +2837,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
goto free_and_return;
}
- /*
- * Journal_read needs to be inspected in order to push down
- * the lock further inside (or even remove it).
- */
- reiserfs_write_lock(sb);
ret = journal_read(sb);
- reiserfs_write_unlock(sb);
if (ret < 0) {
reiserfs_warning(sb, "reiserfs-2006",
"Replay Failure, unable to mount");
@@ -2923,9 +2926,9 @@ static void queue_log_writer(struct super_block *s)
add_wait_queue(&journal->j_join_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
- reiserfs_write_unlock(s);
+ int depth = reiserfs_write_unlock_nested(s);
schedule();
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&journal->j_join_wait, &wait);
@@ -2943,9 +2946,12 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
struct reiserfs_journal *journal = SB_JOURNAL(sb);
unsigned long bcount = journal->j_bcount;
while (1) {
- reiserfs_write_unlock(sb);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(sb);
schedule_timeout_uninterruptible(1);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
+
journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
while ((atomic_read(&journal->j_wcount) > 0 ||
atomic_read(&journal->j_jlock)) &&
@@ -2976,6 +2982,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
struct reiserfs_transaction_handle myth;
int sched_count = 0;
int retval;
+ int depth;
reiserfs_check_lock_depth(sb, "journal_begin");
BUG_ON(nblocks > journal->j_trans_max);
@@ -2996,9 +3003,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
unlock_journal(sb);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
reiserfs_wait_on_write_block(sb);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
PROC_INFO_INC(sb, journal.journal_relock_writers);
goto relock;
}
@@ -3821,6 +3828,7 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
if (test_clear_buffer_journal_restore_dirty(bh) &&
buffer_journal_dirty(bh)) {
struct reiserfs_journal_cnode *cn;
+ reiserfs_write_lock(sb);
cn = get_journal_hash_dev(sb,
journal->j_list_hash_table,
bh->b_blocknr);
@@ -3828,6 +3836,7 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
set_buffer_journal_test(bh);
mark_buffer_dirty(bh);
}
+ reiserfs_write_unlock(sb);
}
clear_buffer_journal_prepared(bh);
}
@@ -3911,6 +3920,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
unsigned long jindex;
unsigned int commit_trans_id;
int trans_half;
+ int depth;
BUG_ON(th->t_refcount > 1);
BUG_ON(!th->t_trans_id);
@@ -4116,9 +4126,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
next = cn->next;
free_cnode(sb, cn);
cn = next;
- reiserfs_write_unlock(sb);
- cond_resched();
- reiserfs_write_lock(sb);
+ reiserfs_cond_resched(sb);
}
/* we are done with both the c_bh and d_bh, but
@@ -4165,10 +4173,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
* is lost.
*/
if (!list_empty(&jl->j_tail_bh_list)) {
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_tail_bh_list);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
}
BUG_ON(!list_empty(&jl->j_tail_bh_list));
mutex_unlock(&jl->j_commit_mutex);
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
index d735bc8..045b83e 100644
--- a/fs/reiserfs/lock.c
+++ b/fs/reiserfs/lock.c
@@ -48,30 +48,35 @@ void reiserfs_write_unlock(struct super_block *s)
}
}
-/*
- * If we already own the lock, just exit and don't increase the depth.
- * Useful when we don't want to lock more than once.
- *
- * We always return the lock_depth we had before calling
- * this function.
- */
-int reiserfs_write_lock_once(struct super_block *s)
+int __must_check reiserfs_write_unlock_nested(struct super_block *s)
{
struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+ int depth;
- if (sb_i->lock_owner != current) {
- mutex_lock(&sb_i->lock);
- sb_i->lock_owner = current;
- return sb_i->lock_depth++;
- }
+ /* this can happen when the lock isn't always held */
+ if (sb_i->lock_owner != current)
+ return -1;
+
+ depth = sb_i->lock_depth;
+
+ sb_i->lock_depth = -1;
+ sb_i->lock_owner = NULL;
+ mutex_unlock(&sb_i->lock);
- return sb_i->lock_depth;
+ return depth;
}
-void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
+void reiserfs_write_lock_nested(struct super_block *s, int depth)
{
- if (lock_depth == -1)
- reiserfs_write_unlock(s);
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ /* this can happen when the lock isn't always held */
+ if (depth == -1)
+ return;
+
+ mutex_lock(&sb_i->lock);
+ sb_i->lock_owner = current;
+ sb_i->lock_depth = depth;
}
/*
@@ -82,9 +87,7 @@ void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
- if (sb_i->lock_depth < 0)
- reiserfs_panic(sb, "%s called without kernel lock held %d",
- caller);
+ WARN_ON(sb_i->lock_depth < 0);
}
#ifdef CONFIG_REISERFS_CHECK
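
The nested variants replace reiserfs_write_lock_once()/reiserfs_write_unlock_once(): instead of conditionally taking the lock, callers drop it around anything that may sleep and restore the recorded depth afterwards. The recurring pattern throughout this series, as a minimal sketch:

	int depth;

	depth = reiserfs_write_unlock_nested(sb);	/* returns -1 if we didn't hold it */
	__wait_on_buffer(bh);				/* any sleeping operation */
	reiserfs_write_lock_nested(sb, depth);		/* no-op when depth == -1 */
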
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 8567fb8..dc5236f 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -325,7 +325,6 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
int retval;
- int lock_depth;
struct inode *inode = NULL;
struct reiserfs_dir_entry de;
INITIALIZE_PATH(path_to_entry);
@@ -333,12 +332,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len)
return ERR_PTR(-ENAMETOOLONG);
- /*
- * Might be called with or without the write lock, must be careful
- * to not recursively hold it in case we want to release the lock
- * before rescheduling.
- */
- lock_depth = reiserfs_write_lock_once(dir->i_sb);
+ reiserfs_write_lock(dir->i_sb);
de.de_gen_number_bit_string = NULL;
retval =
@@ -349,7 +343,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
inode = reiserfs_iget(dir->i_sb,
(struct cpu_key *)&(de.de_dir_id));
if (!inode || IS_ERR(inode)) {
- reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+ reiserfs_write_unlock(dir->i_sb);
return ERR_PTR(-EACCES);
}
@@ -358,7 +352,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (IS_PRIVATE(dir))
inode->i_flags |= S_PRIVATE;
}
- reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+ reiserfs_write_unlock(dir->i_sb);
if (retval == IO_ERROR) {
return ERR_PTR(-EIO);
}
@@ -727,7 +721,6 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct inode *inode;
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
- int lock_depth;
/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
@@ -753,7 +746,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
return retval;
}
jbegin_count += retval;
- lock_depth = reiserfs_write_lock_once(dir->i_sb);
+ reiserfs_write_lock(dir->i_sb);
retval = journal_begin(&th, dir->i_sb, jbegin_count);
if (retval) {
@@ -804,7 +797,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
d_instantiate(dentry, inode);
retval = journal_end(&th, dir->i_sb, jbegin_count);
out_failed:
- reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+ reiserfs_write_unlock(dir->i_sb);
return retval;
}
@@ -920,7 +913,6 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
struct reiserfs_transaction_handle th;
int jbegin_count;
unsigned long savelink;
- int depth;
dquot_initialize(dir);
@@ -934,7 +926,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
- depth = reiserfs_write_lock_once(dir->i_sb);
+ reiserfs_write_lock(dir->i_sb);
retval = journal_begin(&th, dir->i_sb, jbegin_count);
if (retval)
goto out_unlink;
@@ -995,7 +987,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
retval = journal_end(&th, dir->i_sb, jbegin_count);
reiserfs_check_path(&path);
- reiserfs_write_unlock_once(dir->i_sb, depth);
+ reiserfs_write_unlock(dir->i_sb);
return retval;
end_unlink:
@@ -1005,7 +997,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
if (err)
retval = err;
out_unlink:
- reiserfs_write_unlock_once(dir->i_sb, depth);
+ reiserfs_write_unlock(dir->i_sb);
return retval;
}
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index c0b1112..54944d5 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -358,12 +358,13 @@ void __reiserfs_panic(struct super_block *sb, const char *id,
dump_stack();
#endif
if (sb)
- panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
+ printk(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
sb->s_id, id ? id : "", id ? " " : "",
function, error_buf);
else
- panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
+ printk(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
id ? id : "", id ? " " : "", function, error_buf);
+ BUG();
}
void __reiserfs_error(struct super_block *sb, const char *id,
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 3df5ce6..f8adaee 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -630,8 +630,8 @@ static inline int __reiserfs_is_journal_aborted(struct reiserfs_journal
*/
void reiserfs_write_lock(struct super_block *s);
void reiserfs_write_unlock(struct super_block *s);
-int reiserfs_write_lock_once(struct super_block *s);
-void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
+int __must_check reiserfs_write_unlock_nested(struct super_block *s);
+void reiserfs_write_lock_nested(struct super_block *s, int depth);
#ifdef CONFIG_REISERFS_CHECK
void reiserfs_lock_check_recursive(struct super_block *s);
@@ -667,31 +667,33 @@ static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
* - The inode mutex
*/
static inline void reiserfs_mutex_lock_safe(struct mutex *m,
- struct super_block *s)
+ struct super_block *s)
{
- reiserfs_lock_check_recursive(s);
- reiserfs_write_unlock(s);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(s);
mutex_lock(m);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
static inline void
reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
- struct super_block *s)
+ struct super_block *s)
{
- reiserfs_lock_check_recursive(s);
- reiserfs_write_unlock(s);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(s);
mutex_lock_nested(m, subclass);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
static inline void
reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
{
- reiserfs_lock_check_recursive(s);
- reiserfs_write_unlock(s);
- down_read(sem);
- reiserfs_write_lock(s);
+ int depth;
+ depth = reiserfs_write_unlock_nested(s);
+ down_read(sem);
+ reiserfs_write_lock_nested(s, depth);
}
/*
@@ -701,9 +703,11 @@ reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
static inline void reiserfs_cond_resched(struct super_block *s)
{
if (need_resched()) {
- reiserfs_write_unlock(s);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(s);
schedule();
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
}
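
The inline helpers above (reiserfs_mutex_lock_safe(), reiserfs_down_read_safe(), reiserfs_cond_resched()) all rely on the same property of the nested API: unlocking without owning the lock returns -1, and relocking with depth == -1 is a no-op, so they are safe on paths that may or may not hold the write lock. A caller-side sketch:

	/* Works whether or not the caller currently owns the write lock:
	 * the unlock returns -1 in that case and the relock is a no-op. */
	reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);

	/* In long loops, drop the write lock across schedule() when needed: */
	reiserfs_cond_resched(inode->i_sb);
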
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 3ce02cf..a4ef5cd 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -34,6 +34,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
unsigned long int block_count, free_blocks;
int i;
int copy_size;
+ int depth;
sb = SB_DISK_SUPER_BLOCK(s);
@@ -43,7 +44,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
}
/* check the device size */
+ depth = reiserfs_write_unlock_nested(s);
bh = sb_bread(s, block_count_new - 1);
+ reiserfs_write_lock_nested(s, depth);
if (!bh) {
printk("reiserfs_resize: can\'t read last block\n");
return -EINVAL;
@@ -125,9 +128,12 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
* transaction begins, and the new bitmaps don't matter if the
* transaction fails. */
for (i = bmap_nr; i < bmap_nr_new; i++) {
+ int depth;
/* don't use read_bitmap_block since it will cache
* the uninitialized bitmap */
+ depth = reiserfs_write_unlock_nested(s);
bh = sb_bread(s, i * s->s_blocksize * 8);
+ reiserfs_write_lock_nested(s, depth);
if (!bh) {
vfree(bitmap);
return -EIO;
@@ -138,9 +144,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
sync_dirty_buffer(bh);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
// update bitmap_info stuff
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
brelse(bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 2f40a4c..b14706a 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -524,14 +524,14 @@ static int is_tree_node(struct buffer_head *bh, int level)
* the caller (search_by_key) will perform other schedule-unsafe
* operations just after calling this function.
*
- * @return true if we have unlocked
+ * @return depth of lock to be restored after read completes
*/
-static bool search_by_key_reada(struct super_block *s,
+static int search_by_key_reada(struct super_block *s,
struct buffer_head **bh,
b_blocknr_t *b, int num)
{
int i, j;
- bool unlocked = false;
+ int depth = -1;
for (i = 0; i < num; i++) {
bh[i] = sb_getblk(s, b[i]);
@@ -549,15 +549,13 @@ static bool search_by_key_reada(struct super_block *s,
* you have to make sure the prepared bit isn't set on this buffer
*/
if (!buffer_uptodate(bh[j])) {
- if (!unlocked) {
- reiserfs_write_unlock(s);
- unlocked = true;
- }
+ if (depth == -1)
+ depth = reiserfs_write_unlock_nested(s);
ll_rw_block(READA, 1, bh + j);
}
brelse(bh[j]);
}
- return unlocked;
+ return depth;
}
/**************************************************************************
@@ -645,26 +643,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
have a pointer to it. */
if ((bh = last_element->pe_buffer =
sb_getblk(sb, block_number))) {
- bool unlocked = false;
- if (!buffer_uptodate(bh) && reada_count > 1)
- /* may unlock the write lock */
- unlocked = search_by_key_reada(sb, reada_bh,
- reada_blocks, reada_count);
/*
- * If we haven't already unlocked the write lock,
- * then we need to do that here before reading
- * the current block
+ * We'll need to drop the lock if we encounter any
+ * buffers that need to be read. If all of them are
+ * already up to date, we don't need to drop the lock.
*/
- if (!buffer_uptodate(bh) && !unlocked) {
- reiserfs_write_unlock(sb);
- unlocked = true;
- }
+ int depth = -1;
+
+ if (!buffer_uptodate(bh) && reada_count > 1)
+ depth = search_by_key_reada(sb, reada_bh,
+ reada_blocks, reada_count);
+
+ if (!buffer_uptodate(bh) && depth == -1)
+ depth = reiserfs_write_unlock_nested(sb);
+
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
- if (unlocked)
- reiserfs_write_lock(sb);
+ if (depth != -1)
+ reiserfs_write_lock_nested(sb, depth);
if (!buffer_uptodate(bh))
goto io_error;
} else {
@@ -1059,9 +1057,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
reiserfs_free_block(th, inode, block, 1);
}
- reiserfs_write_unlock(sb);
- cond_resched();
- reiserfs_write_lock(sb);
+ reiserfs_cond_resched(sb);
if (item_moved (&s_ih, path)) {
need_re_search = 1;
@@ -1190,6 +1186,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
struct item_head *q_ih;
int quota_cut_bytes;
int ret_value, del_size, removed;
+ int depth;
#ifdef CONFIG_REISERFS_CHECK
char mode;
@@ -1299,7 +1296,9 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
"reiserquota delete_item(): freeing %u, id=%u type=%c",
quota_cut_bytes, inode->i_uid, head2type(&s_ih));
#endif
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_space_nodirty(inode, quota_cut_bytes);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
/* Return deleted body length */
return ret_value;
@@ -1325,6 +1324,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
struct inode *inode, struct reiserfs_key *key)
{
+ struct super_block *sb = th->t_super;
struct tree_balance tb;
INITIALIZE_PATH(path);
int item_len = 0;
@@ -1377,14 +1377,17 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
if (retval == CARRY_ON) {
do_balance(&tb, NULL, NULL, M_DELETE);
if (inode) { /* Should we count quota for item? (we don't count quotas for save-links) */
+ int depth;
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
"reiserquota delete_solid_item(): freeing %u id=%u type=%c",
quota_cut_bytes, inode->i_uid,
key2type(key));
#endif
+ depth = reiserfs_write_unlock_nested(sb);
dquot_free_space_nodirty(inode,
quota_cut_bytes);
+ reiserfs_write_lock_nested(sb, depth);
}
break;
}
@@ -1561,6 +1564,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
int retval2 = -1;
int quota_cut_bytes;
loff_t tail_pos = 0;
+ int depth;
BUG_ON(!th->t_trans_id);
@@ -1733,7 +1737,9 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
"reiserquota cut_from_item(): freeing %u id=%u type=%c",
quota_cut_bytes, inode->i_uid, '?');
#endif
+ depth = reiserfs_write_unlock_nested(sb);
dquot_free_space_nodirty(inode, quota_cut_bytes);
+ reiserfs_write_lock_nested(sb, depth);
return ret_value;
}
@@ -1953,9 +1959,11 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
const char *body, /* Pointer to the bytes to paste. */
int pasted_size)
{ /* Size of pasted bytes. */
+ struct super_block *sb = inode->i_sb;
struct tree_balance s_paste_balance;
int retval;
int fs_gen;
+ int depth;
BUG_ON(!th->t_trans_id);
@@ -1968,9 +1976,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
key2type(&(key->on_disk_key)));
#endif
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(sb);
retval = dquot_alloc_space_nodirty(inode, pasted_size);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(sb, depth);
if (retval) {
pathrelse(search_path);
return retval;
@@ -2027,7 +2035,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
pasted_size, inode->i_uid,
key2type(&(key->on_disk_key)));
#endif
+ depth = reiserfs_write_unlock_nested(sb);
dquot_free_space_nodirty(inode, pasted_size);
+ reiserfs_write_lock_nested(sb, depth);
return retval;
}
@@ -2050,6 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
if (inode) { /* Do we count quotas for item? */
+ int depth;
fs_gen = get_generation(inode->i_sb);
quota_bytes = ih_item_len(ih);
@@ -2063,11 +2074,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
"reiserquota insert_item(): allocating %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
- reiserfs_write_unlock(inode->i_sb);
/* We can't dirty inode here. It would be immediately written but
* appropriate stat item isn't inserted yet... */
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
retval = dquot_alloc_space_nodirty(inode, quota_bytes);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (retval) {
pathrelse(path);
return retval;
@@ -2118,7 +2129,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
"reiserquota insert_item(): freeing %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
- if (inode)
+ if (inode) {
+ int depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_space_nodirty(inode, quota_bytes);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
+ }
return retval;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index e2e202a..3ead145 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -243,6 +243,7 @@ static int finish_unfinished(struct super_block *s)
done = 0;
REISERFS_SB(s)->s_is_unlinked_ok = 1;
while (!retval) {
+ int depth;
retval = search_item(s, &max_cpu_key, &path);
if (retval != ITEM_NOT_FOUND) {
reiserfs_error(s, "vs-2140",
@@ -298,9 +299,9 @@ static int finish_unfinished(struct super_block *s)
retval = remove_save_link_only(s, &save_link_key, 0);
continue;
}
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_initialize(inode);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (truncate && S_ISDIR(inode->i_mode)) {
/* We got a truncate request for a dir which is impossible.
@@ -356,10 +357,12 @@ static int finish_unfinished(struct super_block *s)
#ifdef CONFIG_QUOTA
/* Turn quotas off */
+ reiserfs_write_unlock(s);
for (i = 0; i < MAXQUOTAS; i++) {
if (sb_dqopt(s)->files[i] && quota_enabled[i])
dquot_quota_off(s, i);
}
+ reiserfs_write_lock(s);
if (ms_active_set)
/* Restore the flag back */
s->s_flags &= ~MS_ACTIVE;
@@ -623,7 +626,6 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
struct reiserfs_transaction_handle th;
int err = 0;
- int lock_depth;
if (inode->i_sb->s_flags & MS_RDONLY) {
reiserfs_warning(inode->i_sb, "clm-6006",
@@ -631,7 +633,7 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
inode->i_ino);
return;
}
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
/* this is really only used for atime updates, so they don't have
** to be included in O_SYNC or fsync
@@ -644,7 +646,7 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
journal_end(&th, inode->i_sb, 1);
out:
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
}
static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
@@ -1334,7 +1336,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
kfree(qf_names[i]);
#endif
err = -EINVAL;
- goto out_unlock;
+ goto out_err_unlock;
}
#ifdef CONFIG_QUOTA
handle_quota_files(s, qf_names, &qfmt);
@@ -1378,35 +1380,32 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
if (blocks) {
err = reiserfs_resize(s, blocks);
if (err != 0)
- goto out_unlock;
+ goto out_err_unlock;
}
if (*mount_flags & MS_RDONLY) {
+ reiserfs_write_unlock(s);
reiserfs_xattr_init(s, *mount_flags);
/* remount read-only */
if (s->s_flags & MS_RDONLY)
/* it is read-only already */
- goto out_ok;
+ goto out_ok_unlocked;
- /*
- * Drop write lock. Quota will retake it when needed and lock
- * ordering requires calling dquot_suspend() without it.
- */
- reiserfs_write_unlock(s);
err = dquot_suspend(s, -1);
if (err < 0)
goto out_err;
- reiserfs_write_lock(s);
/* try to remount file system with read-only permissions */
if (sb_umount_state(rs) == REISERFS_VALID_FS
|| REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) {
- goto out_ok;
+ goto out_ok_unlocked;
}
+ reiserfs_write_lock(s);
+
err = journal_begin(&th, s, 10);
if (err)
- goto out_unlock;
+ goto out_err_unlock;
/* Mounting a rw partition read-only. */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1415,13 +1414,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
} else {
/* remount read-write */
if (!(s->s_flags & MS_RDONLY)) {
+ reiserfs_write_unlock(s);
reiserfs_xattr_init(s, *mount_flags);
- goto out_ok; /* We are read-write already */
+ goto out_ok_unlocked; /* We are read-write already */
}
if (reiserfs_is_journal_aborted(journal)) {
err = journal->j_errno;
- goto out_unlock;
+ goto out_err_unlock;
}
handle_data_mode(s, mount_options);
@@ -1430,7 +1430,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
err = journal_begin(&th, s, 10);
if (err)
- goto out_unlock;
+ goto out_err_unlock;
/* Mount a partition which is read-only, read-write */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1447,26 +1447,22 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
SB_JOURNAL(s)->j_must_wait = 1;
err = journal_end(&th, s, 10);
if (err)
- goto out_unlock;
+ goto out_err_unlock;
+ reiserfs_write_unlock(s);
if (!(*mount_flags & MS_RDONLY)) {
- /*
- * Drop write lock. Quota will retake it when needed and lock
- * ordering requires calling dquot_resume() without it.
- */
- reiserfs_write_unlock(s);
dquot_resume(s, -1);
reiserfs_write_lock(s);
finish_unfinished(s);
+ reiserfs_write_unlock(s);
reiserfs_xattr_init(s, *mount_flags);
}
-out_ok:
+out_ok_unlocked:
replace_mount_options(s, new_opts);
- reiserfs_write_unlock(s);
return 0;
-out_unlock:
+out_err_unlock:
reiserfs_write_unlock(s);
out_err:
kfree(new_opts);
@@ -2013,12 +2009,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
goto error;
}
+ reiserfs_write_unlock(s);
if ((errval = reiserfs_lookup_privroot(s)) ||
(errval = reiserfs_xattr_init(s, s->s_flags))) {
dput(s->s_root);
s->s_root = NULL;
- goto error;
+ goto error_unlocked;
}
+ reiserfs_write_lock(s);
/* look for files which were to be removed in previous session */
finish_unfinished(s);
@@ -2027,12 +2025,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
reiserfs_info(s, "using 3.5.x disk format\n");
}
+ reiserfs_write_unlock(s);
if ((errval = reiserfs_lookup_privroot(s)) ||
(errval = reiserfs_xattr_init(s, s->s_flags))) {
dput(s->s_root);
s->s_root = NULL;
- goto error;
+ goto error_unlocked;
}
+ reiserfs_write_lock(s);
}
// mark hash in super block: it could be unset. overwrite should be ok
set_sb_hash_function_code(rs, function2code(sbi->s_hash_function));
@@ -2100,6 +2100,7 @@ static int reiserfs_write_dquot(struct dquot *dquot)
{
struct reiserfs_transaction_handle th;
int ret, err;
+ int depth;
reiserfs_write_lock(dquot->dq_sb);
ret =
@@ -2107,9 +2108,9 @@ static int reiserfs_write_dquot(struct dquot *dquot)
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (ret)
goto out;
- reiserfs_write_unlock(dquot->dq_sb);
+ depth = reiserfs_write_unlock_nested(dquot->dq_sb);
ret = dquot_commit(dquot);
- reiserfs_write_lock(dquot->dq_sb);
+ reiserfs_write_lock_nested(dquot->dq_sb, depth);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
@@ -2124,6 +2125,7 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
{
struct reiserfs_transaction_handle th;
int ret, err;
+ int depth;
reiserfs_write_lock(dquot->dq_sb);
ret =
@@ -2131,9 +2133,9 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
if (ret)
goto out;
- reiserfs_write_unlock(dquot->dq_sb);
+ depth = reiserfs_write_unlock_nested(dquot->dq_sb);
ret = dquot_acquire(dquot);
- reiserfs_write_lock(dquot->dq_sb);
+ reiserfs_write_lock_nested(dquot->dq_sb, depth);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
@@ -2186,15 +2188,16 @@ static int reiserfs_write_info(struct super_block *sb, int type)
{
struct reiserfs_transaction_handle th;
int ret, err;
+ int depth;
/* Data block + inode block */
reiserfs_write_lock(sb);
ret = journal_begin(&th, sb, 2);
if (ret)
goto out;
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
ret = dquot_commit_info(sb, type);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
err = journal_end(&th, sb, 2);
if (!ret && err)
ret = err;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index c69cdd7..8a9e2dc 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -81,8 +81,7 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry)
int error;
BUG_ON(!mutex_is_locked(&dir->i_mutex));
- reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
- I_MUTEX_CHILD, dir->i_sb);
+ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
error = dir->i_op->unlink(dir, dentry);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -96,8 +95,7 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
int error;
BUG_ON(!mutex_is_locked(&dir->i_mutex));
- reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
- I_MUTEX_CHILD, dir->i_sb);
+ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
error = dir->i_op->rmdir(dir, dentry);
if (!error)
dentry->d_inode->i_flags |= S_DEAD;
@@ -232,22 +230,17 @@ static int reiserfs_for_each_xattr(struct inode *inode,
if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
return 0;
- reiserfs_write_unlock(inode->i_sb);
dir = open_xa_dir(inode, XATTR_REPLACE);
if (IS_ERR(dir)) {
err = PTR_ERR(dir);
- reiserfs_write_lock(inode->i_sb);
goto out;
} else if (!dir->d_inode) {
err = 0;
- reiserfs_write_lock(inode->i_sb);
goto out_dir;
}
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
- reiserfs_write_lock(inode->i_sb);
-
buf.xadir = dir;
while (1) {
err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
@@ -281,14 +274,17 @@ static int reiserfs_for_each_xattr(struct inode *inode,
int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
struct reiserfs_transaction_handle th;
+ reiserfs_write_lock(inode->i_sb);
err = journal_begin(&th, inode->i_sb, blocks);
+ reiserfs_write_unlock(inode->i_sb);
if (!err) {
int jerror;
- reiserfs_mutex_lock_nested_safe(
- &dir->d_parent->d_inode->i_mutex,
- I_MUTEX_XATTR, inode->i_sb);
+ mutex_lock_nested(&dir->d_parent->d_inode->i_mutex,
+ I_MUTEX_XATTR);
err = action(dir, data);
+ reiserfs_write_lock(inode->i_sb);
jerror = journal_end(&th, inode->i_sb, blocks);
+ reiserfs_write_unlock(inode->i_sb);
mutex_unlock(&dir->d_parent->d_inode->i_mutex);
err = jerror ?: err;
}
@@ -455,9 +451,7 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name)
}
if (dentry->d_inode) {
- reiserfs_write_lock(inode->i_sb);
err = xattr_unlink(xadir->d_inode, dentry);
- reiserfs_write_unlock(inode->i_sb);
update_ctime(inode);
}
@@ -491,24 +485,17 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- reiserfs_write_unlock(inode->i_sb);
-
if (!buffer) {
err = lookup_and_delete_xattr(inode, name);
- reiserfs_write_lock(inode->i_sb);
return err;
}
dentry = xattr_lookup(inode, name, flags);
- if (IS_ERR(dentry)) {
- reiserfs_write_lock(inode->i_sb);
+ if (IS_ERR(dentry))
return PTR_ERR(dentry);
- }
down_write(&REISERFS_I(inode)->i_xattr_sem);
- reiserfs_write_lock(inode->i_sb);
-
xahash = xattr_hash(buffer, buffer_size);
while (buffer_pos < buffer_size || buffer_pos == 0) {
size_t chunk;
@@ -538,6 +525,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
rxh->h_hash = cpu_to_le32(xahash);
}
+ reiserfs_write_lock(inode->i_sb);
err = __reiserfs_write_begin(page, page_offset, chunk + skip);
if (!err) {
if (buffer)
@@ -546,6 +534,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
page_offset + chunk +
skip);
}
+ reiserfs_write_unlock(inode->i_sb);
unlock_page(page);
reiserfs_put_page(page);
buffer_pos += chunk;
@@ -563,10 +552,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
- reiserfs_write_unlock(inode->i_sb);
mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
inode_dio_wait(dentry->d_inode);
- reiserfs_write_lock(inode->i_sb);
err = reiserfs_setattr(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -592,18 +579,19 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
if (error) {
- reiserfs_write_unlock(inode->i_sb);
return error;
}
error = reiserfs_xattr_set_handle(&th, inode, name,
buffer, buffer_size, flags);
+ reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
if (error == 0)
error = error2;
- reiserfs_write_unlock(inode->i_sb);
return error;
}
@@ -968,7 +956,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
int err = 0;
/* If we don't have the privroot located yet - go find it */
- reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
+ mutex_lock(&s->s_root->d_inode->i_mutex);
dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
@@ -996,14 +984,14 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
goto error;
if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
- reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
+ mutex_lock(&s->s_root->d_inode->i_mutex);
err = create_privroot(REISERFS_SB(s)->priv_root);
mutex_unlock(&s->s_root->d_inode->i_mutex);
}
if (privroot->d_inode) {
s->s_xattr = reiserfs_xattr_handlers;
- reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
+ mutex_lock(&privroot->d_inode->i_mutex);
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
dentry = lookup_one_len(XAROOT_NAME, privroot,
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 6c8767f..06c04f7 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -49,13 +49,15 @@ posix_acl_set(struct dentry *dentry, const char *name, const void *value,
reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, jcreate_blocks);
+ reiserfs_write_unlock(inode->i_sb);
if (error == 0) {
error = reiserfs_set_acl(&th, inode, type, acl);
+ reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th, inode->i_sb, jcreate_blocks);
+ reiserfs_write_unlock(inode->i_sb);
if (error2)
error = error2;
}
- reiserfs_write_unlock(inode->i_sb);
release_and_out:
posix_acl_release(acl);
@@ -435,12 +437,14 @@ int reiserfs_cache_default_acl(struct inode *inode)
return nblocks;
}
+/*
+ * Called under i_mutex
+ */
int reiserfs_acl_chmod(struct inode *inode)
{
struct reiserfs_transaction_handle th;
struct posix_acl *acl;
size_t size;
- int depth;
int error;
if (IS_PRIVATE(inode))
@@ -454,9 +458,7 @@ int reiserfs_acl_chmod(struct inode *inode)
return 0;
}
- reiserfs_write_unlock(inode->i_sb);
acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
- reiserfs_write_lock(inode->i_sb);
if (!acl)
return 0;
if (IS_ERR(acl))
@@ -466,16 +468,18 @@ int reiserfs_acl_chmod(struct inode *inode)
return error;
size = reiserfs_xattr_nblocks(inode, reiserfs_acl_size(acl->a_count));
- depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, size * 2);
+ reiserfs_write_unlock(inode->i_sb);
if (!error) {
int error2;
error = reiserfs_set_acl(&th, inode, ACL_TYPE_ACCESS, acl);
+ reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th, inode->i_sb, size * 2);
+ reiserfs_write_unlock(inode->i_sb);
if (error2)
error = error2;
}
- reiserfs_write_unlock_once(inode->i_sb, depth);
posix_acl_release(acl);
return error;
}
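
The xattr and ACL hunks converge on the same shape: hold the superblock write lock only across journal_begin() and journal_end(), do the actual attribute work unlocked, and fold any journal_end() failure into the returned error. A minimal sketch of that control flow follows; the helper names are stand-ins, not the reiserfs functions.

#include <stdio.h>

/* Stand-ins for the primitives used in the hunks above. */
static void fs_lock(void)              { puts("write lock"); }
static void fs_unlock(void)            { puts("write unlock"); }
static int  journal_begin(int blocks)  { printf("journal_begin(%d)\n", blocks); return 0; }
static int  journal_end(int blocks)    { printf("journal_end(%d)\n", blocks); return 0; }
static int  set_attr(void)             { puts("attribute work runs without the write lock"); return 0; }

static int attr_set(int blocks)
{
        int error, error2;

        fs_lock();
        error = journal_begin(blocks);
        fs_unlock();
        if (error)
                return error;

        error = set_attr();             /* the real work, done unlocked */

        fs_lock();
        error2 = journal_end(blocks);
        fs_unlock();

        /* prefer the main operation's error; otherwise report journal_end()'s */
        return error ? error : error2;
}

int main(void)
{
        return attr_set(4);
}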
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 9ac4057..839a2ba 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -630,6 +630,12 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
struct udf_sb_info *sbi = UDF_SB(sb);
int error = 0;
+ if (sbi->s_lvid_bh) {
+ int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
+ if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+ return -EACCES;
+ }
+
uopt.flags = sbi->s_flags;
uopt.uid = sbi->s_uid;
uopt.gid = sbi->s_gid;
@@ -649,12 +655,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
sbi->s_dmode = uopt.dmode;
write_unlock(&sbi->s_cred_lock);
- if (sbi->s_lvid_bh) {
- int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
- if (write_rev > UDF_MAX_WRITE_VERSION)
- *flags |= MS_RDONLY;
- }
-
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
goto out_unlock;
@@ -843,27 +843,38 @@ static int udf_find_fileset(struct super_block *sb,
return 1;
}
+/*
+ * Load primary Volume Descriptor Sequence
+ *
+ * Return <0 on error, 0 on success. -EAGAIN is special and means the next
+ * sequence should be tried.
+ */
static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
{
struct primaryVolDesc *pvoldesc;
struct ustr *instr, *outstr;
struct buffer_head *bh;
uint16_t ident;
- int ret = 1;
+ int ret = -ENOMEM;
instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!instr)
- return 1;
+ return -ENOMEM;
outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!outstr)
goto out1;
bh = udf_read_tagged(sb, block, block, &ident);
- if (!bh)
+ if (!bh) {
+ ret = -EAGAIN;
goto out2;
+ }
- BUG_ON(ident != TAG_IDENT_PVD);
+ if (ident != TAG_IDENT_PVD) {
+ ret = -EIO;
+ goto out_bh;
+ }
pvoldesc = (struct primaryVolDesc *)bh->b_data;
@@ -889,8 +900,9 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
if (udf_CS0toUTF8(outstr, instr))
udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
- brelse(bh);
ret = 0;
+out_bh:
+ brelse(bh);
out2:
kfree(outstr);
out1:
@@ -947,7 +959,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
if (mdata->s_mirror_fe == NULL) {
udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
- goto error_exit;
+ return -EIO;
}
}
@@ -964,23 +976,18 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
addr.logicalBlockNum, addr.partitionReferenceNum);
mdata->s_bitmap_fe = udf_iget(sb, &addr);
-
if (mdata->s_bitmap_fe == NULL) {
if (sb->s_flags & MS_RDONLY)
udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
else {
udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
- goto error_exit;
+ return -EIO;
}
}
}
udf_debug("udf_load_metadata_files Ok\n");
-
return 0;
-
-error_exit:
- return 1;
}
static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -1069,7 +1076,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (!map->s_uspace.s_table) {
udf_debug("cannot load unallocSpaceTable (part %d)\n",
p_index);
- return 1;
+ return -EIO;
}
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
udf_debug("unallocSpaceTable (part %d) @ %ld\n",
@@ -1079,7 +1086,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (phd->unallocSpaceBitmap.extLength) {
struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
if (!bitmap)
- return 1;
+ return -ENOMEM;
map->s_uspace.s_bitmap = bitmap;
bitmap->s_extPosition = le32_to_cpu(
phd->unallocSpaceBitmap.extPosition);
@@ -1102,7 +1109,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (!map->s_fspace.s_table) {
udf_debug("cannot load freedSpaceTable (part %d)\n",
p_index);
- return 1;
+ return -EIO;
}
map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
@@ -1113,7 +1120,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (phd->freedSpaceBitmap.extLength) {
struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
if (!bitmap)
- return 1;
+ return -ENOMEM;
map->s_fspace.s_bitmap = bitmap;
bitmap->s_extPosition = le32_to_cpu(
phd->freedSpaceBitmap.extPosition);
@@ -1165,7 +1172,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
}
if (!sbi->s_vat_inode)
- return 1;
+ return -EIO;
if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
map->s_type_specific.s_virtual.s_start_offset = 0;
@@ -1177,7 +1184,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
pos = udf_block_map(sbi->s_vat_inode, 0);
bh = sb_bread(sb, pos);
if (!bh)
- return 1;
+ return -EIO;
vat20 = (struct virtualAllocationTable20 *)bh->b_data;
} else {
vat20 = (struct virtualAllocationTable20 *)
@@ -1195,6 +1202,12 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
return 0;
}
+/*
+ * Load partition descriptor block
+ *
+ * Returns <0 on error, 0 on success. -EAGAIN is special: try the next
+ * descriptor sequence.
+ */
static int udf_load_partdesc(struct super_block *sb, sector_t block)
{
struct buffer_head *bh;
@@ -1204,13 +1217,15 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
int i, type1_idx;
uint16_t partitionNumber;
uint16_t ident;
- int ret = 0;
+ int ret;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
- return 1;
- if (ident != TAG_IDENT_PD)
+ return -EAGAIN;
+ if (ident != TAG_IDENT_PD) {
+ ret = 0;
goto out_bh;
+ }
p = (struct partitionDesc *)bh->b_data;
partitionNumber = le16_to_cpu(p->partitionNumber);
@@ -1229,10 +1244,13 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
if (i >= sbi->s_partitions) {
udf_debug("Partition (%d) not found in partition map\n",
partitionNumber);
+ ret = 0;
goto out_bh;
}
ret = udf_fill_partdesc_info(sb, p, i);
+ if (ret < 0)
+ goto out_bh;
/*
* Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
@@ -1249,32 +1267,37 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
break;
}
- if (i >= sbi->s_partitions)
+ if (i >= sbi->s_partitions) {
+ ret = 0;
goto out_bh;
+ }
ret = udf_fill_partdesc_info(sb, p, i);
- if (ret)
+ if (ret < 0)
goto out_bh;
if (map->s_partition_type == UDF_METADATA_MAP25) {
ret = udf_load_metadata_files(sb, i);
- if (ret) {
+ if (ret < 0) {
udf_err(sb, "error loading MetaData partition map %d\n",
i);
goto out_bh;
}
} else {
- ret = udf_load_vat(sb, i, type1_idx);
- if (ret)
- goto out_bh;
/*
- * Mark filesystem read-only if we have a partition with
- * virtual map since we don't handle writing to it (we
- * overwrite blocks instead of relocating them).
+ * If we have a partition with virtual map, we don't handle
+ * writing to it (we overwrite blocks instead of relocating
+ * them).
*/
- sb->s_flags |= MS_RDONLY;
- pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n");
+ if (!(sb->s_flags & MS_RDONLY)) {
+ ret = -EACCES;
+ goto out_bh;
+ }
+ ret = udf_load_vat(sb, i, type1_idx);
+ if (ret < 0)
+ goto out_bh;
}
+ ret = 0;
out_bh:
/* In case loading failed, we handle cleanup in udf_fill_super */
brelse(bh);
@@ -1340,11 +1363,11 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
uint16_t ident;
struct buffer_head *bh;
unsigned int table_len;
- int ret = 0;
+ int ret;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
- return 1;
+ return -EAGAIN;
BUG_ON(ident != TAG_IDENT_LVD);
lvd = (struct logicalVolDesc *)bh->b_data;
table_len = le32_to_cpu(lvd->mapTableLength);
@@ -1352,7 +1375,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
udf_err(sb, "error loading logical volume descriptor: "
"Partition table too long (%u > %lu)\n", table_len,
sb->s_blocksize - sizeof(*lvd));
- ret = 1;
+ ret = -EIO;
goto out_bh;
}
@@ -1396,11 +1419,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_SPARABLE,
strlen(UDF_ID_SPARABLE))) {
- if (udf_load_sparable_map(sb, map,
- (struct sparablePartitionMap *)gpm) < 0) {
- ret = 1;
+ ret = udf_load_sparable_map(sb, map,
+ (struct sparablePartitionMap *)gpm);
+ if (ret < 0)
goto out_bh;
- }
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_METADATA,
strlen(UDF_ID_METADATA))) {
@@ -1465,7 +1487,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
}
if (lvd->integritySeqExt.extLength)
udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
-
+ ret = 0;
out_bh:
brelse(bh);
return ret;
@@ -1503,22 +1525,18 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
}
/*
- * udf_process_sequence
- *
- * PURPOSE
- * Process a main/reserve volume descriptor sequence.
- *
- * PRE-CONDITIONS
- * sb Pointer to _locked_ superblock.
- * block First block of first extent of the sequence.
- * lastblock Lastblock of first extent of the sequence.
+ * Process a main/reserve volume descriptor sequence.
+ * @block First block of first extent of the sequence.
+ * @lastblock Last block of first extent of the sequence.
+ * @fileset Where we store the extent containing the root fileset
*
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
+ * Returns <0 on error, 0 on success. -EAGAIN is special: try the next
+ * descriptor sequence.
*/
-static noinline int udf_process_sequence(struct super_block *sb, long block,
- long lastblock, struct kernel_lb_addr *fileset)
+static noinline int udf_process_sequence(
+ struct super_block *sb,
+ sector_t block, sector_t lastblock,
+ struct kernel_lb_addr *fileset)
{
struct buffer_head *bh = NULL;
struct udf_vds_record vds[VDS_POS_LENGTH];
@@ -1529,6 +1547,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
uint32_t vdsn;
uint16_t ident;
long next_s = 0, next_e = 0;
+ int ret;
memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
@@ -1543,7 +1562,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
udf_err(sb,
"Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
(unsigned long long)block);
- return 1;
+ return -EAGAIN;
}
/* Process each descriptor (ISO 13346 3/8.3-8.4) */
@@ -1616,14 +1635,19 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
*/
if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
udf_err(sb, "Primary Volume Descriptor not found!\n");
- return 1;
+ return -EAGAIN;
+ }
+ ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
+ if (ret < 0)
+ return ret;
+
+ if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
+ ret = udf_load_logicalvol(sb,
+ vds[VDS_POS_LOGICAL_VOL_DESC].block,
+ fileset);
+ if (ret < 0)
+ return ret;
}
- if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
- return 1;
-
- if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
- vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
- return 1;
if (vds[VDS_POS_PARTITION_DESC].block) {
/*
@@ -1632,19 +1656,27 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
*/
for (block = vds[VDS_POS_PARTITION_DESC].block;
block < vds[VDS_POS_TERMINATING_DESC].block;
- block++)
- if (udf_load_partdesc(sb, block))
- return 1;
+ block++) {
+ ret = udf_load_partdesc(sb, block);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
}
+/*
+ * Load the Volume Descriptor Sequence described by the anchor in bh.
+ *
+ * Returns <0 on error, 0 on success.
+ */
static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
struct kernel_lb_addr *fileset)
{
struct anchorVolDescPtr *anchor;
- long main_s, main_e, reserve_s, reserve_e;
+ sector_t main_s, main_e, reserve_s, reserve_e;
+ int ret;
anchor = (struct anchorVolDescPtr *)bh->b_data;
@@ -1662,18 +1694,26 @@ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
/* Process the main & reserve sequences */
/* responsible for finding the PartitionDesc(s) */
- if (!udf_process_sequence(sb, main_s, main_e, fileset))
- return 1;
- udf_sb_free_partitions(sb);
- if (!udf_process_sequence(sb, reserve_s, reserve_e, fileset))
- return 1;
+ ret = udf_process_sequence(sb, main_s, main_e, fileset);
+ if (ret != -EAGAIN)
+ return ret;
udf_sb_free_partitions(sb);
- return 0;
+ ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
+ if (ret < 0) {
+ udf_sb_free_partitions(sb);
+ /* Neither sequence was OK; return -EIO */
+ if (ret == -EAGAIN)
+ ret = -EIO;
+ }
+ return ret;
}
/*
* Check whether there is an anchor block in the given block and
* load Volume Descriptor Sequence if so.
+ *
+ * Returns <0 on error, 0 on success. -EAGAIN is special: try the next
+ * anchor block.
*/
static int udf_check_anchor_block(struct super_block *sb, sector_t block,
struct kernel_lb_addr *fileset)
@@ -1685,33 +1725,40 @@ static int udf_check_anchor_block(struct super_block *sb, sector_t block,
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
udf_fixed_to_variable(block) >=
sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
- return 0;
+ return -EAGAIN;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
- return 0;
+ return -EAGAIN;
if (ident != TAG_IDENT_AVDP) {
brelse(bh);
- return 0;
+ return -EAGAIN;
}
ret = udf_load_sequence(sb, bh, fileset);
brelse(bh);
return ret;
}
-/* Search for an anchor volume descriptor pointer */
-static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
- struct kernel_lb_addr *fileset)
+/*
+ * Search for an anchor volume descriptor pointer.
+ *
+ * Returns <0 on error, 0 on success. -EAGAIN is special: try the next set
+ * of anchors.
+ */
+static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
+ struct kernel_lb_addr *fileset)
{
sector_t last[6];
int i;
struct udf_sb_info *sbi = UDF_SB(sb);
int last_count = 0;
+ int ret;
/* First try user provided anchor */
if (sbi->s_anchor) {
- if (udf_check_anchor_block(sb, sbi->s_anchor, fileset))
- return lastblock;
+ ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
+ if (ret != -EAGAIN)
+ return ret;
}
/*
* according to spec, anchor is in either:
@@ -1720,39 +1767,46 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
* lastblock
* however, if the disc isn't closed, it could be 512.
*/
- if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset))
- return lastblock;
+ ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
+ if (ret != -EAGAIN)
+ return ret;
/*
* The trouble is which block is the last one. Drives often misreport
* this so we try various possibilities.
*/
- last[last_count++] = lastblock;
- if (lastblock >= 1)
- last[last_count++] = lastblock - 1;
- last[last_count++] = lastblock + 1;
- if (lastblock >= 2)
- last[last_count++] = lastblock - 2;
- if (lastblock >= 150)
- last[last_count++] = lastblock - 150;
- if (lastblock >= 152)
- last[last_count++] = lastblock - 152;
+ last[last_count++] = *lastblock;
+ if (*lastblock >= 1)
+ last[last_count++] = *lastblock - 1;
+ last[last_count++] = *lastblock + 1;
+ if (*lastblock >= 2)
+ last[last_count++] = *lastblock - 2;
+ if (*lastblock >= 150)
+ last[last_count++] = *lastblock - 150;
+ if (*lastblock >= 152)
+ last[last_count++] = *lastblock - 152;
for (i = 0; i < last_count; i++) {
if (last[i] >= sb->s_bdev->bd_inode->i_size >>
sb->s_blocksize_bits)
continue;
- if (udf_check_anchor_block(sb, last[i], fileset))
- return last[i];
+ ret = udf_check_anchor_block(sb, last[i], fileset);
+ if (ret != -EAGAIN) {
+ if (!ret)
+ *lastblock = last[i];
+ return ret;
+ }
if (last[i] < 256)
continue;
- if (udf_check_anchor_block(sb, last[i] - 256, fileset))
- return last[i];
+ ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
+ if (ret != -EAGAIN) {
+ if (!ret)
+ *lastblock = last[i];
+ return ret;
+ }
}
/* Finally try block 512 in case media is open */
- if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset))
- return last[0];
- return 0;
+ return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
}
/*
@@ -1760,54 +1814,59 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
* area specified by it. The function expects sbi->s_last_block to be the last
* block on the media.
*
- * Return 1 if ok, 0 if not found.
- *
+ * Return <0 on error, 0 if the anchor was found. -EAGAIN is special and means
+ * the anchor was not found.
*/
static int udf_find_anchor(struct super_block *sb,
struct kernel_lb_addr *fileset)
{
- sector_t lastblock;
struct udf_sb_info *sbi = UDF_SB(sb);
+ sector_t lastblock = sbi->s_last_block;
+ int ret;
- lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
- if (lastblock)
+ ret = udf_scan_anchors(sb, &lastblock, fileset);
+ if (ret != -EAGAIN)
goto out;
/* No anchor found? Try VARCONV conversion of block numbers */
UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+ lastblock = udf_variable_to_fixed(sbi->s_last_block);
/* Firstly, we try to not convert number of the last block */
- lastblock = udf_scan_anchors(sb,
- udf_variable_to_fixed(sbi->s_last_block),
- fileset);
- if (lastblock)
+ ret = udf_scan_anchors(sb, &lastblock, fileset);
+ if (ret != -EAGAIN)
goto out;
+ lastblock = sbi->s_last_block;
/* Secondly, we try with converted number of the last block */
- lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
- if (!lastblock) {
+ ret = udf_scan_anchors(sb, &lastblock, fileset);
+ if (ret < 0) {
/* VARCONV didn't help. Clear it. */
UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
- return 0;
}
out:
- sbi->s_last_block = lastblock;
- return 1;
+ if (ret == 0)
+ sbi->s_last_block = lastblock;
+ return ret;
}
/*
* Check Volume Structure Descriptor, find Anchor block and load Volume
- * Descriptor Sequence
+ * Descriptor Sequence.
+ *
+ * Returns <0 on error, 0 on success. -EAGAIN is special and means the anchor
+ * block was not found.
*/
static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
int silent, struct kernel_lb_addr *fileset)
{
struct udf_sb_info *sbi = UDF_SB(sb);
loff_t nsr_off;
+ int ret;
if (!sb_set_blocksize(sb, uopt->blocksize)) {
if (!silent)
udf_warn(sb, "Bad block size\n");
- return 0;
+ return -EINVAL;
}
sbi->s_last_block = uopt->lastblock;
if (!uopt->novrs) {
@@ -1828,12 +1887,13 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
/* Look for anchor block and load Volume Descriptor Sequence */
sbi->s_anchor = uopt->anchor;
- if (!udf_find_anchor(sb, fileset)) {
- if (!silent)
+ ret = udf_find_anchor(sb, fileset);
+ if (ret < 0) {
+ if (!silent && ret == -EAGAIN)
udf_warn(sb, "No anchor found\n");
- return 0;
+ return ret;
}
- return 1;
+ return 0;
}
static void udf_open_lvid(struct super_block *sb)
@@ -1939,7 +1999,7 @@ u64 lvid_get_unique_id(struct super_block *sb)
static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
- int ret;
+ int ret = -EINVAL;
struct inode *inode = NULL;
struct udf_options uopt;
struct kernel_lb_addr rootdir, fileset;
@@ -2011,7 +2071,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
} else {
uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
- if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
+ if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
if (!silent)
pr_notice("Rescanning with blocksize %d\n",
UDF_DEFAULT_BLOCKSIZE);
@@ -2021,8 +2081,11 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
}
}
- if (!ret) {
- udf_warn(sb, "No partition found (1)\n");
+ if (ret < 0) {
+ if (ret == -EAGAIN) {
+ udf_warn(sb, "No partition found (1)\n");
+ ret = -EINVAL;
+ }
goto error_out;
}
@@ -2040,9 +2103,13 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
le16_to_cpu(lvidiu->minUDFReadRev),
UDF_MAX_READ_VERSION);
+ ret = -EINVAL;
+ goto error_out;
+ } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
+ !(sb->s_flags & MS_RDONLY)) {
+ ret = -EACCES;
goto error_out;
- } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
- sb->s_flags |= MS_RDONLY;
+ }
sbi->s_udfrev = minUDFWriteRev;
@@ -2054,17 +2121,20 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (!sbi->s_partitions) {
udf_warn(sb, "No partition found (2)\n");
+ ret = -EINVAL;
goto error_out;
}
if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
- UDF_PART_FLAG_READ_ONLY) {
- pr_notice("Partition marked readonly; forcing readonly mount\n");
- sb->s_flags |= MS_RDONLY;
+ UDF_PART_FLAG_READ_ONLY &&
+ !(sb->s_flags & MS_RDONLY)) {
+ ret = -EACCES;
+ goto error_out;
}
if (udf_find_fileset(sb, &fileset, &rootdir)) {
udf_warn(sb, "No fileset found\n");
+ ret = -EINVAL;
goto error_out;
}
@@ -2086,6 +2156,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (!inode) {
udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
+ ret = -EIO;
goto error_out;
}
@@ -2093,6 +2164,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
udf_err(sb, "Couldn't allocate root dentry\n");
+ ret = -ENOMEM;
goto error_out;
}
sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -2113,7 +2185,7 @@ error_out:
kfree(sbi);
sb->s_fs_info = NULL;
- return -EINVAL;
+ return ret;
}
void _udf_err(struct super_block *sb, const char *function,
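
The udf/super.c changes replace the old 0/1 return convention with negative errnos, reserving -EAGAIN for "nothing usable at this candidate, try the next one" while any other negative value aborts the mount. A small self-contained sketch of that convention follows; probe_location() and scan() are hypothetical names, not udf functions.

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical probe: 0 = found, -EAGAIN = nothing here (keep scanning),
 * any other negative value = hard error that must end the scan.
 */
static int probe_location(int block)
{
        if (block == 512)
                return 0;               /* pretend the anchor lives here */
        if (block < 0)
                return -EIO;            /* unreadable: a real error */
        return -EAGAIN;                 /* nothing at this block, try the next */
}

static int scan(const int *candidates, int n, int *found)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = probe_location(candidates[i]);
                if (ret != -EAGAIN) {
                        if (ret == 0)
                                *found = candidates[i];
                        return ret;     /* success or hard error ends the scan */
                }
        }
        return -EAGAIN;                 /* every candidate said "try the next one" */
}

int main(void)
{
        int candidates[] = { 256, 257, 512 };
        int where, ret;

        ret = scan(candidates, 3, &where);
        if (ret == 0)
                printf("anchor at block %d\n", where);
        else
                printf("scan failed: %d\n", ret);
        return ret ? 1 : 0;
}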
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 8685d1b..31229e0 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -57,16 +57,13 @@
#define JBD_EXPENSIVE_CHECKING
extern u8 journal_enable_debug;
-#define jbd_debug(n, f, a...) \
- do { \
- if ((n) <= journal_enable_debug) { \
- printk (KERN_DEBUG "(%s, %d): %s: ", \
- __FILE__, __LINE__, __func__); \
- printk (f, ## a); \
- } \
- } while (0)
+void __jbd_debug(int level, const char *file, const char *func,
+ unsigned int line, const char *fmt, ...);
+
+#define jbd_debug(n, fmt, a...) \
+ __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
-#define jbd_debug(f, a...) /**/
+#define jbd_debug(n, fmt, a...) /**/
#endif
static inline void *jbd_alloc(size_t size, gfp_t flags)
@@ -77,7 +74,7 @@ static inline void *jbd_alloc(size_t size, gfp_t flags)
static inline void jbd_free(void *ptr, size_t size)
{
free_pages((unsigned long)ptr, get_order(size));
-};
+}
#define JFS_MIN_JOURNAL_BLOCKS 1024
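
The jbd.h hunk turns jbd_debug() into a thin macro around an out-of-line __jbd_debug(); the body of that helper is not part of this section (the diffstat touches fs/jbd/journal.c, where it presumably lands). The version below is only a user-space analogue showing the shape such a helper could take, with vfprintf() standing in for printk().

#include <stdarg.h>
#include <stdio.h>

static int journal_enable_debug = 1;    /* stand-in for the kernel knob */

/* User-space analogue of the declared helper; the kernel one uses printk. */
static void __jbd_debug(int level, const char *file, const char *func,
                        unsigned int line, const char *fmt, ...)
{
        va_list args;

        if (level > journal_enable_debug)
                return;
        fprintf(stderr, "(%s, %u): %s: ", file, line, func);
        va_start(args, fmt);
        vfprintf(stderr, fmt, args);
        va_end(args);
}

#define jbd_debug(n, fmt, a...) \
        __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)

int main(void)
{
        jbd_debug(1, "checkpointing transaction %d\n", 42);
        return 0;
}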