Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/aops.c        4
-rw-r--r--  fs/gfs2/bmap.c        7
-rw-r--r--  fs/gfs2/file.c       10
-rw-r--r--  fs/gfs2/glock.c      84
-rw-r--r--  fs/gfs2/glock.h       2
-rw-r--r--  fs/gfs2/glops.c       4
-rw-r--r--  fs/gfs2/incore.h     41
-rw-r--r--  fs/gfs2/inode.c      58
-rw-r--r--  fs/gfs2/lock_dlm.c    8
-rw-r--r--  fs/gfs2/main.c       19
-rw-r--r--  fs/gfs2/ops_fstype.c  2
-rw-r--r--  fs/gfs2/quota.c     344
-rw-r--r--  fs/gfs2/quota.h       9
-rw-r--r--  fs/gfs2/rgrp.c      212
-rw-r--r--  fs/gfs2/rgrp.h        4
-rw-r--r--  fs/gfs2/super.c       2
-rw-r--r--  fs/gfs2/sys.c         2
-rw-r--r--  fs/gfs2/util.c       20
-rw-r--r--  fs/gfs2/util.h        2
-rw-r--r--  fs/gfs2/xattr.c       3
20 files changed, 367 insertions(+), 470 deletions(-)
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index b7fc035..1f7d805 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -611,14 +611,12 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
if (alloc_required) {
- struct gfs2_alloc_parms ap = { .aflags = 0, };
error = gfs2_quota_lock_check(ip);
if (error)
goto out_unlock;
requested = data_blocks + ind_blocks;
- ap.target = requested;
- error = gfs2_inplace_reserve(ip, &ap);
+ error = gfs2_inplace_reserve(ip, requested, 0);
if (error)
goto out_qunlock;
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index fe0500c..62a65fc 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1216,7 +1216,6 @@ static int do_grow(struct inode *inode, u64 size)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct gfs2_alloc_parms ap = { .target = 1, };
struct buffer_head *dibh;
int error;
int unstuff = 0;
@@ -1227,7 +1226,7 @@ static int do_grow(struct inode *inode, u64 size)
if (error)
return error;
- error = gfs2_inplace_reserve(ip, &ap);
+ error = gfs2_inplace_reserve(ip, 1, 0);
if (error)
goto do_grow_qunlock;
unstuff = 1;
@@ -1280,7 +1279,6 @@ do_grow_qunlock:
int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
- struct gfs2_inode *ip = GFS2_I(inode);
int ret;
u64 oldsize;
@@ -1296,7 +1294,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
inode_dio_wait(inode);
- ret = gfs2_rs_alloc(ip);
+ ret = gfs2_rs_alloc(GFS2_I(inode));
if (ret)
goto out;
@@ -1306,7 +1304,6 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
goto out;
}
- gfs2_rs_deltree(ip->i_res);
ret = do_shrink(inode, oldsize, newsize);
out:
put_write_access(inode);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index efc078f..0621b46 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -383,7 +383,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct inode *inode = file_inode(vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned long last_index;
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
@@ -431,8 +430,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto out_unlock;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
- ap.target = data_blocks + ind_blocks;
- ret = gfs2_inplace_reserve(ip, &ap);
+ ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
if (ret)
goto out_quota_unlock;
@@ -622,7 +620,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
if (!(file->f_mode & FMODE_WRITE))
return 0;
- gfs2_rs_delete(ip, &inode->i_writecount);
+ gfs2_rs_delete(ip);
return 0;
}
@@ -802,7 +800,6 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
struct inode *inode = file_inode(file);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
int error;
@@ -853,8 +850,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
- ap.target = data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip, &ap);
+ error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
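
The aops.c, bmap.c and file.c hunks above all make the same mechanical change: gfs2_inplace_reserve() goes back to taking the requested block count and the allocation flags as plain arguments instead of a struct gfs2_alloc_parms. A minimal userspace sketch of the two calling conventions, with hypothetical names standing in for the kernel ones:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two calling conventions seen in
 * this patch: the struct-based API being removed and the flat
 * two-argument API it reverts to.  Not the kernel functions. */

struct alloc_parms {                    /* models struct gfs2_alloc_parms */
        uint32_t target;                /* blocks we would like to allocate */
        uint32_t aflags;                /* allocation flags, e.g. Orlov */
};

static int reserve_by_struct(const struct alloc_parms *ap)
{
        printf("reserve %u blocks, flags 0x%x\n",
               (unsigned)ap->target, (unsigned)ap->aflags);
        return 0;
}

static int reserve_by_args(uint32_t requested, uint32_t aflags)
{
        printf("reserve %u blocks, flags 0x%x\n",
               (unsigned)requested, (unsigned)aflags);
        return 0;
}

int main(void)
{
        struct alloc_parms ap = { .target = 8, .aflags = 0 };

        reserve_by_struct(&ap);         /* struct form: extensible */
        reserve_by_args(8, 0);          /* flat form, as after this patch */
        return 0;
}

Per the comment removed from incore.h below, the struct form existed so extra fields (alignment, minimum extent size) could later be added without touching every caller; this revert trades that for the simpler flat signature.
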
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c8420f7..c2f41b4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -31,7 +31,6 @@
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
-#include <linux/lockref.h>
#include "gfs2.h"
#include "incore.h"
@@ -130,10 +129,10 @@ void gfs2_glock_free(struct gfs2_glock *gl)
*
*/
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
{
- GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
- lockref_get(&gl->gl_lockref);
+ GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
+ atomic_inc(&gl->gl_ref);
}
/**
@@ -188,6 +187,20 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
}
/**
+ * gfs2_glock_put_nolock() - Decrement reference count on glock
+ * @gl: The glock to put
+ *
+ * This function should only be used if the caller has its own reference
+ * to the glock, in addition to the one it is dropping.
+ */
+
+void gfs2_glock_put_nolock(struct gfs2_glock *gl)
+{
+ if (atomic_dec_and_test(&gl->gl_ref))
+ GLOCK_BUG_ON(gl, 1);
+}
+
+/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
*
@@ -198,22 +211,17 @@ void gfs2_glock_put(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct address_space *mapping = gfs2_glock2aspace(gl);
- if (lockref_put_or_lock(&gl->gl_lockref))
- return;
-
- lockref_mark_dead(&gl->gl_lockref);
-
- spin_lock(&lru_lock);
- __gfs2_glock_remove_from_lru(gl);
- spin_unlock(&lru_lock);
- spin_unlock(&gl->gl_lockref.lock);
- spin_lock_bucket(gl->gl_hash);
- hlist_bl_del_rcu(&gl->gl_list);
- spin_unlock_bucket(gl->gl_hash);
- GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
- GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
- trace_gfs2_glock_put(gl);
- sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
+ if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
+ __gfs2_glock_remove_from_lru(gl);
+ spin_unlock(&lru_lock);
+ spin_lock_bucket(gl->gl_hash);
+ hlist_bl_del_rcu(&gl->gl_list);
+ spin_unlock_bucket(gl->gl_hash);
+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+ GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+ trace_gfs2_glock_put(gl);
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
+ }
}
/**
@@ -236,7 +244,7 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
continue;
if (gl->gl_sbd != sdp)
continue;
- if (lockref_get_not_dead(&gl->gl_lockref))
+ if (atomic_inc_not_zero(&gl->gl_ref))
return gl;
}
@@ -388,11 +396,10 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
held2 = (new_state != LM_ST_UNLOCKED);
if (held1 != held2) {
- GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
if (held2)
- gl->gl_lockref.count++;
+ gfs2_glock_hold(gl);
else
- gl->gl_lockref.count--;
+ gfs2_glock_put_nolock(gl);
}
if (held1 && held2 && list_empty(&gl->gl_holders))
clear_bit(GLF_QUEUED, &gl->gl_flags);
@@ -619,9 +626,9 @@ out:
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_clear_bit();
- gl->gl_lockref.count++;
+ gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gl->gl_lockref.count--;
+ gfs2_glock_put_nolock(gl);
return;
out_unlock:
@@ -747,7 +754,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_sbd = sdp;
gl->gl_flags = 0;
gl->gl_name = name;
- gl->gl_lockref.count = 1;
+ atomic_set(&gl->gl_ref, 1);
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1349,10 +1356,10 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
}
}
- gl->gl_lockref.count++;
- set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
spin_unlock(&gl->gl_spin);
-
+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ smp_wmb();
+ gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
}
@@ -1397,19 +1404,15 @@ __acquires(&lru_lock)
while(!list_empty(list)) {
gl = list_entry(list->next, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru);
- if (!spin_trylock(&gl->gl_spin)) {
- list_add(&gl->gl_lru, &lru_list);
- atomic_inc(&lru_count);
- continue;
- }
clear_bit(GLF_LRU, &gl->gl_flags);
+ gfs2_glock_hold(gl);
spin_unlock(&lru_lock);
- gl->gl_lockref.count++;
+ spin_lock(&gl->gl_spin);
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gl->gl_lockref.count--;
+ gfs2_glock_put_nolock(gl);
spin_unlock(&gl->gl_spin);
spin_lock(&lru_lock);
}
@@ -1490,7 +1493,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
rcu_read_lock();
hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
- if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
+ if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
examiner(gl);
}
rcu_read_unlock();
@@ -1743,7 +1746,7 @@ int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
state2str(gl->gl_demote_state), dtime,
atomic_read(&gl->gl_ail_count),
atomic_read(&gl->gl_revokes),
- (int)gl->gl_lockref.count, gl->gl_hold_time);
+ atomic_read(&gl->gl_ref), gl->gl_hold_time);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
error = dump_holder(seq, gh);
@@ -1899,8 +1902,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
gi->nhash = 0;
}
/* Skip entries for other sb and dead entries */
- } while (gi->sdp != gi->gl->gl_sbd ||
- __lockref_is_dead(&gi->gl->gl_lockref));
+ } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
return 0;
}
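
The rewritten gfs2_glock_put() above leans on atomic_dec_and_lock(): dropping the final reference and acquiring lru_lock happen as one step, so teardown cannot race with a concurrent lookup doing atomic_inc_not_zero() on the same glock. A rough userspace model of that primitive using C11 atomics and pthreads (a sketch with hypothetical names, not the kernel implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Decrement v by 1 unless it currently equals "unless"; returns 1 if
 * the decrement happened.  Models the kernel's atomic_add_unless(). */
static int add_unless(atomic_int *v, int delta, int unless)
{
        int c = atomic_load(v);

        while (c != unless) {
                if (atomic_compare_exchange_weak(v, &c, c + delta))
                        return 1;
        }
        return 0;
}

/* Models atomic_dec_and_lock(): returns 1 with the lock held iff the
 * count dropped to zero. */
static int dec_and_lock(atomic_int *v, pthread_mutex_t *lock)
{
        if (add_unless(v, -1, 1))
                return 0;               /* fast path, lock not needed */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(v, 1) == 1)
                return 1;               /* hit zero; caller unlocks */
        pthread_mutex_unlock(lock);
        return 0;
}

int main(void)
{
        atomic_int ref = 1;

        if (dec_and_lock(&ref, &lru_lock)) {
                /* last reference: safe to tear down under the lock */
                printf("freeing object\n");
                pthread_mutex_unlock(&lru_lock);
        }
        return 0;
}
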
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 6647d77..69f66e3 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -181,6 +181,8 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
+extern void gfs2_glock_hold(struct gfs2_glock *gl);
+extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
unsigned flags, struct gfs2_holder *gh);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index db908f6..e2e0a90 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -525,9 +525,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
- gl->gl_lockref.count++;
+ gfs2_glock_hold(gl);
if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
- gl->gl_lockref.count--;
+ gfs2_glock_put_nolock(gl);
}
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index ba1ea67..26aabd7 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -21,7 +21,6 @@
#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/percpu.h>
-#include <linux/lockref.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
@@ -72,7 +71,6 @@ struct gfs2_bitmap {
u32 bi_offset;
u32 bi_start;
u32 bi_len;
- u32 bi_blocks;
};
struct gfs2_rgrpd {
@@ -103,25 +101,19 @@ struct gfs2_rgrpd {
struct gfs2_rbm {
struct gfs2_rgrpd *rgd;
+ struct gfs2_bitmap *bi; /* Bitmap must belong to the rgd */
u32 offset; /* The offset is bitmap relative */
- int bii; /* Bitmap index */
};
-static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
-{
- return rbm->rgd->rd_bits + rbm->bii;
-}
-
static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
- return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
- rbm->offset;
+ return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
}
static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
const struct gfs2_rbm *rbm2)
{
- return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
+ return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) &&
(rbm1->offset == rbm2->offset);
}
@@ -286,20 +278,6 @@ struct gfs2_blkreserv {
unsigned int rs_qa_qd_num;
};
-/*
- * Allocation parameters
- * @target: The number of blocks we'd ideally like to allocate
- * @aflags: The flags (e.g. Orlov flag)
- *
- * The intent is to gradually expand this structure over time in
- * order to give more information, e.g. alignment, min extent size
- * to the allocation code.
- */
-struct gfs2_alloc_parms {
- u32 target;
- u32 aflags;
-};
-
enum {
GLF_LOCK = 1,
GLF_DEMOTE = 3,
@@ -322,9 +300,9 @@ struct gfs2_glock {
struct gfs2_sbd *gl_sbd;
unsigned long gl_flags; /* GLF_... */
struct lm_lockname gl_name;
+ atomic_t gl_ref;
- struct lockref gl_lockref;
-#define gl_spin gl_lockref.lock
+ spinlock_t gl_spin;
/* State fields protected by gl_spin */
unsigned int gl_state:2, /* Current state */
@@ -420,10 +398,11 @@ enum {
struct gfs2_quota_data {
struct list_head qd_list;
- struct kqid qd_id;
- struct lockref qd_lockref;
- struct list_head qd_lru;
+ struct list_head qd_reclaim;
+ atomic_t qd_count;
+
+ struct kqid qd_id;
unsigned long qd_flags; /* QDF_... */
s64 qd_change;
@@ -537,6 +516,7 @@ struct gfs2_tune {
unsigned int gt_logd_secs;
+ unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
unsigned int gt_quota_scale_num; /* Numerator */
unsigned int gt_quota_scale_den; /* Denominator */
@@ -714,7 +694,6 @@ struct gfs2_sbd {
struct list_head sd_quota_list;
atomic_t sd_quota_count;
struct mutex sd_quota_mutex;
- struct mutex sd_quota_sync_mutex;
wait_queue_head_t sd_quota_wait;
struct list_head sd_trunc_list;
spinlock_t sd_trunc_lock;
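
With the struct gfs2_rbm change above, a block position is once again a bitmap pointer plus a bitmap-relative offset, and gfs2_rbm_to_block() folds in the resource group's first data block and the bitmap's byte offset. A standalone sketch of that arithmetic with made-up field values (GFS2_NBBY is 4: each bitmap byte holds four 2-bit block states):

#include <stdint.h>
#include <stdio.h>

#define GFS2_NBBY 4                     /* blocks described per bitmap byte */

struct bitmap { uint32_t bi_start; };   /* byte offset within the rgrp */
struct rgrpd {
        uint64_t rd_data0;              /* first data block of the rgrp */
        struct bitmap bits[2];
};
struct rbm {
        struct rgrpd *rgd;
        struct bitmap *bi;              /* must belong to rgd, as above */
        uint32_t offset;                /* bitmap-relative block offset */
};

/* Same arithmetic as gfs2_rbm_to_block() after this patch. */
static uint64_t rbm_to_block(const struct rbm *rbm)
{
        return rbm->rgd->rd_data0 +
               (uint64_t)rbm->bi->bi_start * GFS2_NBBY + rbm->offset;
}

int main(void)
{
        struct rgrpd rgd = {
                .rd_data0 = 1000,
                .bits = { { .bi_start = 0 }, { .bi_start = 500 } },
        };
        struct rbm rbm = { .rgd = &rgd, .bi = &rgd.bits[1], .offset = 17 };

        /* 1000 + 500 * 4 + 17 = 3017 */
        printf("%llu\n", (unsigned long long)rbm_to_block(&rbm));
        return 0;
}
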
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 7119504..ced3257 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -379,7 +379,6 @@ static void munge_mode_uid_gid(const struct gfs2_inode *dip,
static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc_parms ap = { .target = RES_DINODE, .aflags = flags, };
int error;
int dblocks = 1;
@@ -387,7 +386,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
if (error)
goto out;
- error = gfs2_inplace_reserve(ip, &ap);
+ error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
if (error)
goto out_quota;
@@ -473,7 +472,6 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip, int arq)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
int error;
if (arq) {
@@ -481,7 +479,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
if (error)
goto fail_quota_locks;
- error = gfs2_inplace_reserve(dip, &ap);
+ error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
if (error)
goto fail_quota_locks;
@@ -586,17 +584,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (!IS_ERR(inode)) {
d = d_splice_alias(inode, dentry);
error = 0;
- if (file) {
- if (S_ISREG(inode->i_mode)) {
- WARN_ON(d != NULL);
- error = finish_open(file, dentry, gfs2_open_common, opened);
- } else {
+ if (file && !IS_ERR(d)) {
+ if (d == NULL)
+ d = dentry;
+ if (S_ISREG(inode->i_mode))
+ error = finish_open(file, d, gfs2_open_common, opened);
+ else
error = finish_no_open(file, d);
- }
- } else {
- dput(d);
}
gfs2_glock_dq_uninit(ghs);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
return error;
} else if (error != -ENOENT) {
goto fail_gunlock;
@@ -715,7 +713,7 @@ fail_gunlock2:
fail_free_inode:
if (ip->i_gl)
gfs2_glock_put(ip->i_gl);
- gfs2_rs_delete(ip, NULL);
+ gfs2_rs_delete(ip);
free_inode_nonrcu(inode);
inode = NULL;
fail_gunlock:
@@ -783,10 +781,8 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
error = finish_open(file, dentry, gfs2_open_common, opened);
gfs2_glock_dq_uninit(&gh);
- if (error) {
- dput(d);
+ if (error)
return ERR_PTR(error);
- }
return d;
}
@@ -878,12 +874,11 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
error = 0;
if (alloc_required) {
- struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
error = gfs2_quota_lock_check(dip);
if (error)
goto out_gunlock;
- error = gfs2_inplace_reserve(dip, &ap);
+ error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
if (error)
goto out_gunlock_q;
@@ -1168,19 +1163,14 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
d = __gfs2_lookup(dir, dentry, file, opened);
if (IS_ERR(d))
return PTR_ERR(d);
- if (d != NULL)
- dentry = d;
- if (dentry->d_inode) {
- if (!(*opened & FILE_OPENED)) {
- if (d == NULL)
- dget(dentry);
- return finish_no_open(file, dentry);
- }
- dput(d);
+ if (d == NULL)
+ d = dentry;
+ if (d->d_inode) {
+ if (!(*opened & FILE_OPENED))
+ return finish_no_open(file, d);
return 0;
}
- BUG_ON(d != NULL);
if (!(flags & O_CREAT))
return -ENOENT;
@@ -1395,12 +1385,11 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
if (alloc_required) {
- struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
error = gfs2_quota_lock_check(ndip);
if (error)
goto out_gunlock;
- error = gfs2_inplace_reserve(ndip, &ap);
+ error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres, 0);
if (error)
goto out_gunlock_q;
@@ -1517,6 +1506,13 @@ out:
return NULL;
}
+static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+{
+ char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ kfree(s);
+}
+
/**
* gfs2_permission -
* @inode: The inode
@@ -1868,7 +1864,7 @@ const struct inode_operations gfs2_dir_iops = {
const struct inode_operations gfs2_symlink_iops = {
.readlink = generic_readlink,
.follow_link = gfs2_follow_link,
- .put_link = kfree_put_link,
+ .put_link = gfs2_put_link,
.permission = gfs2_permission,
.setattr = gfs2_setattr,
.getattr = gfs2_getattr,
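
The gfs2_create_inode() and gfs2_atomic_open() rework above hinges on the three-way result convention of d_splice_alias(): an ERR_PTR on failure, a different dentry to be used instead of the one passed in, or NULL meaning the caller's own dentry was used. A toy model of handling that convention, with hand-rolled ERR_PTR helpers and hypothetical names:

#include <errno.h>
#include <stdio.h>

struct dentry { const char *name; };

/* Simplified versions of the kernel's error-pointer helpers. */
#define ERR_PTR(err)  ((struct dentry *)(long)(err))
#define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-4095)
#define PTR_ERR(p)    ((long)(p))

/* Stand-in for d_splice_alias(): error, replacement, or NULL. */
static struct dentry *splice(struct dentry *d, int outcome)
{
        static struct dentry alias = { "alias" };

        switch (outcome) {
        case 0:  return NULL;           /* keep the caller's dentry */
        case 1:  return &alias;         /* use this dentry instead */
        default: return ERR_PTR(-ENOMEM);
        }
}

int main(void)
{
        struct dentry dentry = { "mine" };

        for (int outcome = 0; outcome < 3; outcome++) {
                struct dentry *d = splice(&dentry, outcome);

                if (IS_ERR(d)) {        /* propagate the error */
                        printf("error %ld\n", PTR_ERR(d));
                        continue;
                }
                if (d == NULL)          /* the pattern added above */
                        d = &dentry;
                printf("using %s\n", d->name);
        }
        return 0;
}
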
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 2a6ba06..c8423d6 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -466,19 +466,19 @@ static void gdlm_cancel(struct gfs2_glock *gl)
static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
char *lvb_bits)
{
- __le32 gen;
+ uint32_t gen;
memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
- memcpy(&gen, lvb_bits, sizeof(__le32));
+ memcpy(&gen, lvb_bits, sizeof(uint32_t));
*lvb_gen = le32_to_cpu(gen);
}
static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
char *lvb_bits)
{
- __le32 gen;
+ uint32_t gen;
memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
gen = cpu_to_le32(lvb_gen);
- memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
+ memcpy(ls->ls_control_lvb, &gen, sizeof(uint32_t));
}
static int all_jid_bits_clear(char *lvb)
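
The control_lvb_read()/control_lvb_write() hunks change only the declared type of the generation word; the stored format remains little-endian via cpu_to_le32()/le32_to_cpu(). A standalone sketch of that fixed-endianness round trip, with hand-rolled helpers standing in for the kernel macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LVB_SIZE 32                     /* stands in for GDLM_LVB_SIZE */

/* Produce a value whose in-memory bytes are little-endian,
 * whatever the host byte order (models cpu_to_le32()). */
static uint32_t to_le32(uint32_t v)
{
        uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
                         (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
        uint32_t out;

        memcpy(&out, b, 4);
        return out;
}

/* Inverse of the above (models le32_to_cpu()). */
static uint32_t from_le32(uint32_t v)
{
        uint8_t b[4];

        memcpy(b, &v, 4);
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
        char lvb[LVB_SIZE] = { 0 };
        uint32_t gen;

        gen = to_le32(42);              /* write side: store LE */
        memcpy(lvb, &gen, sizeof(gen));

        memcpy(&gen, lvb, sizeof(gen)); /* read side: load LE */
        printf("gen = %u\n", (unsigned)from_le32(gen));
        return 0;
}
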
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 0650db2..351586e 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -31,6 +31,12 @@
struct workqueue_struct *gfs2_control_wq;
+static struct shrinker qd_shrinker = {
+ .count_objects = gfs2_qd_shrink_count,
+ .scan_objects = gfs2_qd_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
static void gfs2_init_inode_once(void *foo)
{
struct gfs2_inode *ip = foo;
@@ -81,10 +87,6 @@ static int __init init_gfs2_fs(void)
if (error)
return error;
- error = list_lru_init(&gfs2_qd_lru);
- if (error)
- goto fail_lru;
-
error = gfs2_glock_init();
if (error)
goto fail;
@@ -137,7 +139,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_rsrv_cachep)
goto fail;
- register_shrinker(&gfs2_qd_shrinker);
+ register_shrinker(&qd_shrinker);
error = register_filesystem(&gfs2_fs_type);
if (error)
@@ -177,9 +179,7 @@ fail_wq:
fail_unregister:
unregister_filesystem(&gfs2_fs_type);
fail:
- list_lru_destroy(&gfs2_qd_lru);
-fail_lru:
- unregister_shrinker(&gfs2_qd_shrinker);
+ unregister_shrinker(&qd_shrinker);
gfs2_glock_exit();
if (gfs2_rsrv_cachep)
@@ -214,14 +214,13 @@ fail_lru:
static void __exit exit_gfs2_fs(void)
{
- unregister_shrinker(&gfs2_qd_shrinker);
+ unregister_shrinker(&qd_shrinker);
gfs2_glock_exit();
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
destroy_workqueue(gfs_recovery_wq);
destroy_workqueue(gfs2_control_wq);
- list_lru_destroy(&gfs2_qd_lru);
rcu_barrier();
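
The qd_shrinker defined above uses the split shrinker interface: count_objects() reports how many quota entries look reclaimable, and scan_objects() actually frees up to nr_to_scan of them, returning the number freed. A toy userspace model of that count/scan shape over a singly linked LRU (hypothetical names, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *lru_head;
static unsigned long lru_count;

/* Models .count_objects: how much could be reclaimed right now. */
static unsigned long shrink_count(void)
{
        return lru_count;
}

/* Models .scan_objects: reclaim up to nr_to_scan entries and report
 * how many were actually freed. */
static unsigned long shrink_scan(unsigned long nr_to_scan)
{
        unsigned long freed = 0;

        while (nr_to_scan-- && lru_head) {
                struct node *n = lru_head;

                lru_head = n->next;
                lru_count--;
                free(n);
                freed++;
        }
        return freed;
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                struct node *n = malloc(sizeof(*n));

                n->next = lru_head;
                lru_head = n;
                lru_count++;
        }
        printf("count = %lu\n", shrink_count());
        printf("freed = %lu, left = %lu\n", shrink_scan(3), lru_count);
        return 0;
}
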
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 82303b4..19ff5e8 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -51,6 +51,7 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
{
spin_lock_init(&gt->gt_spin);
+ gt->gt_quota_simul_sync = 64;
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1;
@@ -93,7 +94,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
INIT_LIST_HEAD(&sdp->sd_quota_list);
mutex_init(&sdp->sd_quota_mutex);
- mutex_init(&sdp->sd_quota_sync_mutex);
init_waitqueue_head(&sdp->sd_quota_wait);
INIT_LIST_HEAD(&sdp->sd_trunc_list);
spin_lock_init(&sdp->sd_trunc_lock);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 98236d0..db44135 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -50,8 +50,6 @@
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
-#include <linux/lockref.h>
-#include <linux/list_lru.h>
#include "gfs2.h"
#include "incore.h"
@@ -73,25 +71,29 @@ struct gfs2_quota_change_host {
struct kqid qc_id;
};
-/* Lock order: qd_lock -> qd->lockref.lock -> lru lock */
-static DEFINE_SPINLOCK(qd_lock);
-struct list_lru gfs2_qd_lru;
+static LIST_HEAD(qd_lru_list);
+static atomic_t qd_lru_count = ATOMIC_INIT(0);
+static DEFINE_SPINLOCK(qd_lru_lock);
-static void gfs2_qd_dispose(struct list_head *list)
+unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct gfs2_quota_data *qd;
struct gfs2_sbd *sdp;
+ int nr_to_scan = sc->nr_to_scan;
+ long freed = 0;
- while (!list_empty(list)) {
- qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
- sdp = qd->qd_gl->gl_sbd;
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
- list_del(&qd->qd_lru);
+ spin_lock(&qd_lru_lock);
+ while (nr_to_scan && !list_empty(&qd_lru_list)) {
+ qd = list_entry(qd_lru_list.next,
+ struct gfs2_quota_data, qd_reclaim);
+ sdp = qd->qd_gl->gl_sbd;
/* Free from the filesystem-specific list */
- spin_lock(&qd_lock);
list_del(&qd->qd_list);
- spin_unlock(&qd_lock);
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_count);
@@ -101,59 +103,24 @@ static void gfs2_qd_dispose(struct list_head *list)
atomic_dec(&sdp->sd_quota_count);
/* Delete it from the common reclaim list */
+ list_del_init(&qd->qd_reclaim);
+ atomic_dec(&qd_lru_count);
+ spin_unlock(&qd_lru_lock);
kmem_cache_free(gfs2_quotad_cachep, qd);
+ spin_lock(&qd_lru_lock);
+ nr_to_scan--;
+ freed++;
}
-}
-
-
-static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
-{
- struct list_head *dispose = arg;
- struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
-
- if (!spin_trylock(&qd->qd_lockref.lock))
- return LRU_SKIP;
-
- if (qd->qd_lockref.count == 0) {
- lockref_mark_dead(&qd->qd_lockref);
- list_move(&qd->qd_lru, dispose);
- }
-
- spin_unlock(&qd->qd_lockref.lock);
- return LRU_REMOVED;
-}
-
-static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- LIST_HEAD(dispose);
- unsigned long freed;
-
- if (!(sc->gfp_mask & __GFP_FS))
- return SHRINK_STOP;
-
- freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
- &dispose, &sc->nr_to_scan);
-
- gfs2_qd_dispose(&dispose);
-
+ spin_unlock(&qd_lru_lock);
return freed;
}
-static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
- struct shrink_control *sc)
+unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc)
{
- return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
+ return vfs_pressure_ratio(atomic_read(&qd_lru_count));
}
-struct shrinker gfs2_qd_shrinker = {
- .count_objects = gfs2_qd_shrink_count,
- .scan_objects = gfs2_qd_shrink_scan,
- .seeks = DEFAULT_SEEKS,
- .flags = SHRINKER_NUMA_AWARE,
-};
-
-
static u64 qd2index(struct gfs2_quota_data *qd)
{
struct kqid qid = qd->qd_id;
@@ -181,11 +148,10 @@ static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
if (!qd)
return -ENOMEM;
- qd->qd_lockref.count = 1;
- spin_lock_init(&qd->qd_lockref.lock);
+ atomic_set(&qd->qd_count, 1);
qd->qd_id = qid;
qd->qd_slot = -1;
- INIT_LIST_HEAD(&qd->qd_lru);
+ INIT_LIST_HEAD(&qd->qd_reclaim);
error = gfs2_glock_get(sdp, qd2index(qd),
&gfs2_quota_glops, CREATE, &qd->qd_gl);
@@ -211,11 +177,16 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
for (;;) {
found = 0;
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- if (qid_eq(qd->qd_id, qid) &&
- lockref_get_not_dead(&qd->qd_lockref)) {
- list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
+ if (qid_eq(qd->qd_id, qid)) {
+ if (!atomic_read(&qd->qd_count) &&
+ !list_empty(&qd->qd_reclaim)) {
+ /* Remove it from reclaim list */
+ list_del_init(&qd->qd_reclaim);
+ atomic_dec(&qd_lru_count);
+ }
+ atomic_inc(&qd->qd_count);
found = 1;
break;
}
@@ -231,7 +202,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
new_qd = NULL;
}
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
if (qd) {
if (new_qd) {
@@ -251,19 +222,18 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
static void qd_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
- lockref_get(&qd->qd_lockref);
+ gfs2_assert(sdp, atomic_read(&qd->qd_count));
+ atomic_inc(&qd->qd_count);
}
static void qd_put(struct gfs2_quota_data *qd)
{
- if (lockref_put_or_lock(&qd->qd_lockref))
- return;
-
- qd->qd_lockref.count = 0;
- list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
- spin_unlock(&qd->qd_lockref.lock);
-
+ if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
+ /* Add to the reclaim list */
+ list_add_tail(&qd->qd_reclaim, &qd_lru_list);
+ atomic_inc(&qd_lru_count);
+ spin_unlock(&qd_lru_lock);
+ }
}
static int slot_get(struct gfs2_quota_data *qd)
@@ -272,10 +242,10 @@ static int slot_get(struct gfs2_quota_data *qd)
unsigned int c, o = 0, b;
unsigned char byte = 0;
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
if (qd->qd_slot_count++) {
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
return 0;
}
@@ -299,13 +269,13 @@ found:
sdp->sd_quota_bitmap[c][o] |= 1 << b;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
return 0;
fail:
qd->qd_slot_count--;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
return -ENOSPC;
}
@@ -313,43 +283,23 @@ static void slot_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
gfs2_assert(sdp, qd->qd_slot_count);
qd->qd_slot_count++;
- spin_unlock(&qd_lock);
-}
-
-static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
- unsigned int bit, int new_value)
-{
- unsigned int c, o, b = bit;
- int old_value;
-
- c = b / (8 * PAGE_SIZE);
- b %= 8 * PAGE_SIZE;
- o = b / 8;
- b %= 8;
-
- old_value = (bitmap[c][o] & (1 << b));
- gfs2_assert_withdraw(sdp, !old_value != !new_value);
-
- if (new_value)
- bitmap[c][o] |= 1 << b;
- else
- bitmap[c][o] &= ~(1 << b);
+ spin_unlock(&qd_lru_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
gfs2_assert(sdp, qd->qd_slot_count);
if (!--qd->qd_slot_count) {
gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
qd->qd_slot = -1;
}
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
@@ -413,25 +363,6 @@ static void bh_put(struct gfs2_quota_data *qd)
mutex_unlock(&sdp->sd_quota_mutex);
}
-static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
- u64 *sync_gen)
-{
- if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
- !test_bit(QDF_CHANGE, &qd->qd_flags) ||
- (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
- return 0;
-
- if (!lockref_get_not_dead(&qd->qd_lockref))
- return 0;
-
- list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
- set_bit(QDF_LOCKED, &qd->qd_flags);
- qd->qd_change_sync = qd->qd_change;
- gfs2_assert_warn(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
- return 1;
-}
-
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd = NULL;
@@ -443,18 +374,31 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
if (sdp->sd_vfs->s_flags & MS_RDONLY)
return 0;
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
- if (found)
- break;
+ if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+ !test_bit(QDF_CHANGE, &qd->qd_flags) ||
+ qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
+ continue;
+
+ list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+
+ set_bit(QDF_LOCKED, &qd->qd_flags);
+ gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+ atomic_inc(&qd->qd_count);
+ qd->qd_change_sync = qd->qd_change;
+ gfs2_assert_warn(sdp, qd->qd_slot_count);
+ qd->qd_slot_count++;
+ found = 1;
+
+ break;
}
if (!found)
qd = NULL;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
if (qd) {
gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -472,6 +416,43 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
return 0;
}
+static int qd_trylock(struct gfs2_quota_data *qd)
+{
+ struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+
+ if (sdp->sd_vfs->s_flags & MS_RDONLY)
+ return 0;
+
+ spin_lock(&qd_lru_lock);
+
+ if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+ !test_bit(QDF_CHANGE, &qd->qd_flags)) {
+ spin_unlock(&qd_lru_lock);
+ return 0;
+ }
+
+ list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+
+ set_bit(QDF_LOCKED, &qd->qd_flags);
+ gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+ atomic_inc(&qd->qd_count);
+ qd->qd_change_sync = qd->qd_change;
+ gfs2_assert_warn(sdp, qd->qd_slot_count);
+ qd->qd_slot_count++;
+
+ spin_unlock(&qd_lru_lock);
+
+ gfs2_assert_warn(sdp, qd->qd_change_sync);
+ if (bh_get(qd)) {
+ clear_bit(QDF_LOCKED, &qd->qd_flags);
+ slot_put(qd);
+ qd_put(qd);
+ return 0;
+ }
+
+ return 1;
+}
+
static void qd_unlock(struct gfs2_quota_data *qd)
{
gfs2_assert_warn(qd->qd_gl->gl_sbd,
@@ -621,9 +602,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
x = be64_to_cpu(qc->qc_change) + change;
qc->qc_change = cpu_to_be64(x);
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
qd->qd_change = x;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
if (!x) {
gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
@@ -667,7 +648,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
struct buffer_head *bh;
struct page *page;
void *kaddr, *ptr;
- struct gfs2_quota q;
+ struct gfs2_quota q, *qp;
int err, nbytes;
u64 size;
@@ -683,25 +664,28 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
return err;
err = -EIO;
- be64_add_cpu(&q.qu_value, change);
- qd->qd_qb.qb_value = q.qu_value;
+ qp = &q;
+ qp->qu_value = be64_to_cpu(qp->qu_value);
+ qp->qu_value += change;
+ qp->qu_value = cpu_to_be64(qp->qu_value);
+ qd->qd_qb.qb_value = qp->qu_value;
if (fdq) {
if (fdq->d_fieldmask & FS_DQ_BSOFT) {
- q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
- qd->qd_qb.qb_warn = q.qu_warn;
+ qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+ qd->qd_qb.qb_warn = qp->qu_warn;
}
if (fdq->d_fieldmask & FS_DQ_BHARD) {
- q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
- qd->qd_qb.qb_limit = q.qu_limit;
+ qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+ qd->qd_qb.qb_limit = qp->qu_limit;
}
if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
- q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
- qd->qd_qb.qb_value = q.qu_value;
+ qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+ qd->qd_qb.qb_value = qp->qu_value;
}
}
/* Write the quota into the quota file on disk */
- ptr = &q;
+ ptr = qp;
nbytes = sizeof(struct gfs2_quota);
get_a_page:
page = find_or_create_page(mapping, index, GFP_NOFS);
@@ -779,7 +763,6 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
- struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks, ind_blocks;
struct gfs2_holder *ghs, i_gh;
unsigned int qx, x;
@@ -832,8 +815,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
reserved = 1 + (nalloc * (data_blocks + ind_blocks));
- ap.target = reserved;
- error = gfs2_inplace_reserve(ip, &ap);
+ error = gfs2_inplace_reserve(ip, reserved, 0);
if (error)
goto out_alloc;
@@ -992,9 +974,9 @@ static int need_sync(struct gfs2_quota_data *qd)
if (!qd->qd_qb.qb_limit)
return 0;
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
value = qd->qd_change;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
spin_lock(&gt->gt_spin);
num = gt->gt_quota_scale_num;
@@ -1019,11 +1001,9 @@ static int need_sync(struct gfs2_quota_data *qd)
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qda[4];
unsigned int count = 0;
unsigned int x;
- int found;
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
goto out;
@@ -1036,25 +1016,9 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
sync = need_sync(qd);
gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
- if (!sync)
- continue;
-
- spin_lock(&qd_lock);
- found = qd_check_sync(sdp, qd, NULL);
- spin_unlock(&qd_lock);
-
- if (!found)
- continue;
- gfs2_assert_warn(sdp, qd->qd_change_sync);
- if (bh_get(qd)) {
- clear_bit(QDF_LOCKED, &qd->qd_flags);
- slot_put(qd);
- qd_put(qd);
- continue;
- }
-
- qda[count++] = qd;
+ if (sync && qd_trylock(qd))
+ qda[count++] = qd;
}
if (count) {
@@ -1103,9 +1067,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
continue;
value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
value += qd->qd_change;
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
print_message(qd, "exceeded");
@@ -1154,18 +1118,17 @@ int gfs2_quota_sync(struct super_block *sb, int type)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
- unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
+ unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
unsigned int num_qd;
unsigned int x;
int error = 0;
+ sdp->sd_quota_sync_gen++;
+
qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
if (!qda)
return -ENOMEM;
- mutex_lock(&sdp->sd_quota_sync_mutex);
- sdp->sd_quota_sync_gen++;
-
do {
num_qd = 0;
@@ -1190,7 +1153,6 @@ int gfs2_quota_sync(struct super_block *sb, int type)
}
} while (!error && num_qd == max_qd);
- mutex_unlock(&sdp->sd_quota_sync_mutex);
kfree(qda);
return error;
@@ -1296,11 +1258,11 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
qd->qd_slot = slot;
qd->qd_slot_count = 1;
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
list_add(&qd->qd_list, &sdp->sd_quota_list);
atomic_inc(&sdp->sd_quota_count);
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
found++;
}
@@ -1326,34 +1288,30 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
struct gfs2_quota_data *qd;
unsigned int x;
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
while (!list_empty(head)) {
qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
- /*
- * To be removed in due course... we should be able to
- * ensure that all refs to the qd have done by this point
- * so that this rather odd test is not required
- */
- spin_lock(&qd->qd_lockref.lock);
- if (qd->qd_lockref.count > 1 ||
- (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
- spin_unlock(&qd->qd_lockref.lock);
+ if (atomic_read(&qd->qd_count) > 1 ||
+ (atomic_read(&qd->qd_count) &&
+ !test_bit(QDF_CHANGE, &qd->qd_flags))) {
list_move(&qd->qd_list, head);
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
schedule();
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
continue;
}
- spin_unlock(&qd->qd_lockref.lock);
list_del(&qd->qd_list);
/* Also remove if this qd exists in the reclaim list */
- list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
+ if (!list_empty(&qd->qd_reclaim)) {
+ list_del_init(&qd->qd_reclaim);
+ atomic_dec(&qd_lru_count);
+ }
atomic_dec(&sdp->sd_quota_count);
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
- if (!qd->qd_lockref.count) {
+ if (!atomic_read(&qd->qd_count)) {
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_count);
} else
@@ -1363,9 +1321,9 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
gfs2_glock_put(qd->qd_gl);
kmem_cache_free(gfs2_quotad_cachep, qd);
- spin_lock(&qd_lock);
+ spin_lock(&qd_lru_lock);
}
- spin_unlock(&qd_lock);
+ spin_unlock(&qd_lru_lock);
gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
@@ -1504,7 +1462,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
}
fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
- fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
+ fqs->qs_incoredqs = atomic_read(&qd_lru_count);
return 0;
}
@@ -1615,12 +1573,10 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
if (gfs2_is_stuffed(ip))
alloc_required = 1;
if (alloc_required) {
- struct gfs2_alloc_parms ap = { .aflags = 0, };
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = 1 + data_blocks + ind_blocks;
- ap.target = blocks;
- error = gfs2_inplace_reserve(ip, &ap);
+ error = gfs2_inplace_reserve(ip, blocks, 0);
if (error)
goto out_i;
blocks += gfs2_rg_blocks(ip, blocks);
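
The qd_get()/qd_put() changes above park a quota entry on a reclaim list when its reference count reaches zero and let a later lookup revive it instead of reallocating. A simplified single-threaded model of that life cycle; unlike the real qd_put(), it does not make the final decrement atomic with taking the lock (see the atomic_dec_and_lock sketch after the glock.c diff):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static int lru_count;                   /* models qd_lru_count */

struct qd {
        atomic_int count;
        int on_reclaim;                 /* models reclaim-list membership */
};

/* Last put parks the entry on the reclaim list rather than freeing. */
static void qd_put(struct qd *qd)
{
        if (atomic_fetch_sub(&qd->count, 1) == 1) {
                pthread_mutex_lock(&lru_lock);
                qd->on_reclaim = 1;
                lru_count++;
                pthread_mutex_unlock(&lru_lock);
        }
}

/* A lookup revives a parked entry by pulling it off the list. */
static void qd_get(struct qd *qd)
{
        pthread_mutex_lock(&lru_lock);
        if (atomic_load(&qd->count) == 0 && qd->on_reclaim) {
                qd->on_reclaim = 0;
                lru_count--;
        }
        atomic_fetch_add(&qd->count, 1);
        pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
        struct qd qd = { .count = 1 };

        qd_put(&qd);                    /* count 0: parked */
        printf("parked=%d lru=%d\n", qd.on_reclaim, lru_count);
        qd_get(&qd);                    /* revived, count 1 */
        printf("parked=%d lru=%d\n", qd.on_reclaim, lru_count);
        return 0;
}
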
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 96e4f34..0f64d9d 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -10,10 +10,9 @@
#ifndef __QUOTA_DOT_H__
#define __QUOTA_DOT_H__
-#include <linux/list_lru.h>
-
struct gfs2_inode;
struct gfs2_sbd;
+struct shrink_control;
#define NO_UID_QUOTA_CHANGE INVALID_UID
#define NO_GID_QUOTA_CHANGE INVALID_GID
@@ -54,8 +53,10 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
return ret;
}
+extern unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc);
+extern unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc);
extern const struct quotactl_ops gfs2_quotactl_ops;
-extern struct shrinker gfs2_qd_shrinker;
-extern struct list_lru gfs2_qd_lru;
#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index c8d6161..6931743 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -81,12 +81,11 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
unsigned char new_state)
{
unsigned char *byte1, *byte2, *end, cur_state;
- struct gfs2_bitmap *bi = rbm_bi(rbm);
- unsigned int buflen = bi->bi_len;
+ unsigned int buflen = rbm->bi->bi_len;
const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
- byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
- end = bi->bi_bh->b_data + bi->bi_offset + buflen;
+ byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
+ end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
BUG_ON(byte1 >= end);
@@ -96,17 +95,18 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
"new_state=%d\n", rbm->offset, cur_state, new_state);
printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
- (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
+ (unsigned long long)rbm->rgd->rd_addr,
+ rbm->bi->bi_start);
printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
- bi->bi_offset, bi->bi_len);
+ rbm->bi->bi_offset, rbm->bi->bi_len);
dump_stack();
gfs2_consist_rgrpd(rbm->rgd);
return;
}
*byte1 ^= (cur_state ^ new_state) << bit;
- if (do_clone && bi->bi_clone) {
- byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
+ if (do_clone && rbm->bi->bi_clone) {
+ byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
*byte2 ^= (cur_state ^ new_state) << bit;
}
@@ -121,8 +121,7 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
- struct gfs2_bitmap *bi = rbm_bi(rbm);
- const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
+ const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
const u8 *byte;
unsigned int bit;
@@ -253,53 +252,29 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
u64 rblock = block - rbm->rgd->rd_data0;
+ u32 x;
if (WARN_ON_ONCE(rblock > UINT_MAX))
return -EINVAL;
if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
return -E2BIG;
- rbm->bii = 0;
+ rbm->bi = rbm->rgd->rd_bits;
rbm->offset = (u32)(rblock);
/* Check if the block is within the first block */
- if (rbm->offset < rbm_bi(rbm)->bi_blocks)
+ if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
return 0;
/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
rbm->offset += (sizeof(struct gfs2_rgrp) -
sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
- rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
- rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+ x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+ rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+ rbm->bi += x;
return 0;
}
/**
- * gfs2_rbm_incr - increment an rbm structure
- * @rbm: The rbm with rgd already set correctly
- *
- * This function takes an existing rbm structure and increments it to the next
- * viable block offset.
- *
- * Returns: If incrementing the offset would cause the rbm to go past the
- * end of the rgrp, true is returned, otherwise false.
- *
- */
-
-static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
-{
- if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
- rbm->offset++;
- return false;
- }
- if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
- return true;
-
- rbm->offset = 0;
- rbm->bii++;
- return false;
-}
-
-/**
* gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
* @rbm: Position to search (value/result)
* @n_unaligned: Number of unaligned blocks to check
@@ -310,6 +285,7 @@ static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
+ u64 block;
u32 n;
u8 res;
@@ -320,7 +296,8 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
(*len)--;
if (*len == 0)
return true;
- if (gfs2_rbm_incr(rbm))
+ block = gfs2_rbm_to_block(rbm);
+ if (gfs2_rbm_from_block(rbm, block + 1))
return true;
}
@@ -351,7 +328,6 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
u32 chunk_size;
u8 *ptr, *start, *end;
u64 block;
- struct gfs2_bitmap *bi;
if (n_unaligned &&
gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
@@ -360,12 +336,11 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
n_unaligned = len & 3;
/* Start is now byte aligned */
while (len > 3) {
- bi = rbm_bi(&rbm);
- start = bi->bi_bh->b_data;
- if (bi->bi_clone)
- start = bi->bi_clone;
- end = start + bi->bi_bh->b_size;
- start += bi->bi_offset;
+ start = rbm.bi->bi_bh->b_data;
+ if (rbm.bi->bi_clone)
+ start = rbm.bi->bi_clone;
+ end = start + rbm.bi->bi_bh->b_size;
+ start += rbm.bi->bi_offset;
BUG_ON(rbm.offset & 3);
start += (rbm.offset / GFS2_NBBY);
bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -630,13 +605,11 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
RB_CLEAR_NODE(&rs->rs_node);
if (rs->rs_free) {
- struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
-
/* return reserved blocks to the rgrp */
BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
rs->rs_free = 0;
- clear_bit(GBF_FULL, &bi->bi_flags);
+ clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
smp_mb__after_clear_bit();
}
}
@@ -661,13 +634,14 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
/**
* gfs2_rs_delete - delete a multi-block reservation
* @ip: The inode for this reservation
- * @wcount: The inode's write count, or NULL
*
*/
-void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
+void gfs2_rs_delete(struct gfs2_inode *ip)
{
+ struct inode *inode = &ip->i_inode;
+
down_write(&ip->i_rw_mutex);
- if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
+ if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
gfs2_rs_deltree(ip->i_res);
BUG_ON(ip->i_res->rs_free);
kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
@@ -769,21 +743,18 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
bi->bi_len = bytes;
- bi->bi_blocks = bytes * GFS2_NBBY;
/* header block */
} else if (x == 0) {
bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
bi->bi_len = bytes;
- bi->bi_blocks = bytes * GFS2_NBBY;
/* last block */
} else if (x + 1 == length) {
bytes = bytes_left;
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
bi->bi_len = bytes;
- bi->bi_blocks = bytes * GFS2_NBBY;
/* other blocks */
} else {
bytes = sdp->sd_sb.sb_bsize -
@@ -791,7 +762,6 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
bi->bi_len = bytes;
- bi->bi_blocks = bytes * GFS2_NBBY;
}
bytes_left -= bytes;
@@ -1127,7 +1097,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
rgd->rd_free_clone = rgd->rd_free;
}
- if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
+ if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
rgd->rd_bits[0].bi_bh->b_data);
@@ -1161,7 +1131,7 @@ int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
if (rgd->rd_flags & GFS2_RDF_UPTODATE)
return 0;
- if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
+ if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
return gfs2_rgrp_bh_get(rgd);
rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
@@ -1422,12 +1392,12 @@ static void rs_insert(struct gfs2_inode *ip)
* rg_mblk_search - find a group of multiple free blocks to form a reservation
* @rgd: the resource group descriptor
* @ip: pointer to the inode for which we're reserving blocks
- * @ap: the allocation parameters
+ * @requested: number of blocks required for this allocation
*
*/
static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
- const struct gfs2_alloc_parms *ap)
+ unsigned requested)
{
struct gfs2_rbm rbm = { .rgd = rgd, };
u64 goal;
@@ -1440,7 +1410,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
if (S_ISDIR(inode->i_mode))
extlen = 1;
else {
- extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
+ extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
}
if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
@@ -1584,14 +1554,14 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
const struct gfs2_inode *ip, bool nowrap)
{
struct buffer_head *bh;
- int initial_bii;
+ struct gfs2_bitmap *initial_bi;
u32 initial_offset;
u32 offset;
u8 *buffer;
+ int index;
int n = 0;
int iters = rbm->rgd->rd_length;
int ret;
- struct gfs2_bitmap *bi;
/* If we are not starting at the beginning of a bitmap, then we
* need to add one to the bitmap count to ensure that we search
@@ -1601,53 +1571,52 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
iters++;
while(1) {
- bi = rbm_bi(rbm);
- if (test_bit(GBF_FULL, &bi->bi_flags) &&
+ if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
(state == GFS2_BLKST_FREE))
goto next_bitmap;
- bh = bi->bi_bh;
- buffer = bh->b_data + bi->bi_offset;
+ bh = rbm->bi->bi_bh;
+ buffer = bh->b_data + rbm->bi->bi_offset;
WARN_ON(!buffer_uptodate(bh));
- if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
- buffer = bi->bi_clone + bi->bi_offset;
+ if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
+ buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
initial_offset = rbm->offset;
- offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
+ offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
if (offset == BFITNOENT)
goto bitmap_full;
rbm->offset = offset;
if (ip == NULL)
return 0;
- initial_bii = rbm->bii;
+ initial_bi = rbm->bi;
ret = gfs2_reservation_check_and_update(rbm, ip, minext);
if (ret == 0)
return 0;
if (ret > 0) {
- n += (rbm->bii - initial_bii);
+ n += (rbm->bi - initial_bi);
goto next_iter;
}
if (ret == -E2BIG) {
- rbm->bii = 0;
+ index = 0;
rbm->offset = 0;
- n += (rbm->bii - initial_bii);
+ n += (rbm->bi - initial_bi);
goto res_covered_end_of_rgrp;
}
return ret;
bitmap_full: /* Mark bitmap as full and fall through */
- if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
- struct gfs2_bitmap *bi = rbm_bi(rbm);
- set_bit(GBF_FULL, &bi->bi_flags);
- }
+ if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
+ set_bit(GBF_FULL, &rbm->bi->bi_flags);
next_bitmap: /* Find next bitmap in the rgrp */
rbm->offset = 0;
- rbm->bii++;
- if (rbm->bii == rbm->rgd->rd_length)
- rbm->bii = 0;
+ index = rbm->bi - rbm->rgd->rd_bits;
+ index++;
+ if (index == rbm->rgd->rd_length)
+ index = 0;
res_covered_end_of_rgrp:
- if ((rbm->bii == 0) && nowrap)
+ rbm->bi = &rbm->rgd->rd_bits[index];
+ if ((index == 0) && nowrap)
break;
n++;
next_iter:
@@ -1676,7 +1645,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
struct gfs2_inode *ip;
int error;
int found = 0;
- struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
+ struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
while (1) {
down_write(&sdp->sd_log_flush_lock);
@@ -1831,12 +1800,12 @@ static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *b
/**
* gfs2_inplace_reserve - Reserve space in the filesystem
* @ip: the inode to reserve space for
- * @ap: the allocation parameters
+ * @requested: the number of blocks to be reserved
*
* Returns: errno
*/
-int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *begin = NULL;
@@ -1848,16 +1817,17 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
- if (gfs2_assert_warn(sdp, ap->target))
+ if (gfs2_assert_warn(sdp, requested))
return -EINVAL;
if (gfs2_rs_active(rs)) {
begin = rs->rs_rbm.rgd;
+ flags = 0; /* Yoda: Do or do not. There is no try */
} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
rs->rs_rbm.rgd = begin = ip->i_rgd;
} else {
rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
}
- if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
+ if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
skip = gfs2_orlov_skip(ip);
if (rs->rs_rbm.rgd == NULL)
return -EBADSLT;
@@ -1899,14 +1869,14 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
/* Get a reservation if we don't already have one */
if (!gfs2_rs_active(rs))
- rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
+ rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
/* Skip rgrps when we can't get a reservation on first pass */
if (!gfs2_rs_active(rs) && (loops < 1))
goto check_rgrp;
/* If rgrp has enough free space, use it */
- if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
+ if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
ip->i_rgd = rs->rs_rbm.rgd;
return 0;
}
@@ -2003,14 +1973,14 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
*n = 1;
block = gfs2_rbm_to_block(rbm);
- gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
+ gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
block++;
while (*n < elen) {
ret = gfs2_rbm_from_block(&pos, block);
if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
break;
- gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
+ gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
gfs2_setbit(&pos, true, GFS2_BLKST_USED);
(*n)++;
block++;
@@ -2031,7 +2001,6 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
u32 blen, unsigned char new_state)
{
struct gfs2_rbm rbm;
- struct gfs2_bitmap *bi;
rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
if (!rbm.rgd) {
@@ -2042,15 +2011,15 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
while (blen--) {
gfs2_rbm_from_block(&rbm, bstart);
- bi = rbm_bi(&rbm);
bstart++;
- if (!bi->bi_clone) {
- bi->bi_clone = kmalloc(bi->bi_bh->b_size,
- GFP_NOFS | __GFP_NOFAIL);
- memcpy(bi->bi_clone + bi->bi_offset,
- bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
+ if (!rbm.bi->bi_clone) {
+ rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
+ GFP_NOFS | __GFP_NOFAIL);
+ memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
+ rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
+ rbm.bi->bi_len);
}
- gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
+ gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
gfs2_setbit(&rbm, false, new_state);
}
@@ -2134,35 +2103,6 @@ out:
}
/**
- * gfs2_set_alloc_start - Set starting point for block allocation
- * @rbm: The rbm which will be set to the required location
- * @ip: The gfs2 inode
- * @dinode: Flag to say if allocation includes a new inode
- *
- * This sets the starting point from the reservation if one is active
- * otherwise it falls back to guessing a start point based on the
- * inode's goal block or the last allocation point in the rgrp.
- */
-
-static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
- const struct gfs2_inode *ip, bool dinode)
-{
- u64 goal;
-
- if (gfs2_rs_active(ip->i_res)) {
- *rbm = ip->i_res->rs_rbm;
- return;
- }
-
- if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
- goal = ip->i_goal;
- else
- goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
-
- gfs2_rbm_from_block(rbm, goal);
-}
-
-/**
* gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
* @ip: the inode to allocate the block for
* @bn: Used to return the starting block number
@@ -2180,14 +2120,22 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
struct buffer_head *dibh;
struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
unsigned int ndata;
+ u64 goal;
u64 block; /* block, within the file system scope */
int error;
- gfs2_set_alloc_start(&rbm, ip, dinode);
+ if (gfs2_rs_active(ip->i_res))
+ goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
+ else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
+ goal = ip->i_goal;
+ else
+ goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
+
+ gfs2_rbm_from_block(&rbm, goal);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
if (error == -ENOSPC) {
- gfs2_set_alloc_start(&rbm, ip, dinode);
+ gfs2_rbm_from_block(&rbm, goal);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
}
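
gfs2_setbit() above packs four 2-bit block states into each bitmap byte and rewrites one field by XORing (current ^ new) shifted into place, which flips exactly the bits that differ. A standalone demonstration of that update (state values as in GFS2: 0 is FREE, 1 is USED):

#include <stdio.h>

#define GFS2_BIT_SIZE 2                 /* state bits per block */
#define GFS2_NBBY     4                 /* so four blocks per byte */
#define GFS2_BIT_MASK 3

int main(void)
{
        unsigned char byte = 0x00;      /* four FREE blocks */
        unsigned int offset = 2;        /* third block within the byte */
        unsigned int bit = (offset % GFS2_NBBY) * GFS2_BIT_SIZE;

        unsigned char cur_state = (byte >> bit) & GFS2_BIT_MASK;
        unsigned char new_state = 1;    /* USED */

        /* The XOR trick from gfs2_setbit(). */
        byte ^= (cur_state ^ new_state) << bit;

        printf("byte = 0x%02x\n", byte);    /* 0x10 */
        return 0;
}
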
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 3a10d2f..5b3f4a8 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -40,7 +40,7 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
#define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 flags);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
@@ -48,7 +48,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
extern int gfs2_rs_alloc(struct gfs2_inode *ip);
extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
+extern void gfs2_rs_delete(struct gfs2_inode *ip);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 35da5b1..e5639de 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1526,7 +1526,7 @@ out_unlock:
out:
/* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
- gfs2_rs_delete(ip, NULL);
+ gfs2_rs_delete(ip);
gfs2_ordered_del_inode(ip);
clear_inode(inode);
gfs2_dir_hash_inval(ip);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index d09f6ed..aa5c480 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -587,6 +587,7 @@ TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
+TUNE_ATTR(quota_simul_sync, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
@@ -596,6 +597,7 @@ static struct attribute *tune_attrs[] = {
&tune_attr_max_readahead.attr,
&tune_attr_complain_secs.attr,
&tune_attr_statfs_slow.attr,
+ &tune_attr_quota_simul_sync.attr,
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f7109f6..6402fb6 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -268,3 +268,23 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
return rv;
}
+void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
+ unsigned int bit, int new_value)
+{
+ unsigned int c, o, b = bit;
+ int old_value;
+
+ c = b / (8 * PAGE_SIZE);
+ b %= 8 * PAGE_SIZE;
+ o = b / 8;
+ b %= 8;
+
+ old_value = (bitmap[c][o] & (1 << b));
+ gfs2_assert_withdraw(sdp, !old_value != !new_value);
+
+ if (new_value)
+ bitmap[c][o] |= 1 << b;
+ else
+ bitmap[c][o] &= ~(1 << b);
+}
+
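
gfs2_icbit_munge(), moved into util.c above, addresses one bit in a two-level bitmap by splitting the global bit number into a page index (c), a byte within that page (o) and a bit within that byte (b). A standalone run of the same index arithmetic (PAGE_SIZE assumed 4096 for the example):

#include <stdio.h>

#define PAGE_SIZE 4096                  /* assumed for the example */

int main(void)
{
        unsigned int bit = 40000;
        unsigned int c, o, b = bit;

        c = b / (8 * PAGE_SIZE);        /* which bitmap page */
        b %= 8 * PAGE_SIZE;
        o = b / 8;                      /* which byte in that page */
        b %= 8;                         /* which bit in that byte */

        /* 40000 = 1 * 32768 + 904 * 8 + 0 */
        printf("page %u, byte %u, bit %u\n", c, o, b);
        return 0;
}
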
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index b7ffb09..8053573 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -164,6 +164,8 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
#define gfs2_tune_get(sdp, field) \
gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
+void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
+ unsigned int bit, int new_value);
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...);
#endif /* __UTIL_DOT_H__ */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8c6a6f6..ecd37f3 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -723,7 +723,6 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
unsigned int blks,
ea_skeleton_call_t skeleton_call, void *private)
{
- struct gfs2_alloc_parms ap = { .target = blks };
struct buffer_head *dibh;
int error;
@@ -735,7 +734,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error)
return error;
- error = gfs2_inplace_reserve(ip, &ap);
+ error = gfs2_inplace_reserve(ip, blks, 0);
if (error)
goto out_gunlock_q;