author		Abhi Das <adas@redhat.com>	2015-06-02 16:02:24 (GMT)
committer	Bob Peterson <rpeterso@redhat.com>	2015-06-02 16:02:24 (GMT)
commit		39a725803beeffeb319261df1ab145ef3c1ea0a1 (patch)
tree		ed5e6b7755daa76b1cd28d3b8260f5ba3afa7007 /fs/gfs2
parent		a3e3213676d8208328ef9f79936f94e583c0ec90 (diff)
download	linux-39a725803beeffeb319261df1ab145ef3c1ea0a1.tar.xz
gfs2: fix quota updates on block boundaries
For smaller block sizes (512B, 1K, 2K), some quotas straddle block boundaries such that the usage value is on one block and the rest of the quota is on the previous block. In such cases, the value does not get updated correctly. This patch fixes that by addressing the boundary conditions correctly.

This patch also adds an (s64) cast that was missing in a call to gfs2_quota_change() in inode.c.

Signed-off-by: Abhi Das <adas@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
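To make the boundary condition concrete, here is a minimal user-space sketch (not kernel code) of the offset arithmetic involved. The 512-byte block size is one of the cases named above; the 88-byte record size and the 16-byte offset of qu_value are assumptions standing in for sizeof(struct gfs2_quota) and its on-disk field layout.

/*
 * Illustrative sketch only: BLOCK_SIZE matches the 512B case above,
 * QUOTA_SIZE and VALUE_OFF are assumed values for sizeof(struct gfs2_quota)
 * and offsetof(struct gfs2_quota, qu_value).
 */
#include <stdio.h>

#define BLOCK_SIZE 512u   /* one of the small block sizes (512B, 1K, 2K) */
#define QUOTA_SIZE  88u   /* assumed sizeof(struct gfs2_quota) */
#define VALUE_OFF   16u   /* assumed offset of qu_value within the record */

int main(void)
{
	/* A quota record that begins 16 bytes before a block boundary. */
	unsigned off = 3 * BLOCK_SIZE - VALUE_OFF;
	unsigned first_blk = off / BLOCK_SIZE;
	unsigned last_blk = (off + QUOTA_SIZE - 1) / BLOCK_SIZE;
	unsigned value_blk = (off + VALUE_OFF) / BLOCK_SIZE;

	printf("record at offset %u spans blocks %u..%u; qu_value is in block %u\n",
	       off, first_blk, last_blk, value_blk);

	/*
	 * Here qu_value lands in the block after the one holding the start of
	 * the record.  The old gfs2_adjust_quota() only mapped and journaled
	 * the buffer_head containing the start offset within the page, so the
	 * second block never got set up before the memcpy; the new
	 * gfs2_write_buf_to_page() below walks every buffer the write touches.
	 */
	return 0;
}

A record can also straddle a page boundary; that case is handled separately in the patch by splitting the write across two pages in gfs2_write_disk_quota().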
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/inode.c	2
-rw-r--r--	fs/gfs2/quota.c	197
2 files changed, 119 insertions(+), 80 deletions(-)
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 4d809eb..a088e54 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1889,7 +1889,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
 	if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
 	    !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
-		gfs2_quota_change(ip, -ap.target, ouid, ogid);
+		gfs2_quota_change(ip, -(s64)ap.target, ouid, ogid);
 		gfs2_quota_change(ip, ap.target, nuid, ngid);
 	}
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 5c27e48..7c20031 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -652,6 +652,112 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	mutex_unlock(&sdp->sd_quota_mutex);
 }
+static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
+				  unsigned off, void *buf, unsigned bytes)
+{
+	struct inode *inode = &ip->i_inode;
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct address_space *mapping = inode->i_mapping;
+	struct page *page;
+	struct buffer_head *bh;
+	void *kaddr;
+	u64 blk;
+	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
+	unsigned to_write = bytes, pg_off = off;
+	int done = 0;
+
+	blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift);
+	boff = off % bsize;
+
+	page = find_or_create_page(mapping, index, GFP_NOFS);
+	if (!page)
+		return -ENOMEM;
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, bsize, 0);
+
+	bh = page_buffers(page);
+	while (!done) {
+		/* Find the beginning block within the page */
+		if (pg_off >= ((bnum * bsize) + bsize)) {
+			bh = bh->b_this_page;
+			bnum++;
+			blk++;
+			continue;
+		}
+		if (!buffer_mapped(bh)) {
+			gfs2_block_map(inode, blk, bh, 1);
+			if (!buffer_mapped(bh))
+				goto unlock_out;
+			/* If it's a newly allocated disk block, zero it */
+			if (buffer_new(bh))
+				zero_user(page, bnum * bsize, bh->b_size);
+		}
+		if (PageUptodate(page))
+			set_buffer_uptodate(bh);
+		if (!buffer_uptodate(bh)) {
+			ll_rw_block(READ | REQ_META, 1, &bh);
+			wait_on_buffer(bh);
+			if (!buffer_uptodate(bh))
+				goto unlock_out;
+		}
+		gfs2_trans_add_data(ip->i_gl, bh);
+
+		/* If we need to write to the next block as well */
+		if (to_write > (bsize - boff)) {
+			pg_off += (bsize - boff);
+			to_write -= (bsize - boff);
+			boff = pg_off % bsize;
+			continue;
+		}
+		done = 1;
+	}
+
+	/* Write to the page, now that we have setup the buffer(s) */
+	kaddr = kmap_atomic(page);
+	memcpy(kaddr + off, buf, bytes);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr);
+	unlock_page(page);
+	page_cache_release(page);
+
+	return 0;
+
+unlock_out:
+	unlock_page(page);
+	page_cache_release(page);
+	return -EIO;
+}
+
+static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
+				 loff_t loc)
+{
+	unsigned long pg_beg;
+	unsigned pg_off, nbytes, overflow = 0;
+	int pg_oflow = 0, error;
+	void *ptr;
+
+	nbytes = sizeof(struct gfs2_quota);
+
+	pg_beg = loc >> PAGE_CACHE_SHIFT;
+	pg_off = loc % PAGE_CACHE_SIZE;
+
+	/* If the quota straddles a page boundary, split the write in two */
+	if ((pg_off + nbytes) > PAGE_CACHE_SIZE) {
+		pg_oflow = 1;
+		overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE;
+	}
+
+	ptr = qp;
+	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
+				       nbytes - overflow);
+	/* If there's an overflow, write the remaining bytes to the next page */
+	if (!error && pg_oflow)
+		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
+					       ptr + nbytes - overflow,
+					       overflow);
+	return error;
+}
+
 /**
  * gfs2_adjust_quota - adjust record of current block usage
  * @ip: The quota inode
@@ -672,15 +778,8 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 {
 	struct inode *inode = &ip->i_inode;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	struct address_space *mapping = inode->i_mapping;
-	unsigned long index = loc >> PAGE_CACHE_SHIFT;
-	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
-	unsigned blocksize, iblock, pos;
-	struct buffer_head *bh;
-	struct page *page;
-	void *kaddr, *ptr;
 	struct gfs2_quota q;
-	int err, nbytes;
+	int err;
 	u64 size;
 	if (gfs2_is_stuffed(ip)) {
@@ -694,8 +793,11 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	if (err < 0)
 		return err;
+	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
 	err = -EIO;
 	be64_add_cpu(&q.qu_value, change);
+	if (be64_to_cpu(q.qu_value) < 0)
+		q.qu_value = 0; /* Never go negative on quota usage */
 	qd->qd_qb.qb_value = q.qu_value;
 	if (fdq) {
 		if (fdq->d_fieldmask & QC_SPC_SOFT) {
@@ -712,79 +814,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 		}
 	}
-	/* Write the quota into the quota file on disk */
-	ptr = &q;
-	nbytes = sizeof(struct gfs2_quota);
-get_a_page:
-	page = find_or_create_page(mapping, index, GFP_NOFS);
-	if (!page)
-		return -ENOMEM;
-
-	blocksize = inode->i_sb->s_blocksize;
-	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, blocksize, 0);
-
-	bh = page_buffers(page);
-	pos = blocksize;
-	while (offset >= pos) {
-		bh = bh->b_this_page;
-		iblock++;
-		pos += blocksize;
-	}
-
-	if (!buffer_mapped(bh)) {
-		gfs2_block_map(inode, iblock, bh, 1);
-		if (!buffer_mapped(bh))
-			goto unlock_out;
-		/* If it's a newly allocated disk block for quota, zero it */
-		if (buffer_new(bh))
-			zero_user(page, pos - blocksize, bh->b_size);
-	}
-
-	if (PageUptodate(page))
-		set_buffer_uptodate(bh);
-
-	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ | REQ_META, 1, &bh);
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh))
-			goto unlock_out;
-	}
-
-	gfs2_trans_add_data(ip->i_gl, bh);
-
-	kaddr = kmap_atomic(page);
-	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
-		nbytes = PAGE_CACHE_SIZE - offset;
-	memcpy(kaddr + offset, ptr, nbytes);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr);
-	unlock_page(page);
-	page_cache_release(page);
-
-	/* If quota straddles page boundary, we need to update the rest of the
-	 * quota at the beginning of the next page */
-	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
-		ptr = ptr + nbytes;
-		nbytes = sizeof(struct gfs2_quota) - nbytes;
-		offset = 0;
-		index++;
-		goto get_a_page;
+	err = gfs2_write_disk_quota(ip, &q, loc);
+	if (!err) {
+		size = loc + sizeof(struct gfs2_quota);
+		if (size > inode->i_size)
+			i_size_write(inode, size);
+		inode->i_mtime = inode->i_atime = CURRENT_TIME;
+		mark_inode_dirty(inode);
+		set_bit(QDF_REFRESH, &qd->qd_flags);
 	}
-	size = loc + sizeof(struct gfs2_quota);
-	if (size > inode->i_size)
-		i_size_write(inode, size);
-	inode->i_mtime = inode->i_atime = CURRENT_TIME;
-	mark_inode_dirty(inode);
-	set_bit(QDF_REFRESH, &qd->qd_flags);
-	return 0;
-
-unlock_out:
-	unlock_page(page);
-	page_cache_release(page);
 	return err;
 }