From 4dd3fd7197303739094183b139bae3142a3d55e6 Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Wed, 17 Aug 2016 08:30:28 +1000 Subject: xfs: don't assert fail on non-async buffers on ioacct decrement The buffer I/O accounting mechanism tracks async buffers under I/O. As an optimization, the buffer I/O count is incremented only once on the first async I/O for a given hold cycle of a buffer and decremented once the buffer is released to the LRU (or freed). xfs_buf_ioacct_dec() has an ASSERT() check for an XBF_ASYNC buffer, but we have one or two corner cases where a buffer can be submitted for I/O multiple times via different methods in a single hold cycle. If an async I/O occurs first, the I/O count is incremented. If a sync I/O occurs before the hold count drops, XBF_ASYNC is cleared by the time the I/O count is decremented. Remove the async assert check from xfs_buf_ioacct_dec() as this is a perfectly valid scenario. For the purposes of I/O accounting, we really only care about the buffer async state at I/O submission time. Discovered-and-analyzed-by: Dave Chinner Signed-off-by: Brian Foster Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 47a318c..607cc29 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -115,7 +115,6 @@ xfs_buf_ioacct_dec( if (!(bp->b_flags & _XBF_IN_FLIGHT)) return; - ASSERT(bp->b_flags & XBF_ASYNC); bp->b_flags &= ~_XBF_IN_FLIGHT; percpu_counter_dec(&bp->b_target->bt_io_count); } -- cgit v0.10.2 From 0af32fb468b4a4434dd759d68611763658650b59 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 17 Aug 2016 08:30:28 +1000 Subject: xfs: fix bogus space reservation in xfs_iomap_write_allocate The space reservation was added without an explanation in commit "Add error reporting calls in error paths that return EFSCORRUPTED" back in 2003. There is no reason to reserve disk blocks in the transaction when allocating blocks for delalloc space as we already reserved the space when creating the delalloc extent. With this fix we stop running out of the reserved pool in generic/229, which has happened for a long time with small blocksize file systems, and has increased in severity with the new buffered write path. [ dchinner: we still need to pass the block reservation into xfs_bmapi_write() to ensure we don't deadlock during AG selection. See commit dbd5c8c ("xfs: pass total block res. as total xfs_bmapi_write() parameter") for more details on why this is necessary. ] Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 2114d53..4398932 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -715,12 +715,16 @@ xfs_iomap_write_allocate( * is in the delayed allocation extent on which we sit * but before our buffer starts. */ - nimaps = 0; while (nimaps == 0) { nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); - - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres, + /* + * We have already reserved space for the extent and any + * indirect blocks when creating the delalloc extent, + * there is no need to reserve space in this transaction + * again.
+ */ + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, XFS_TRANS_RESERVE, &tp); if (error) return error; -- cgit v0.10.2 From 8b2180b3bf0338625cab07da6543acb436df9c40 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 17 Aug 2016 08:31:33 +1000 Subject: xfs: don't invalidate whole file on DAX read/write When we do DAX IO, we try to invalidate the entire page cache held on the file. This is incorrect as it will trash the entire mapping tree that now tracks dirty state in exceptional entries in the radix tree slots. What we are trying to do is remove cached pages (e.g from reads into holes) that sit in the radix tree over the range we are about to write to. Hence we should just limit the invalidation to the range we are about to overwrite. Reported-by: Jan Kara Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ed95e5b..e612a02 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -741,9 +741,20 @@ xfs_file_dax_write( * page is inserted into the pagecache when we have to serve a write * fault on a hole. It should never be dirtied and can simply be * dropped from the pagecache once we get real data for the page. + * + * XXX: This is racy against mmap, and there's nothing we can do about + * it. dax_do_io() should really do this invalidation internally as + * it will know if we've allocated over a holei for this specific IO and + * if so it needs to update the mapping tree and invalidate existing + * PTEs over the newly allocated range. Remove this invalidation when + * dax_do_io() is fixed up. */ if (mapping->nrpages) { - ret = invalidate_inode_pages2(mapping); + loff_t end = iocb->ki_pos + iov_iter_count(from) - 1; + + ret = invalidate_inode_pages2_range(mapping, + iocb->ki_pos >> PAGE_SHIFT, + end >> PAGE_SHIFT); WARN_ON_ONCE(ret); } -- cgit v0.10.2 From f32866fdc9ed077c3bc7160f4f53d6a479201d46 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 17 Aug 2016 08:31:49 +1000 Subject: xfs: store rmapbt block count in the AGF Track the number of blocks used for the rmapbt in the AGF. When we get to the AG reservation code we need this counter to quickly make our reservation during mount. Signed-off-by: Darrick J. Wong Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 776ae2f..af1a72e 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2264,6 +2264,7 @@ xfs_alloc_log_agf( offsetof(xfs_agf_t, agf_longest), offsetof(xfs_agf_t, agf_btreeblks), offsetof(xfs_agf_t, agf_uuid), + offsetof(xfs_agf_t, agf_rmap_blocks), sizeof(xfs_agf_t) }; diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index f814d42..e6a8bea 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -640,12 +640,15 @@ typedef struct xfs_agf { __be32 agf_btreeblks; /* # of blocks held in AGF btrees */ uuid_t agf_uuid; /* uuid of filesystem */ + __be32 agf_rmap_blocks; /* rmapbt blocks used */ + __be32 agf_padding; /* padding */ + /* * reserve some contiguous space for future logged fields before we add * the unlogged fields. This makes the range logging via flags and * structure offsets much simpler. */ - __be64 agf_spare64[16]; + __be64 agf_spare64[15]; /* unlogged fields, written during buffer writeback. 
*/ __be64 agf_lsn; /* last write sequence */ @@ -670,7 +673,8 @@ typedef struct xfs_agf { #define XFS_AGF_LONGEST 0x00000400 #define XFS_AGF_BTREEBLKS 0x00000800 #define XFS_AGF_UUID 0x00001000 -#define XFS_AGF_NUM_BITS 13 +#define XFS_AGF_RMAP_BLOCKS 0x00002000 +#define XFS_AGF_NUM_BITS 14 #define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) #define XFS_AGF_FLAGS \ @@ -686,7 +690,8 @@ typedef struct xfs_agf { { XFS_AGF_FREEBLKS, "FREEBLKS" }, \ { XFS_AGF_LONGEST, "LONGEST" }, \ { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \ - { XFS_AGF_UUID, "UUID" } + { XFS_AGF_UUID, "UUID" }, \ + { XFS_AGF_RMAP_BLOCKS, "RMAP_BLOCKS" } /* disk block (xfs_daddr_t) in the AG */ #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index bc1faeb..17b8eeb 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c @@ -98,6 +98,8 @@ xfs_rmapbt_alloc_block( union xfs_btree_ptr *new, int *stat) { + struct xfs_buf *agbp = cur->bc_private.a.agbp; + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); int error; xfs_agblock_t bno; @@ -124,6 +126,8 @@ xfs_rmapbt_alloc_block( xfs_trans_agbtree_delta(cur->bc_tp, 1); new->s = cpu_to_be32(bno); + be32_add_cpu(&agf->agf_rmap_blocks, 1); + xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS); XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); *stat = 1; @@ -143,6 +147,8 @@ xfs_rmapbt_free_block( bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno, bno, 1); + be32_add_cpu(&agf->agf_rmap_blocks, -1); + xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS); error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); if (error) return error; diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 0f96847..0b7f986 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -248,6 +248,7 @@ xfs_growfs_data_private( agf->agf_roots[XFS_BTNUM_RMAPi] = cpu_to_be32(XFS_RMAP_BLOCK(mp)); agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1); + agf->agf_rmap_blocks = cpu_to_be32(1); } agf->agf_flfirst = cpu_to_be32(1); -- cgit v0.10.2 From 97dd8c9ee60c98f09be86a2e7e61b18f8d8ed4e9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 17 Aug 2016 08:39:47 +1000 Subject: iomap: remove superflous mark_page_accessed from iomap_write_actor This catches up with commit 2457ae ("mm: non-atomically mark page accessed during page cache allocation where possible"), which moved the initial access marking into the pagecache allocator. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/iomap.c b/fs/iomap.c index 48141b8..f39c318 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -199,7 +199,6 @@ again: pagefault_enable(); flush_dcache_page(page); - mark_page_accessed(page); status = iomap_write_end(inode, pos, bytes, copied, page); if (unlikely(status < 0)) -- cgit v0.10.2 From 274c887494cb248eb05f8180bda8298942d98625 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 17 Aug 2016 08:40:18 +1000 Subject: iomap: remove superflous pagefault_disable from iomap_write_actor iov_iter_copy_from_user_atomic disables page faults internally, no need to do it around the call. This also brings the iomap code in line with the original filemap version. 
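With this and the previous patch applied, the copy step in iomap_write_actor() reduces to something like the sketch below (a simplified reconstruction from the two diffs, using a hypothetical helper name, not the verbatim kernel code): the pagecache allocator has already marked the page accessed, and iov_iter_copy_from_user_atomic() disables page faults itself.

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

/*
 * Hypothetical helper for illustration only: the copy step of
 * iomap_write_actor() after the two cleanups above.  No
 * pagefault_disable()/pagefault_enable() pair and no
 * mark_page_accessed() call are needed here.
 */
static size_t iomap_copy_step_sketch(struct inode *inode, struct page *page,
				     struct iov_iter *i, unsigned long offset,
				     size_t bytes)
{
	size_t copied;

	if (mapping_writably_mapped(inode->i_mapping))
		flush_dcache_page(page);

	/* page faults are disabled inside the copy helper itself */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

	flush_dcache_page(page);
	/* the pagecache allocator already marked the page accessed */

	return copied;
}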
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/iomap.c b/fs/iomap.c index f39c318..74712e2 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -194,9 +194,7 @@ again: if (mapping_writably_mapped(inode->i_mapping)) flush_dcache_page(page); - pagefault_disable(); copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); - pagefault_enable(); flush_dcache_page(page); -- cgit v0.10.2 From 8896b8f60951ff0a8b7092f495a445714a1bddc4 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 17 Aug 2016 08:41:10 +1000 Subject: iomap: fiemap should honor the FIEMAP_FLAG_SYNC flag The flag is checked as supported, but then we do an unconditional sync of the file, regardless of whether the flag is set or not. Make the sync conditional on having the FIEMAP_FLAG_SYNC flag set. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Dave Chinner diff --git a/fs/iomap.c b/fs/iomap.c index 74712e2..56c19e6 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -467,9 +467,11 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, if (ret) return ret; - ret = filemap_write_and_wait(inode->i_mapping); - if (ret) - return ret; + if (fi->fi_flags & FIEMAP_FLAG_SYNC) { + ret = filemap_write_and_wait(inode->i_mapping); + if (ret) + return ret; + } while (len > 0) { ret = iomap_apply(inode, start, len, 0, ops, &ctx, -- cgit v0.10.2 From ac2dc058bce81b83631ad5accb90b8f4abe613b7 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 17 Aug 2016 08:41:34 +1000 Subject: iomap: prepare iomap_fiemap for attribute mappings By bassing through an -ENOENT, similar to the old XFS implementation of FIEMAP_FLAG_XATTR. Signed-off-by: Dave Chinner [hch: split from a larger patch] Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/iomap.c b/fs/iomap.c index 56c19e6..d9d1f50 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -476,6 +476,9 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, while (len > 0) { ret = iomap_apply(inode, start, len, 0, ops, &ctx, iomap_fiemap_actor); + /* inode with no (attribute) mapping will give ENOENT */ + if (ret == -ENOENT) + break; if (ret < 0) return ret; if (ret == 0) -- cgit v0.10.2 From f20ac7ab17fcf7046bd9bc4166faf9580f713afd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 17 Aug 2016 08:42:34 +1000 Subject: iomap: mark ->iomap_end as optional No need to implement it for read-only mappings. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/iomap.c b/fs/iomap.c index d9d1f50..0342254 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -84,8 +84,11 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, * Now the data has been copied, commit the range we've copied. This * should not fail unless the filesystem has had a fatal error. */ - ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0, - flags, &iomap); + if (ops->iomap_end) { + ret = ops->iomap_end(inode, pos, length, + written > 0 ? written : 0, + flags, &iomap); + } return written ? written : ret; } -- cgit v0.10.2 From b95a21271b30544a9fb992269d79ed1e1978e023 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 17 Aug 2016 08:44:52 +1000 Subject: xfs: simplify xfs_file_iomap_begin We'll never get nimap == 0 for a successful return from xfs_bmapi_read, so don't try to handle it. 
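The reason nimaps cannot be zero here is that xfs_bmapi_read() reports a hole as a single mapping with br_startblock == HOLESTARTBLOCK rather than returning no mappings, and xfs_bmbt_to_iomap() already translates such a mapping into IOMAP_HOLE. A minimal sketch of that calling pattern (hypothetical helper name, assumed reading of the xfs_bmapi_read() contract, not the verbatim kernel code):

/*
 * Hypothetical helper: map one file range for iomap, relying on
 * xfs_bmapi_read() always returning at least one mapping on success.
 */
static int xfs_map_one_extent_sketch(struct xfs_inode *ip,
				     xfs_fileoff_t offset_fsb,
				     xfs_fileoff_t end_fsb,
				     struct iomap *iomap)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	int			error;

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			       &imap, &nimaps, 0);
	if (error)
		return error;

	/*
	 * A hole still comes back as one mapping (HOLESTARTBLOCK), so
	 * nimaps is never 0 after a successful read and a single
	 * conversion call covers the found, hole and delalloc cases.
	 */
	ASSERT(nimaps);
	xfs_bmbt_to_iomap(ip, iomap, &imap);
	return 0;
}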
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 2114d53..1cce760 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -1037,20 +1037,14 @@ xfs_file_iomap_begin( return error; trace_xfs_iomap_alloc(ip, offset, length, 0, &imap); - xfs_bmbt_to_iomap(ip, iomap, &imap); - } else if (nimaps) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - trace_xfs_iomap_found(ip, offset, length, 0, &imap); - xfs_bmbt_to_iomap(ip, iomap, &imap); } else { + ASSERT(nimaps); + xfs_iunlock(ip, XFS_ILOCK_EXCL); - trace_xfs_iomap_not_found(ip, offset, length, 0, &imap); - iomap->blkno = IOMAP_NULL_BLOCK; - iomap->type = IOMAP_HOLE; - iomap->offset = offset; - iomap->length = length; + trace_xfs_iomap_found(ip, offset, length, 0, &imap); } + xfs_bmbt_to_iomap(ip, iomap, &imap); return 0; } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 551b7e2..7e88bec 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -1298,7 +1298,6 @@ DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc); DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct); DEFINE_IOMAP_EVENT(xfs_iomap_alloc); DEFINE_IOMAP_EVENT(xfs_iomap_found); -DEFINE_IOMAP_EVENT(xfs_iomap_not_found); DECLARE_EVENT_CLASS(xfs_simple_io_class, TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), -- cgit v0.10.2 From 1d4795e7bde075588c90df2175349bb2251802d5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 17 Aug 2016 08:45:30 +1000 Subject: xfs: (re-)implement FIEMAP_FLAG_XATTR Use a special read-only iomap_ops implementation to support fiemap on the attr fork. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 1cce760..697c8fd 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -1106,3 +1106,48 @@ struct iomap_ops xfs_iomap_ops = { .iomap_begin = xfs_file_iomap_begin, .iomap_end = xfs_file_iomap_end, }; + +static int +xfs_xattr_iomap_begin( + struct inode *inode, + loff_t offset, + loff_t length, + unsigned flags, + struct iomap *iomap) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); + xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length); + struct xfs_bmbt_irec imap; + int nimaps = 1, error = 0; + unsigned lockmode; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -EIO; + + lockmode = xfs_ilock_data_map_shared(ip); + + /* if there are no attribute fork or extents, return ENOENT */ + if (XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) { + error = -ENOENT; + goto out_unlock; + } + + ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); + error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, + &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK); +out_unlock: + xfs_iunlock(ip, lockmode); + + if (!error) { + ASSERT(nimaps); + xfs_bmbt_to_iomap(ip, iomap, &imap); + } + + return error; +} + +struct iomap_ops xfs_xattr_iomap_ops = { + .iomap_begin = xfs_xattr_iomap_begin, +}; diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index e066d04..fb8aca3 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h @@ -35,5 +35,6 @@ void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, struct xfs_bmbt_irec *); extern struct iomap_ops xfs_iomap_ops; +extern struct iomap_ops xfs_xattr_iomap_ops; #endif /* __XFS_IOMAP_H__*/ diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index ab820f8..b24c310 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -1009,7 +1009,14 @@ xfs_vn_fiemap( 
int error; xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED); - error = iomap_fiemap(inode, fieinfo, start, length, &xfs_iomap_ops); + if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { + fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; + error = iomap_fiemap(inode, fieinfo, start, length, + &xfs_xattr_iomap_ops); + } else { + error = iomap_fiemap(inode, fieinfo, start, length, + &xfs_iomap_ops); + } xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED); return error; -- cgit v0.10.2 From a03f1a6633144300ef4a3a33e95dfa11866f1299 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 17 Aug 2016 11:12:57 +1000 Subject: xfs: remove OWN_AG rmap when allocating a block from the AGFL When we're really tight on space, xfs_alloc_ag_vextent_small() can allocate a block from the AGFL and give it to the caller. Since the caller is never the AGFL-fixing method, we must remove the OWN_AG reverse mapping because it will clash with whatever rmap the caller wants to set up. This bug was discovered by running generic/299 repeatedly. Signed-off-by: Darrick J. Wong Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index af1a72e..3dd8f1d 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -1582,6 +1582,7 @@ xfs_alloc_ag_vextent_small( xfs_extlen_t *flenp, /* result length */ int *stat) /* status: 0-freelist, 1-normal/none */ { + struct xfs_owner_info oinfo; int error; xfs_agblock_t fbno; xfs_extlen_t flen; @@ -1624,6 +1625,18 @@ xfs_alloc_ag_vextent_small( error0); args->wasfromfl = 1; trace_xfs_alloc_small_freelist(args); + + /* + * If we're feeding an AGFL block to something that + * doesn't live in the free space, we need to clear + * out the OWN_AG rmap. + */ + xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG); + error = xfs_rmap_free(args->tp, args->agbp, args->agno, + fbno, 1, &oinfo); + if (error) + goto error0; + *stat = 0; return 0; } -- cgit v0.10.2
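The OWN_AG record has to go because the caller will immediately install its own reverse mapping for the block it was just handed; leaving the free-space owner behind would make the two rmapbt records collide over the same extent. A rough sketch of the hand-off, with a hypothetical helper name and assuming the usual xfs_rmap_free()/xfs_rmap_alloc() pairing (not verbatim kernel code):

/*
 * Hypothetical helper: hand an AGFL block to a caller that is not
 * fixing the free list.  The OWN_AG rmap the block carried while it
 * sat on the AGFL is dropped first, then the caller's real owner is
 * recorded, so the two records never overlap in the rmapbt.
 */
static int xfs_agfl_block_handoff_sketch(struct xfs_trans *tp,
					 struct xfs_buf *agbp,
					 xfs_agnumber_t agno,
					 xfs_agblock_t bno,
					 struct xfs_owner_info *new_owner)
{
	struct xfs_owner_info	oinfo;
	int			error;

	/* remove the free-list ownership the block had on the AGFL */
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
	error = xfs_rmap_free(tp, agbp, agno, bno, 1, &oinfo);
	if (error)
		return error;

	/* record the real owner chosen by the caller */
	return xfs_rmap_alloc(tp, agbp, agno, bno, 1, new_owner);
}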