Diffstat (limited to 'fs/xfs/linux-2.6')
 fs/xfs/linux-2.6/kmem.c        |  23
 fs/xfs/linux-2.6/kmem.h        |  23
 fs/xfs/linux-2.6/spin.h        |   3
 fs/xfs/linux-2.6/xfs_aops.c    | 259
 fs/xfs/linux-2.6/xfs_aops.h    |  50
 fs/xfs/linux-2.6/xfs_buf.c     | 117
 fs/xfs/linux-2.6/xfs_buf.h     |  12
 fs/xfs/linux-2.6/xfs_file.c    |  90
 fs/xfs/linux-2.6/xfs_ioctl.c   |  18
 fs/xfs/linux-2.6/xfs_ioctl32.c |  65
 fs/xfs/linux-2.6/xfs_iops.c    |  15
 fs/xfs/linux-2.6/xfs_linux.h   |  13
 fs/xfs/linux-2.6/xfs_lrw.c     |   3
 fs/xfs/linux-2.6/xfs_lrw.h     |   7
 fs/xfs/linux-2.6/xfs_super.c   | 166
 fs/xfs/linux-2.6/xfs_vfs.c     |   1
 fs/xfs/linux-2.6/xfs_vfs.h     |   2
 fs/xfs/linux-2.6/xfs_vnode.c   | 251
 fs/xfs/linux-2.6/xfs_vnode.h   |  60
 19 files changed, 564 insertions(+), 614 deletions(-)
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 364ea8c..4b18455 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -45,11 +45,11 @@
void *
-kmem_alloc(size_t size, int flags)
+kmem_alloc(size_t size, unsigned int __nocast flags)
{
- int retries = 0;
- int lflags = kmem_flags_convert(flags);
- void *ptr;
+ int retries = 0;
+ unsigned int lflags = kmem_flags_convert(flags);
+ void *ptr;
do {
if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
@@ -67,7 +67,7 @@ kmem_alloc(size_t size, int flags)
}
void *
-kmem_zalloc(size_t size, int flags)
+kmem_zalloc(size_t size, unsigned int __nocast flags)
{
void *ptr;
@@ -89,7 +89,8 @@ kmem_free(void *ptr, size_t size)
}
void *
-kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
+kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
+ unsigned int __nocast flags)
{
void *new;
@@ -104,11 +105,11 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
}
void *
-kmem_zone_alloc(kmem_zone_t *zone, int flags)
+kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
{
- int retries = 0;
- int lflags = kmem_flags_convert(flags);
- void *ptr;
+ int retries = 0;
+ unsigned int lflags = kmem_flags_convert(flags);
+ void *ptr;
do {
ptr = kmem_cache_alloc(zone, lflags);
@@ -123,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, int flags)
}
void *
-kmem_zone_zalloc(kmem_zone_t *zone, int flags)
+kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
{
void *ptr;
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 1397b66..109fcf2 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -39,10 +39,10 @@
/*
* memory management routines
*/
-#define KM_SLEEP 0x0001
-#define KM_NOSLEEP 0x0002
-#define KM_NOFS 0x0004
-#define KM_MAYFAIL 0x0008
+#define KM_SLEEP 0x0001u
+#define KM_NOSLEEP 0x0002u
+#define KM_NOFS 0x0004u
+#define KM_MAYFAIL 0x0008u
#define kmem_zone kmem_cache_s
#define kmem_zone_t kmem_cache_t
@@ -81,9 +81,9 @@ typedef unsigned long xfs_pflags_t;
*(NSTATEP) = *(OSTATEP); \
} while (0)
-static __inline unsigned int kmem_flags_convert(int flags)
+static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags)
{
- int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
+ unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
#ifdef DEBUG
if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
@@ -125,12 +125,13 @@ kmem_zone_destroy(kmem_zone_t *zone)
BUG();
}
-extern void *kmem_zone_zalloc(kmem_zone_t *, int);
-extern void *kmem_zone_alloc(kmem_zone_t *, int);
+extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_alloc(size_t, int);
-extern void *kmem_realloc(void *, size_t, size_t, int);
-extern void *kmem_zalloc(size_t, int);
+extern void *kmem_alloc(size_t, unsigned int __nocast);
+extern void *kmem_realloc(void *, size_t, size_t,
+ unsigned int __nocast);
+extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void kmem_free(void *, size_t);
typedef struct shrinker *kmem_shaker_t;
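For callers, the net effect of the __nocast conversion is a type-safe flag word. A minimal caller-side sketch, assuming only the KM_* flags and prototypes from the hunks above (example_alloc_table() is illustrative, not part of the patch):

/*
 * Sketch: typical use of the unsigned-flag allocation API.
 * KM_NOFS keeps the allocator from recursing back into the
 * filesystem; without KM_MAYFAIL, KM_SLEEP allocations retry
 * internally until they succeed.
 */
static void *example_alloc_table(size_t nentries)
{
	return kmem_zalloc(nentries * sizeof(void *), KM_SLEEP | KM_NOFS);
}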
diff --git a/fs/xfs/linux-2.6/spin.h b/fs/xfs/linux-2.6/spin.h
index bcf60a0..0039504 100644
--- a/fs/xfs/linux-2.6/spin.h
+++ b/fs/xfs/linux-2.6/spin.h
@@ -45,6 +45,9 @@
typedef spinlock_t lock_t;
#define SPLDECL(s) unsigned long s
+#ifndef DEFINE_SPINLOCK
+#define DEFINE_SPINLOCK(s) spinlock_t s = SPIN_LOCK_UNLOCKED
+#endif
#define spinlock_init(lock, name) spin_lock_init(lock)
#define spinlock_destroy(lock)
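A one-line sketch of what the fallback enables on kernels that predate the generic macro:

/* Sketch: compiles identically whether or not the kernel headers
 * already provide DEFINE_SPINLOCK. */
static DEFINE_SPINLOCK(example_lock);	/* == spinlock_t example_lock = SPIN_LOCK_UNLOCKED */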
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index a3a4b5a..c6c0779 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -104,66 +104,114 @@ xfs_page_trace(
#define xfs_page_trace(tag, inode, page, mask)
#endif
-void
-linvfs_unwritten_done(
- struct buffer_head *bh,
- int uptodate)
+/*
+ * Schedule IO completion handling on a xfsdatad if this was
+ * the final hold on this ioend.
+ */
+STATIC void
+xfs_finish_ioend(
+ xfs_ioend_t *ioend)
{
- xfs_buf_t *pb = (xfs_buf_t *)bh->b_private;
+ if (atomic_dec_and_test(&ioend->io_remaining))
+ queue_work(xfsdatad_workqueue, &ioend->io_work);
+}
- ASSERT(buffer_unwritten(bh));
- bh->b_end_io = NULL;
- clear_buffer_unwritten(bh);
- if (!uptodate)
- pagebuf_ioerror(pb, EIO);
- if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
- pagebuf_iodone(pb, 1, 1);
- }
- end_buffer_async_write(bh, uptodate);
+STATIC void
+xfs_destroy_ioend(
+ xfs_ioend_t *ioend)
+{
+ vn_iowake(ioend->io_vnode);
+ mempool_free(ioend, xfs_ioend_pool);
}
/*
* Issue transactions to convert a buffer range from unwritten
- * to written extents (buffered IO).
+ * to written extents.
*/
STATIC void
-linvfs_unwritten_convert(
- xfs_buf_t *bp)
+xfs_end_bio_unwritten(
+ void *data)
{
- vnode_t *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
- int error;
+ xfs_ioend_t *ioend = data;
+ vnode_t *vp = ioend->io_vnode;
+ xfs_off_t offset = ioend->io_offset;
+ size_t size = ioend->io_size;
+ struct buffer_head *bh, *next;
+ int error;
+
+ if (ioend->io_uptodate)
+ VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
+
+ /* ioend->io_buffer_head is only non-NULL for buffered I/O */
+ for (bh = ioend->io_buffer_head; bh; bh = next) {
+ next = bh->b_private;
+
+ bh->b_end_io = NULL;
+ clear_buffer_unwritten(bh);
+ end_buffer_async_write(bh, ioend->io_uptodate);
+ }
- BUG_ON(atomic_read(&bp->pb_hold) < 1);
- VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
- BMAPI_UNWRITTEN, NULL, NULL, error);
- XFS_BUF_SET_FSPRIVATE(bp, NULL);
- XFS_BUF_CLR_IODONE_FUNC(bp);
- XFS_BUF_UNDATAIO(bp);
- iput(LINVFS_GET_IP(vp));
- pagebuf_iodone(bp, 0, 0);
+ xfs_destroy_ioend(ioend);
}
/*
- * Issue transactions to convert a buffer range from unwritten
- * to written extents (direct IO).
+ * Allocate and initialise an IO completion structure.
+ * We need to track unwritten extent write completion here initially.
+ * We'll need to extend this for updating the ondisk inode size later
+ * (vs. incore size).
*/
-STATIC void
-linvfs_unwritten_convert_direct(
- struct kiocb *iocb,
- loff_t offset,
- ssize_t size,
- void *private)
+STATIC xfs_ioend_t *
+xfs_alloc_ioend(
+ struct inode *inode)
{
- struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
- ASSERT(!private || inode == (struct inode *)private);
+ xfs_ioend_t *ioend;
- /* private indicates an unwritten extent lay beneath this IO */
- if (private && size > 0) {
- vnode_t *vp = LINVFS_GET_VP(inode);
- int error;
+ ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
- VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
- }
+ /*
+ * Set the count to 1 initially, so that a completion callback
+ * firing before all the I/O has been submitted cannot call the
+ * completion routine too early.
+ */
+ atomic_set(&ioend->io_remaining, 1);
+ ioend->io_uptodate = 1; /* cleared if any I/O fails */
+ ioend->io_vnode = LINVFS_GET_VP(inode);
+ ioend->io_buffer_head = NULL;
+ atomic_inc(&ioend->io_vnode->v_iocount);
+ ioend->io_offset = 0;
+ ioend->io_size = 0;
+
+ INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+
+ return ioend;
+}
+
+void
+linvfs_unwritten_done(
+ struct buffer_head *bh,
+ int uptodate)
+{
+ xfs_ioend_t *ioend = bh->b_private;
+ static spinlock_t unwritten_done_lock = SPIN_LOCK_UNLOCKED;
+ unsigned long flags;
+
+ ASSERT(buffer_unwritten(bh));
+ bh->b_end_io = NULL;
+
+ if (!uptodate)
+ ioend->io_uptodate = 0;
+
+ /*
+ * Deep magic here. We reuse b_private in the buffer_heads to build
+ * a chain for completing the I/O from user context after we've issued
+ * a transaction to convert the unwritten extent.
+ */
+ spin_lock_irqsave(&unwritten_done_lock, flags);
+ bh->b_private = ioend->io_buffer_head;
+ ioend->io_buffer_head = bh;
+ spin_unlock_irqrestore(&unwritten_done_lock, flags);
+
+ xfs_finish_ioend(ioend);
}
STATIC int
@@ -255,7 +303,7 @@ xfs_probe_unwritten_page(
struct address_space *mapping,
pgoff_t index,
xfs_iomap_t *iomapp,
- xfs_buf_t *pb,
+ xfs_ioend_t *ioend,
unsigned long max_offset,
unsigned long *fsbs,
unsigned int bbits)
@@ -283,7 +331,7 @@ xfs_probe_unwritten_page(
break;
xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
set_buffer_unwritten_io(bh);
- bh->b_private = pb;
+ bh->b_private = ioend;
p_offset += bh->b_size;
(*fsbs)++;
} while ((bh = bh->b_this_page) != head);
@@ -434,34 +482,15 @@ xfs_map_unwritten(
{
struct buffer_head *bh = curr;
xfs_iomap_t *tmp;
- xfs_buf_t *pb;
- loff_t offset, size;
+ xfs_ioend_t *ioend;
+ loff_t offset;
unsigned long nblocks = 0;
offset = start_page->index;
offset <<= PAGE_CACHE_SHIFT;
offset += p_offset;
- /* get an "empty" pagebuf to manage IO completion
- * Proper values will be set before returning */
- pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
- if (!pb)
- return -EAGAIN;
-
- /* Take a reference to the inode to prevent it from
- * being reclaimed while we have outstanding unwritten
- * extent IO on it.
- */
- if ((igrab(inode)) != inode) {
- pagebuf_free(pb);
- return -EAGAIN;
- }
-
- /* Set the count to 1 initially, this will stop an I/O
- * completion callout which happens before we have started
- * all the I/O from calling pagebuf_iodone too early.
- */
- atomic_set(&pb->pb_io_remaining, 1);
+ ioend = xfs_alloc_ioend(inode);
/* First map forwards in the page consecutive buffers
* covering this unwritten extent
@@ -474,12 +503,12 @@ xfs_map_unwritten(
break;
xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
set_buffer_unwritten_io(bh);
- bh->b_private = pb;
+ bh->b_private = ioend;
p_offset += bh->b_size;
nblocks++;
} while ((bh = bh->b_this_page) != head);
- atomic_add(nblocks, &pb->pb_io_remaining);
+ atomic_add(nblocks, &ioend->io_remaining);
/* If we reached the end of the page, map forwards in any
* following pages which are also covered by this extent.
@@ -496,13 +525,13 @@ xfs_map_unwritten(
tloff = min(tlast, tloff);
for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
page = xfs_probe_unwritten_page(mapping,
- tindex, iomapp, pb,
+ tindex, iomapp, ioend,
PAGE_CACHE_SIZE, &bs, bbits);
if (!page)
break;
nblocks += bs;
- atomic_add(bs, &pb->pb_io_remaining);
- xfs_convert_page(inode, page, iomapp, wbc, pb,
+ atomic_add(bs, &ioend->io_remaining);
+ xfs_convert_page(inode, page, iomapp, wbc, ioend,
startio, all_bh);
/* stop if converting the next page might add
* enough blocks that the corresponding byte
@@ -514,12 +543,12 @@ xfs_map_unwritten(
if (tindex == tlast &&
(pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
page = xfs_probe_unwritten_page(mapping,
- tindex, iomapp, pb,
+ tindex, iomapp, ioend,
pg_offset, &bs, bbits);
if (page) {
nblocks += bs;
- atomic_add(bs, &pb->pb_io_remaining);
- xfs_convert_page(inode, page, iomapp, wbc, pb,
+ atomic_add(bs, &ioend->io_remaining);
+ xfs_convert_page(inode, page, iomapp, wbc, ioend,
startio, all_bh);
if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
goto enough;
@@ -528,21 +557,9 @@ xfs_map_unwritten(
}
enough:
- size = nblocks; /* NB: using 64bit number here */
- size <<= block_bits; /* convert fsb's to byte range */
-
- XFS_BUF_DATAIO(pb);
- XFS_BUF_ASYNC(pb);
- XFS_BUF_SET_SIZE(pb, size);
- XFS_BUF_SET_COUNT(pb, size);
- XFS_BUF_SET_OFFSET(pb, offset);
- XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
- XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);
-
- if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
- pagebuf_iodone(pb, 1, 1);
- }
-
+ ioend->io_size = (xfs_off_t)nblocks << block_bits;
+ ioend->io_offset = offset;
+ xfs_finish_ioend(ioend);
return 0;
}
@@ -787,7 +804,7 @@ xfs_page_state_convert(
continue;
if (!iomp) {
err = xfs_map_blocks(inode, offset, len, &iomap,
- BMAPI_READ|BMAPI_IGNSTATE);
+ BMAPI_WRITE|BMAPI_IGNSTATE);
if (err) {
goto error;
}
@@ -1028,6 +1045,44 @@ linvfs_get_blocks_direct(
create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
+STATIC void
+linvfs_end_io_direct(
+ struct kiocb *iocb,
+ loff_t offset,
+ ssize_t size,
+ void *private)
+{
+ xfs_ioend_t *ioend = iocb->private;
+
+ /*
+ * Non-NULL private data means we need to issue a transaction to
+ * convert a range from unwritten to written extents. This needs
+ * to happen from process context, but aio+dio I/O completion
+ * happens from irq context, so we need to defer it to a workqueue.
+ * This is not necessary for synchronous direct I/O, but we do
+ * it anyway to keep the code uniform and simpler.
+ *
+ * The core direct I/O code might be changed to always call the
+ * completion handler in the future, in which case all this can
+ * go away.
+ */
+ if (private && size > 0) {
+ ioend->io_offset = offset;
+ ioend->io_size = size;
+ xfs_finish_ioend(ioend);
+ } else {
+ ASSERT(size >= 0);
+ xfs_destroy_ioend(ioend);
+ }
+
+ /*
+ * blockdev_direct_IO can return an error even after the I/O
+ * completion handler was called. Thus we need to protect
+ * against double-freeing.
+ */
+ iocb->private = NULL;
+}
+
STATIC ssize_t
linvfs_direct_IO(
int rw,
@@ -1042,16 +1097,23 @@ linvfs_direct_IO(
xfs_iomap_t iomap;
int maps = 1;
int error;
+ ssize_t ret;
VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
if (error)
return -error;
- return blockdev_direct_IO_own_locking(rw, iocb, inode,
+ iocb->private = xfs_alloc_ioend(inode);
+
+ ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
iomap.iomap_target->pbr_bdev,
iov, offset, nr_segs,
linvfs_get_blocks_direct,
- linvfs_unwritten_convert_direct);
+ linvfs_end_io_direct);
+
+ if (unlikely(ret <= 0 && iocb->private))
+ xfs_destroy_ioend(iocb->private);
+ return ret;
}
@@ -1202,6 +1264,16 @@ out_unlock:
return error;
}
+STATIC int
+linvfs_invalidate_page(
+ struct page *page,
+ unsigned long offset)
+{
+ xfs_page_trace(XFS_INVALIDPAGE_ENTER,
+ page->mapping->host, page, offset);
+ return block_invalidatepage(page, offset);
+}
+
/*
* Called to move a page into cleanable state - and from there
* to be released. Possibly the page is already clean. We always
@@ -1279,6 +1351,7 @@ struct address_space_operations linvfs_aops = {
.writepage = linvfs_writepage,
.sync_page = block_sync_page,
.releasepage = linvfs_release_page,
+ .invalidatepage = linvfs_invalidate_page,
.prepare_write = linvfs_prepare_write,
.commit_write = generic_commit_write,
.bmap = linvfs_bmap,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
new file mode 100644
index 0000000..2fa6297
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_AOPS_H__
+#define __XFS_AOPS_H__
+
+extern struct workqueue_struct *xfsdatad_workqueue;
+extern mempool_t *xfs_ioend_pool;
+
+typedef void (*xfs_ioend_func_t)(void *);
+
+typedef struct xfs_ioend {
+	unsigned int		io_uptodate;	/* I/O status flag */
+ atomic_t io_remaining; /* hold count */
+ struct vnode *io_vnode; /* file being written to */
+ struct buffer_head *io_buffer_head;/* buffer linked list head */
+ size_t io_size; /* size of the extent */
+ xfs_off_t io_offset; /* offset in the file */
+ struct work_struct io_work; /* xfsdatad work queue */
+} xfs_ioend_t;
+
+#endif /* __XFS_AOPS_H__ */
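A hedged sketch of the intended ioend life cycle, pieced together from the xfs_aops.c hunks above; example_unwritten_write() is illustrative, and the actual buffer mapping and I/O submission are elided:

/* Sketch: one unwritten-extent write spanning the buffers of a page. */
STATIC void example_unwritten_write(struct inode *inode,
				    struct buffer_head *head,
				    xfs_off_t offset, size_t size)
{
	xfs_ioend_t *ioend = xfs_alloc_ioend(inode);	/* io_remaining == 1 */
	struct buffer_head *bh = head;
	unsigned long nblocks = 0;

	do {
		set_buffer_unwritten_io(bh);	/* b_end_io -> linvfs_unwritten_done */
		bh->b_private = ioend;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &ioend->io_remaining);	/* one hold per buffer */

	/* ... buffers are submitted for I/O here (elided) ... */

	ioend->io_offset = offset;
	ioend->io_size = size;
	xfs_finish_ioend(ioend);	/* drop the initial hold; whoever drops
					 * the last hold queues io_work on an
					 * xfsdatad, which runs the unwritten
					 * extent conversion transaction */
}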
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index df0cba2..655bf4a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -54,6 +54,7 @@
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
+#include <linux/kthread.h>
#include "xfs_linux.h"
@@ -67,7 +68,7 @@ STATIC int xfsbufd_wakeup(int, unsigned int);
STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *xfslogd_workqueue;
-STATIC struct workqueue_struct *xfsdatad_workqueue;
+struct workqueue_struct *xfsdatad_workqueue;
/*
* Pagebuf debugging
@@ -590,8 +591,10 @@ found:
PB_SET_OWNER(pb);
}
- if (pb->pb_flags & PBF_STALE)
+ if (pb->pb_flags & PBF_STALE) {
+ ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
pb->pb_flags &= PBF_MAPPED;
+ }
PB_TRACE(pb, "got_lock", 0);
XFS_STATS_INC(pb_get_locked);
return (pb);
@@ -700,25 +703,6 @@ xfs_buf_read_flags(
}
/*
- * Create a skeletal pagebuf (no pages associated with it).
- */
-xfs_buf_t *
-pagebuf_lookup(
- xfs_buftarg_t *target,
- loff_t ioff,
- size_t isize,
- page_buf_flags_t flags)
-{
- xfs_buf_t *pb;
-
- pb = pagebuf_allocate(flags);
- if (pb) {
- _pagebuf_initialize(pb, target, ioff, isize, flags);
- }
- return pb;
-}
-
-/*
* If we are not low on memory then do the readahead in a deadlock
* safe manner.
*/
@@ -913,22 +897,23 @@ pagebuf_rele(
do_free = 0;
}
- if (pb->pb_flags & PBF_DELWRI) {
- pb->pb_flags |= PBF_ASYNC;
- atomic_inc(&pb->pb_hold);
- pagebuf_delwri_queue(pb, 0);
- do_free = 0;
- } else if (pb->pb_flags & PBF_FS_MANAGED) {
+ if (pb->pb_flags & PBF_FS_MANAGED) {
do_free = 0;
}
if (do_free) {
+ ASSERT((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == 0);
list_del_init(&pb->pb_hash_list);
spin_unlock(&hash->bh_lock);
pagebuf_free(pb);
} else {
spin_unlock(&hash->bh_lock);
}
+ } else {
+ /*
+ * Catch reference count leaks
+ */
+ ASSERT(atomic_read(&pb->pb_hold) >= 0);
}
}
@@ -1006,13 +991,24 @@ pagebuf_lock(
* pagebuf_unlock
*
* pagebuf_unlock releases the lock on the buffer object created by
- * pagebuf_lock or pagebuf_cond_lock (not any
- * pinning of underlying pages created by pagebuf_pin).
+ * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
+ * created by pagebuf_pin).
+ *
+ * If the buffer is marked delwri but is not queued, do so before we
+ * unlock the buffer as we need to set flags correctly. We also need to
+ * take a reference for the delwri queue because the unlocker is going to
+ * drop theirs and they don't know we just queued it.
*/
void
pagebuf_unlock( /* unlock buffer */
xfs_buf_t *pb) /* buffer to unlock */
{
+ if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
+ atomic_inc(&pb->pb_hold);
+ pb->pb_flags |= PBF_ASYNC;
+ pagebuf_delwri_queue(pb, 0);
+ }
+
PB_CLEAR_OWNER(pb);
up(&pb->pb_sema);
PB_TRACE(pb, "unlock", 0);
@@ -1249,8 +1245,8 @@ bio_end_io_pagebuf(
int error)
{
xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private;
- unsigned int i, blocksize = pb->pb_target->pbr_bsize;
- struct bio_vec *bvec = bio->bi_io_vec;
+ unsigned int blocksize = pb->pb_target->pbr_bsize;
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
if (bio->bi_size)
return 1;
@@ -1258,10 +1254,12 @@ bio_end_io_pagebuf(
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
pb->pb_error = EIO;
- for (i = 0; i < bio->bi_vcnt; i++, bvec++) {
+ do {
struct page *page = bvec->bv_page;
- if (pb->pb_error) {
+ if (unlikely(pb->pb_error)) {
+ if (pb->pb_flags & PBF_READ)
+ ClearPageUptodate(page);
SetPageError(page);
} else if (blocksize == PAGE_CACHE_SIZE) {
SetPageUptodate(page);
@@ -1270,10 +1268,13 @@ bio_end_io_pagebuf(
set_page_region(page, bvec->bv_offset, bvec->bv_len);
}
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+
if (_pagebuf_iolocked(pb)) {
unlock_page(page);
}
- }
+ } while (bvec >= bio->bi_io_vec);
_pagebuf_iodone(pb, 1);
bio_put(bio);
@@ -1511,6 +1512,11 @@ again:
ASSERT(btp == bp->pb_target);
if (!(bp->pb_flags & PBF_FS_MANAGED)) {
spin_unlock(&hash->bh_lock);
+ /*
+ * Catch superblock reference count leaks
+ * immediately
+ */
+ BUG_ON(bp->pb_bn == 0);
delay(100);
goto again;
}
@@ -1686,17 +1692,20 @@ pagebuf_delwri_queue(
int unlock)
{
PB_TRACE(pb, "delwri_q", (long)unlock);
- ASSERT(pb->pb_flags & PBF_DELWRI);
+ ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
+ (PBF_DELWRI|PBF_ASYNC));
spin_lock(&pbd_delwrite_lock);
/* If already in the queue, dequeue and place at tail */
if (!list_empty(&pb->pb_list)) {
+ ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
if (unlock) {
atomic_dec(&pb->pb_hold);
}
list_del(&pb->pb_list);
}
+ pb->pb_flags |= _PBF_DELWRI_Q;
list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
pb->pb_queuetime = jiffies;
spin_unlock(&pbd_delwrite_lock);
@@ -1713,10 +1722,11 @@ pagebuf_delwri_dequeue(
spin_lock(&pbd_delwrite_lock);
if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
+ ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
list_del_init(&pb->pb_list);
dequeued = 1;
}
- pb->pb_flags &= ~PBF_DELWRI;
+ pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
spin_unlock(&pbd_delwrite_lock);
if (dequeued)
@@ -1733,9 +1743,7 @@ pagebuf_runall_queues(
}
/* Defines for pagebuf daemon */
-STATIC DECLARE_COMPLETION(xfsbufd_done);
STATIC struct task_struct *xfsbufd_task;
-STATIC int xfsbufd_active;
STATIC int xfsbufd_force_flush;
STATIC int xfsbufd_force_sleep;
@@ -1761,14 +1769,8 @@ xfsbufd(
xfs_buftarg_t *target;
xfs_buf_t *pb, *n;
- /* Set up the thread */
- daemonize("xfsbufd");
current->flags |= PF_MEMALLOC;
- xfsbufd_task = current;
- xfsbufd_active = 1;
- barrier();
-
INIT_LIST_HEAD(&tmp);
do {
if (unlikely(freezing(current))) {
@@ -1795,7 +1797,7 @@ xfsbufd(
break;
}
- pb->pb_flags &= ~PBF_DELWRI;
+ pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
pb->pb_flags |= PBF_WRITE;
list_move(&pb->pb_list, &tmp);
}
@@ -1816,9 +1818,9 @@ xfsbufd(
purge_addresses();
xfsbufd_force_flush = 0;
- } while (xfsbufd_active);
+ } while (!kthread_should_stop());
- complete_and_exit(&xfsbufd_done, 0);
+ return 0;
}
/*
@@ -1845,15 +1847,13 @@ xfs_flush_buftarg(
if (pb->pb_target != target)
continue;
- ASSERT(pb->pb_flags & PBF_DELWRI);
+ ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
if (pagebuf_ispin(pb)) {
pincount++;
continue;
}
- pb->pb_flags &= ~PBF_DELWRI;
- pb->pb_flags |= PBF_WRITE;
list_move(&pb->pb_list, &tmp);
}
spin_unlock(&pbd_delwrite_lock);
@@ -1862,12 +1862,14 @@ xfs_flush_buftarg(
* Dropped the delayed write list lock, now walk the temporary list
*/
list_for_each_entry_safe(pb, n, &tmp, pb_list) {
+ pagebuf_lock(pb);
+ pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
+ pb->pb_flags |= PBF_WRITE;
if (wait)
pb->pb_flags &= ~PBF_ASYNC;
else
list_del_init(&pb->pb_list);
- pagebuf_lock(pb);
pagebuf_iostrategy(pb);
}
@@ -1901,9 +1903,11 @@ xfs_buf_daemons_start(void)
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
- error = kernel_thread(xfsbufd, NULL, CLONE_FS|CLONE_FILES);
- if (error < 0)
+ xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
+ if (IS_ERR(xfsbufd_task)) {
+ error = PTR_ERR(xfsbufd_task);
goto out_destroy_xfsdatad_workqueue;
+ }
return 0;
out_destroy_xfsdatad_workqueue:
@@ -1920,10 +1924,7 @@ xfs_buf_daemons_start(void)
STATIC void
xfs_buf_daemons_stop(void)
{
- xfsbufd_active = 0;
- barrier();
- wait_for_completion(&xfsbufd_done);
-
+ kthread_stop(xfsbufd_task);
destroy_workqueue(xfslogd_workqueue);
destroy_workqueue(xfsdatad_workqueue);
}
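The xfsbufd conversion above follows the stock kthread pattern; a generic sketch (all example_* names are illustrative, not part of the patch):

/* Sketch: the kthread life cycle xfsbufd now uses. */
static struct task_struct *example_task;

static int example_daemon(void *arg)
{
	current->flags |= PF_MEMALLOC;
	do {
		/* ... do one pass of work ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	} while (!kthread_should_stop());
	return 0;	/* collected by kthread_stop() */
}

static int example_start(void)
{
	example_task = kthread_run(example_daemon, NULL, "exampled");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	return 0;
}

static void example_stop(void)
{
	kthread_stop(example_task);	/* wakes the thread, waits for exit */
}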
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 3f8f69a..67c19f7 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -89,6 +89,7 @@ typedef enum page_buf_flags_e { /* pb_flags values */
_PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
_PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
_PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
+ _PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
} page_buf_flags_t;
#define PBF_UPDATE (PBF_READ | PBF_WRITE)
@@ -206,13 +207,6 @@ extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
#define xfs_buf_read(target, blkno, len, flags) \
xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
-extern xfs_buf_t *pagebuf_lookup(
- xfs_buftarg_t *,
- loff_t, /* starting offset of range */
- size_t, /* length of range */
- page_buf_flags_t); /* PBF_READ, PBF_WRITE, */
- /* PBF_FORCEIO, */
-
extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */
/* no memory or disk address */
size_t len,
@@ -344,8 +338,6 @@ extern void pagebuf_trace(
-
-
/* These are just for xfs_syncsub... it sets an internal variable
* then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
*/
@@ -452,7 +444,7 @@ extern void pagebuf_trace(
#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr)
-extern inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
+static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
{
if (bp->pb_flags & PBF_MAPPED)
return XFS_BUF_PTR(bp) + offset;
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f1ce432..3881622 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -311,6 +311,31 @@ linvfs_fsync(
#define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen))
+#ifdef CONFIG_XFS_DMAPI
+
+STATIC struct page *
+linvfs_filemap_nopage(
+ struct vm_area_struct *area,
+ unsigned long address,
+ int *type)
+{
+ struct inode *inode = area->vm_file->f_dentry->d_inode;
+ vnode_t *vp = LINVFS_GET_VP(inode);
+ xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
+ int error;
+
+ ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
+
+ error = XFS_SEND_MMAP(mp, area, 0);
+ if (error)
+ return NULL;
+
+ return filemap_nopage(area, address, type);
+}
+
+#endif /* CONFIG_XFS_DMAPI */
+
+
STATIC int
linvfs_readdir(
struct file *filp,
@@ -390,14 +415,6 @@ done:
return -error;
}
-#ifdef CONFIG_XFS_DMAPI
-STATIC void
-linvfs_mmap_close(
- struct vm_area_struct *vma)
-{
- xfs_dm_mm_put(vma);
-}
-#endif /* CONFIG_XFS_DMAPI */
STATIC int
linvfs_file_mmap(
@@ -411,16 +428,11 @@ linvfs_file_mmap(
vma->vm_ops = &linvfs_file_vm_ops;
- if (vp->v_vfsp->vfs_flag & VFS_DMI) {
- xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
-
- error = -XFS_SEND_MMAP(mp, vma, 0);
- if (error)
- return error;
#ifdef CONFIG_XFS_DMAPI
+ if (vp->v_vfsp->vfs_flag & VFS_DMI) {
vma->vm_ops = &linvfs_dmapi_file_vm_ops;
-#endif
}
+#endif /* CONFIG_XFS_DMAPI */
VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error);
if (!error)
@@ -474,6 +486,7 @@ linvfs_ioctl_invis(
return error;
}
+#ifdef CONFIG_XFS_DMAPI
#ifdef HAVE_VMOP_MPROTECT
STATIC int
linvfs_mprotect(
@@ -494,6 +507,7 @@ linvfs_mprotect(
return error;
}
#endif /* HAVE_VMOP_MPROTECT */
+#endif /* CONFIG_XFS_DMAPI */
#ifdef HAVE_FOP_OPEN_EXEC
/* If the user is attempting to execute a file that is offline then
@@ -528,49 +542,10 @@ open_exec_out:
}
#endif /* HAVE_FOP_OPEN_EXEC */
-/*
- * Temporary workaround to the AIO direct IO write problem.
- * This code can go and we can revert to do_sync_write once
- * the writepage(s) rework is merged.
- */
-STATIC ssize_t
-linvfs_write(
- struct file *filp,
- const char __user *buf,
- size_t len,
- loff_t *ppos)
-{
- struct kiocb kiocb;
- ssize_t ret;
-
- init_sync_kiocb(&kiocb, filp);
- kiocb.ki_pos = *ppos;
- ret = __linvfs_write(&kiocb, buf, 0, len, kiocb.ki_pos);
- *ppos = kiocb.ki_pos;
- return ret;
-}
-STATIC ssize_t
-linvfs_write_invis(
- struct file *filp,
- const char __user *buf,
- size_t len,
- loff_t *ppos)
-{
- struct kiocb kiocb;
- ssize_t ret;
-
- init_sync_kiocb(&kiocb, filp);
- kiocb.ki_pos = *ppos;
- ret = __linvfs_write(&kiocb, buf, IO_INVIS, len, kiocb.ki_pos);
- *ppos = kiocb.ki_pos;
- return ret;
-}
-
-
struct file_operations linvfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .write = linvfs_write,
+ .write = do_sync_write,
.readv = linvfs_readv,
.writev = linvfs_writev,
.aio_read = linvfs_aio_read,
@@ -592,7 +567,7 @@ struct file_operations linvfs_file_operations = {
struct file_operations linvfs_invis_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
- .write = linvfs_write_invis,
+ .write = do_sync_write,
.readv = linvfs_readv_invis,
.writev = linvfs_writev_invis,
.aio_read = linvfs_aio_read_invis,
@@ -626,8 +601,7 @@ static struct vm_operations_struct linvfs_file_vm_ops = {
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct linvfs_dmapi_file_vm_ops = {
- .close = linvfs_mmap_close,
- .nopage = filemap_nopage,
+ .nopage = linvfs_filemap_nopage,
.populate = filemap_populate,
#ifdef HAVE_VMOP_MPROTECT
.mprotect = linvfs_mprotect,
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 05a447e..6a3326b 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -141,13 +141,19 @@ xfs_find_handle(
return -XFS_ERROR(EINVAL);
}
- /* we need the vnode */
- vp = LINVFS_GET_VP(inode);
- if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ case S_IFLNK:
+ break;
+ default:
iput(inode);
return -XFS_ERROR(EBADF);
}
+ /* we need the vnode */
+ vp = LINVFS_GET_VP(inode);
+
/* now we can grab the fsid */
memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
hsize = sizeof(xfs_fsid_t);
@@ -386,7 +392,7 @@ xfs_readlink_by_handle(
return -error;
/* Restrict this handle operation to symlinks only. */
- if (vp->v_type != VLNK) {
+ if (!S_ISLNK(inode->i_mode)) {
VN_RELE(vp);
return -XFS_ERROR(EINVAL);
}
@@ -982,10 +988,10 @@ xfs_ioc_space(
if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
return -XFS_ERROR(EPERM);
- if (!(filp->f_flags & FMODE_WRITE))
+ if (!(filp->f_mode & FMODE_WRITE))
return -XFS_ERROR(EBADF);
- if (vp->v_type != VREG)
+ if (!VN_ISREG(vp))
return -XFS_ERROR(EINVAL);
if (copy_from_user(&bf, arg, sizeof(bf)))
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 0f8f138..4636b7f 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -47,8 +47,52 @@
#include "xfs_vnode.h"
#include "xfs_dfrag.h"
+#define _NATIVE_IOC(cmd, type) \
+ _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
+
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
#define BROKEN_X86_ALIGNMENT
+/* on ia32 l_start is on a 32-bit boundary */
+typedef struct xfs_flock64_32 {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start __attribute__((packed));
+ /* len == 0 means until end of file */
+ __s64 l_len __attribute__((packed));
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+} xfs_flock64_32_t;
+
+#define XFS_IOC_ALLOCSP_32 _IOW ('X', 10, struct xfs_flock64_32)
+#define XFS_IOC_FREESP_32 _IOW ('X', 11, struct xfs_flock64_32)
+#define XFS_IOC_ALLOCSP64_32 _IOW ('X', 36, struct xfs_flock64_32)
+#define XFS_IOC_FREESP64_32 _IOW ('X', 37, struct xfs_flock64_32)
+#define XFS_IOC_RESVSP_32 _IOW ('X', 40, struct xfs_flock64_32)
+#define XFS_IOC_UNRESVSP_32 _IOW ('X', 41, struct xfs_flock64_32)
+#define XFS_IOC_RESVSP64_32 _IOW ('X', 42, struct xfs_flock64_32)
+#define XFS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct xfs_flock64_32)
+
+/* just account for different alignment */
+STATIC unsigned long
+xfs_ioctl32_flock(
+ unsigned long arg)
+{
+ xfs_flock64_32_t __user *p32 = (void __user *)arg;
+ xfs_flock64_t __user *p = compat_alloc_user_space(sizeof(*p));
+
+ if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
+ copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
+ copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
+ copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
+ copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
+ copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
+ copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
+ return -EFAULT;
+
+ return (unsigned long)p;
+}
+
#else
typedef struct xfs_fsop_bulkreq32 {
@@ -103,7 +147,6 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
/* not handled
case XFS_IOC_FD_TO_HANDLE:
case XFS_IOC_PATH_TO_HANDLE:
- case XFS_IOC_PATH_TO_HANDLE:
case XFS_IOC_PATH_TO_FSHANDLE:
case XFS_IOC_OPEN_BY_HANDLE:
case XFS_IOC_FSSETDM_BY_HANDLE:
@@ -124,8 +167,21 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
case XFS_IOC_ERROR_CLEARALL:
break;
-#ifndef BROKEN_X86_ALIGNMENT
- /* xfs_flock_t and xfs_bstat_t have wrong u32 vs u64 alignment */
+#ifdef BROKEN_X86_ALIGNMENT
+ /* xfs_flock_t has wrong u32 vs u64 alignment */
+ case XFS_IOC_ALLOCSP_32:
+ case XFS_IOC_FREESP_32:
+ case XFS_IOC_ALLOCSP64_32:
+ case XFS_IOC_FREESP64_32:
+ case XFS_IOC_RESVSP_32:
+ case XFS_IOC_UNRESVSP_32:
+ case XFS_IOC_RESVSP64_32:
+ case XFS_IOC_UNRESVSP64_32:
+ arg = xfs_ioctl32_flock(arg);
+ cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
+ break;
+
+#else /* These are handled fine if no alignment issues */
case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP:
case XFS_IOC_RESVSP:
@@ -134,6 +190,9 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
case XFS_IOC_FREESP64:
case XFS_IOC_RESVSP64:
case XFS_IOC_UNRESVSP64:
+ break;
+
+ /* xfs_bstat_t still has wrong u32 vs u64 alignment */
case XFS_IOC_SWAPEXT:
break;
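Why both command encodings exist: on ia32 the 64-bit fields of struct xfs_flock64 are 4-byte aligned, so sizeof(), and with it the _IOW() command number, differs from the native 64-bit layout. An illustrative 32-bit userspace fragment, assuming the usual XFS ioctl headers:

/* Sketch: from a 32-bit binary the kernel receives the _32 command
 * number, which the compat table above repacks via xfs_ioctl32_flock()
 * before handing it to the native handler. */
static int example_resvsp(int fd)
{
	struct xfs_flock64 fl = { 0 };

	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 1024 * 1024;		/* preallocate 1MiB */

	return ioctl(fd, XFS_IOC_RESVSP64, &fl);
}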
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index f252605..77708a8 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -140,7 +140,6 @@ linvfs_mknod(
memset(&va, 0, sizeof(va));
va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
- va.va_type = IFTOVT(mode);
va.va_mode = mode;
switch (mode & S_IFMT) {
@@ -308,14 +307,13 @@ linvfs_symlink(
cvp = NULL;
memset(&va, 0, sizeof(va));
- va.va_type = VLNK;
- va.va_mode = irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO;
+ va.va_mode = S_IFLNK |
+ (irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);
va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
error = 0;
VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error);
if (!error && cvp) {
- ASSERT(cvp->v_type == VLNK);
ip = LINVFS_GET_IP(cvp);
d_instantiate(dentry, ip);
validate_fields(dir);
@@ -425,9 +423,14 @@ linvfs_follow_link(
return NULL;
}
-static void linvfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+STATIC void
+linvfs_put_link(
+ struct dentry *dentry,
+ struct nameidata *nd,
+ void *p)
{
- char *s = nd_get_link(nd);
+ char *s = nd_get_link(nd);
+
if (!IS_ERR(s))
kfree(s);
}
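The symlink hunk shows the wider convention change in this patch: with va_type gone, the S_IFMT bits travel inside va_mode. A hedged sketch for another file type (example_fill_vattr() is illustrative):

/* Sketch: a create path now packs type and permission bits into
 * one field instead of splitting them across va_type and va_mode. */
static void example_fill_vattr(vattr_t *va)
{
	memset(va, 0, sizeof(*va));
	va->va_mask = XFS_AT_TYPE | XFS_AT_MODE;
	va->va_mode = S_IFDIR | 0755;	/* S_IFMT bits live in va_mode */
}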
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 42dc5e4..68c5d88 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -64,7 +64,6 @@
#include <sema.h>
#include <time.h>
-#include <support/qsort.h>
#include <support/ktrace.h>
#include <support/debug.h>
#include <support/move.h>
@@ -104,6 +103,7 @@
#include <xfs_stats.h>
#include <xfs_sysctl.h>
#include <xfs_iops.h>
+#include <xfs_aops.h>
#include <xfs_super.h>
#include <xfs_globals.h>
#include <xfs_fs_subr.h>
@@ -254,11 +254,18 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define MAX(a,b) (max(a,b))
#define howmany(x, y) (((x)+((y)-1))/(y))
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
+#define qsort(a,n,s,fn) sort(a,n,s,fn,NULL)
+/*
+ * Various platform dependent calls that don't fit anywhere else
+ */
#define xfs_stack_trace() dump_stack()
-
#define xfs_itruncate_data(ip, off) \
(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
+#define xfs_statvfs_fsid(statp, mp) \
+ ({ u64 id = huge_encode_dev((mp)->m_dev); \
+ __kernel_fsid_t *fsid = &(statp)->f_fsid; \
+ (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
/* Move the kernel do_div definition off to one side */
@@ -371,6 +378,4 @@ static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
return(x * y);
}
-#define qsort(a, n, s, cmp) sort(a, n, s, cmp, NULL)
-
#endif /* __XFS_LINUX__ */
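The relocated qsort() wrapper maps onto the kernel's sort() from lib/sort.c, which takes an optional swap callback as its fifth argument. A minimal usage sketch (example_* names are illustrative):

/* Sketch: cmp follows the usual qsort contract (<0, 0, >0). */
static int example_cmp(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static void example_sort(int *array, size_t nr)
{
	/* expands to sort(array, nr, sizeof(int), example_cmp, NULL) */
	qsort(array, nr, sizeof(int), example_cmp);
}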
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index acab58c..3b5fabe 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -660,9 +660,6 @@ xfs_write(
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
- if (ioflags & IO_ISAIO)
- return XFS_ERROR(-ENOSYS);
-
if ((pos & target->pbr_smask) || (count & target->pbr_smask))
return XFS_ERROR(-EINVAL);
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index f197a72..6294dcd 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -70,9 +70,10 @@ struct xfs_iomap;
#define XFS_SENDFILE_ENTER 21
#define XFS_WRITEPAGE_ENTER 22
#define XFS_RELEASEPAGE_ENTER 23
-#define XFS_IOMAP_ALLOC_ENTER 24
-#define XFS_IOMAP_ALLOC_MAP 25
-#define XFS_IOMAP_UNWRITTEN 26
+#define XFS_INVALIDPAGE_ENTER 24
+#define XFS_IOMAP_ALLOC_ENTER 25
+#define XFS_IOMAP_ALLOC_MAP 26
+#define XFS_IOMAP_UNWRITTEN 27
extern void xfs_rw_enter_trace(int, struct xfs_iocore *,
void *, size_t, loff_t, int);
extern void xfs_inval_cached_trace(struct xfs_iocore *,
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f6dd7de..0da87bf 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -70,11 +70,15 @@
#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
+#include <linux/mempool.h>
#include <linux/writeback.h>
+#include <linux/kthread.h>
STATIC struct quotactl_ops linvfs_qops;
STATIC struct super_operations linvfs_sops;
-STATIC kmem_zone_t *linvfs_inode_zone;
+STATIC kmem_zone_t *xfs_vnode_zone;
+STATIC kmem_zone_t *xfs_ioend_zone;
+mempool_t *xfs_ioend_pool;
STATIC struct xfs_mount_args *
xfs_args_allocate(
@@ -138,24 +142,25 @@ STATIC __inline__ void
xfs_set_inodeops(
struct inode *inode)
{
- vnode_t *vp = LINVFS_GET_VP(inode);
-
- if (vp->v_type == VNON) {
- vn_mark_bad(vp);
- } else if (S_ISREG(inode->i_mode)) {
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
inode->i_op = &linvfs_file_inode_operations;
inode->i_fop = &linvfs_file_operations;
inode->i_mapping->a_ops = &linvfs_aops;
- } else if (S_ISDIR(inode->i_mode)) {
+ break;
+ case S_IFDIR:
inode->i_op = &linvfs_dir_inode_operations;
inode->i_fop = &linvfs_dir_operations;
- } else if (S_ISLNK(inode->i_mode)) {
+ break;
+ case S_IFLNK:
inode->i_op = &linvfs_symlink_inode_operations;
if (inode->i_blocks)
inode->i_mapping->a_ops = &linvfs_aops;
- } else {
+ break;
+ default:
inode->i_op = &linvfs_file_inode_operations;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ break;
}
}
@@ -167,16 +172,23 @@ xfs_revalidate_inode(
{
struct inode *inode = LINVFS_GET_IP(vp);
- inode->i_mode = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type);
+ inode->i_mode = ip->i_d.di_mode;
inode->i_nlink = ip->i_d.di_nlink;
inode->i_uid = ip->i_d.di_uid;
inode->i_gid = ip->i_d.di_gid;
- if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFBLK:
+ case S_IFCHR:
+ inode->i_rdev =
+ MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
+ sysv_minor(ip->i_df.if_u2.if_rdev));
+ break;
+ default:
inode->i_rdev = 0;
- } else {
- xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
- inode->i_rdev = MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
+ break;
}
+
inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_generation = ip->i_d.di_gen;
i_size_write(inode, ip->i_d.di_size);
@@ -231,7 +243,6 @@ xfs_initialize_vnode(
* finish our work.
*/
if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
- vp->v_type = IFTOVT(ip->i_d.di_mode);
xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
xfs_set_inodeops(inode);
@@ -274,8 +285,7 @@ linvfs_alloc_inode(
{
vnode_t *vp;
- vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_zone,
- kmem_flags_convert(KM_SLEEP));
+ vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP));
if (!vp)
return NULL;
return LINVFS_GET_IP(vp);
@@ -285,11 +295,11 @@ STATIC void
linvfs_destroy_inode(
struct inode *inode)
{
- kmem_cache_free(linvfs_inode_zone, LINVFS_GET_VP(inode));
+ kmem_zone_free(xfs_vnode_zone, LINVFS_GET_VP(inode));
}
STATIC void
-init_once(
+linvfs_inode_init_once(
void *data,
kmem_cache_t *cachep,
unsigned long flags)
@@ -302,21 +312,41 @@ init_once(
}
STATIC int
-init_inodecache( void )
+linvfs_init_zones(void)
{
- linvfs_inode_zone = kmem_cache_create("linvfs_icache",
+ xfs_vnode_zone = kmem_cache_create("xfs_vnode",
sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
- init_once, NULL);
- if (linvfs_inode_zone == NULL)
- return -ENOMEM;
+ linvfs_inode_init_once, NULL);
+ if (!xfs_vnode_zone)
+ goto out;
+
+ xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
+ if (!xfs_ioend_zone)
+ goto out_destroy_vnode_zone;
+
+ xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
+ mempool_alloc_slab, mempool_free_slab,
+ xfs_ioend_zone);
+ if (!xfs_ioend_pool)
+ goto out_free_ioend_zone;
+
return 0;
+
+
+ out_free_ioend_zone:
+ kmem_zone_destroy(xfs_ioend_zone);
+ out_destroy_vnode_zone:
+ kmem_zone_destroy(xfs_vnode_zone);
+ out:
+ return -ENOMEM;
}
STATIC void
-destroy_inodecache( void )
+linvfs_destroy_zones(void)
{
- if (kmem_cache_destroy(linvfs_inode_zone))
- printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__);
+ mempool_destroy(xfs_ioend_pool);
+ kmem_zone_destroy(xfs_vnode_zone);
+ kmem_zone_destroy(xfs_ioend_zone);
}
/*
@@ -354,17 +384,38 @@ linvfs_clear_inode(
struct inode *inode)
{
vnode_t *vp = LINVFS_GET_VP(inode);
+ int error, cache;
- if (vp) {
- vn_rele(vp);
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
- /*
- * Do all our cleanup, and remove this vnode.
- */
- vn_remove(vp);
+ vn_trace_entry(vp, "clear_inode", (inst_t *)__return_address);
+
+ XFS_STATS_INC(vn_rele);
+ XFS_STATS_INC(vn_remove);
+ XFS_STATS_INC(vn_reclaim);
+ XFS_STATS_DEC(vn_active);
+
+ /*
+ * This can happen because xfs_iget_core calls xfs_idestroy if we
+ * find an inode with di_mode == 0 but without IGET_CREATE set.
+ */
+ if (vp->v_fbhv)
+ VOP_INACTIVE(vp, NULL, cache);
+
+ VN_LOCK(vp);
+ vp->v_flag &= ~VMODIFIED;
+ VN_UNLOCK(vp, 0);
+
+ if (vp->v_fbhv) {
+ VOP_RECLAIM(vp, error);
+ if (error)
+ panic("vn_purge: cannot reclaim");
}
-}
+ ASSERT(vp->v_fbhv == NULL);
+
+#ifdef XFS_VNODE_TRACE
+ ktrace_free(vp->v_trace);
+#endif
+}
/*
* Enqueue a work item to be picked up by the vfs xfssyncd thread.
@@ -466,25 +517,16 @@ xfssyncd(
{
long timeleft;
vfs_t *vfsp = (vfs_t *) arg;
- struct list_head tmp;
struct vfs_sync_work *work, *n;
+	LIST_HEAD(tmp);
- daemonize("xfssyncd");
-
- vfsp->vfs_sync_work.w_vfs = vfsp;
- vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
- vfsp->vfs_sync_task = current;
- wmb();
- wake_up(&vfsp->vfs_wait_sync_task);
-
- INIT_LIST_HEAD(&tmp);
timeleft = (xfs_syncd_centisecs * HZ) / 100;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
timeleft = schedule_timeout(timeleft);
/* swsusp */
try_to_freeze();
- if (vfsp->vfs_flag & VFS_UMOUNT)
+ if (kthread_should_stop())
break;
spin_lock(&vfsp->vfs_sync_lock);
@@ -513,10 +555,6 @@ xfssyncd(
}
}
- vfsp->vfs_sync_task = NULL;
- wmb();
- wake_up(&vfsp->vfs_wait_sync_task);
-
return 0;
}
@@ -524,13 +562,11 @@ STATIC int
linvfs_start_syncd(
vfs_t *vfsp)
{
- int pid;
-
- pid = kernel_thread(xfssyncd, (void *) vfsp,
- CLONE_VM | CLONE_FS | CLONE_FILES);
- if (pid < 0)
- return -pid;
- wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
+ vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
+ vfsp->vfs_sync_work.w_vfs = vfsp;
+ vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
+ if (IS_ERR(vfsp->vfs_sync_task))
+ return -PTR_ERR(vfsp->vfs_sync_task);
return 0;
}
@@ -538,11 +574,7 @@ STATIC void
linvfs_stop_syncd(
vfs_t *vfsp)
{
- vfsp->vfs_flag |= VFS_UMOUNT;
- wmb();
-
- wake_up_process(vfsp->vfs_sync_task);
- wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
+ kthread_stop(vfsp->vfs_sync_task);
}
STATIC void
@@ -866,9 +898,9 @@ init_xfs_fs( void )
ktrace_init(64);
- error = init_inodecache();
+ error = linvfs_init_zones();
if (error < 0)
- goto undo_inodecache;
+ goto undo_zones;
error = pagebuf_init();
if (error < 0)
@@ -889,9 +921,9 @@ undo_register:
pagebuf_terminate();
undo_pagebuf:
- destroy_inodecache();
+ linvfs_destroy_zones();
-undo_inodecache:
+undo_zones:
return error;
}
@@ -903,7 +935,7 @@ exit_xfs_fs( void )
unregister_filesystem(&xfs_fs_type);
xfs_cleanup();
pagebuf_terminate();
- destroy_inodecache();
+ linvfs_destroy_zones();
ktrace_uninit();
}
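The ioend zone gains a mempool above so that writeback can always allocate its completion tracking, even under memory pressure. A hedged sketch of the allocation side, matching what xfs_alloc_ioend() does (example_ioend_cycle() is illustrative):

/* Sketch: mempool-backed ioend allocation. With a sleeping GFP mask
 * such as GFP_NOFS, mempool_alloc() waits for a reserved element to
 * be returned rather than failing, so writeback can make progress. */
static void example_ioend_cycle(void)
{
	xfs_ioend_t *ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/* ... track an I/O to completion ... */

	mempool_free(ioend, xfs_ioend_pool);
}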
diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c
index 669c616..34cc902 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.c
+++ b/fs/xfs/linux-2.6/xfs_vfs.c
@@ -251,7 +251,6 @@ vfs_allocate( void )
bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
INIT_LIST_HEAD(&vfsp->vfs_sync_list);
spin_lock_init(&vfsp->vfs_sync_lock);
- init_waitqueue_head(&vfsp->vfs_wait_sync_task);
init_waitqueue_head(&vfsp->vfs_wait_single_sync_task);
return vfsp;
}
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index 7ee1f71..f0ab574f 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -65,7 +65,6 @@ typedef struct vfs {
spinlock_t vfs_sync_lock; /* work item list lock */
int vfs_sync_seq; /* sync thread generation no. */
wait_queue_head_t vfs_wait_single_sync_task;
- wait_queue_head_t vfs_wait_sync_task;
} vfs_t;
#define vfs_fbhv vfs_bh.bh_first /* 1st on vfs behavior chain */
@@ -96,7 +95,6 @@ typedef enum {
#define VFS_RDONLY 0x0001 /* read-only vfs */
#define VFS_GRPID 0x0002 /* group-ID assigned from directory */
#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */
-#define VFS_UMOUNT 0x0008 /* unmount in progress */
#define VFS_END 0x0008 /* max flag */
#define SYNC_ATTR 0x0001 /* sync attributes */
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 250cad5..268f45b 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -42,93 +42,33 @@ DEFINE_SPINLOCK(vnumber_lock);
*/
#define NVSYNC 37
#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
-sv_t vsync[NVSYNC];
-
-/*
- * Translate stat(2) file types to vnode types and vice versa.
- * Aware of numeric order of S_IFMT and vnode type values.
- */
-enum vtype iftovt_tab[] = {
- VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
- VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
-};
-
-u_short vttoif_tab[] = {
- 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK
-};
+STATIC wait_queue_head_t vsync[NVSYNC];
void
vn_init(void)
{
- register sv_t *svp;
- register int i;
+ int i;
- for (svp = vsync, i = 0; i < NVSYNC; i++, svp++)
- init_sv(svp, SV_DEFAULT, "vsy", i);
+ for (i = 0; i < NVSYNC; i++)
+ init_waitqueue_head(&vsync[i]);
}
-/*
- * Clean a vnode of filesystem-specific data and prepare it for reuse.
- */
-STATIC int
-vn_reclaim(
+void
+vn_iowait(
struct vnode *vp)
{
- int error;
+ wait_queue_head_t *wq = vptosync(vp);
- XFS_STATS_INC(vn_reclaim);
- vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);
-
- /*
- * Only make the VOP_RECLAIM call if there are behaviors
- * to call.
- */
- if (vp->v_fbhv) {
- VOP_RECLAIM(vp, error);
- if (error)
- return -error;
- }
- ASSERT(vp->v_fbhv == NULL);
-
- VN_LOCK(vp);
- vp->v_flag &= (VRECLM|VWAIT);
- VN_UNLOCK(vp, 0);
-
- vp->v_type = VNON;
- vp->v_fbhv = NULL;
-
-#ifdef XFS_VNODE_TRACE
- ktrace_free(vp->v_trace);
- vp->v_trace = NULL;
-#endif
-
- return 0;
-}
-
-STATIC void
-vn_wakeup(
- struct vnode *vp)
-{
- VN_LOCK(vp);
- if (vp->v_flag & VWAIT)
- sv_broadcast(vptosync(vp));
- vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED);
- VN_UNLOCK(vp, 0);
+ wait_event(*wq, (atomic_read(&vp->v_iocount) == 0));
}
-int
-vn_wait(
+void
+vn_iowake(
struct vnode *vp)
{
- VN_LOCK(vp);
- if (vp->v_flag & (VINACT | VRECLM)) {
- vp->v_flag |= VWAIT;
- sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
- return 1;
- }
- VN_UNLOCK(vp, 0);
- return 0;
+ if (atomic_dec_and_test(&vp->v_iocount))
+ wake_up(vptosync(vp));
}
struct vnode *
@@ -154,6 +94,8 @@ vn_initialize(
/* Initialize the first behavior and the behavior chain head. */
vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode");
+ atomic_set(&vp->v_iocount, 0);
+
#ifdef XFS_VNODE_TRACE
vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif /* XFS_VNODE_TRACE */
@@ -163,30 +105,6 @@ vn_initialize(
}
/*
- * Get a reference on a vnode.
- */
-vnode_t *
-vn_get(
- struct vnode *vp,
- vmap_t *vmap)
-{
- struct inode *inode;
-
- XFS_STATS_INC(vn_get);
- inode = LINVFS_GET_IP(vp);
- if (inode->i_state & I_FREEING)
- return NULL;
-
- inode = ilookup(vmap->v_vfsp->vfs_super, vmap->v_ino);
- if (!inode) /* Inode not present */
- return NULL;
-
- vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
-
- return vp;
-}
-
-/*
* Revalidate the Linux inode from the vattr.
* Note: i_size _not_ updated; we must hold the inode
* semaphore when doing that - callers responsibility.
@@ -198,7 +116,7 @@ vn_revalidate_core(
{
struct inode *inode = LINVFS_GET_IP(vp);
- inode->i_mode = VTTOIF(vap->va_type) | vap->va_mode;
+ inode->i_mode = vap->va_mode;
inode->i_nlink = vap->va_nlink;
inode->i_uid = vap->va_uid;
inode->i_gid = vap->va_gid;
@@ -247,71 +165,6 @@ vn_revalidate(
}
/*
- * purge a vnode from the cache
- * At this point the vnode is guaranteed to have no references (vn_count == 0)
- * The caller has to make sure that there are no ways someone could
- * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock).
- */
-void
-vn_purge(
- struct vnode *vp,
- vmap_t *vmap)
-{
- vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);
-
-again:
- /*
- * Check whether vp has already been reclaimed since our caller
- * sampled its version while holding a filesystem cache lock that
- * its VOP_RECLAIM function acquires.
- */
- VN_LOCK(vp);
- if (vp->v_number != vmap->v_number) {
- VN_UNLOCK(vp, 0);
- return;
- }
-
- /*
- * If vp is being reclaimed or inactivated, wait until it is inert,
- * then proceed. Can't assume that vnode is actually reclaimed
- * just because the reclaimed flag is asserted -- a vn_alloc
- * reclaim can fail.
- */
- if (vp->v_flag & (VINACT | VRECLM)) {
- ASSERT(vn_count(vp) == 0);
- vp->v_flag |= VWAIT;
- sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
- goto again;
- }
-
- /*
- * Another process could have raced in and gotten this vnode...
- */
- if (vn_count(vp) > 0) {
- VN_UNLOCK(vp, 0);
- return;
- }
-
- XFS_STATS_DEC(vn_active);
- vp->v_flag |= VRECLM;
- VN_UNLOCK(vp, 0);
-
- /*
- * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells
- * vp's filesystem to flush and invalidate all cached resources.
- * When vn_reclaim returns, vp should have no private data,
- * either in a system cache or attached to v_data.
- */
- if (vn_reclaim(vp) != 0)
- panic("vn_purge: cannot reclaim");
-
- /*
- * Wakeup anyone waiting for vp to be reclaimed.
- */
- vn_wakeup(vp);
-}
-
-/*
* Add a reference to a referenced vnode.
*/
struct vnode *
@@ -330,80 +183,6 @@ vn_hold(
return vp;
}
-/*
- * Call VOP_INACTIVE on last reference.
- */
-void
-vn_rele(
- struct vnode *vp)
-{
- int vcnt;
- int cache;
-
- XFS_STATS_INC(vn_rele);
-
- VN_LOCK(vp);
-
- vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
- vcnt = vn_count(vp);
-
- /*
- * Since we always get called from put_inode we know
- * that i_count won't be decremented after we
- * return.
- */
- if (!vcnt) {
- /*
- * As soon as we turn this on, noone can find us in vn_get
- * until we turn off VINACT or VRECLM
- */
- vp->v_flag |= VINACT;
- VN_UNLOCK(vp, 0);
-
- /*
- * Do not make the VOP_INACTIVE call if there
- * are no behaviors attached to the vnode to call.
- */
- if (vp->v_fbhv)
- VOP_INACTIVE(vp, NULL, cache);
-
- VN_LOCK(vp);
- if (vp->v_flag & VWAIT)
- sv_broadcast(vptosync(vp));
-
- vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
- }
-
- VN_UNLOCK(vp, 0);
-
- vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
-}
-
-/*
- * Finish the removal of a vnode.
- */
-void
-vn_remove(
- struct vnode *vp)
-{
- vmap_t vmap;
-
- /* Make sure we don't do this to the same vnode twice */
- if (!(vp->v_fbhv))
- return;
-
- XFS_STATS_INC(vn_remove);
- vn_trace_exit(vp, "vn_remove", (inst_t *)__return_address);
-
- /*
- * After the following purge the vnode
- * will no longer exist.
- */
- VMAP(vp, vmap);
- vn_purge(vp, &vmap);
-}
-
-
#ifdef XFS_VNODE_TRACE
#define KTRACE_ENTER(vp, vk, s, line, ra) \
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index a6e57c6..35f306c 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -65,10 +65,6 @@ struct vattr;
struct xfs_iomap;
struct attrlist_cursor_kern;
-/*
- * Vnode types. VNON means no type.
- */
-enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VFIFO, VBAD, VSOCK };
typedef xfs_ino_t vnumber_t;
typedef struct dentry vname_t;
@@ -77,15 +73,14 @@ typedef bhv_head_t vn_bhv_head_t;
/*
* MP locking protocols:
* v_flag, v_vfsp VN_LOCK/VN_UNLOCK
- * v_type read-only or fs-dependent
*/
typedef struct vnode {
__u32 v_flag; /* vnode flags (see below) */
- enum vtype v_type; /* vnode type */
struct vfs *v_vfsp; /* ptr to containing VFS */
vnumber_t v_number; /* in-core vnode number */
vn_bhv_head_t v_bh; /* behavior head */
spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */
+ atomic_t v_iocount; /* outstanding I/O count */
#ifdef XFS_VNODE_TRACE
struct ktrace *v_trace; /* trace header structure */
#endif
@@ -93,6 +88,12 @@ typedef struct vnode {
/* inode MUST be last */
} vnode_t;
+#define VN_ISLNK(vp) S_ISLNK((vp)->v_inode.i_mode)
+#define VN_ISREG(vp) S_ISREG((vp)->v_inode.i_mode)
+#define VN_ISDIR(vp) S_ISDIR((vp)->v_inode.i_mode)
+#define VN_ISCHR(vp) S_ISCHR((vp)->v_inode.i_mode)
+#define VN_ISBLK(vp) S_ISBLK((vp)->v_inode.i_mode)
+
#define v_fbhv v_bh.bh_first /* first behavior */
#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */
@@ -133,22 +134,8 @@ typedef enum {
#define LINVFS_GET_IP(vp) (&(vp)->v_inode)
/*
- * Convert between vnode types and inode formats (since POSIX.1
- * defines mode word of stat structure in terms of inode formats).
- */
-extern enum vtype iftovt_tab[];
-extern u_short vttoif_tab[];
-#define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12])
-#define VTTOIF(indx) (vttoif_tab[(int)(indx)])
-#define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode))
-
-
-/*
* Vnode flags.
*/
-#define VINACT 0x1 /* vnode is being inactivated */
-#define VRECLM 0x2 /* vnode is being reclaimed */
-#define VWAIT 0x4 /* waiting for VINACT/VRECLM to end */
#define VMODIFIED 0x8 /* XFS inode state possibly differs */
/* to the Linux inode state. */
@@ -408,7 +395,6 @@ typedef struct vnodeops {
*/
typedef struct vattr {
int va_mask; /* bit-mask of attributes present */
- enum vtype va_type; /* vnode type (for create) */
mode_t va_mode; /* file access mode and type */
xfs_nlink_t va_nlink; /* number of references to file */
uid_t va_uid; /* owner user id */
@@ -498,27 +484,12 @@ typedef struct vattr {
* Check whether mandatory file locking is enabled.
*/
#define MANDLOCK(vp, mode) \
- ((vp)->v_type == VREG && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
+ (VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
extern void vn_init(void);
-extern int vn_wait(struct vnode *);
extern vnode_t *vn_initialize(struct inode *);
/*
- * Acquiring and invalidating vnodes:
- *
- * if (vn_get(vp, version, 0))
- * ...;
- * vn_purge(vp, version);
- *
- * vn_get and vn_purge must be called with vmap_t arguments, sampled
- * while a lock that the vnode's VOP_RECLAIM function acquires is
- * held, to ensure that the vnode sampled with the lock held isn't
- * recycled (VOP_RECLAIMed) or deallocated between the release of the lock
- * and the subsequent vn_get or vn_purge.
- */
-
-/*
* vnode_map structures _must_ match vn_epoch and vnode structure sizes.
*/
typedef struct vnode_map {
@@ -531,11 +502,11 @@ typedef struct vnode_map {
(vmap).v_number = (vp)->v_number, \
(vmap).v_ino = (vp)->v_inode.i_ino; }
-extern void vn_purge(struct vnode *, vmap_t *);
-extern vnode_t *vn_get(struct vnode *, vmap_t *);
extern int vn_revalidate(struct vnode *);
extern void vn_revalidate_core(struct vnode *, vattr_t *);
-extern void vn_remove(struct vnode *);
+
+extern void vn_iowait(struct vnode *vp);
+extern void vn_iowake(struct vnode *vp);
static inline int vn_count(struct vnode *vp)
{
@@ -546,7 +517,6 @@ static inline int vn_count(struct vnode *vp)
* Vnode reference counting functions (and macros for compatibility).
*/
extern vnode_t *vn_hold(struct vnode *);
-extern void vn_rele(struct vnode *);
#if defined(XFS_VNODE_TRACE)
#define VN_HOLD(vp) \
@@ -560,6 +530,12 @@ extern void vn_rele(struct vnode *);
#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp)))
#endif
+static inline struct vnode *vn_grab(struct vnode *vp)
+{
+ struct inode *inode = igrab(LINVFS_GET_IP(vp));
+ return inode ? LINVFS_GET_VP(inode) : NULL;
+}
+
/*
* Vname handling macros.
*/
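Taken together, the vnode changes replace the VINACT/VRECLM state machine with two smaller protocols: an outstanding-I/O count waited on through vn_iowait()/vn_iowake(), and igrab()-based lookup through vn_grab(). A hedged sketch of both (example_* functions are illustrative):

/* Sketch: the two new vnode protocols introduced by this patch. */
static void example_issue_io(vnode_t *vp)
{
	atomic_inc(&vp->v_iocount);	/* as xfs_alloc_ioend() does */
	/* ... submit I/O; the completion path calls vn_iowake(vp) ... */
}

static void example_reclaim(vnode_t *vp)
{
	vn_iowait(vp);	/* sleep until v_iocount drops to zero */
	/* ... now safe to tear the vnode down ... */
}

static void example_lookup(vnode_t *vp)
{
	vnode_t *held = vn_grab(vp);	/* NULL if the inode is freeing */

	if (!held)
		return;
	/* ... use held ... */
	VN_RELE(held);
}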