author    Theodore Ts'o <tytso@mit.edu>    2009-09-29 17:31:31 (GMT)
committer Theodore Ts'o <tytso@mit.edu>    2009-09-29 17:31:31 (GMT)
commit    55138e0bc29c0751e2152df9ad35deea542f29b3 (patch)
tree      40dbc77de3e7a426030bd740ba8e2fd0d2171523 /include/trace/events/ext4.h
parent    71780577306fd1e76c7a92e3b308db624d03adb9 (diff)
download  linux-55138e0bc29c0751e2152df9ad35deea542f29b3.tar.xz
ext4: Adjust ext4_da_writepages() to write out larger contiguous chunks
Work around problems in the writeback code to force out writebacks in larger chunks than just 4mb, which is just too small. This also works around limitations in the ext4 block allocator, which can't allocate more than 2048 blocks at a time. So we need to defeat the round-robin characteristics of the writeback code and try to write out as many blocks in one inode as possible before allowing the writeback code to move on to another inode. We add a new per-filesystem tunable, max_writeback_mb_bump, which caps this to a default of 128mb per inode.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
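To illustrate the mechanism described above, here is a minimal userspace sketch (not the actual ext4 implementation) of how a megabyte-based tunable such as max_writeback_mb_bump might be converted into a page count and used to raise nr_to_write; the PAGE_SHIFT value and the starting nr_to_write are illustrative assumptions.

/*
 * Illustrative sketch only -- not the ext4 code.  Shows how a cap
 * expressed in megabytes (default 128, per the commit message) could be
 * turned into a page count and used to bump nr_to_write, so one pass
 * writes a larger contiguous chunk of a single inode before the
 * round-robin writeback code moves on to the next inode.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumes 4k pages */

int main(void)
{
	unsigned long max_writeback_mb_bump = 128;	/* default from the commit message */
	long nr_to_write = 1024;			/* example value handed in by writeback (~4MB) */

	/* MB -> pages: 1MB is 2^20 bytes, so shift by (20 - PAGE_SHIFT) */
	unsigned long max_pages = max_writeback_mb_bump << (20 - PAGE_SHIFT);

	if (nr_to_write < (long) max_pages)
		nr_to_write = max_pages;	/* bump up to the per-inode cap */

	printf("cap = %lu pages, nr_to_write bumped to %ld\n", max_pages, nr_to_write);
	return 0;
}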
Diffstat (limited to 'include/trace/events/ext4.h')
-rw-r--r--  include/trace/events/ext4.h | 14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index c1bd8f1..7c6bbb7 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -236,6 +236,7 @@ TRACE_EVENT(ext4_da_writepages,
__field( char, for_kupdate )
__field( char, for_reclaim )
__field( char, range_cyclic )
+ __field( pgoff_t, writeback_index )
),
TP_fast_assign(
@@ -249,15 +250,17 @@ TRACE_EVENT(ext4_da_writepages,
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
+ __entry->writeback_index = inode->i_mapping->writeback_index;
),
- TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d",
+ TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu",
jbd2_dev_to_name(__entry->dev),
(unsigned long) __entry->ino, __entry->nr_to_write,
__entry->pages_skipped, __entry->range_start,
__entry->range_end, __entry->nonblocking,
__entry->for_kupdate, __entry->for_reclaim,
- __entry->range_cyclic)
+ __entry->range_cyclic,
+ (unsigned long) __entry->writeback_index)
);
TRACE_EVENT(ext4_da_write_pages,
@@ -309,6 +312,7 @@ TRACE_EVENT(ext4_da_writepages_result,
__field( char, encountered_congestion )
__field( char, more_io )
__field( char, no_nrwrite_index_update )
+ __field( pgoff_t, writeback_index )
),
TP_fast_assign(
@@ -320,14 +324,16 @@ TRACE_EVENT(ext4_da_writepages_result,
__entry->encountered_congestion = wbc->encountered_congestion;
__entry->more_io = wbc->more_io;
__entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update;
+ __entry->writeback_index = inode->i_mapping->writeback_index;
),
- TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d",
+ TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d writeback_index %lu",
jbd2_dev_to_name(__entry->dev),
(unsigned long) __entry->ino, __entry->ret,
__entry->pages_written, __entry->pages_skipped,
__entry->encountered_congestion, __entry->more_io,
- __entry->no_nrwrite_index_update)
+ __entry->no_nrwrite_index_update,
+ (unsigned long) __entry->writeback_index)
);
TRACE_EVENT(ext4_da_write_begin,