Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/backref.c         |  8
-rw-r--r--  fs/btrfs/file.c            |  2
-rw-r--r--  fs/btrfs/ioctl.c           |  8
-rw-r--r--  fs/btrfs/volumes.h         |  8
-rw-r--r--  fs/cifs/file.c             |  6
-rw-r--r--  fs/dax.c                   | 70
-rw-r--r--  fs/ext4/Kconfig            |  2
-rw-r--r--  fs/ext4/readpage.c         |  4
-rw-r--r--  fs/file.c                  | 42
-rw-r--r--  fs/fs-writeback.c          | 35
-rw-r--r--  fs/mpage.c                 | 15
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c   |  3
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c |  2
-rw-r--r--  fs/overlayfs/copy_up.c     |  6
-rw-r--r--  fs/overlayfs/inode.c       |  3
-rw-r--r--  fs/overlayfs/super.c       |  2
-rw-r--r--  fs/proc/array.c            | 16
-rw-r--r--  fs/proc/base.c             |  9
-rw-r--r--  fs/proc/meminfo.c          |  7
-rw-r--r--  fs/ramfs/file-nommu.c      |  5
20 files changed, 158 insertions, 95 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ecbc63d..9a2ec79 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1828,7 +1828,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
int found = 0;
struct extent_buffer *eb;
struct btrfs_inode_extref *extref;
- struct extent_buffer *leaf;
u32 item_size;
u32 cur_offset;
unsigned long ptr;
@@ -1856,9 +1855,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
btrfs_release_path(path);
- leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, slot);
- ptr = btrfs_item_ptr_offset(leaf, slot);
+ item_size = btrfs_item_size_nr(eb, slot);
+ ptr = btrfs_item_ptr_offset(eb, slot);
cur_offset = 0;
while (cur_offset < item_size) {
@@ -1872,7 +1870,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
if (ret)
break;
- cur_offset += btrfs_inode_extref_name_len(leaf, extref);
+ cur_offset += btrfs_inode_extref_name_len(eb, extref);
cur_offset += sizeof(*extref);
}
btrfs_tree_read_unlock_blocking(eb);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index b823fac..8c6f247 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2584,7 +2584,7 @@ static long btrfs_fallocate(struct file *file, int mode,
alloc_start);
if (ret)
goto out;
- } else {
+ } else if (offset + len > inode->i_size) {
/*
* If we are fallocating from the end of the file onward we
* need to zero out the end of the page if i_size lands in the
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0adf542..8d20f3b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4639,6 +4639,11 @@ locked:
bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
}
+ if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
+ ret = -EINVAL;
+ goto out_bctl;
+ }
+
do_balance:
/*
* Ownership of bctl and mutually_exclusive_operation_running
@@ -4650,12 +4655,15 @@ do_balance:
need_unlock = false;
ret = btrfs_balance(bctl, bargs);
+ bctl = NULL;
if (arg) {
if (copy_to_user(arg, bargs, sizeof(*bargs)))
ret = -EFAULT;
}
+out_bctl:
+ kfree(bctl);
out_bargs:
kfree(bargs);
out_unlock:
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 2ca784a..595279a 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -376,6 +376,14 @@ struct map_lookup {
#define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4)
#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5)
+#define BTRFS_BALANCE_ARGS_MASK \
+ (BTRFS_BALANCE_ARGS_PROFILES | \
+ BTRFS_BALANCE_ARGS_USAGE | \
+ BTRFS_BALANCE_ARGS_DEVID | \
+ BTRFS_BALANCE_ARGS_DRANGE | \
+ BTRFS_BALANCE_ARGS_VRANGE | \
+ BTRFS_BALANCE_ARGS_LIMIT)
+
/*
* Profile changing flags. When SOFT is set we won't relocate chunk if
* it already has the target profile (even though it may be
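The two btrfs hunks above work together: volumes.h now defines BTRFS_BALANCE_ARGS_MASK as the OR of all recognised per-type argument bits, and the balance ioctl rejects any flag outside that mask (plus BTRFS_BALANCE_TYPE_MASK) with -EINVAL. A minimal sketch of that validation step, with the mask value and names here being illustrative stand-ins only:

/* Hedged sketch of the flag check added in the balance ioctl path:
 * reject requests that set bits outside the known masks.  "allowed"
 * stands in for BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK. */
#include <errno.h>
#include <stdint.h>

static int check_balance_flags(uint64_t flags, uint64_t allowed)
{
	if (flags & ~allowed)	/* any bit we do not understand? */
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* e.g. a request with an unknown high bit set is rejected */
	return check_balance_flags(1ULL << 63, 0x3fULL) == -EINVAL ? 0 : 1;
}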
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e2a6af1..62203c3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
struct page *page, *tpage;
unsigned int expected_index;
int rc;
+ gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
INIT_LIST_HEAD(tmplist);
@@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
*/
__set_page_locked(page);
rc = add_to_page_cache_locked(page, mapping,
- page->index, GFP_KERNEL);
+ page->index, gfp);
/* give up if we can't stick it in the cache */
if (rc) {
@@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
break;
__set_page_locked(page);
- if (add_to_page_cache_locked(page, mapping, page->index,
- GFP_KERNEL)) {
+ if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
__clear_page_locked(page);
break;
}
diff --git a/fs/dax.c b/fs/dax.c
index bcfb14b..a86d3cc 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -285,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh,
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
+ struct address_space *mapping = inode->i_mapping;
sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
unsigned long vaddr = (unsigned long)vmf->virtual_address;
void __pmem *addr;
@@ -292,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
pgoff_t size;
int error;
+ i_mmap_lock_read(mapping);
+
/*
* Check truncate didn't happen while we were allocating a block.
* If it did, this block may or may not be still allocated to the
@@ -321,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
error = vm_insert_mixed(vma, vaddr, pfn);
out:
+ i_mmap_unlock_read(mapping);
+
return error;
}
@@ -382,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
* from a read fault and we've raced with a truncate
*/
error = -EIO;
- goto unlock;
+ goto unlock_page;
}
- } else {
- i_mmap_lock_write(mapping);
}
error = get_block(inode, block, &bh, 0);
if (!error && (bh.b_size < PAGE_SIZE))
error = -EIO; /* fs corruption? */
if (error)
- goto unlock;
+ goto unlock_page;
if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -403,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if (!error && (bh.b_size < PAGE_SIZE))
error = -EIO;
if (error)
- goto unlock;
+ goto unlock_page;
} else {
- i_mmap_unlock_write(mapping);
return dax_load_hole(mapping, page, vmf);
}
}
@@ -417,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
else
clear_user_highpage(new_page, vaddr);
if (error)
- goto unlock;
+ goto unlock_page;
vmf->page = page;
if (!page) {
+ i_mmap_lock_read(mapping);
/* Check we didn't race with truncate */
size = (i_size_read(inode) + PAGE_SIZE - 1) >>
PAGE_SHIFT;
if (vmf->pgoff >= size) {
+ i_mmap_unlock_read(mapping);
error = -EIO;
- goto unlock;
+ goto out;
}
}
return VM_FAULT_LOCKED;
@@ -461,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
}
- if (!page)
- i_mmap_unlock_write(mapping);
out:
if (error == -ENOMEM)
return VM_FAULT_OOM | major;
@@ -471,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
return VM_FAULT_SIGBUS | major;
return VM_FAULT_NOPAGE | major;
- unlock:
+ unlock_page:
if (page) {
unlock_page(page);
page_cache_release(page);
- } else {
- i_mmap_unlock_write(mapping);
}
-
goto out;
}
EXPORT_SYMBOL(__dax_fault);
@@ -556,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
bh.b_size = PMD_SIZE;
- i_mmap_lock_write(mapping);
length = get_block(inode, block, &bh, write);
if (length)
return VM_FAULT_SIGBUS;
+ i_mmap_lock_read(mapping);
/*
* If the filesystem isn't willing to tell us the length of a hole,
@@ -569,36 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
goto fallback;
- sector = bh.b_blocknr << (blkbits - 9);
-
- if (buffer_unwritten(&bh) || buffer_new(&bh)) {
- int i;
-
- length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
- bh.b_size);
- if (length < 0) {
- result = VM_FAULT_SIGBUS;
- goto out;
- }
- if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
- goto fallback;
-
- for (i = 0; i < PTRS_PER_PMD; i++)
- clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
- wmb_pmem();
- count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
- result |= VM_FAULT_MAJOR;
- }
-
/*
* If we allocated new storage, make sure no process has any
* zero pages covering this hole
*/
if (buffer_new(&bh)) {
- i_mmap_unlock_write(mapping);
+ i_mmap_unlock_read(mapping);
unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
- i_mmap_lock_write(mapping);
+ i_mmap_lock_read(mapping);
}
/*
@@ -635,6 +612,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
result = VM_FAULT_NOPAGE;
spin_unlock(ptl);
} else {
+ sector = bh.b_blocknr << (blkbits - 9);
length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
bh.b_size);
if (length < 0) {
@@ -644,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
goto fallback;
+ if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+ int i;
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+ wmb_pmem();
+ count_vm_event(PGMAJFAULT);
+ mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ result |= VM_FAULT_MAJOR;
+ }
+
result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
}
out:
+ i_mmap_unlock_read(mapping);
+
if (buffer_unwritten(&bh))
complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
- i_mmap_unlock_write(mapping);
-
return result;
fallback:
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 47728da..b46e9fc 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -63,7 +63,7 @@ config EXT4_FS
If unsure, say N.
config EXT4_USE_FOR_EXT2
- bool "Use ext4 for ext2/ext3 file systems"
+ bool "Use ext4 for ext2 file systems"
depends on EXT4_FS
depends on EXT2_FS=n
default y
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index e26803f..560af04 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (pages) {
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
- if (add_to_page_cache_lru(page, mapping,
- page->index, GFP_KERNEL))
+ if (add_to_page_cache_lru(page, mapping, page->index,
+ GFP_KERNEL & mapping_gfp_mask(mapping)))
goto next_page;
}
diff --git a/fs/file.c b/fs/file.c
index 6c672ad..c6986dc 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -56,6 +56,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
__free_fdtable(container_of(rcu, struct fdtable, rcu));
}
+#define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))
+#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
+
/*
* Expand the fdset in the files_struct. Called with the files spinlock
* held for write.
@@ -77,6 +80,11 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
memset((char *)(nfdt->open_fds) + cpy, 0, set);
memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
+
+ cpy = BITBIT_SIZE(ofdt->max_fds);
+ set = BITBIT_SIZE(nfdt->max_fds) - cpy;
+ memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
+ memset(cpy+(char *)nfdt->full_fds_bits, 0, set);
}
static struct fdtable * alloc_fdtable(unsigned int nr)
@@ -115,12 +123,14 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
fdt->fd = data;
data = alloc_fdmem(max_t(size_t,
- 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
+ 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
if (!data)
goto out_arr;
fdt->open_fds = data;
data += nr / BITS_PER_BYTE;
fdt->close_on_exec = data;
+ data += nr / BITS_PER_BYTE;
+ fdt->full_fds_bits = data;
return fdt;
@@ -226,17 +236,22 @@ static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
- __clear_bit(fd, fdt->close_on_exec);
+ if (test_bit(fd, fdt->close_on_exec))
+ __clear_bit(fd, fdt->close_on_exec);
}
-static inline void __set_open_fd(int fd, struct fdtable *fdt)
+static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
__set_bit(fd, fdt->open_fds);
+ fd /= BITS_PER_LONG;
+ if (!~fdt->open_fds[fd])
+ __set_bit(fd, fdt->full_fds_bits);
}
-static inline void __clear_open_fd(int fd, struct fdtable *fdt)
+static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
__clear_bit(fd, fdt->open_fds);
+ __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}
static int count_open_files(struct fdtable *fdt)
@@ -280,6 +295,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
new_fdt->max_fds = NR_OPEN_DEFAULT;
new_fdt->close_on_exec = newf->close_on_exec_init;
new_fdt->open_fds = newf->open_fds_init;
+ new_fdt->full_fds_bits = newf->full_fds_bits_init;
new_fdt->fd = &newf->fd_array[0];
spin_lock(&oldf->file_lock);
@@ -323,6 +339,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
+ memcpy(new_fdt->full_fds_bits, old_fdt->full_fds_bits, BITBIT_SIZE(open_files));
for (i = open_files; i != 0; i--) {
struct file *f = *old_fds++;
@@ -454,10 +471,25 @@ struct files_struct init_files = {
.fd = &init_files.fd_array[0],
.close_on_exec = init_files.close_on_exec_init,
.open_fds = init_files.open_fds_init,
+ .full_fds_bits = init_files.full_fds_bits_init,
},
.file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};
+static unsigned long find_next_fd(struct fdtable *fdt, unsigned long start)
+{
+ unsigned long maxfd = fdt->max_fds;
+ unsigned long maxbit = maxfd / BITS_PER_LONG;
+ unsigned long bitbit = start / BITS_PER_LONG;
+
+ bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
+ if (bitbit > maxfd)
+ return maxfd;
+ if (bitbit > start)
+ start = bitbit;
+ return find_next_zero_bit(fdt->open_fds, maxfd, start);
+}
+
/*
* allocate a file descriptor, mark it busy.
*/
@@ -476,7 +508,7 @@ repeat:
fd = files->next_fd;
if (fd < fdt->max_fds)
- fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);
+ fd = find_next_fd(fdt, fd);
/*
* N.B. For clone tasks sharing a files structure, this test
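The fs/file.c change adds a second-level bitmap, full_fds_bits, with one bit per word of open_fds; a set bit means every descriptor in that word is in use, so find_next_fd() can skip whole words instead of scanning them bit by bit. A rough userspace analogue of the idea (a sketch only: names and sizes are illustrative, and the tail scan stands in for the kernel's find_next_zero_bit()):

/* Hedged sketch of a two-level fd bitmap.  open_bits has one bit per fd;
 * full_bits has one bit per word of open_bits and marks words that are
 * completely allocated, so the search can jump over them. */
#include <limits.h>
#include <stdio.h>

#define MAX_FDS 1024
#define BITS    (sizeof(unsigned long) * CHAR_BIT)
#define NWORDS  (MAX_FDS / BITS)

static unsigned long open_bits[NWORDS];                      /* one bit per fd   */
static unsigned long full_bits[(NWORDS + BITS - 1) / BITS];  /* one bit per word */

static void set_open(unsigned int fd)
{
	open_bits[fd / BITS] |= 1UL << (fd % BITS);
	if (open_bits[fd / BITS] == ~0UL)        /* word just became full */
		full_bits[fd / BITS / BITS] |= 1UL << ((fd / BITS) % BITS);
}

static unsigned int find_next_fd(unsigned int start)
{
	unsigned int word = start / BITS;

	/* skip words marked full in the second-level bitmap */
	while (word < NWORDS &&
	       (full_bits[word / BITS] & (1UL << (word % BITS))))
		word++;
	if (word * BITS > start)
		start = word * BITS;

	/* simplified bit-by-bit tail scan (the kernel uses find_next_zero_bit) */
	for (; start < MAX_FDS; start++)
		if (!(open_bits[start / BITS] & (1UL << (start % BITS))))
			return start;
	return MAX_FDS;                          /* table full */
}

int main(void)
{
	set_open(0);
	set_open(1);
	printf("next free fd: %u\n", find_next_fd(0));   /* prints 2 */
	return 0;
}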
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 091a364..29e4599 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -778,19 +778,24 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
struct wb_writeback_work *base_work,
bool skip_if_busy)
{
- int next_memcg_id = 0;
- struct bdi_writeback *wb;
- struct wb_iter iter;
+ struct bdi_writeback *last_wb = NULL;
+ struct bdi_writeback *wb = list_entry_rcu(&bdi->wb_list,
+ struct bdi_writeback, bdi_node);
might_sleep();
restart:
rcu_read_lock();
- bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) {
+ list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
struct wb_writeback_work fallback_work;
struct wb_writeback_work *work;
long nr_pages;
+ if (last_wb) {
+ wb_put(last_wb);
+ last_wb = NULL;
+ }
+
/* SYNC_ALL writes out I_DIRTY_TIME too */
if (!wb_has_dirty_io(wb) &&
(base_work->sync_mode == WB_SYNC_NONE ||
@@ -819,12 +824,22 @@ restart:
wb_queue_work(wb, work);
- next_memcg_id = wb->memcg_css->id + 1;
+ /*
+ * Pin @wb so that it stays on @bdi->wb_list. This allows
+ * continuing iteration from @wb after dropping and
+ * regrabbing rcu read lock.
+ */
+ wb_get(wb);
+ last_wb = wb;
+
rcu_read_unlock();
wb_wait_for_completion(bdi, &fallback_work_done);
goto restart;
}
rcu_read_unlock();
+
+ if (last_wb)
+ wb_put(last_wb);
}
#else /* CONFIG_CGROUP_WRITEBACK */
@@ -1857,12 +1872,11 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
rcu_read_lock();
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
struct bdi_writeback *wb;
- struct wb_iter iter;
if (!bdi_has_dirty_io(bdi))
continue;
- bdi_for_each_wb(wb, bdi, &iter, 0)
+ list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
false, reason);
}
@@ -1894,11 +1908,10 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
rcu_read_lock();
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
struct bdi_writeback *wb;
- struct wb_iter iter;
- bdi_for_each_wb(wb, bdi, &iter, 0)
- if (!list_empty(&bdi->wb.b_dirty_time))
- wb_wakeup(&bdi->wb);
+ list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
+ if (!list_empty(&wb->b_dirty_time))
+ wb_wakeup(wb);
}
rcu_read_unlock();
schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
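The fs-writeback.c conversion replaces the memcg-id based wb iterator with a plain RCU list walk; as the comment in the hunk notes, the current bdi_writeback is pinned with wb_get() so it stays on bdi->wb_list, the RCU read lock is dropped while waiting for the queued work, and iteration then resumes from the pinned entry. A loose userspace analogue of that pin, drop, resume pattern, using a mutex and a refcount instead of RCU (all names are illustrative, and the remover is assumed to leave pinned nodes linked and defer freeing them):

/* Hedged sketch: pin the current node before dropping the lock for
 * blocking work, then re-take the lock and continue from it.  Because a
 * pinned node is never unlinked, reading n->next under the lock again
 * is safe. */
#include <pthread.h>

struct node {
	struct node *next;
	int refcnt;		/* protected by list_lock; nonzero == pinned */
	int dirty;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void do_blocking_work(struct node *n)
{
	(void)n;		/* queue work, sleep, wait for completion... */
}

static void walk_and_flush(void)
{
	struct node *n, *pinned = NULL;

	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next) {
		if (!n->dirty)
			continue;

		n->refcnt++;			/* pin n: it stays linked */
		if (pinned)
			pinned->refcnt--;	/* unpin the previous one */
		pinned = n;

		pthread_mutex_unlock(&list_lock);
		do_blocking_work(n);		/* may sleep */
		pthread_mutex_lock(&list_lock);
		/* n is still on the list, so n->next is re-read safely here */
	}
	if (pinned)
		pinned->refcnt--;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct node b = { NULL, 0, 1 };
	struct node a = { &b, 0, 1 };

	head = &a;
	walk_and_flush();
	return 0;
}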
diff --git a/fs/mpage.c b/fs/mpage.c
index 778a4dd..a7c3427 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
sector_t *last_block_in_bio, struct buffer_head *map_bh,
- unsigned long *first_logical_block, get_block_t get_block)
+ unsigned long *first_logical_block, get_block_t get_block,
+ gfp_t gfp)
{
struct inode *inode = page->mapping->host;
const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@ alloc_new:
goto out;
}
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- min_t(int, nr_pages, BIO_MAX_PAGES),
- GFP_KERNEL);
+ min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
if (bio == NULL)
goto confused;
}
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
sector_t last_block_in_bio = 0;
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
+ gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
map_bh.b_state = 0;
map_bh.b_size = 0;
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
prefetchw(&page->flags);
list_del(&page->lru);
if (!add_to_page_cache_lru(page, mapping,
- page->index, GFP_KERNEL)) {
+ page->index,
+ gfp)) {
bio = do_mpage_readpage(bio, page,
nr_pages - page_idx,
&last_block_in_bio, &map_bh,
&first_logical_block,
- get_block);
+ get_block, gfp);
}
page_cache_release(page);
}
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
sector_t last_block_in_bio = 0;
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
+ gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
map_bh.b_state = 0;
map_bh.b_size = 0;
bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
- &map_bh, &first_logical_block, get_block);
+ &map_bh, &first_logical_block, get_block, gfp);
if (bio)
mpage_bio_submit(READ, bio);
return 0;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ee5aa4d..ce38b4c 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1658,12 +1658,13 @@ send_response:
if (ret < 0) {
mlog(ML_ERROR, "failed to dispatch assert master work\n");
response = DLM_MASTER_RESP_ERROR;
+ spin_unlock(&res->spinlock);
dlm_lockres_put(res);
} else {
dispatched = 1;
__dlm_lockres_grab_inflight_worker(dlm, res);
+ spin_unlock(&res->spinlock);
}
- spin_unlock(&res->spinlock);
} else {
if (res)
dlm_lockres_put(res);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 3d90ad7..58eaa5c 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1723,8 +1723,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
} else {
dispatched = 1;
__dlm_lockres_grab_inflight_worker(dlm, res);
+ spin_unlock(&res->spinlock);
}
- spin_unlock(&res->spinlock);
} else {
/* put.. incase we are not the master */
spin_unlock(&res->spinlock);
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 84d693d..871fcb6 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -81,11 +81,11 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
if (len == 0)
return 0;
- old_file = ovl_path_open(old, O_RDONLY);
+ old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY);
if (IS_ERR(old_file))
return PTR_ERR(old_file);
- new_file = ovl_path_open(new, O_WRONLY);
+ new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY);
if (IS_ERR(new_file)) {
error = PTR_ERR(new_file);
goto out_fput;
@@ -267,7 +267,7 @@ out:
out_cleanup:
ovl_cleanup(wdir, newdentry);
- goto out;
+ goto out2;
}
/*
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index d9da5a4..ec0c2a050 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -363,6 +363,9 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
ovl_path_upper(dentry, &realpath);
}
+ if (realpath.dentry->d_flags & DCACHE_OP_SELECT_INODE)
+ return realpath.dentry->d_op->d_select_inode(realpath.dentry, file_flags);
+
return d_backing_inode(realpath.dentry);
}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 79073d6..e38ee0f 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -544,6 +544,7 @@ static void ovl_put_super(struct super_block *sb)
mntput(ufs->upper_mnt);
for (i = 0; i < ufs->numlower; i++)
mntput(ufs->lower_mnt[i]);
+ kfree(ufs->lower_mnt);
kfree(ufs->config.lowerdir);
kfree(ufs->config.upperdir);
@@ -1048,6 +1049,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
oe->lowerstack[i].dentry = stack[i].dentry;
oe->lowerstack[i].mnt = ufs->lower_mnt[i];
}
+ kfree(stack);
root_dentry->d_fsdata = oe;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index f60f012..eed2050 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -375,7 +375,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task, int whole)
{
- unsigned long vsize, eip, esp, wchan = ~0UL;
+ unsigned long vsize, eip, esp, wchan = 0;
int priority, nice;
int tty_pgrp = -1, tty_nr = 0;
sigset_t sigign, sigcatch;
@@ -507,7 +507,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
- seq_put_decimal_ull(m, ' ', wchan);
+
+ /*
+ * We used to output the absolute kernel address, but that's an
+ * information leak - so instead we show a 0/1 flag here, to signal
+ * to user-space whether there's a wchan field in /proc/PID/wchan.
+ *
+ * This works with older implementations of procps as well.
+ */
+ if (wchan)
+ seq_puts(m, " 1");
+ else
+ seq_puts(m, " 0");
+
seq_put_decimal_ull(m, ' ', 0);
seq_put_decimal_ull(m, ' ', 0);
seq_put_decimal_ll(m, ' ', task->exit_signal);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b25eee4..29595af 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -430,13 +430,10 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
wchan = get_wchan(task);
- if (lookup_symbol_name(wchan, symname) < 0) {
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
- return 0;
- seq_printf(m, "%lu", wchan);
- } else {
+ if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
seq_printf(m, "%s", symname);
- }
+ else
+ seq_putc(m, '0');
return 0;
}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index d3ebf2e..9155a5a 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -27,7 +27,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
{
struct sysinfo i;
unsigned long committed;
- struct vmalloc_info vmi;
long cached;
long available;
unsigned long pagecache;
@@ -49,8 +48,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
if (cached < 0)
cached = 0;
- get_vmalloc_info(&vmi);
-
for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
pages[lru] = global_page_state(NR_LRU_BASE + lru);
@@ -191,8 +188,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
K(vm_commit_limit()),
K(committed),
(unsigned long)VMALLOC_TOTAL >> 10,
- vmi.used >> 10,
- vmi.largest_chunk >> 10
+ 0ul, // used to be vmalloc 'used'
+ 0ul // used to be vmalloc 'largest_chunk'
#ifdef CONFIG_MEMORY_FAILURE
, atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
#endif
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index ba1323a..a586467 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
unsigned order;
void *data;
int ret;
+ gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
/* make various checks */
order = get_order(newsize);
@@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
/* allocate enough contiguous pages to be able to satisfy the
* request */
- pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
+ pages = alloc_pages(gfp, order);
if (!pages)
return -ENOMEM;
@@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
struct page *page = pages + loop;
ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
- GFP_KERNEL);
+ gfp);
if (ret < 0)
goto add_error;