From 2958ec177e400be1e26fc37e1759f84fa2c1761c Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Thu, 31 Mar 2016 21:34:25 -0400
Subject: aio: remove a pointless assignment

the value is never used after that point

Signed-off-by: Al Viro

diff --git a/fs/aio.c b/fs/aio.c
index 155f842..a6deaa7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1447,8 +1447,6 @@ rw_common:
 			return ret;
 		}
 
-		len = ret;
-
 		if (rw == WRITE)
 			file_start_write(file);
 
--
cgit v0.10.2


From bc61384dcdd82a6faabafecdcd80040625db5e40 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Thu, 31 Mar 2016 21:48:20 -0400
Subject: rw_verify_area(): saner calling conventions

Lift length capping into the few callers that care about it.  Most of
them treat all non-negatives as "success" and ignore the capped value,
and with good reasons.  Make rw_verify_area() return 0 on success.

Signed-off-by: Al Viro

diff --git a/fs/read_write.c b/fs/read_write.c
index cf377cf..b1a0e6c 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -410,11 +410,6 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
 }
 EXPORT_SYMBOL(vfs_iter_write);
 
-/*
- * rw_verify_area doesn't like huge counts. We limit
- * them to something that fits in "int" so that others
- * won't have to do range checks all the time.
- */
 int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
 {
 	struct inode *inode;
@@ -441,11 +436,8 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
 		if (retval < 0)
 			return retval;
 	}
-	retval = security_file_permission(file,
+	return security_file_permission(file,
 				read_write == READ ? MAY_READ : MAY_WRITE);
-	if (retval)
-		return retval;
-	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
 }
 
 static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
@@ -489,8 +481,9 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 		return -EFAULT;
 
 	ret = rw_verify_area(READ, file, pos, count);
-	if (ret >= 0) {
-		count = ret;
+	if (!ret) {
+		if (count > MAX_RW_COUNT)
+			count = MAX_RW_COUNT;
 		ret = __vfs_read(file, buf, count, pos);
 		if (ret > 0) {
 			fsnotify_access(file);
@@ -572,8 +565,9 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 		return -EFAULT;
 
 	ret = rw_verify_area(WRITE, file, pos, count);
-	if (ret >= 0) {
-		count = ret;
+	if (!ret) {
+		if (count > MAX_RW_COUNT)
+			count = MAX_RW_COUNT;
 		file_start_write(file);
 		ret = __vfs_write(file, buf, count, pos);
 		if (ret > 0) {
@@ -1323,7 +1317,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
 	retval = rw_verify_area(READ, in.file, &pos, count);
 	if (retval < 0)
 		goto fput_in;
-	count = retval;
+	if (count > MAX_RW_COUNT)
+		count = MAX_RW_COUNT;
 
 	/*
 	 * Get output file, and verify that it is ok..
@@ -1341,7 +1336,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
 	retval = rw_verify_area(WRITE, out.file, &out_pos, count);
 	if (retval < 0)
 		goto fput_out;
-	count = retval;
 	if (!max)
 		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
 
@@ -1485,11 +1479,12 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
 	if (flags != 0)
 		return -EINVAL;
 
-	/* copy_file_range allows full ssize_t len, ignoring MAX_RW_COUNT */
 	ret = rw_verify_area(READ, file_in, &pos_in, len);
-	if (ret >= 0)
-		ret = rw_verify_area(WRITE, file_out, &pos_out, len);
-	if (ret < 0)
+	if (unlikely(ret))
+		return ret;
+
+	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
+	if (unlikely(ret))
 		return ret;
 
 	if (!(file_in->f_mode & FMODE_READ) ||
--
cgit v0.10.2

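To make the calling-convention change in the patch above easier to follow
outside the kernel tree, here is a small userspace sketch of the before/after
shape.  Everything in it is a stand-in: verify_area_old(), verify_area_new()
and do_read() are hypothetical helpers, not kernel functions, and
MAX_RW_COUNT is hard-coded to the value it has with 4K pages
(INT_MAX & PAGE_MASK); only the control flow mirrors what the patch does to
vfs_read()/vfs_write().

/*
 * Illustrative sketch only: models the rw_verify_area() convention change.
 * verify_area_old()/verify_area_new()/do_read() are stand-ins, not kernel APIs.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_RW_COUNT 0x7ffff000L	/* INT_MAX & PAGE_MASK for 4K pages */

/* old convention: negative errno, or the (possibly capped) count on success */
static long verify_area_old(size_t count)
{
	return count > MAX_RW_COUNT ? MAX_RW_COUNT : (long)count;
}

/* new convention: 0 on success, negative errno on failure */
static int verify_area_new(size_t count)
{
	(void)count;		/* permission checks elided in this sketch */
	return 0;
}

/* caller in the new world: capping moved out of the helper */
static long do_read(size_t count)
{
	int ret = verify_area_new(count);

	if (ret)
		return ret;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;
	return (long)count;	/* stand-in for __vfs_read(..., count, ...) */
}

int main(void)
{
	size_t big = (size_t)MAX_RW_COUNT + 4096;

	printf("old helper returns %ld, new-style caller reads %ld\n",
	       verify_area_old(big), do_read(big));
	return 0;
}

The point of the change is visible in the sketch: the helper no longer
conflates "permission granted" with "here is your capped length", so the
callers that never cared about the cap (most of them, per the commit message)
can simply test the return value for zero.
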
From dd254f5a382cc7879db7a07ed266b12d38fe3ab6 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 9 May 2016 11:54:48 -0400
Subject: fold checks into iterate_and_advance()

they are open-coded in all users except iov_iter_advance(), and there
they wouldn't be a bad idea either - as it is, iov_iter_advance(i, 0)
ends up dereferencing potentially past the end of iovec array.  It
doesn't do anything with the value it reads, and very unlikely to
trigger an oops on dereference, but it is not impossible.

Reported-by: Jiri Slaby
Reported-by: Takashi Iwai
Signed-off-by: Al Viro

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 5fecddc..015061e 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -99,40 +99,44 @@
 }
 
 #define iterate_and_advance(i, n, v, I, B, K) {	\
-	size_t skip = i->iov_offset;	\
-	if (unlikely(i->type & ITER_BVEC)) {	\
-		const struct bio_vec *bvec;	\
-		struct bio_vec v;	\
-		iterate_bvec(i, n, v, bvec, skip, (B))	\
-		if (skip == bvec->bv_len) {	\
-			bvec++;	\
-			skip = 0;	\
-		}	\
-		i->nr_segs -= bvec - i->bvec;	\
-		i->bvec = bvec;	\
-	} else if (unlikely(i->type & ITER_KVEC)) {	\
-		const struct kvec *kvec;	\
-		struct kvec v;	\
-		iterate_kvec(i, n, v, kvec, skip, (K))	\
-		if (skip == kvec->iov_len) {	\
-			kvec++;	\
-			skip = 0;	\
-		}	\
-		i->nr_segs -= kvec - i->kvec;	\
-		i->kvec = kvec;	\
-	} else {	\
-		const struct iovec *iov;	\
-		struct iovec v;	\
-		iterate_iovec(i, n, v, iov, skip, (I))	\
-		if (skip == iov->iov_len) {	\
-			iov++;	\
-			skip = 0;	\
+	if (unlikely(i->count < n))	\
+		n = i->count;	\
+	if (n) {	\
+		size_t skip = i->iov_offset;	\
+		if (unlikely(i->type & ITER_BVEC)) {	\
+			const struct bio_vec *bvec;	\
+			struct bio_vec v;	\
+			iterate_bvec(i, n, v, bvec, skip, (B))	\
+			if (skip == bvec->bv_len) {	\
+				bvec++;	\
+				skip = 0;	\
+			}	\
+			i->nr_segs -= bvec - i->bvec;	\
+			i->bvec = bvec;	\
+		} else if (unlikely(i->type & ITER_KVEC)) {	\
+			const struct kvec *kvec;	\
+			struct kvec v;	\
+			iterate_kvec(i, n, v, kvec, skip, (K))	\
+			if (skip == kvec->iov_len) {	\
+				kvec++;	\
+				skip = 0;	\
+			}	\
+			i->nr_segs -= kvec - i->kvec;	\
+			i->kvec = kvec;	\
+		} else {	\
+			const struct iovec *iov;	\
+			struct iovec v;	\
+			iterate_iovec(i, n, v, iov, skip, (I))	\
+			if (skip == iov->iov_len) {	\
+				iov++;	\
+				skip = 0;	\
+			}	\
+			i->nr_segs -= iov - i->iov;	\
+			i->iov = iov;	\
 		}	\
-		i->nr_segs -= iov - i->iov;	\
-		i->iov = iov;	\
+		i->count -= n;	\
+		i->iov_offset = skip;	\
 	}	\
-	i->count -= n;	\
-	i->iov_offset = skip;	\
 }
 
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
@@ -386,12 +390,6 @@ static void memzero_page(struct page *page, size_t offset, size_t len)
 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	const char *from = addr;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	iterate_and_advance(i, bytes, v,
 		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
@@ -407,12 +405,6 @@ EXPORT_SYMBOL(copy_to_iter);
 size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	iterate_and_advance(i, bytes, v,
 		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
@@ -428,12 +420,6 @@ EXPORT_SYMBOL(copy_from_iter);
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	iterate_and_advance(i, bytes, v,
 		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
@@ -474,12 +460,6 @@ EXPORT_SYMBOL(copy_page_from_iter);
 
 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	iterate_and_advance(i, bytes, v,
 		__clear_user(v.iov_base, v.iov_len),
 		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
@@ -666,12 +646,6 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 	char *to = addr;
 	__wsum sum, next;
 	size_t off = 0;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	sum = *csum;
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
@@ -710,12 +684,6 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 	const char *from = addr;
 	__wsum sum, next;
 	size_t off = 0;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	sum = *csum;
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
--
cgit v0.10.2
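
The behaviour that this last patch folds into iterate_and_advance() can also
be shown with a self-contained userspace model.  The names below (struct seg,
struct cursor, advance()) are invented for the example and are not the
kernel's struct iov_iter API; the point is only the two checks at the top of
advance(): clamp n to what is left, and return before touching the segment
array when there is nothing to advance over, which is the iov_iter_advance(i, 0)
case the commit message describes.

/*
 * Illustrative model of the checks folded into iterate_and_advance().
 * "struct cursor" and advance() are stand-ins, not the kernel iov_iter API.
 */
#include <stdio.h>
#include <stddef.h>

struct seg { size_t len; };

struct cursor {
	const struct seg *seg;	/* current segment */
	size_t nr_segs;		/* segments remaining */
	size_t off;		/* offset within the current segment */
	size_t count;		/* bytes remaining overall */
};

static void advance(struct cursor *c, size_t n)
{
	if (n > c->count)	/* clamp, as the macro now does up front */
		n = c->count;
	if (!n)			/* nothing to do: do not touch c->seg, it may */
		return;		/* already point one past the last segment    */

	c->count -= n;
	while (n) {
		size_t room = c->seg->len - c->off;

		if (n < room) {		/* stays within the current segment */
			c->off += n;
			break;
		}
		n -= room;		/* consume the rest of this segment */
		c->seg++;
		c->nr_segs--;
		c->off = 0;
	}
}

int main(void)
{
	struct seg segs[2] = { { 8 }, { 8 } };
	struct cursor c = { segs, 2, 0, 16 };

	advance(&c, 16);	/* exhausts the cursor; c.seg ends up past segs[1] */
	advance(&c, 0);		/* safe: the early return never dereferences c.seg */
	printf("bytes left %zu, segments left %zu\n", c.count, c.nr_segs);
	return 0;
}

In the kernel macro the segment pointer was examined even for a zero-length
advance, which is why iov_iter_advance(i, 0) could read one entry past the end
of the iovec array; hoisting the checks into iterate_and_advance() closes that
hole for every user of the macro at once instead of relying on each caller's
open-coded copy.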