author     Jinshan Xiong <jinshan.xiong@intel.com>            2016-08-16 20:18:58 (GMT)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2016-08-21 13:57:36 (GMT)
commit     297e908f08cb40c4bbe11b06c30ff1de1a3740c0 (patch)
tree       e8f7ea97a2f1e247ee76c2e2b731d008bd5ca3b3 /drivers/staging/lustre
parent     d6099af20a148d705a16da2aed25b605ba0a162c (diff)
staging: lustre: llite: Fix the deadlock in balance_dirty_pages()
If the page is already dirtied in ll_write_end() and the kernel then calls
balance_dirty_pages() to write back dirty pages in the same thread, a deadlock
occurs because the page is still held by clio. This also fixes the issue
reported in LU-4873.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-4977
Reviewed-on: http://review.whamcloud.com/10149
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
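For illustration only (not part of the patch): below is a minimal userspace
sketch of the self-deadlock the message describes. All names (fake_page,
writeback_page) are invented stand-ins, not Lustre or kernel APIs; an
error-checking pthread mutex plays the role of the page lock held by clio, so
the second acquisition is reported as EDEADLK instead of hanging.

/* deadlock_sketch.c - hypothetical illustration, not kernel code.
 * Build: cc -pthread deadlock_sketch.c -o deadlock_sketch
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	pthread_mutex_t lock;	/* stands in for the page lock held by clio */
	bool dirty;
};

/* Stand-in for the writeback path reached from balance_dirty_pages():
 * it must take the page lock before writing the page out. */
static int writeback_page(struct fake_page *pg)
{
	int rc = pthread_mutex_lock(&pg->lock);

	if (rc == EDEADLK) {
		printf("writeback: EDEADLK - page already locked by this thread\n");
		return rc;
	}
	pg->dirty = false;
	pthread_mutex_unlock(&pg->lock);
	return 0;
}

int main(void)
{
	pthread_mutexattr_t attr;
	struct fake_page pg = { .dirty = false };

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&pg.lock, &attr);

	/* ll_write_end() analogue: the page is locked and dirtied here ... */
	pthread_mutex_lock(&pg.lock);
	pg.dirty = true;

	/* ... and balance_dirty_pages() then runs in the same thread and
	 * tries to write the very same page back: self-deadlock. */
	writeback_page(&pg);

	pthread_mutex_unlock(&pg.lock);
	pthread_mutex_destroy(&pg.lock);
	pthread_mutexattr_destroy(&attr);
	return 0;
}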
Diffstat (limited to 'drivers/staging/lustre')
-rw-r--r--  drivers/staging/lustre/lustre/llite/rw26.c  12
1 file changed, 9 insertions, 3 deletions
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index c14a1b6..8c8c100 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -506,9 +506,8 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 	env = lcc->lcc_env;
 	io = lcc->lcc_io;
 
-	if (likely(to == PAGE_SIZE)) /* LU-4873 */
-		/* To avoid deadlock, try to lock page first. */
-		vmpage = grab_cache_page_nowait(mapping, index);
+	/* To avoid deadlock, try to lock page first. */
+	vmpage = grab_cache_page_nowait(mapping, index);
 	if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
 		struct vvp_io *vio = vvp_env_io(env);
 		struct cl_page_list *plist = &vio->u.write.vui_queue;
@@ -617,6 +616,13 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 			LASSERT(from == 0);
 		vio->u.write.vui_to = from + copied;
 
+		/*
+		 * To address the deadlock in balance_dirty_pages() where
+		 * this dirty page may be written back in the same thread.
+		 */
+		if (PageDirty(vmpage))
+			unplug = true;
+
 		/* We may have one full RPC, commit it soon */
 		if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
 			unplug = true;
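For illustration only (again with invented names, not the Lustre API): the
hunk above reuses the existing unplug flag, so a page found already dirty in
write_end forces the queued pages to be committed right away, just as a full
RPC's worth of queued pages does. A rough userspace sketch of that decision:

/* commit_policy_sketch.c - hypothetical illustration, not kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_QUEUED_PAGES 256	/* stand-in for PTLRPC_MAX_BRW_PAGES */

struct queued_page {
	bool already_dirty;	/* PageDirty() was true in write_end */
};

/* Commit (write out) everything queued so far; details elided. */
static void commit_queue(struct queued_page *queue, size_t *nr)
{
	printf("committing %zu queued page(s)\n", *nr);
	*nr = 0;
}

/* write_end analogue: queue the page, then decide whether to commit now. */
static void write_end_sketch(struct queued_page *queue, size_t *nr,
			     struct queued_page pg)
{
	bool unplug = false;

	queue[(*nr)++] = pg;

	/* Avoid the balance_dirty_pages() deadlock: if the page was already
	 * dirty, flush the queue before returning to the caller. */
	if (pg.already_dirty)
		unplug = true;

	/* We may have one full RPC, commit it soon. */
	if (*nr >= MAX_QUEUED_PAGES)
		unplug = true;

	if (unplug)
		commit_queue(queue, nr);
}

int main(void)
{
	struct queued_page queue[MAX_QUEUED_PAGES];
	size_t nr = 0;

	write_end_sketch(queue, &nr, (struct queued_page){ .already_dirty = false });
	write_end_sketch(queue, &nr, (struct queued_page){ .already_dirty = true });
	return 0;
}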