Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c     |  8
-rw-r--r--  mm/hugetlb.c         |  2
-rw-r--r--  mm/nommu.c           |  2
-rw-r--r--  mm/oom_kill.c        |  5
-rw-r--r--  mm/page-writeback.c  | 23
-rw-r--r--  mm/vmalloc.c         | 27
6 files changed, 36 insertions(+), 31 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a086064..71034f4 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -724,6 +724,14 @@ void bdi_destroy(struct backing_dev_info *bdi)
bdi_unregister(bdi);
+ /*
+ * If bdi_unregister() had already been called earlier, the
+ * wakeup_timer could still be armed because bdi_prune_sb()
+ * can race with the bdi_wakeup_thread_delayed() calls from
+ * __mark_inode_dirty().
+ */
+ del_timer_sync(&bdi->wb.wakeup_timer);
+
for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
percpu_counter_destroy(&bdi->bdi_stat[i]);
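The hunk above disarms a timer that a racing path may have re-armed after the first unregister. A minimal sketch of the same teardown pattern, with hypothetical names (my_dev, my_dev_destroy) standing in for the bdi structures:

#include <linux/slab.h>
#include <linux/timer.h>

struct my_dev {
	struct timer_list wakeup_timer;	/* may be re-armed by a racing writer */
	/* ... state the timer handler dereferences ... */
};

static void my_dev_destroy(struct my_dev *dev)
{
	/*
	 * Wait for any running handler to finish and disarm the timer
	 * before freeing the state it touches; a plain del_timer()
	 * would not wait for a handler already executing on another CPU.
	 */
	del_timer_sync(&dev->wakeup_timer);
	kfree(dev);
}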
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dae27ba..bb28a5f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2422,6 +2422,8 @@ retry_avoidcopy:
* anon_vma prepared.
*/
if (unlikely(anon_vma_prepare(vma))) {
+ page_cache_release(new_page);
+ page_cache_release(old_page);
/* Caller expects lock to be held */
spin_lock(&mm->page_table_lock);
return VM_FAULT_OOM;
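The two page_cache_release() calls above follow the usual rule that an early error return must drop every page reference taken earlier in the fault path. A hedged sketch of the pattern, with a hypothetical helper (prepare_copy) standing in for the hugetlb COW code; put_page() is the primitive page_cache_release() wraps:

static int prepare_copy(struct page *old_page, struct page *new_page,
			struct vm_area_struct *vma)
{
	if (unlikely(anon_vma_prepare(vma))) {
		put_page(new_page);	/* ref from the fresh allocation */
		put_page(old_page);	/* ref taken on the source page */
		return -ENOMEM;		/* caller maps this to VM_FAULT_OOM */
	}
	return 0;
}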
diff --git a/mm/nommu.c b/mm/nommu.c
index 73419c5..b982290 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -454,7 +454,7 @@ void __attribute__((weak)) vmalloc_sync_all(void)
* between processes, it syncs the pagetable across all
* processes.
*/
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
BUG();
return NULL;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 471dedb..76f2c5a 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -185,6 +185,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
if (!p)
return 0;
+ if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+ task_unlock(p);
+ return 0;
+ }
+
/*
* The memory controller may have a limit of 0 bytes, so avoid a divide
* by zero, if necessary.
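The early return above gives tasks at OOM_SCORE_ADJ_MIN a badness of 0, so the OOM killer never selects them. From userspace that exemption is requested by writing -1000 (the value of OOM_SCORE_ADJ_MIN) to /proc/<pid>/oom_score_adj; a small illustrative helper, assuming the caller has the privilege to lower the value:

#include <stdio.h>

/* Mark the calling process as exempt from the OOM killer.  Lowering
 * oom_score_adj requires CAP_SYS_RESOURCE (or root). */
static int oom_exempt_self(void)
{
	FILE *f = fopen("/proc/self/oom_score_adj", "w");

	if (!f)
		return -1;
	if (fprintf(f, "-1000\n") < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}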
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a3278f0..7125248 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
*
*/
static struct prop_descriptor vm_completions;
-static struct prop_descriptor vm_dirties;
/*
* couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
{
int shift = calc_period_shift();
prop_change_shift(&vm_completions, shift);
- prop_change_shift(&vm_dirties, shift);
writeback_set_ratelimit();
}
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);
-void task_dirty_inc(struct task_struct *tsk)
-{
- prop_inc_single(&vm_dirties, &tsk->dirties);
-}
-
/*
* Obtain an accurate fraction of the BDI's portion.
*/
@@ -1133,17 +1126,17 @@ pause:
pages_dirtied,
pause,
start_time);
- __set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_KILLABLE);
io_schedule_timeout(pause);
- dirty_thresh = hard_dirty_limit(dirty_thresh);
/*
- * max-pause area. If dirty exceeded but still within this
- * area, no need to sleep for more than 200ms: (a) 8 pages per
- * 200ms is typically more than enough to curb heavy dirtiers;
- * (b) the pause time limit makes the dirtiers more responsive.
+ * This is typically equal to (nr_dirty < dirty_thresh) and can
+ * also keep "1000+ dd on a slow USB stick" under control.
*/
- if (nr_dirty < dirty_thresh)
+ if (task_ratelimit)
+ break;
+
+ if (fatal_signal_pending(current))
break;
}
@@ -1395,7 +1388,6 @@ void __init page_writeback_init(void)
shift = calc_period_shift();
prop_descriptor_init(&vm_completions, shift);
- prop_descriptor_init(&vm_dirties, shift);
}
/**
@@ -1724,7 +1716,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
__inc_zone_page_state(page, NR_DIRTIED);
__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
- task_dirty_inc(current);
task_io_account_write(PAGE_CACHE_SIZE);
}
}
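The balance_dirty_pages() hunk above sleeps in TASK_KILLABLE and breaks out on a fatal signal, so a heavily throttled task can still be killed instead of sitting in an unkillable uninterruptible sleep. A simplified sketch of that loop shape; throttled_enough and pause are placeholders for the real ratelimit computation:

#include <linux/sched.h>
#include <linux/types.h>

static void throttle_killable(long pause, bool (*throttled_enough)(void))
{
	for (;;) {
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		if (throttled_enough())
			break;

		/* SIGKILL terminates the wait rather than leaving the
		 * task stuck until writeback catches up */
		if (fatal_signal_pending(current))
			break;
	}
}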
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b669aa6..3231bf3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2141,23 +2141,30 @@ void __attribute__((weak)) vmalloc_sync_all(void)
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
- /* apply_to_page_range() does all the hard work. */
+ pte_t ***p = data;
+
+ if (p) {
+ *(*p) = pte;
+ (*p)++;
+ }
return 0;
}
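In the new f() above, data is a pointer to the caller's cursor (a pte_t ***), so each invocation stores the current pte and advances the caller's position in the array. The same by-reference cursor idea in a standalone, plain-C form (collect() and the int types are purely illustrative):

#include <stdio.h>

/* Each call stores one entry through the caller's cursor and advances it,
 * mirroring how f() fills the pte array via the pointer it is handed. */
static void collect(int *value, void *data)
{
	int ***p = data;

	if (p) {
		*(*p) = value;
		(*p)++;
	}
}

int main(void)
{
	int a = 1, b = 2;
	int *slots[2];
	int **cursor = slots;

	collect(&a, &cursor);
	collect(&b, &cursor);

	printf("%d %d\n", *slots[0], *slots[1]);	/* prints "1 2" */
	return 0;
}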
/**
* alloc_vm_area - allocate a range of kernel address space
* @size: size of the area
+ * @ptes: returns the PTEs for the address space
*
* Returns: NULL on failure, vm_struct on success
*
* This function reserves a range of kernel address space, and
* allocates pagetables to map that range. No actual mappings
- * are created. If the kernel address space is not shared
- * between processes, it syncs the pagetable across all
- * processes.
+ * are created.
+ *
+ * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ * allocated for the VM area are returned.
*/
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
struct vm_struct *area;
@@ -2171,19 +2178,11 @@ struct vm_struct *alloc_vm_area(size_t size)
* of kernel virtual address space and mapped into init_mm.
*/
if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
- area->size, f, NULL)) {
+ size, f, ptes ? &ptes : NULL)) {
free_vm_area(area);
return NULL;
}
- /*
- * If the allocated address space is passed to a hypercall
- * before being used then we cannot rely on a page fault to
- * trigger an update of the page tables. So sync all the page
- * tables here.
- */
- vmalloc_sync_all();
-
return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
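With the new signature, a caller that needs the backing page-table slots passes an array large enough for one pte pointer per page. A hedged usage sketch (reserve_one_page is hypothetical; only alloc_vm_area() and free_vm_area() come from the patch). Because the vmalloc_sync_all() call is gone, a caller that hands area->addr to a hypervisor before touching it is now responsible for any page-table syncing it needs:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct vm_struct *reserve_one_page(pte_t **pte_out)
{
	struct vm_struct *area;

	/* Reserve PAGE_SIZE of kernel address space; *pte_out will point
	 * at the (not yet populated) pte in init_mm backing area->addr. */
	area = alloc_vm_area(PAGE_SIZE, pte_out);
	if (!area)
		return NULL;

	/* ... install a mapping through *pte_out ... */
	/* tear down later with free_vm_area(area) */
	return area;
}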