author      Tejun Heo <tj@kernel.org>    2012-11-06 20:26:23 (GMT)
committer   Tejun Heo <tj@kernel.org>    2012-11-06 20:26:23 (GMT)
commit      5b805f2a7675634fbdf9ac1c9b2256905ab2ea68 (patch)
tree        ee00d1e3d757458d66209b926d274491c6c3f61c /mm
parent      1db1e31b1ee3ae126ef98f39083b5f213c7b41bf (diff)
parent      201e72acb2d3821e2de9ce6091e98859c316b29a (diff)
download    linux-5b805f2a7675634fbdf9ac1c9b2256905ab2ea68.tar.xz
Merge branch 'cgroup/for-3.7-fixes' into cgroup/for-3.8
This is to receive device_cgroup fixes so that further device_cgroup
changes can be made in cgroup/for-3.8.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/compaction.c    |  2
-rw-r--r--   mm/fremap.c        |  2
-rw-r--r--   mm/huge_memory.c   |  1
-rw-r--r--   mm/memblock.c      | 24
-rw-r--r--   mm/mempolicy.c     |  5
-rw-r--r--   mm/mmu_notifier.c  | 26
-rw-r--r--   mm/page_alloc.c    |  6
-rw-r--r--   mm/rmap.c          | 20
-rw-r--r--   mm/slob.c          |  6
9 files changed, 63 insertions, 29 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 2c4ce17..9eef558 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -346,7 +346,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
-	if (strict && nr_strict_required != total_isolated)
+	if (strict && nr_strict_required > total_isolated)
		total_isolated = 0;

	if (locked)
diff --git a/mm/fremap.c b/mm/fremap.c
index 3899a86..a0aaf0e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -169,7 +169,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

-	if (!vma->vm_ops->remap_pages)
+	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a863af2..40f17c3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -17,6 +17,7 @@
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
 #include <linux/mman.h>
+#include <linux/pagemap.h>
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
diff --git a/mm/memblock.c b/mm/memblock.c
index 931eef1..6259055 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -930,6 +930,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }

+void __init_memblock memblock_trim_memory(phys_addr_t align)
+{
+	int i;
+	phys_addr_t start, end, orig_start, orig_end;
+	struct memblock_type *mem = &memblock.memory;
+
+	for (i = 0; i < mem->cnt; i++) {
+		orig_start = mem->regions[i].base;
+		orig_end = mem->regions[i].base + mem->regions[i].size;
+		start = round_up(orig_start, align);
+		end = round_down(orig_end, align);
+
+		if (start == orig_start && end == orig_end)
+			continue;
+
+		if (start < end) {
+			mem->regions[i].base = start;
+			mem->regions[i].size = end - start;
+		} else {
+			memblock_remove_region(mem, i);
+			i--;
+		}
+	}
+}

 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
 {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0b78fb9..d04a8a5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1536,9 +1536,8 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
- * Current or other task's task mempolicy and non-shared vma policies
- * are protected by the task's mmap_sem, which must be held for read by
- * the caller.
+ * Current or other task's task mempolicy and non-shared vma policies must be
+ * protected by task_lock(task) by the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 479a1e7..8a5ac8c 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -196,28 +196,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
-	 * Verify that mmu_notifier_init() already run and the global srcu is
-	 * initialized.
-	 */
+	* Verify that mmu_notifier_init() already run and the global srcu is
+	* initialized.
+	*/
	BUG_ON(!srcu.per_cpu_ref);

+	ret = -ENOMEM;
+	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+	if (unlikely(!mmu_notifier_mm))
+		goto out;
+
	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
-		goto out;
+		goto out_clean;

	if (!mm_has_notifiers(mm)) {
-		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
-					GFP_KERNEL);
-		if (unlikely(!mmu_notifier_mm)) {
-			ret = -ENOMEM;
-			goto out_of_mem;
-		}
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
		mm->mmu_notifier_mm = mmu_notifier_mm;
+		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

@@ -233,12 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

-out_of_mem:
	mm_drop_all_locks(mm);
-out:
+out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
-
+	kfree(mmu_notifier_mm);
+out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bb90971..5b74de6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid)
	int i;

	for_each_online_node(i)
-		if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
			node_set(i, NODE_DATA(nid)->reclaim_nodes);
+		else
			zone_reclaim_mode = 1;
-		}
 }

 #else	/* CONFIG_NUMA */
@@ -5825,7 +5825,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype);
	if (ret)
-		goto done;
+		return ret;

	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret)
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/backing-dev.h>

 #include <asm/tlbflush.h>

@@ -926,11 +927,8 @@ int page_mkclean(struct page *page)
	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
-		if (mapping) {
+		if (mapping)
			ret = page_mkclean_file(mapping, page);
-			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
-				ret = 1;
-		}
	}

	return ret;
@@ -1116,6 +1114,7 @@ void page_add_file_rmap(struct page *page)
 */
 void page_remove_rmap(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
	bool anon = PageAnon(page);
	bool locked;
	unsigned long flags;
@@ -1138,8 +1137,19 @@ void page_remove_rmap(struct page *page)
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
+	 *
+	 * And we can skip it on file pages, so long as the filesystem
+	 * participates in dirty tracking; but need to catch shm and tmpfs
+	 * and ramfs pages which have been modified since creation by read
+	 * fault.
+	 *
+	 * Note that mapping must be decided above, before decrementing
+	 * mapcount (which luckily provides a barrier): once page is unmapped,
+	 * it could be truncated and page->mapping reset to NULL at any moment.
+	 * Note also that we are relying on page_mapping(page) to set mapping
+	 * to &swapper_space when PageSwapCache(page).
	 */
-	if ((!anon || PageSwapCache(page)) &&
+	if (mapping && !mapping_cap_account_dirty(mapping) &&
	    page_test_and_clear_dirty(page_to_pfn(page), 1))
		set_page_dirty(page);

	/*
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -429,7 +429,7 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
	unsigned int *m;
-	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;
@@ -502,7 +502,7 @@ void kfree(const void *block)

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
@@ -521,7 +521,7 @@ size_t ksize(const void *block)

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
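Editor's note: the new memblock_trim_memory() helper in the mm/memblock.c hunk above rounds each memory region's start up and its end down to the requested alignment, and removes regions that no longer contain any aligned memory. The standalone C sketch below is not kernel code; the round_up_p2()/round_down_p2() helpers and the sample regions are invented for illustration (they assume a power-of-two alignment, matching how the kernel's round_up()/round_down() macros are used here), but the trim/drop decision mirrors the two branches in the patch.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's round_up()/round_down() on
 * power-of-two alignments. */
static uint64_t round_up_p2(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t round_down_p2(uint64_t x, uint64_t a) { return x & ~(a - 1); }

struct region { uint64_t base, size; };

int main(void)
{
	/* Hypothetical regions: one misaligned at both ends, one smaller
	 * than a single aligned block. */
	struct region regs[] = {
		{ 0x1234ULL,   0x10000ULL },
		{ 0x100100ULL, 0x200ULL },
	};
	const uint64_t align = 4096; /* e.g. PAGE_SIZE */

	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		uint64_t start = round_up_p2(regs[i].base, align);
		uint64_t end   = round_down_p2(regs[i].base + regs[i].size, align);

		if (start < end)
			printf("region %u trimmed to [%#llx, %#llx)\n", i,
			       (unsigned long long)start, (unsigned long long)end);
		else
			printf("region %u dropped (no aligned memory left)\n", i);
	}
	return 0;
}

Built with any C99 compiler, the first region is trimmed to its page-aligned core and the second is dropped entirely, corresponding to the "adjust base/size" and "memblock_remove_region()" branches of the patch.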