author | Kukjin Kim <kgene.kim@samsung.com> | 2013-04-08 16:10:13 (GMT)
---|---|---
committer | Kukjin Kim <kgene.kim@samsung.com> | 2013-04-08 16:10:13 (GMT)
commit | da821eb7d42935b0f7056d98c75fd1150f6636f4 (patch) |
tree | de83ac171d51bbe7621110c3145a68c198b13711 /mm |
parent | 296f3f2c9322480400338f71d8c118107f9a7f20 (diff) |
parent | 07961ac7c0ee8b546658717034fe692fd12eefa9 (diff) |
download | linux-da821eb7d42935b0f7056d98c75fd1150f6636f4.tar.xz |
Merge commit 'v3.9-rc5' into next/clk-exynos
Conflicts:
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos5440.dtsi
Diffstat (limited to 'mm')
-rw-r--r-- | mm/Kconfig | 8
-rw-r--r-- | mm/fremap.c | 17
-rw-r--r-- | mm/hugetlb.c | 8
-rw-r--r-- | mm/ksm.c | 2
-rw-r--r-- | mm/memcontrol.c | 8
-rw-r--r-- | mm/memory_hotplug.c | 8
-rw-r--r-- | mm/mempolicy.c | 4
-rw-r--r-- | mm/mlock.c | 11
-rw-r--r-- | mm/mmap.c | 4
-rw-r--r-- | mm/process_vm_access.c | 8
10 files changed, 40 insertions, 38 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -286,8 +286,12 @@ config NR_QUICK
 	default "1"
 
 config VIRT_TO_BUS
-	def_bool y
-	depends on HAVE_VIRT_TO_BUS
+	bool
+	help
+	  An architecture should select this if it implements the
+	  deprecated interface virt_to_bus().  All new architectures
+	  should probably not select this.
+
 config MMU_NOTIFIER
 	bool
diff --git a/mm/fremap.c b/mm/fremap.c
index 0cd4c11..87da359 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -129,7 +129,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	struct vm_area_struct *vma;
 	int err = -EINVAL;
 	int has_write_lock = 0;
-	vm_flags_t vm_flags;
+	vm_flags_t vm_flags = 0;
 
 	if (prot)
 		return err;
@@ -204,10 +204,8 @@ get_write_lock:
 		unsigned long addr;
 		struct file *file = get_file(vma->vm_file);
 
-		vm_flags = vma->vm_flags;
-		if (!(flags & MAP_NONBLOCK))
-			vm_flags |= VM_POPULATE;
-		addr = mmap_region(file, start, size, vm_flags, pgoff);
+		addr = mmap_region(file, start, size,
+					vma->vm_flags, pgoff);
 		fput(file);
 		if (IS_ERR_VALUE(addr)) {
 			err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
 		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
-	if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
-		if (!has_write_lock)
-			goto get_write_lock;
-		vma->vm_flags |= VM_POPULATE;
-	}
-
 	if (vma->vm_flags & VM_LOCKED) {
 		/*
 		 * drop PG_Mlocked flag for over-mapped range
@@ -254,7 +246,8 @@ get_write_lock:
 	 */
 
 out:
-	vm_flags = vma->vm_flags;
+	if (vma)
+		vm_flags = vma->vm_flags;
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a0be33..ca9a7c6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-	struct hstate *h = &default_hstate;
-	return h->nr_huge_pages * pages_per_huge_page(h);
+	struct hstate *h;
+	unsigned long nr_total_pages = 0;
+
+	for_each_hstate(h)
+		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+	return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -489,7 +489,7 @@ out:		page = NULL;
  */
 static inline int get_kpfn_nid(unsigned long kpfn)
 {
-	return ksm_merge_across_nodes ? 0 : pfn_to_nid(kpfn);
+	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
 }
 
 static void remove_node_from_stable_tree(struct stable_node *stable_node)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53b8201..2b55222 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3012,6 +3012,8 @@ void memcg_update_array_size(int num)
 		memcg_limited_groups_array_size = memcg_caches_array_size(num);
 }
 
+static void kmem_cache_destroy_work_func(struct work_struct *w);
+
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
 	struct memcg_cache_params *cur_params = s->memcg_params;
@@ -3031,6 +3033,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 		return -ENOMEM;
 	}
 
+	INIT_WORK(&s->memcg_params->destroy,
+			kmem_cache_destroy_work_func);
 	s->memcg_params->is_root_cache = true;
 
 	/*
@@ -3078,6 +3082,8 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (!s->memcg_params)
 		return -ENOMEM;
 
+	INIT_WORK(&s->memcg_params->destroy,
+			kmem_cache_destroy_work_func);
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
@@ -3358,8 +3364,6 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
 	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
 		cachep = memcg_params_to_cache(params);
 		cachep->memcg_params->dead = true;
-		INIT_WORK(&cachep->memcg_params->destroy,
-				  kmem_cache_destroy_work_func);
 		schedule_work(&cachep->memcg_params->destroy);
 	}
 	mutex_unlock(&memcg->slab_caches_mutex);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b81a367b..ee37657 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
-		if (zone->wait_table)
+		/*
+		 * wait_table may be allocated from boot memory,
+		 * here only free if it's allocated by vmalloc.
+		 */
+		if (is_vmalloc_addr(zone->wait_table))
 			vfree(zone->wait_table);
 	}
 
@@ -1801,7 +1805,7 @@ int __ref remove_memory(int nid, u64 start, u64 size)
 	int retry = 1;
 
 	start_pfn = PFN_DOWN(start);
-	end_pfn = start_pfn + PFN_DOWN(size);
+	end_pfn = PFN_UP(start + size - 1);
 
 	/*
 	 * When CONFIG_MEMCG is on, one memory block may be used by other
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 31d2663..7431001 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2390,9 +2390,9 @@ restart:
 			*mpol_new = *n->policy;
 			atomic_set(&mpol_new->refcnt, 1);
-			sp_node_init(n_new, n->end, end, mpol_new);
-			sp_insert(sp, n_new);
+			sp_node_init(n_new, end, n->end, mpol_new);
 			n->end = start;
+			sp_insert(sp, n_new);
 			n_new = NULL;
 			mpol_new = NULL;
 			break;
diff --git a/mm/mlock.c b/mm/mlock.c
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (on)
-			newflags |= VM_LOCKED | VM_POPULATE;
+			newflags |= VM_LOCKED;
 
 		tmp = vma->vm_end;
 		if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 		 * range with the first VMA. Also, skip undesirable VMA types.
 		 */
 		nend = min(end, vma->vm_end);
-		if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
-				VM_POPULATE)
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 			continue;
 		if (nstart < vma->vm_start)
 			nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
 	struct vm_area_struct * vma, * prev = NULL;
 
 	if (flags & MCL_FUTURE)
-		current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
+		current->mm->def_flags |= VM_LOCKED;
 	else
-		current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
+		current->mm->def_flags &= ~VM_LOCKED;
 	if (flags == MCL_FUTURE)
 		goto out;
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (flags & MCL_CURRENT)
-			newflags |= VM_LOCKED | VM_POPULATE;
+			newflags |= VM_LOCKED;
 
 		/* Ignore errors */
 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	}
 
 	addr = mmap_region(file, addr, len, vm_flags, pgoff);
-	if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+	if (!IS_ERR_VALUE(addr) &&
+	    ((vm_flags & VM_LOCKED) ||
+	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
 		*populate = len;
 	return addr;
 }
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 926b466..fd26d04 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -429,12 +429,6 @@ compat_process_vm_rw(compat_pid_t pid,
 	if (flags != 0)
 		return -EINVAL;
 
-	if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
-		goto out;
-
-	if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
-		goto out;
-
 	if (vm_write)
 		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
 						  UIO_FASTIOV, iovstack_l,
@@ -459,8 +453,6 @@ free_iovecs:
 		kfree(iov_r);
 	if (iov_l != iovstack_l)
 		kfree(iov_l);
-
-out:
 	return rc;
 }
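Several of the hunks above (mm/fremap.c, mm/mlock.c, mm/mmap.c) drop the VM_POPULATE flag, so after this merge do_mmap_pgoff() asks for prefaulting only when the mapping is locked (VM_LOCKED) or when MAP_POPULATE is passed without MAP_NONBLOCK. The snippet below is an illustrative userspace sketch of those two populate paths; it is not part of this merge, and the buffer size and flag choices are arbitrary.

/*
 * Illustrative only: a userspace view of the populate semantics touched
 * by the mm/mmap.c hunk above. MAP_POPULATE prefaults the range at mmap
 * time; mlock() likewise results in the range being populated.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 1024 * 1024;

	/* MAP_POPULATE asks the kernel to fault the pages in up front. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* mlock() sets VM_LOCKED and also populates the range; it may fail
	 * if RLIMIT_MEMLOCK is too low, which is harmless for this demo. */
	if (mlock(buf, len))
		perror("mlock");

	memset(buf, 0, len);	/* the range should already be resident */

	munlock(buf, len);
	munmap(buf, len);
	return 0;
}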