author    | David S. Miller <davem@davemloft.net> | 2011-04-11 20:44:25 (GMT)
committer | David S. Miller <davem@davemloft.net> | 2011-04-11 20:44:25 (GMT)
commit    | 1c01a80cfec6f806246f31ff2680cd3639b30e67 (patch)
tree      | 0b554aad2ec1da71ecf6339d4ba51617bfe1dc3c /mm
parent    | c44d79950b2daa1025e62eede73e4e4a274d1ef3 (diff)
parent    | 4a9f65f6304a00f6473e83b19c1e83caa1e42530 (diff)
download  | linux-1c01a80cfec6f806246f31ff2680cd3639b30e67.tar.xz
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
drivers/net/smsc911x.c
Diffstat (limited to 'mm')
-rw-r--r-- | mm/backing-dev.c     |  2
-rw-r--r-- | mm/hugetlb.c         | 10
-rw-r--r-- | mm/hwpoison-inject.c |  2
-rw-r--r-- | mm/internal.h        |  2
-rw-r--r-- | mm/kmemleak.c        |  6
-rw-r--r-- | mm/ksm.c             |  2
-rw-r--r-- | mm/memcontrol.c      |  8
-rw-r--r-- | mm/memory-failure.c  |  6
-rw-r--r-- | mm/memory_hotplug.c  |  2
-rw-r--r-- | mm/migrate.c         |  2
-rw-r--r-- | mm/mremap.c          | 11
-rw-r--r-- | mm/nobootmem.c       |  2
-rw-r--r-- | mm/page_alloc.c      |  4
-rw-r--r-- | mm/page_cgroup.c     |  2
-rw-r--r-- | mm/percpu.c          | 10
-rw-r--r-- | mm/slab.c            |  4
-rw-r--r-- | mm/slub.c            |  8
-rw-r--r-- | mm/sparse.c          |  2
-rw-r--r-- | mm/util.c            |  2
-rw-r--r-- | mm/vmscan.c          |  4
20 files changed, 49 insertions, 42 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 0d9a036..befc875 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(congestion_wait);
  * jiffies for either a BDI to exit congestion of the given @sync queue
  * or a write to complete.
  *
- * In the absense of zone congestion, cond_resched() is called to yield
+ * In the absence of zone congestion, cond_resched() is called to yield
  * the processor if necessary but otherwise does not sleep.
  *
  * The return value is 0 if the sleep is for the full timeout. Otherwise,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06de5aa..8ee3bd8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -146,7 +146,7 @@ static long region_chg(struct list_head *head, long f, long t)
         if (rg->from > t)
             return chg;
 
-        /* We overlap with this area, if it extends futher than
+        /* We overlap with this area, if it extends further than
          * us then we must extend ourselves. Account for its
          * existing reservation. */
         if (rg->to > t) {
@@ -842,7 +842,7 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 }
 
 /*
- * Increase the hugetlb pool such that it can accomodate a reservation
+ * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
@@ -890,7 +890,7 @@ retry:
 
     /*
      * The surplus_list now contains _at_least_ the number of extra pages
-     * needed to accomodate the reservation. Add the appropriate number
+     * needed to accommodate the reservation. Add the appropriate number
      * of pages to the hugetlb pool and free the extras back to the buddy
      * allocator. Commit the entire reservation here to prevent another
      * process from stealing the pages as they are added to the pool but
@@ -2043,7 +2043,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
      * This new VMA should share its siblings reservation map if present.
      * The VMA will only ever have a valid reservation map pointer where
      * it is being copied for another still existing VMA. As that VMA
-     * has a reference to the reservation map it cannot dissappear until
+     * has a reference to the reservation map it cannot disappear until
      * after this open call completes. It is therefore safe to take a
      * new reference here without additional locking.
      */
@@ -2490,7 +2490,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
     /*
      * Currently, we are forced to kill the process in the event the
      * original mapper has unmapped pages from the child due to a failed
-     * COW. Warn that such a situation has occured as it may not be obvious
+     * COW. Warn that such a situation has occurred as it may not be obvious
      */
     if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
         printk(KERN_WARNING
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 0948f10..c7fc7fd 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -1,4 +1,4 @@
-/* Inject a hwpoison memory failure on a arbitary pfn */
+/* Inject a hwpoison memory failure on a arbitrary pfn */
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
diff --git a/mm/internal.h b/mm/internal.h
index 3438dd4..9d0ced8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -162,7 +162,7 @@ static inline struct page *mem_map_offset(struct page *base, int offset)
 }
 
 /*
- * Iterator over all subpages withing the maximally aligned gigantic
+ * Iterator over all subpages within the maximally aligned gigantic
  * page 'base'. Handle any discontiguity in the mem_map.
  */
 static inline struct page *mem_map_next(struct page *iter,
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 84225f3..c1d5867 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -265,7 +265,7 @@ static void kmemleak_disable(void);
 } while (0)
 
 /*
- * Macro invoked when a serious kmemleak condition occured and cannot be
+ * Macro invoked when a serious kmemleak condition occurred and cannot be
  * recovered from. Kmemleak will be disabled and further allocation/freeing
  * tracing no longer available.
  */
@@ -1006,7 +1006,7 @@ static bool update_checksum(struct kmemleak_object *object)
 
 /*
  * Memory scanning is a long process and it needs to be interruptable. This
- * function checks whether such interrupt condition occured.
+ * function checks whether such interrupt condition occurred.
  */
 static int scan_should_stop(void)
 {
@@ -1733,7 +1733,7 @@ static int __init kmemleak_late_init(void)
 
     if (atomic_read(&kmemleak_error)) {
         /*
-         * Some error occured and kmemleak was disabled. There is a
+         * Some error occurred and kmemleak was disabled. There is a
          * small chance that kmemleak_disable() was called immediately
          * after setting kmemleak_initialized and we may end up with
          * two clean-up threads but serialized by scan_mutex.
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -720,7 +720,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
     swapped = PageSwapCache(page);
     flush_cache_page(vma, addr, page_to_pfn(page));
     /*
-     * Ok this is tricky, when get_user_pages_fast() run it doesnt
+     * Ok this is tricky, when get_user_pages_fast() run it doesn't
      * take any lock, therefore the check that we are going to make
      * with the pagecount against the mapcount is racey and
      * O_DIRECT can happen right after the check.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1f0b460..010f916 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1466,7 +1466,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                 break;
         }
         /*
-         * We want to do more targetted reclaim.
+         * We want to do more targeted reclaim.
          * excess >> 2 is not to excessive so as to
          * reclaim too much, nor too less that we keep
          * coming back to reclaim from this cgroup
@@ -2265,7 +2265,7 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
  * - compound_lock is held when nr_pages > 1
  *
  * This function doesn't do "charge" nor css_get to new cgroup. It should be
- * done by a caller(__mem_cgroup_try_charge would be usefull). If @uncharge is
+ * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is
  * true, this function does "uncharge" from old cgroup, but it doesn't if
  * @uncharge is false, so a caller should do "uncharge".
  */
@@ -2318,7 +2318,7 @@ static int mem_cgroup_move_account(struct page *page,
      * We charges against "to" which may not have any tasks. Then, "to"
      * can be under rmdir(). But in current implementation, caller of
      * this function is just force_empty() and move charge, so it's
-     * garanteed that "to" is never removed. So, we don't check rmdir
+     * guaranteed that "to" is never removed. So, we don't check rmdir
      * status here.
      */
     move_unlock_page_cgroup(pc, &flags);
@@ -2648,7 +2648,7 @@ static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
         batch->memcg = mem;
     /*
      * do_batch > 0 when unmapping pages or inode invalidate/truncate.
-     * In those cases, all pages freed continously can be expected to be in
+     * In those cases, all pages freed continuously can be expected to be in
      * the same cgroup and we have chance to coalesce uncharges.
      * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
      * because we want to do uncharge as soon as possible.
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 37feb9f..2b9a5ee 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -208,7 +208,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
      * Don't use force here, it's convenient if the signal
      * can be temporarily blocked.
      * This could cause a loop when the user sets SIGBUS
-     * to SIG_IGN, but hopefully noone will do that?
+     * to SIG_IGN, but hopefully no one will do that?
      */
     ret = send_sig_info(SIGBUS, &si, t);  /* synchronous? */
     if (ret < 0)
@@ -634,7 +634,7 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
      * when the page is reread or dropped. If an
      * application assumes it will always get error on
      * fsync, but does other operations on the fd before
-     * and the page is dropped inbetween then the error
+     * and the page is dropped between then the error
      * will not be properly reported.
      *
      * This can already happen even without hwpoisoned
@@ -728,7 +728,7 @@ static int me_huge_page(struct page *p, unsigned long pfn)
  * The table matches them in order and calls the right handler.
  *
  * This is quite tricky because we can access page at any time
- * in its live cycle, so all accesses have to be extremly careful.
+ * in its live cycle, so all accesses have to be extremely careful.
  *
  * This is not complete. More states could be added.
  * For any missing state don't attempt recovery.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 321fc74..a2acaf8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -724,7 +724,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                     pfn);
             dump_page(page);
 #endif
-            /* Becasue we don't have big zone->lock. we should
+            /* Because we don't have big zone->lock. we should
                check this again here. */
             if (page_count(page)) {
                 not_managed++;
diff --git a/mm/migrate.c b/mm/migrate.c
index b0406d7..34132f8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -375,7 +375,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
          * redo the accounting that clear_page_dirty_for_io undid,
          * but we can't use set_page_dirty because that function
          * is actually a signal that all of the page has become dirty.
-         * Wheras only part of our page may be dirty.
+         * Whereas only part of our page may be dirty.
          */
         __set_page_dirty_nobuffers(newpage);
     }
diff --git a/mm/mremap.c b/mm/mremap.c
index 1de98d4..a7c1f9f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -277,9 +277,16 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
     if (old_len > vma->vm_end - addr)
         goto Efault;
 
-    if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
-        if (new_len > old_len)
+    /* Need to be careful about a growing mapping */
+    if (new_len > old_len) {
+        unsigned long pgoff;
+
+        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
             goto Efault;
+        pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+        pgoff += vma->vm_pgoff;
+        if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
+            goto Einval;
     }
 
     if (vma->vm_flags & VM_LOCKED) {
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e99f6cd..9109049 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -150,7 +150,7 @@ unsigned long __init free_all_bootmem(void)
 {
     /*
      * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
-     * because in some case like Node0 doesnt have RAM installed
+     * because in some case like Node0 doesn't have RAM installed
      * low ram will be on Node1
      * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
      * will be used instead of only Node0 related
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d6e7ba7..2747f5e5a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -942,7 +942,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
              * If breaking a large block of pages, move all free
              * pages to the preferred allocation list. If falling
              * back for a reclaimable kernel allocation, be more
-             * agressive about taking ownership of free pages
+             * aggressive about taking ownership of free pages
              */
             if (unlikely(current_order >= (pageblock_order >> 1)) ||
                     start_migratetype == MIGRATE_RECLAIMABLE ||
@@ -3926,7 +3926,7 @@ static void __init find_usable_zone_for_movable(void)
 
 /*
  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
- * because it is sized independant of architecture. Unlike the other zones,
+ * because it is sized independent of architecture. Unlike the other zones,
  * the starting point for ZONE_MOVABLE is not fixed. It may be different
  * in each node depending on the size of each node and how evenly kernelcore
  * is distributed. This helper function adjusts the zone ranges
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index a12cc3f..9905501 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -377,7 +377,7 @@ not_enough_page:
  * @new: new id
  *
  * Returns old id at success, 0 at failure.
- * (There is no mem_cgroup useing 0 as its id)
+ * (There is no mem_cgroup using 0 as its id)
  */
 unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
         unsigned short old, unsigned short new)
diff --git a/mm/percpu.c b/mm/percpu.c
index 55d4d11..a160db3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -342,7 +342,7 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  * @chunk: chunk of interest
  *
  * Determine whether area map of @chunk needs to be extended to
- * accomodate a new allocation.
+ * accommodate a new allocation.
  *
  * CONTEXT:
  * pcpu_lock.
@@ -431,7 +431,7 @@ out_unlock:
  * depending on @head, is reduced by @tail bytes and @tail byte block
  * is inserted after the target block.
  *
- * @chunk->map must have enough free slots to accomodate the split.
+ * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
@@ -1435,7 +1435,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
     /*
      * Determine min_unit_size, alloc_size and max_upa such that
      * alloc_size is multiple of atom_size and is the smallest
-     * which can accomodate 4k aligned segments which are equal to
+     * which can accommodate 4k aligned segments which are equal to
      * or larger than min_unit_size.
      */
     min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
@@ -1550,7 +1550,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
  * @atom_size: allocation atom size
  * @cpu_distance_fn: callback to determine distance between cpus, optional
  * @alloc_fn: function to allocate percpu page
- * @free_fn: funtion to free percpu page
+ * @free_fn: function to free percpu page
  *
  * This is a helper to ease setting up embedded first percpu chunk and
  * can be called where pcpu_setup_first_chunk() is expected.
@@ -1678,7 +1678,7 @@ out_free:
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
  * @reserved_size: the size of reserved percpu area in bytes
  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
- * @free_fn: funtion to free percpu page, always called with PAGE_SIZE
+ * @free_fn: function to free percpu page, always called with PAGE_SIZE
  * @populate_pte_fn: function to populate pte
  *
  * This is a helper to ease setting up page-remapped first percpu
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -878,7 +878,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
     nc = kmalloc_node(memsize, gfp, node);
     /*
      * The array_cache structures contain pointers to free object.
-     * However, when such objects are allocated or transfered to another
+     * However, when such objects are allocated or transferred to another
      * cache the pointers are not cleared and they could be counted as
      * valid references during a kmemleak scan. Therefore, kmemleak must
      * not scan such objects.
@@ -2606,7 +2606,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  *
  * The cache must be empty before calling this function.
  *
- * The caller must guarantee that noone will allocate memory from the cache
+ * The caller must guarantee that no one will allocate memory from the cache
  * during the kmem_cache_destroy().
  */
 void kmem_cache_destroy(struct kmem_cache *cachep)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -64,7 +64,7 @@
  * we must stay away from it for a while since we may cause a bouncing
  * cacheline if we try to acquire the lock. So go onto the next slab.
  * If all pages are busy then we may allocate a new slab instead of reusing
- * a partial slab. A new slab has noone operating on it and thus there is
+ * a partial slab. A new slab has no one operating on it and thus there is
  * no danger of cacheline contention.
  *
  * Interrupts are disabled during allocation and deallocation in order to
@@ -1929,7 +1929,7 @@ redo:
     else {
 #ifdef CONFIG_CMPXCHG_LOCAL
         /*
-         * The cmpxchg will only match if there was no additonal
+         * The cmpxchg will only match if there was no additional
          * operation and if we are on the right processor.
          *
          * The cmpxchg does the following atomically (without lock semantics!)
@@ -3547,7 +3547,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
     ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
-    /* Honor the call site pointer we recieved. */
+    /* Honor the call site pointer we received. */
     trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
     return ret;
@@ -3577,7 +3577,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
     ret = slab_alloc(s, gfpflags, node, caller);
 
-    /* Honor the call site pointer we recieved. */
+    /* Honor the call site pointer we received. */
     trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
     return ret;
diff --git a/mm/sparse.c b/mm/sparse.c
index 9325020..aa64b12 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -500,7 +500,7 @@ void __init sparse_init(void)
      * so alloc 2M (with 2M align) and 24 bytes in turn will
      * make next 2M slip to one more 2M later.
      * then in big system, the memory will have a lot of holes...
-     * here try to allocate 2M pages continously.
+     * here try to allocate 2M pages continuously.
      *
      * powerpc need to call sparse_init_one_section right after each
      * sparse_early_mem_map_alloc, so allocate usemap_map at first.
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -227,7 +227,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
- * If the architecture not support this fucntion, simply return with no
+ * If the architecture not support this function, simply return with no
  * page pinned
  */
 int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f73b865..c7f5a6d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1065,7 +1065,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
          * surrounding the tag page. Only take those pages of
          * the same active state as that tag page. We may safely
          * round the target page pfn down to the requested order
-         * as the mem_map is guarenteed valid out to MAX_ORDER,
+         * as the mem_map is guaranteed valid out to MAX_ORDER,
          * where that page is in a different zone we will detect
          * it from its zone id and abort this block scan.
          */
@@ -2224,7 +2224,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  * o a 16M DMA zone that is balanced will not balance a zone on any
  *   reasonable sized machine
  * o On all other machines, the top zone must be at least a reasonable
- *    precentage of the middle zones. For example, on 32-bit x86, highmem
+ *    percentage of the middle zones. For example, on 32-bit x86, highmem
  *    would need to be at least 256M for it to be balance a whole node.
  *    Similarly, on x86-64 the Normal zone would need to be at least 1G
  *    to balance a node on its own. These seemed like reasonable ratios.
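
The only functional change in this mm/ diffstat is the mm/mremap.c hunk above; the rest are comment spelling fixes brought in by the merge. As a rough illustration of what that hunk guards against, the stand-alone C sketch below reproduces the wraparound test for a growing mapping. It is illustrative only, not kernel code: addr, vma_start, vma_pgoff, new_len and page_shift are hypothetical stand-ins for the corresponding vma fields and PAGE_SHIFT.

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the check added in vma_to_resize(): growing a mapping must not
 * let its ending page offset wrap around the unsigned long range.
 */
static bool grow_would_wrap(unsigned long addr, unsigned long vma_start,
                            unsigned long vma_pgoff, unsigned long new_len,
                            unsigned int page_shift)
{
    /* page offset of 'addr' within the object backing the mapping */
    unsigned long pgoff = ((addr - vma_start) >> page_shift) + vma_pgoff;

    /* unsigned overflow: the end offset wrapped past the start offset */
    return pgoff + (new_len >> page_shift) < pgoff;
}

int main(void)
{
    /* a mapping whose page offset sits near the top of the range wraps */
    printf("%d\n", grow_would_wrap(0x10000000UL, 0x10000000UL,
                                   (unsigned long)-16, 1UL << 20, 12));
    return 0;
}

With 4 KiB pages this prints 1: growing by 1 MiB (256 pages) from a page offset 16 pages below the top of the range overflows, which is the case the new Einval path in vma_to_resize() rejects.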