Diffstat (limited to 'mm')
 mm/mempolicy.c  |  5
 mm/migrate.c    | 11
 mm/page_alloc.c |  2
 mm/slub.c       | 46
 4 files changed, 45 insertions(+), 19 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 172abff..bb54b88c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -955,6 +955,11 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
goto out;
}
+ if (!nodes_subset(new, node_online_map)) {
+ err = -EINVAL;
+ goto out;
+ }
+
err = security_task_movememory(task);
if (err)
goto out;
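
The hunk above makes sys_migrate_pages() reject a destination node mask that is not a subset of the online nodes. A minimal userspace sketch of the resulting behaviour, assuming a 64-bit node mask, node 0 as the source, and an offline node 63 (assumptions for illustration, not part of the patch):

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned long old_nodes = 1UL << 0;	/* migrate pages away from node 0 */
	unsigned long new_nodes = 1UL << 63;	/* assumed-offline destination node */

	/* With the added nodes_subset() check, an offline destination is
	 * refused with EINVAL before any migration work starts. */
	if (syscall(SYS_migrate_pages, getpid(), 64, &old_nodes, &new_nodes) < 0)
		printf("migrate_pages: %s\n", strerror(errno));
	return 0;
}
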
diff --git a/mm/migrate.c b/mm/migrate.c
index 37c73b9..e2fdbce 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -611,6 +611,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
int rc = 0;
int *result = NULL;
struct page *newpage = get_new_page(page, private, &result);
+ int rcu_locked = 0;
if (!newpage)
return -ENOMEM;
@@ -636,8 +637,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
* we cannot notice that anon_vma is freed while we migrates a page.
* This rcu_read_lock() delays freeing anon_vma pointer until the end
* of migration. File cache pages are no problem because of page_lock()
+ * File Caches may use write_page() or lock_page() in migration, then,
+ * just care Anon page here.
*/
- rcu_read_lock();
+ if (PageAnon(page)) {
+ rcu_read_lock();
+ rcu_locked = 1;
+ }
/*
* This is a corner case handling.
* When a new swap-cache is read into, it is linked to LRU
@@ -656,7 +662,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
if (rc)
remove_migration_ptes(page, page);
rcu_unlock:
- rcu_read_unlock();
+ if (rcu_locked)
+ rcu_read_unlock();
unlock:
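
The migrate.c change takes rcu_read_lock() only for anonymous pages and records that in rcu_locked so the shared exit path stays balanced. A generic sketch of that conditional-lock-plus-flag pattern, using a pthread mutex purely for illustration (none of these names are kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;

static int process(bool needs_guard)
{
	bool locked = false;
	int rc = 0;

	if (needs_guard) {			/* cf. PageAnon(page) */
		pthread_mutex_lock(&guard);
		locked = true;
	}

	/* ... work that may bail out to the common exit path ... */

	if (locked)				/* cf. the rcu_locked flag */
		pthread_mutex_unlock(&guard);
	return rc;
}

int main(void)
{
	printf("%d %d\n", process(true), process(false));
	return 0;
}
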
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6427653..1a8c595 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2345,6 +2345,8 @@ static int __cpuinit process_zones(int cpu)
return 0;
bad:
for_each_zone(dzone) {
+ if (!populated_zone(dzone))
+ continue;
if (dzone == zone)
break;
kfree(zone_pcp(dzone, cpu));
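
The page_alloc.c error path frees the per-cpu pagesets set up so far; the added check skips zones that were never populated, so nothing that was never allocated is passed to kfree(). A standalone sketch of that unwind pattern with invented names (setup_all, slots, populated):

#include <stdlib.h>

#define N 8	/* number of slots; stands in for the zone list */

static int setup_all(void *slots[N], const int populated[N])
{
	int i, j;

	for (i = 0; i < N; i++) {
		if (!populated[i])
			continue;
		slots[i] = malloc(128);
		if (!slots[i])
			goto bad;
	}
	return 0;
bad:
	/* Re-walk in the same order, skipping never-populated entries
	 * (cf. !populated_zone(dzone)) and stopping at the one that failed. */
	for (j = 0; j < N; j++) {
		if (!populated[j])
			continue;
		if (j == i)
			break;
		free(slots[j]);
	}
	return -1;
}

int main(void)
{
	void *slots[N] = { 0 };
	const int populated[N] = { 1, 0, 1, 1, 0, 1, 1, 1 };

	return setup_all(slots, populated) ? 1 : 0;
}
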
diff --git a/mm/slub.c b/mm/slub.c
index 04151da..addb20a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -986,7 +986,9 @@ out:
__setup("slub_debug", setup_slub_debug);
-static void kmem_cache_open_debug_check(struct kmem_cache *s)
+static unsigned long kmem_cache_flags(unsigned long objsize,
+ unsigned long flags, const char *name,
+ void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
/*
* The page->offset field is only 16 bit wide. This is an offset
@@ -1000,19 +1002,21 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
* Debugging or ctor may create a need to move the free
* pointer. Fail if this happens.
*/
- if (s->objsize >= 65535 * sizeof(void *)) {
- BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
+ if (objsize >= 65535 * sizeof(void *)) {
+ BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
- BUG_ON(s->ctor);
- }
- else
+ BUG_ON(ctor);
+ } else {
/*
* Enable debugging if selected on the kernel commandline.
*/
if (slub_debug && (!slub_debug_slabs ||
- strncmp(slub_debug_slabs, s->name,
+ strncmp(slub_debug_slabs, name,
strlen(slub_debug_slabs)) == 0))
- s->flags |= slub_debug;
+ flags |= slub_debug;
+ }
+
+ return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
@@ -1029,7 +1033,12 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
-static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
+static inline unsigned long kmem_cache_flags(unsigned long objsize,
+ unsigned long flags, const char *name,
+ void (*ctor)(void *, struct kmem_cache *, unsigned long))
+{
+ return flags;
+}
#define slub_debug 0
#endif
/*
@@ -2088,9 +2097,8 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
s->name = name;
s->ctor = ctor;
s->objsize = size;
- s->flags = flags;
s->align = align;
- kmem_cache_open_debug_check(s);
+ s->flags = kmem_cache_flags(size, flags, name, ctor);
if (!calculate_sizes(s))
goto error;
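
Converting kmem_cache_open_debug_check() into kmem_cache_flags() turns a helper that modified the cache in place into a pure function of (objsize, flags, name, ctor), so kmem_cache_open() can simply assign its result and, further down, find_mergeable() can compute a candidate's effective flags before any struct kmem_cache exists. A userspace sketch of the slub_debug name matching it performs; debug_flags and debug_slabs stand in for the boot-parameter state and are not kernel symbols:

#include <stdio.h>
#include <string.h>

static unsigned long debug_flags = 0x1;		/* pretend one debug bit was requested */
static const char *debug_slabs = "dentry";	/* optional slab-name filter, may be NULL */

static unsigned long cache_flags(unsigned long flags, const char *name)
{
	/* Enable debugging only when no filter was given or the cache
	 * name starts with the filter, mirroring the strncmp() above. */
	if (debug_flags &&
	    (!debug_slabs ||
	     strncmp(debug_slabs, name, strlen(debug_slabs)) == 0))
		flags |= debug_flags;
	return flags;
}

int main(void)
{
	printf("dentry: %#lx\n", cache_flags(0, "dentry"));	/* gets the debug bit */
	printf("inode:  %#lx\n", cache_flags(0, "inode"));	/* left unchanged */
	return 0;
}
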
@@ -2660,7 +2668,7 @@ static int slab_unmergeable(struct kmem_cache *s)
}
static struct kmem_cache *find_mergeable(size_t size,
- size_t align, unsigned long flags,
+ size_t align, unsigned long flags, const char *name,
void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
struct kmem_cache *s;
@@ -2674,6 +2682,7 @@ static struct kmem_cache *find_mergeable(size_t size,
size = ALIGN(size, sizeof(void *));
align = calculate_alignment(flags, align, size);
size = ALIGN(size, align);
+ flags = kmem_cache_flags(size, flags, name, NULL);
list_for_each_entry(s, &slab_caches, list) {
if (slab_unmergeable(s))
@@ -2682,8 +2691,7 @@ static struct kmem_cache *find_mergeable(size_t size,
if (size > s->size)
continue;
- if (((flags | slub_debug) & SLUB_MERGE_SAME) !=
- (s->flags & SLUB_MERGE_SAME))
+ if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
continue;
/*
* Check if alignment is compatible.
@@ -2707,7 +2715,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
struct kmem_cache *s;
down_write(&slub_lock);
- s = find_mergeable(size, align, flags, ctor);
+ s = find_mergeable(size, align, flags, name, ctor);
if (s) {
s->refcount++;
/*
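
The find_mergeable() hunks run the candidate through the same kmem_cache_flags() fixup before comparing the merge-relevant flag bits, so a cache that would get command-line debugging is no longer merged into an existing non-debug cache. A self-contained sketch of that decision; MERGE_SAME_MASK, fixup_flags and struct cache are illustrative names only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MERGE_SAME_MASK	0x0fUL	/* stand-in for SLUB_MERGE_SAME */
#define DEBUG_FLAG	0x01UL

struct cache { const char *name; unsigned long flags; };

static unsigned long fixup_flags(unsigned long flags, const char *name)
{
	/* pretend the boot line asked for debugging on "dentry" only */
	if (strcmp(name, "dentry") == 0)
		flags |= DEBUG_FLAG;
	return flags;
}

static bool mergeable(const struct cache *existing,
		      unsigned long flags, const char *name)
{
	flags = fixup_flags(flags, name);	/* cf. kmem_cache_flags() */
	return (flags & MERGE_SAME_MASK) ==
	       (existing->flags & MERGE_SAME_MASK);
}

int main(void)
{
	struct cache plain = { "size-64", 0 };

	printf("%d %d\n",
	       mergeable(&plain, 0, "inode"),	/* 1: same merge bits */
	       mergeable(&plain, 0, "dentry"));	/* 0: debug flag keeps it apart */
	return 0;
}
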
@@ -3813,7 +3821,9 @@ static int __init slab_sysfs_init(void)
list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);
- BUG_ON(err);
+ if (err)
+ printk(KERN_ERR "SLUB: Unable to add boot slab %s"
+ " to sysfs\n", s->name);
}
while (alias_list) {
@@ -3821,7 +3831,9 @@ static int __init slab_sysfs_init(void)
alias_list = alias_list->next;
err = sysfs_slab_alias(al->s, al->name);
- BUG_ON(err);
+ if (err)
+ printk(KERN_ERR "SLUB: Unable to add boot slab alias"
+ " %s to sysfs\n", s->name);
kfree(al);
}
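
The slab_sysfs_init() hunks replace BUG_ON(err) with an error message, so a sysfs registration failure during boot is reported instead of panicking the machine. A trivial userspace sketch of that log-and-continue pattern; register_item() and its failure rule are invented for illustration:

#include <stdio.h>

struct item { const char *name; };

static int register_item(const struct item *it)
{
	/* pretend names starting with 'x' fail to register */
	return it->name[0] == 'x' ? -1 : 0;
}

int main(void)
{
	struct item items[] = { { "dentry" }, { "xfs_inode" }, { "inode" } };
	int i;

	for (i = 0; i < 3; i++) {
		/* before the change this was effectively an assertion */
		if (register_item(&items[i]))
			fprintf(stderr, "unable to register %s, continuing\n",
				items[i].name);
	}
	return 0;
}
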