Diffstat (limited to 'arch/powerpc/mm/numa.c')
-rw-r--r--	arch/powerpc/mm/numa.c	26
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 261adbd..3feefc3 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -58,7 +58,7 @@ static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
* Allocate node_to_cpumask_map based on number of available nodes
* Requires node_possible_map to be valid.
*
- * Note: node_to_cpumask() is not valid until after this is done.
+ * Note: cpumask_of_node() is not valid until after this is done.
*/
static void __init setup_node_to_cpumask_map(void)
{
@@ -386,7 +386,7 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
of_node_put(memory);
}
-static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
+static unsigned long read_n_cells(int n, const unsigned int **buf)
{
unsigned long result = 0;
@@ -501,7 +501,7 @@ static int of_get_assoc_arrays(struct device_node *memory,
aa->n_arrays = *prop++;
aa->array_sz = *prop++;
- /* Now that we know the number of arrrays and size of each array,
+ /* Now that we know the number of arrays and size of each array,
* revalidate the size of the property read in.
*/
if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
@@ -690,9 +690,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
node_set_online(nid);
sz = numa_enforce_memory_limit(base, size);
if (sz)
- add_active_range(nid, base >> PAGE_SHIFT,
- (base >> PAGE_SHIFT)
- + (sz >> PAGE_SHIFT));
+ memblock_set_node(base, sz, nid);
} while (--ranges);
}
}
@@ -782,8 +780,7 @@ new_range:
continue;
}
- add_active_range(nid, start >> PAGE_SHIFT,
- (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+ memblock_set_node(start, size, nid);
if (--ranges)
goto new_range;
@@ -819,7 +816,8 @@ static void __init setup_nonnuma(void)
end_pfn = memblock_region_memory_end_pfn(reg);
fake_numa_create_new_node(end_pfn, &nid);
- add_active_range(nid, start_pfn, end_pfn);
+ memblock_set_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), nid);
node_set_online(nid);
}
}
@@ -949,7 +947,7 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
.priority = 1 /* Must run before sched domains notifier. */
};
-static void mark_reserved_regions_for_nid(int nid)
+static void __init mark_reserved_regions_for_nid(int nid)
{
struct pglist_data *node = NODE_DATA(nid);
struct memblock_region *reg;
@@ -1442,7 +1440,7 @@ int arch_update_cpu_topology(void)
{
int cpu, nid, old_nid;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
- struct sys_device *sysdev;
+ struct device *dev;
for_each_cpu(cpu,&cpu_associativity_changes_mask) {
vphn_get_associativity(cpu, associativity);
@@ -1463,9 +1461,9 @@ int arch_update_cpu_topology(void)
register_cpu_under_node(cpu, nid);
put_online_cpus();
- sysdev = get_cpu_sysdev(cpu);
- if (sysdev)
- kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ dev = get_cpu_device(cpu);
+ if (dev)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
return 1;