Diffstat (limited to 'arch/ia64/mm/init.c')
-rw-r--r--  arch/ia64/mm/init.c |  64
1 file changed, 56 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 07d82cd..5b70241 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
+#include <linux/kexec.h>
#include <asm/a.out.h>
#include <asm/dma.h>
@@ -67,7 +68,7 @@ max_pgt_pages(void)
#ifndef CONFIG_NUMA
node_free_pages = nr_free_pages();
#else
- node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+ node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
#endif
max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
inline void
ia64_set_rbs_bot (void)
{
@@ -135,7 +155,7 @@ ia64_set_rbs_bot (void)
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
- current->thread.rbs_bot = STACK_TOP - stack_size;
+ current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
/*
@@ -156,9 +176,8 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
- vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
- memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -175,9 +194,8 @@ ia64_init_addr_space (void)
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
- vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
- memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
@@ -586,13 +604,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
return 0;
}
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
- add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+	int nid = paddr_to_nid(__pa(start));
+
+	if (nid < 0)
+		nid = 0;
+#ifdef CONFIG_KEXEC
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
+	if (start < end)
+		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+			__pa(end) >> PAGE_SHIFT);
return 0;
}
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
@@ -607,6 +639,22 @@ count_reserved_pages (u64 start, u64 end, void *arg)
return 0;
}
+int
+find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
+{
+	unsigned long pfn_start, pfn_end;
+#ifdef CONFIG_FLATMEM
+	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
+	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
+#else
+	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
+	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
+#endif
+	min_low_pfn = min(min_low_pfn, pfn_start);
+	max_low_pfn = max(max_low_pfn, pfn_end);
+	return 0;
+}
+
/*
* Boot command-line option "nolwsys" can be used to disable the use of any light-weight
* system call handler. When this option is in effect, all fsyscalls will end up bubbling