Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/mmu.c     10
-rw-r--r--  arch/x86/xen/p2m.c     18
-rw-r--r--  arch/x86/xen/setup.c   17
3 files changed, 26 insertions(+), 19 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0c376a2..832765c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1036,10 +1036,9 @@ static void xen_pgd_pin(struct mm_struct *mm)
*/
void xen_mm_pin_all(void)
{
- unsigned long flags;
struct page *page;
- spin_lock_irqsave(&pgd_lock, flags);
+ spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
if (!PagePinned(page)) {
@@ -1048,7 +1047,7 @@ void xen_mm_pin_all(void)
}
}
- spin_unlock_irqrestore(&pgd_lock, flags);
+ spin_unlock(&pgd_lock);
}
/*
@@ -1149,10 +1148,9 @@ static void xen_pgd_unpin(struct mm_struct *mm)
*/
void xen_mm_unpin_all(void)
{
- unsigned long flags;
struct page *page;
- spin_lock_irqsave(&pgd_lock, flags);
+ spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
if (PageSavePinned(page)) {
@@ -1162,7 +1160,7 @@ void xen_mm_unpin_all(void)
}
}
- spin_unlock_irqrestore(&pgd_lock, flags);
+ spin_unlock(&pgd_lock);
}
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
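The mmu.c hunks above drop the irqsave/irqrestore variants around pgd_lock in favour of plain spin_lock()/spin_unlock(), which is only safe once no interrupt-context path can take the same lock. A minimal sketch of that pattern follows; example_lock and the before()/after() helpers are hypothetical names, not kernel code.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, stands in for pgd_lock */

static void before(void)
{
	unsigned long flags;

	/* irqsave variant: disables local IRQs and stashes the old state */
	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

static void after(void)
{
	/* plain variant: local IRQs stay enabled across the critical section */
	spin_lock(&example_lock);
	/* ... critical section ... */
	spin_unlock(&example_lock);
}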
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 65f21f4..00fe560 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -374,21 +374,15 @@ void __init xen_build_dynamic_phys_to_machine(void)
* As long as the mfn_list has enough entries to completely
* fill a p2m page, pointing into the array is ok. But if
* not the entries beyond the last pfn will be undefined.
- * And guessing that the 'what-ever-there-is' does not take it
- * too kindly when changing it to invalid markers, a new page
- * is allocated, initialized and filled with the valid part.
*/
if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
unsigned long p2midx;
- unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_init(p2m);
-
- for (p2midx = 0; pfn + p2midx < max_pfn; p2midx++) {
- p2m[p2midx] = mfn_list[pfn + p2midx];
- }
- p2m_top[topidx][mididx] = p2m;
- } else
- p2m_top[topidx][mididx] = &mfn_list[pfn];
+
+ p2midx = max_pfn % P2M_PER_PAGE;
+ for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+ mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+ }
+ p2m_top[topidx][mididx] = &mfn_list[pfn];
}
m2p_override_init();
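The p2m.c hunk replaces the extend_brk() copy of the final, partially filled page with in-place invalidation: the tail of mfn_list itself is stamped with INVALID_P2M_ENTRY and the p2m tree then points straight at mfn_list. A standalone sketch of the index arithmetic follows; P2M_PER_PAGE = 512 and INVALID_P2M_ENTRY = ~0UL are assumed values, and the mfn_list contents are made up.

#include <stdio.h>

#define P2M_PER_PAGE      512UL		/* assumed: PAGE_SIZE / sizeof(unsigned long) */
#define INVALID_P2M_ENTRY (~0UL)	/* assumed marker value */

int main(void)
{
	unsigned long mfn_list[2 * P2M_PER_PAGE];	/* toy mfn_list */
	unsigned long max_pfn = 700;			/* toy machine size */
	unsigned long pfn, i;

	for (i = 0; i < max_pfn; i++)
		mfn_list[i] = i + 0x1000;		/* fake valid MFNs */

	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
		/* last page only partially covered by max_pfn? */
		if (pfn + P2M_PER_PAGE > max_pfn) {
			/* pfn is page aligned, so max_pfn % P2M_PER_PAGE is the
			 * first index of this page that lies beyond max_pfn */
			unsigned long idx = max_pfn % P2M_PER_PAGE;

			for ( ; idx < P2M_PER_PAGE; idx++)
				mfn_list[pfn + idx] = INVALID_P2M_ENTRY;
		}
	}

	printf("entry %lu: %#lx (valid)\n",   max_pfn - 1, mfn_list[max_pfn - 1]);
	printf("entry %lu: %#lx (invalid)\n", max_pfn,     mfn_list[max_pfn]);
	return 0;
}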
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 54d9379..fa0269a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -229,8 +229,13 @@ char * __init xen_memory_setup(void)
e820.nr_map = 0;
xen_extra_mem_start = mem_end;
for (i = 0; i < memmap.nr_entries; i++) {
- unsigned long long end = map[i].addr + map[i].size;
+ unsigned long long end;
+ /* Guard against non-page aligned E820 entries. */
+ if (map[i].type == E820_RAM)
+ map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+ end = map[i].addr + map[i].size;
if (map[i].type == E820_RAM && end > mem_end) {
/* RAM off the end - may be partially included */
u64 delta = min(map[i].size, end - mem_end);
@@ -239,6 +244,15 @@ char * __init xen_memory_setup(void)
end -= delta;
extra_pages += PFN_DOWN(delta);
+ /*
+ * Set RAM below 4GB that is not for us to be unusable.
+ * This prevents "System RAM" address space from being
+ * used as potential resource for I/O address (happens
+ * when 'allocate_resource' is called).
+ */
+ if (delta &&
+ (xen_initial_domain() && end < 0x100000000ULL))
+ e820_add_region(end, delta, E820_UNUSABLE);
}
if (map[i].size > 0 && end > xen_extra_mem_start)
@@ -407,6 +421,7 @@ void __init xen_arch_setup(void)
boot_cpu_data.hlt_works_ok = 1;
#endif
pm_idle = default_idle;
+ boot_option_idle_override = IDLE_HALT;
fiddle_vdso();
}