author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-06-04 05:15:36 (GMT)
committer	Paul Mackerras <paulus@samba.org>	2007-06-14 12:29:56 (GMT)
commit		3d5134ee8341bffc4f539049abb9e90d469b448d (patch)
tree		037958e0daa97b4ef350908a53182167ee2c8a03 /arch/powerpc/mm
parent		c19c03fc749147f565e807fa65f1729066800571 (diff)
[POWERPC] Rewrite IO allocation & mapping on powerpc64
This rewrites pretty much from scratch the handling of MMIO and PIO space allocations on powerpc64. The main goals are:

 - Get rid of imalloc and use more common code where possible
 - Simplify the current mess so that PIO space is allocated and mapped in a single place for PCI bridges
 - Handle allocation constraints of PIO for all bridges including hot plugged ones within the 2GB space reserved for IO ports, so that devices on hotplugged busses will now work with drivers that assume IO ports fit in an int.
 - Cleanup and separate tracking of the ISA space in the reserved low 64K of IO space. No ISA -> Nothing mapped there.

I booted a cell blade with IDE on PIO and MMIO and a dual G5 so far, that's it :-)

With this patch, all allocations are done using the code in mm/vmalloc.c, though we use the low level __get_vm_area with explicit start/stop constraints in order to manage separate areas for vmalloc/vmap, ioremap, and PCI IOs. This greatly simplifies a lot of things, as you can see in the diffstat of that patch :-)

A new pair of functions, pcibios_map/unmap_io_space(), now replaces all of the previous code that used to manipulate PCI IO space. The allocation is done at mapping time, which is now called from scan_phb's, just before the devices are probed (instead of after, which is by itself a bug fix). The only other caller is the PCI hotplug code for hot adding PCI-PCI bridges (slots).

imalloc is gone, as is the "sub-allocation" thing, but I do believe that hotplug should still work in the sense that the space allocation is always done by the PHB, but if you unmap a child bus of this PHB (which seems to be possible), then the code should properly tear down all the HPTE mappings for that area of the PHB allocated IO space.

I now always reserve the first 64K of IO space for the bridge with the ISA bus on it. I have moved the code for tracking ISA into a separate file, which should also make it smarter if we ever become capable of hot unplugging or re-plugging an ISA bridge.

This should have a side effect on platforms like powermac where VGA IOs will no longer work. This is done on purpose, though, as they would have worked semi-randomly before. The idea at this point is to isolate drivers that might need to access those and fix them by providing a proper function to obtain an offset to the legacy IOs of a given bus.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
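To make the new flow concrete, the post-patch __ioremap() path in the diff below condenses to roughly the following sketch (error handling and the early-boot fallback are trimmed; IOREMAP_BASE/IOREMAP_END are the window bounds this patch introduces):

void __iomem *ioremap_sketch(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	phys_addr_t paligned = addr & PAGE_MASK;
	struct vm_struct *area;
	void __iomem *ret;

	/* Round the request out to whole pages */
	size = PAGE_ALIGN(addr + size) - paligned;
	if (size == 0 || paligned == 0)
		return NULL;

	/* Carve virtual space out of [ioremap_bot, IOREMAP_END) only;
	 * vmalloc/vmap and PCI IO each get their own disjoint window */
	area = __get_vm_area(size, VM_IOREMAP, ioremap_bot, IOREMAP_END);
	if (area == NULL)
		return NULL;

	/* Bolt the page tables; __ioremap_at() maps page by page */
	ret = __ioremap_at(paligned, area->addr, size, flags);
	if (ret == NULL) {
		vunmap(area->addr);	/* give the range back */
		return NULL;
	}

	/* Re-apply the sub-page offset of the original request */
	return ret + (addr & ~PAGE_MASK);
}

Because each consumer draws from its own window, the generic vmalloc allocator does all the bookkeeping and the per-arch imalloc tracking below becomes unnecessary.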
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/Makefile     |   3
-rw-r--r--  arch/powerpc/mm/imalloc.c    | 314
-rw-r--r--  arch/powerpc/mm/mmu_decl.h   |  12
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 204
-rw-r--r--  arch/powerpc/mm/tlb_64.c     |  56
5 files changed, 106 insertions, 483 deletions
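The pcibios_map_io_space() side lives outside this directory (under arch/powerpc/kernel), so it does not appear in the diffstat above. As a rough, hedged reconstruction from the commit message — PHB_IO_BASE/PHB_IO_END, the mapping flags, and the pci_controller fields used here are assumptions, not taken from this diff — the PHB allocation step looks something like:

/* Hedged sketch of the PHB side of the new scheme: carve the host
 * bridge's IO window out of the reserved 2GB PIO region, then bolt
 * it with __ioremap_at().  Field and constant names are assumptions
 * based on the commit message, not verified against this diff.
 */
static int map_phb_io_space(struct pci_controller *hose)
{
	unsigned long size_page = PAGE_ALIGN(hose->pci_io_size);
	struct vm_struct *area;

	if (size_page == 0 || hose->io_base_phys == 0)
		return 0;	/* no PIO behind this bridge */

	/* Allocate virtual space within the reserved PIO window */
	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
	if (area == NULL)
		return -ENOMEM;

	/* Establish the bolted mapping for the whole window at once */
	if (__ioremap_at(hose->io_base_phys, area->addr, size_page,
			 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
		return -ENOMEM;

	hose->io_base_alloc = area->addr;
	return 0;
}

Doing the allocation here, at mapping time, is what lets hotplugged bridges receive PIO space below 2GB instead of wherever imalloc happened to place them.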
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 4f839c6..7e4d27a 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -11,8 +11,7 @@ obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
hash_utils_64.o hash_low_64.o tlb_64.o \
- slb_low.o slb.o stab.o mmap.o imalloc.o \
- $(hash-y)
+ slb_low.o slb.o stab.o mmap.o $(hash-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o tlb_32.o
obj-$(CONFIG_40x) += 4xx_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
deleted file mode 100644
index 9eddf37..0000000
--- a/arch/powerpc/mm/imalloc.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <linux/mutex.h>
-#include <asm/cacheflush.h>
-
-#include "mmu_decl.h"
-
-static DEFINE_MUTEX(imlist_mutex);
-struct vm_struct * imlist = NULL;
-
-static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
-{
- unsigned long addr;
- struct vm_struct **p, *tmp;
-
- addr = ioremap_bot;
- for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
- if (size + addr < (unsigned long) tmp->addr)
- break;
- if ((unsigned long)tmp->addr >= ioremap_bot)
- addr = tmp->size + (unsigned long) tmp->addr;
- if (addr >= IMALLOC_END-size)
- return 1;
- }
- *im_addr = addr;
-
- return 0;
-}
-
-/* Return whether the region described by v_addr and size is a subset
- * of the region described by parent
- */
-static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
- struct vm_struct *parent)
-{
- return (int) (v_addr >= (unsigned long) parent->addr &&
- v_addr < (unsigned long) parent->addr + parent->size &&
- size < parent->size);
-}
-
-/* Return whether the region described by v_addr and size is a superset
- * of the region described by child
- */
-static int im_region_is_superset(unsigned long v_addr, unsigned long size,
- struct vm_struct *child)
-{
- struct vm_struct parent;
-
- parent.addr = (void *) v_addr;
- parent.size = size;
-
- return im_region_is_subset((unsigned long) child->addr, child->size,
- &parent);
-}
-
-/* Return whether the region described by v_addr and size overlaps
- * the region described by vm. Overlapping regions meet the
- * following conditions:
- * 1) The regions share some part of the address space
- * 2) The regions aren't identical
- * 3) Neither region is a subset of the other
- */
-static int im_region_overlaps(unsigned long v_addr, unsigned long size,
- struct vm_struct *vm)
-{
- if (im_region_is_superset(v_addr, size, vm))
- return 0;
-
- return (v_addr + size > (unsigned long) vm->addr + vm->size &&
- v_addr < (unsigned long) vm->addr + vm->size) ||
- (v_addr < (unsigned long) vm->addr &&
- v_addr + size > (unsigned long) vm->addr);
-}
-
-/* Determine imalloc status of region described by v_addr and size.
- * Can return one of the following:
- * IM_REGION_UNUSED - Entire region is unallocated in imalloc space.
- * IM_REGION_SUBSET - Region is a subset of a region that is already
- * allocated in imalloc space.
- * vm will be assigned to a ptr to the parent region.
- * IM_REGION_EXISTS - Exact region already allocated in imalloc space.
- * vm will be assigned to a ptr to the existing imlist
- * member.
- * IM_REGION_OVERLAPS - Region overlaps an allocated region in imalloc space.
- * IM_REGION_SUPERSET - Region is a superset of a region that is already
- * allocated in imalloc space.
- */
-static int im_region_status(unsigned long v_addr, unsigned long size,
- struct vm_struct **vm)
-{
- struct vm_struct *tmp;
-
- for (tmp = imlist; tmp; tmp = tmp->next)
- if (v_addr < (unsigned long) tmp->addr + tmp->size)
- break;
-
- *vm = NULL;
- if (tmp) {
- if (im_region_overlaps(v_addr, size, tmp))
- return IM_REGION_OVERLAP;
-
- *vm = tmp;
- if (im_region_is_subset(v_addr, size, tmp)) {
- /* Return with tmp pointing to superset */
- return IM_REGION_SUBSET;
- }
- if (im_region_is_superset(v_addr, size, tmp)) {
- /* Return with tmp pointing to first subset */
- return IM_REGION_SUPERSET;
- }
- else if (v_addr == (unsigned long) tmp->addr &&
- size == tmp->size) {
- /* Return with tmp pointing to exact region */
- return IM_REGION_EXISTS;
- }
- }
-
- return IM_REGION_UNUSED;
-}
-
-static struct vm_struct * split_im_region(unsigned long v_addr,
- unsigned long size, struct vm_struct *parent)
-{
- struct vm_struct *vm1 = NULL;
- struct vm_struct *vm2 = NULL;
- struct vm_struct *new_vm = NULL;
-
- vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
- if (vm1 == NULL) {
- printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
- return NULL;
- }
-
- if (v_addr == (unsigned long) parent->addr) {
- /* Use existing parent vm_struct to represent child, allocate
- * new one for the remainder of parent range
- */
- vm1->size = parent->size - size;
- vm1->addr = (void *) (v_addr + size);
- vm1->next = parent->next;
-
- parent->size = size;
- parent->next = vm1;
- new_vm = parent;
- } else if (v_addr + size == (unsigned long) parent->addr +
- parent->size) {
- /* Allocate new vm_struct to represent child, use existing
- * parent one for remainder of parent range
- */
- vm1->size = size;
- vm1->addr = (void *) v_addr;
- vm1->next = parent->next;
- new_vm = vm1;
-
- parent->size -= size;
- parent->next = vm1;
- } else {
- /* Allocate two new vm_structs for the new child and
- * uppermost remainder, and use existing parent one for the
- * lower remainder of parent range
- */
- vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
- if (vm2 == NULL) {
- printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
- kfree(vm1);
- return NULL;
- }
-
- vm1->size = size;
- vm1->addr = (void *) v_addr;
- vm1->next = vm2;
- new_vm = vm1;
-
- vm2->size = ((unsigned long) parent->addr + parent->size) -
- (v_addr + size);
- vm2->addr = (void *) v_addr + size;
- vm2->next = parent->next;
-
- parent->size = v_addr - (unsigned long) parent->addr;
- parent->next = vm1;
- }
-
- return new_vm;
-}
-
-static struct vm_struct * __add_new_im_area(unsigned long req_addr,
- unsigned long size)
-{
- struct vm_struct **p, *tmp, *area;
-
- for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
- if (req_addr + size <= (unsigned long)tmp->addr)
- break;
- }
-
- area = kmalloc(sizeof(*area), GFP_KERNEL);
- if (!area)
- return NULL;
- area->flags = 0;
- area->addr = (void *)req_addr;
- area->size = size;
- area->next = *p;
- *p = area;
-
- return area;
-}
-
-static struct vm_struct * __im_get_area(unsigned long req_addr,
- unsigned long size,
- int criteria)
-{
- struct vm_struct *tmp;
- int status;
-
- status = im_region_status(req_addr, size, &tmp);
- if ((criteria & status) == 0) {
- return NULL;
- }
-
- switch (status) {
- case IM_REGION_UNUSED:
- tmp = __add_new_im_area(req_addr, size);
- break;
- case IM_REGION_SUBSET:
- tmp = split_im_region(req_addr, size, tmp);
- break;
- case IM_REGION_EXISTS:
- /* Return requested region */
- break;
- case IM_REGION_SUPERSET:
- /* Return first existing subset of requested region */
- break;
- default:
- printk(KERN_ERR "%s() unexpected imalloc region status\n",
- __FUNCTION__);
- tmp = NULL;
- }
-
- return tmp;
-}
-
-struct vm_struct * im_get_free_area(unsigned long size)
-{
- struct vm_struct *area;
- unsigned long addr;
-
- mutex_lock(&imlist_mutex);
- if (get_free_im_addr(size, &addr)) {
- printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
- __FUNCTION__, size);
- area = NULL;
- goto next_im_done;
- }
-
- area = __im_get_area(addr, size, IM_REGION_UNUSED);
- if (area == NULL) {
- printk(KERN_ERR
- "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
- __FUNCTION__, addr, size);
- }
-next_im_done:
- mutex_unlock(&imlist_mutex);
- return area;
-}
-
-struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
- int criteria)
-{
- struct vm_struct *area;
-
- mutex_lock(&imlist_mutex);
- area = __im_get_area(v_addr, size, criteria);
- mutex_unlock(&imlist_mutex);
- return area;
-}
-
-void im_free(void * addr)
-{
- struct vm_struct **p, *tmp;
-
- if (!addr)
- return;
- if ((unsigned long) addr & ~PAGE_MASK) {
- printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
- return;
- }
- mutex_lock(&imlist_mutex);
- for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
- if (tmp->addr == addr) {
- *p = tmp->next;
- unmap_kernel_range((unsigned long)tmp->addr,
- tmp->size);
- kfree(tmp);
- mutex_unlock(&imlist_mutex);
- return;
- }
- }
- mutex_unlock(&imlist_mutex);
- printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
- addr);
-}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 2558c34..f7a4066 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -90,16 +90,4 @@ static inline void flush_HPTE(unsigned context, unsigned long va,
else
_tlbie(va);
}
-#else /* CONFIG_PPC64 */
-/* imalloc region types */
-#define IM_REGION_UNUSED 0x1
-#define IM_REGION_SUBSET 0x2
-#define IM_REGION_EXISTS 0x4
-#define IM_REGION_OVERLAP 0x8
-#define IM_REGION_SUPERSET 0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
- int region_type);
-extern void im_free(void *addr);
#endif
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index fa5c828..a895de7 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -34,41 +34,27 @@
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/nodemask.h>
-#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
-#include <asm/lmb.h>
-#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
-#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
-#include <asm/eeh.h>
#include <asm/processor.h>
-#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
-#include <asm/iommu.h>
#include <asm/abs_addr.h>
-#include <asm/vdso.h>
#include <asm/firmware.h>
#include "mmu_decl.h"
-unsigned long ioremap_bot = IMALLOC_BASE;
-static unsigned long phbs_io_bot = PHBS_IO_BASE;
+unsigned long ioremap_bot = IOREMAP_BASE;
/*
* map_io_page currently only called by __ioremap
@@ -102,8 +88,8 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
* entry in the hardware page table.
*
*/
- if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
- mmu_io_psize)) {
+ if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
+ pa, flags, mmu_io_psize)) {
printk(KERN_ERR "Failed to do bolted mapping IO "
"memory at %016lx !\n", pa);
return -ENOMEM;
@@ -113,8 +99,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
}
-static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
- unsigned long ea, unsigned long size,
+/**
+ * __ioremap_at - Low level function to establish the page tables
+ * for an IO mapping
+ */
+void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
unsigned long flags)
{
unsigned long i;
@@ -122,17 +111,35 @@ static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
if ((flags & _PAGE_PRESENT) == 0)
flags |= pgprot_val(PAGE_KERNEL);
+ WARN_ON(pa & ~PAGE_MASK);
+ WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+ WARN_ON(size & ~PAGE_MASK);
+
for (i = 0; i < size; i += PAGE_SIZE)
- if (map_io_page(ea+i, pa+i, flags))
+ if (map_io_page((unsigned long)ea+i, pa+i, flags))
return NULL;
- return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+ return (void __iomem *)ea;
+}
+
+/**
+ * __iounmap_at - Low level function to tear down the page tables
+ * for an IO mapping. This is used for mappings that
+ * are manipulated manually, like partial unmapping of
+ * PCI IOs or ISA space.
+ */
+void __iounmap_at(void *ea, unsigned long size)
+{
+ WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+ WARN_ON(size & ~PAGE_MASK);
+
+ unmap_kernel_range((unsigned long)ea, size);
}
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
- unsigned long pa, ea;
+ phys_addr_t paligned;
void __iomem *ret;
/*
@@ -144,27 +151,30 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
* IMALLOC_END
*
*/
- pa = addr & PAGE_MASK;
- size = PAGE_ALIGN(addr + size) - pa;
+ paligned = addr & PAGE_MASK;
+ size = PAGE_ALIGN(addr + size) - paligned;
- if ((size == 0) || (pa == 0))
+ if ((size == 0) || (paligned == 0))
return NULL;
if (mem_init_done) {
struct vm_struct *area;
- area = im_get_free_area(size);
+
+ area = __get_vm_area(size, VM_IOREMAP,
+ ioremap_bot, IOREMAP_END);
if (area == NULL)
return NULL;
- ea = (unsigned long)(area->addr);
- ret = __ioremap_com(addr, pa, ea, size, flags);
+ ret = __ioremap_at(paligned, area->addr, size, flags);
if (!ret)
- im_free(area->addr);
+ vunmap(area->addr);
} else {
- ea = ioremap_bot;
- ret = __ioremap_com(addr, pa, ea, size, flags);
+ ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
if (ret)
ioremap_bot += size;
}
+
+ if (ret)
+ ret += addr & ~PAGE_MASK;
return ret;
}
@@ -187,61 +197,9 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
}
-#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-
-int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
- unsigned long size, unsigned long flags)
-{
- struct vm_struct *area;
- void __iomem *ret;
-
- /* For now, require page-aligned values for pa, ea, and size */
- if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
- !IS_PAGE_ALIGNED(size)) {
- printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
- return 1;
- }
-
- if (!mem_init_done) {
- /* Two things to consider in this case:
- * 1) No records will be kept (imalloc, etc) that the region
- * has been remapped
- * 2) It won't be easy to iounmap() the region later (because
- * of 1)
- */
- ;
- } else {
- area = im_get_area(ea, size,
- IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
- if (area == NULL) {
- /* Expected when PHB-dlpar is in play */
- return 1;
- }
- if (ea != (unsigned long) area->addr) {
- printk(KERN_ERR "unexpected addr return from "
- "im_get_area\n");
- return 1;
- }
- }
-
- ret = __ioremap_com(pa, pa, ea, size, flags);
- if (ret == NULL) {
- printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
- return 1;
- }
- if (ret != (void *) ea) {
- printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
- return 1;
- }
-
- return 0;
-}
-
/*
* Unmap an IO region and remove it from imalloc'd list.
* Access to IO memory should be serialized by driver.
- *
- * XXX what about calls before mem_init_done (ie python_countermeasures())
*/
void __iounmap(volatile void __iomem *token)
{
@@ -250,9 +208,14 @@ void __iounmap(volatile void __iomem *token)
if (!mem_init_done)
return;
- addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-
- im_free(addr);
+ addr = (void *) ((unsigned long __force)
+ PCI_FIX_ADDR(token) & PAGE_MASK);
+ if ((unsigned long)addr < ioremap_bot) {
+ printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
+ " at 0x%p\n", addr);
+ return;
+ }
+ vunmap(addr);
}
void iounmap(volatile void __iomem *token)
@@ -263,77 +226,8 @@ void iounmap(volatile void __iomem *token)
__iounmap(token);
}
-static int iounmap_subset_regions(unsigned long addr, unsigned long size)
-{
- struct vm_struct *area;
-
- /* Check whether subsets of this region exist */
- area = im_get_area(addr, size, IM_REGION_SUPERSET);
- if (area == NULL)
- return 1;
-
- while (area) {
- iounmap((void __iomem *) area->addr);
- area = im_get_area(addr, size,
- IM_REGION_SUPERSET);
- }
-
- return 0;
-}
-
-int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
-{
- struct vm_struct *area;
- unsigned long addr;
- int rc;
-
- addr = (unsigned long __force) start & PAGE_MASK;
-
- /* Verify that the region either exists or is a subset of an existing
- * region. In the latter case, split the parent region to create
- * the exact region
- */
- area = im_get_area(addr, size,
- IM_REGION_EXISTS | IM_REGION_SUBSET);
- if (area == NULL) {
- /* Determine whether subset regions exist. If so, unmap */
- rc = iounmap_subset_regions(addr, size);
- if (rc) {
- printk(KERN_ERR
- "%s() cannot unmap nonexistent range 0x%lx\n",
- __FUNCTION__, addr);
- return 1;
- }
- } else {
- iounmap((void __iomem *) area->addr);
- }
- /*
- * FIXME! This can't be right:
- iounmap(area->addr);
- * Maybe it should be "iounmap(area);"
- */
- return 0;
-}
-
EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
-
-static DEFINE_SPINLOCK(phb_io_lock);
-
-void __iomem * reserve_phb_iospace(unsigned long size)
-{
- void __iomem *virt_addr;
-
- if (phbs_io_bot >= IMALLOC_BASE)
- panic("reserve_phb_iospace(): phb io space overflow\n");
-
- spin_lock(&phb_io_lock);
- virt_addr = (void __iomem *) phbs_io_bot;
- phbs_io_bot += size;
- spin_unlock(&phb_io_lock);
-
- return virt_addr;
-}
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 2bfc4d7..fdecb7f 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -239,3 +239,59 @@ void pte_free_finish(void)
pte_free_submit(*batchp);
*batchp = NULL;
}
+
+/**
+ * __flush_hash_table_range - Flush all HPTEs for a given address range
+ * from the hash table (and the TLB). But keeps
+ * the linux PTEs intact.
+ *
+ * @mm : mm_struct of the target address space (generally init_mm)
+ * @start : starting address
+ * @end : ending address (not included in the flush)
+ *
+ * This function is mostly to be used by some IO hotplug code in order
+ * to remove all hash entries from a given address range used to map IO
+ * space on a removed PCI-PCI bridge without tearing down the full mapping
+ * since 64K pages may overlap with other bridges when using 64K pages
+ * with 4K HW pages on IO space.
+ *
+ * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
+ * and is implemented for small size rather than speed.
+ */
+#ifdef CONFIG_HOTPLUG
+
+void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+
+ start = _ALIGN_DOWN(start, PAGE_SIZE);
+ end = _ALIGN_UP(end, PAGE_SIZE);
+
+ BUG_ON(!mm->pgd);
+
+ /* Note: Normally, we should only ever use a batch within a
+ * PTE locked section. This violates the rule, but will work
+ * since we don't actually modify the PTEs, we just flush the
+ * hash while leaving the PTEs intact (including their reference
+ * to being hashed). This is not the most performance oriented
+ * way to do things but is fine for our needs here.
+ */
+ local_irq_save(flags);
+ arch_enter_lazy_mmu_mode();
+ for (; start < end; start += PAGE_SIZE) {
+ pte_t *ptep = find_linux_pte(mm->pgd, start);
+ unsigned long pte;
+
+ if (ptep == NULL)
+ continue;
+ pte = pte_val(*ptep);
+ if (!(pte & _PAGE_HASHPTE))
+ continue;
+ hpte_need_flush(mm, start, ptep, pte, 0);
+ }
+ arch_leave_lazy_mmu_mode();
+ local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG */
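
For illustration, a hedged sketch of how the hotplug teardown described in the comment above might invoke this — the resource-to-virtual translation via _IO_BASE is an assumption drawn from the commit message, not part of this diff:

/* Hypothetical caller: when a PCI-PCI bridge goes away, scrub the
 * hash entries for its IO window but leave the linux PTEs alone,
 * since a 64K linux page may still back a neighbouring bridge's
 * ports.  'res' is assumed to be the bridge's IO resource, offset
 * by _IO_BASE into the kernel virtual IO window.
 */
static void flush_bridge_io_hptes(struct resource *res)
{
	unsigned long start = res->start + _IO_BASE;
	unsigned long end = res->end + _IO_BASE + 1;	/* end is exclusive */

	__flush_hash_table_range(&init_mm, start, end);
}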