path: root/mm
author	Linus Torvalds <torvalds@g5.osdl.org>	2005-12-12 03:57:52 (GMT)
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-12-12 03:57:52 (GMT)
commit	7fc7e2eeecb599ba719c4c4503100fc8cd6a6920 (patch)
tree	389b63cf2cbdf27fceef52a450b4b05367fe4425 /mm
parent	fb155c1619f056ae9765eed272cd6aba6e1a7399 (diff)
download	linux-7fc7e2eeecb599ba719c4c4503100fc8cd6a6920.tar.xz
Remove (at least temporarily) the "incomplete PFN mapping" support
With the previous commit, we can handle arbitrary shared re-mappings even without this complexity, and since the only known private mappings are for strange users of /dev/mem (which never create an incomplete one), there seems to be no reason to support it.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	46
1 file changed, 1 insertion, 45 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e65f8fc..430a72e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1228,50 +1228,6 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
EXPORT_SYMBOL(vm_insert_page);
/*
- * Somebody does a pfn remapping that doesn't actually work as a vma.
- *
- * Do it as individual pages instead, and warn about it. It's bad form,
- * and very inefficient.
- */
-static int incomplete_pfn_remap(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long pfn, pgprot_t prot)
-{
- static int warn = 10;
- struct page *page;
- int retval;
-
- if (!(vma->vm_flags & VM_INCOMPLETE)) {
- if (warn) {
- warn--;
- printk("%s does an incomplete pfn remapping", current->comm);
- dump_stack();
- }
- }
- vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
-
- if (start < vma->vm_start || end > vma->vm_end)
- return -EINVAL;
-
- if (!pfn_valid(pfn))
- return -EINVAL;
-
- page = pfn_to_page(pfn);
- if (!PageReserved(page))
- return -EINVAL;
-
- retval = 0;
- while (start < end) {
- retval = insert_page(vma->vm_mm, start, page, prot);
- if (retval < 0)
- break;
- start += PAGE_SIZE;
- page++;
- }
- return retval;
-}
-
-/*
* maps a range of physical memory into the requested pages. the old
* mappings are removed. any references to nonexistent pages results
* in null mappings (currently treated as "copy-on-access")
@@ -1365,7 +1321,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
*/
if (!(vma->vm_flags & VM_SHARED)) {
if (addr != vma->vm_start || end != vma->vm_end)
- return incomplete_pfn_remap(vma, addr, end, pfn, prot);
+ return -EINVAL;
vma->vm_pgoff = pfn;
}