author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-21 16:40:26 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-21 16:40:26 (GMT)
commit     9f3938346a5c1fa504647670edb5fea5756cfb00 (patch)
tree       7cf6d24d6b076c8db8571494984924cac03703a2 /arch
parent     69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff)
parent     317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff)
download   linux-9f3938346a5c1fa504647670edb5fea5756cfb00.tar.xz
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic()
    [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
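For readers not following the series: the change being merged drops the explicit kmap-slot argument (KM_USER0 and friends), letting the implementation manage slots automatically per CPU. A minimal, illustrative sketch of the before/after calling convention follows; the helper function itself is hypothetical, but the mapping calls match the API as changed by this merge.

    #include <linux/highmem.h>
    #include <linux/string.h>

    /*
     * Hypothetical helper, for illustration only: it shows the
     * calling-convention change this merge completes.
     */
    static void zero_highpage_example(struct page *page)
    {
    	void *kaddr;

    	/* Old form, removed by this series:
    	 *   kaddr = kmap_atomic(page, KM_USER0);
    	 *   memset(kaddr, 0, PAGE_SIZE);
    	 *   kunmap_atomic(kaddr, KM_USER0);
    	 */
    	kaddr = kmap_atomic(page);	/* new one-argument form */
    	memset(kaddr, 0, PAGE_SIZE);
    	kunmap_atomic(kaddr);		/* atomic kmaps are released in LIFO order */
    }

Nested atomic mappings (e.g. the kto/kfrom pairs in the ARM copypage code below) still work; they just have to be unmapped in the reverse order of acquisition.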
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/highmem.h       |  2
-rw-r--r--  arch/arm/mm/copypage-fa.c            | 12
-rw-r--r--  arch/arm/mm/copypage-feroceon.c      | 12
-rw-r--r--  arch/arm/mm/copypage-v3.c            | 12
-rw-r--r--  arch/arm/mm/copypage-v4mc.c          |  8
-rw-r--r--  arch/arm/mm/copypage-v4wb.c          | 12
-rw-r--r--  arch/arm/mm/copypage-v4wt.c          | 12
-rw-r--r--  arch/arm/mm/copypage-v6.c            | 12
-rw-r--r--  arch/arm/mm/copypage-xsc3.c          | 12
-rw-r--r--  arch/arm/mm/copypage-xscale.c        |  8
-rw-r--r--  arch/arm/mm/highmem.c                |  4
-rw-r--r--  arch/frv/include/asm/highmem.h       |  2
-rw-r--r--  arch/frv/mm/highmem.c                |  4
-rw-r--r--  arch/mips/include/asm/highmem.h      |  2
-rw-r--r--  arch/mips/mm/c-r4k.c                 |  4
-rw-r--r--  arch/mips/mm/highmem.c               |  4
-rw-r--r--  arch/mips/mm/init.c                  |  8
-rw-r--r--  arch/mn10300/include/asm/highmem.h   |  2
-rw-r--r--  arch/parisc/include/asm/cacheflush.h |  2
-rw-r--r--  arch/powerpc/include/asm/highmem.h   |  2
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c         |  4
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c    |  5
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c        |  4
-rw-r--r--  arch/powerpc/mm/mem.c                |  4
-rw-r--r--  arch/sh/mm/cache-sh4.c               |  4
-rw-r--r--  arch/sh/mm/cache.c                   | 12
-rw-r--r--  arch/sparc/include/asm/highmem.h     |  2
-rw-r--r--  arch/sparc/mm/highmem.c              |  4
-rw-r--r--  arch/tile/include/asm/highmem.h      |  2
-rw-r--r--  arch/tile/mm/highmem.c               |  4
-rw-r--r--  arch/um/kernel/skas/uaccess.c        |  4
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c   | 24
-rw-r--r--  arch/x86/include/asm/highmem.h       |  2
-rw-r--r--  arch/x86/kernel/crash_dump_32.c      |  6
-rw-r--r--  arch/x86/kvm/lapic.c                 |  8
-rw-r--r--  arch/x86/kvm/paging_tmpl.h           |  4
-rw-r--r--  arch/x86/kvm/x86.c                   |  8
-rw-r--r--  arch/x86/lib/usercopy_32.c           |  4
-rw-r--r--  arch/x86/mm/highmem_32.c             |  4
39 files changed, 122 insertions, 123 deletions
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index a4edd19..8c5e828 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -57,7 +57,7 @@ static inline void *kmap_high_get(struct page *page)
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d2852e1..d130a5e 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
{
void *kto, *kfrom;
- kto = kmap_atomic(to, KM_USER0);
- kfrom = kmap_atomic(from, KM_USER1);
+ kto = kmap_atomic(to);
+ kfrom = kmap_atomic(from);
fa_copy_user_page(kto, kfrom);
- kunmap_atomic(kfrom, KM_USER1);
- kunmap_atomic(kto, KM_USER0);
+ kunmap_atomic(kfrom);
+ kunmap_atomic(kto);
}
/*
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
*/
void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+ void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "ip", "lr");
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
struct cpu_user_fns fa_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index ac163de..49ee0c1 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
{
void *kto, *kfrom;
- kto = kmap_atomic(to, KM_USER0);
- kfrom = kmap_atomic(from, KM_USER1);
+ kto = kmap_atomic(to);
+ kfrom = kmap_atomic(from);
flush_cache_page(vma, vaddr, page_to_pfn(from));
feroceon_copy_user_page(kto, kfrom);
- kunmap_atomic(kfrom, KM_USER1);
- kunmap_atomic(kto, KM_USER0);
+ kunmap_atomic(kfrom);
+ kunmap_atomic(kto);
}
void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+ void *ptr, *kaddr = kmap_atomic(page);
asm volatile ("\
mov r1, %2 \n\
mov r2, #0 \n\
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
struct cpu_user_fns feroceon_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index f72303e..3935bdd 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -42,11 +42,11 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
{
void *kto, *kfrom;
- kto = kmap_atomic(to, KM_USER0);
- kfrom = kmap_atomic(from, KM_USER1);
+ kto = kmap_atomic(to);
+ kfrom = kmap_atomic(from);
v3_copy_user_page(kto, kfrom);
- kunmap_atomic(kfrom, KM_USER1);
- kunmap_atomic(kto, KM_USER0);
+ kunmap_atomic(kfrom);
+ kunmap_atomic(kto);
}
/*
@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
*/
void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+ void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\n\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
struct cpu_user_fns v3_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7d0a8c2..ec8c3be 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to)
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
- void *kto = kmap_atomic(to, KM_USER1);
+ void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_unlock(&minicache_lock);
- kunmap_atomic(kto, KM_USER1);
+ kunmap_atomic(kto);
}
/*
@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
*/
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+ void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
struct cpu_user_fns v4_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index cb589cb..067d0fd 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
{
void *kto, *kfrom;
- kto = kmap_atomic(to, KM_USER0);
- kfrom = kmap_atomic(from, KM_USER1);
+ kto = kmap_atomic(to);
+ kfrom = kmap_atomic(from);
flush_cache_page(vma, vaddr, page_to_pfn(from));
v4wb_copy_user_page(kto, kfrom);
- kunmap_atomic(kfrom, KM_USER1);
- kunmap_atomic(kto, KM_USER0);
+ kunmap_atomic(kfrom);
+ kunmap_atomic(kto);
}
/*
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
*/
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+ void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
struct cpu_user_fns v4wb_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 30c7d04..b85c5da 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
{
void *kto, *kfrom;
- kto = kmap_atomic(to, KM_USER0);
- kfrom = kmap_atomic(from, KM_USER1);
+ kto = kmap_atomic(to);
+ kfrom = kmap_atomic(from);
v4wt_copy_user_page(kto, kfrom);
- kunmap_atomic(kfrom, KM_USER1);
- kunmap_atomic(kto, KM_USER0);
+ kunmap_atomic(kfrom);
+ kunmap_atomic(kto);
}
/*
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
*/
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+ void *ptr, *kaddr = kmap_atomic(page);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
struct cpu_user_fns v4wt_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3d9a155..8b03a58 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -38,11 +38,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
{
void *kto, *kfrom;
- kfrom = kmap_atomic(from, KM_USER0);
- kto = kmap_atomic(to, KM_USER1);
+ kfrom = kmap_atomic(from);
+ kto = kmap_atomic(to);
copy_page(kto, kfrom);
- kunmap_atomic(kto, KM_USER1);
- kunmap_atomic(kfrom, KM_USER0);
+ kunmap_atomic(kto);
+ kunmap_atomic(kfrom);
}
/*
@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
*/
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
- void *kaddr = kmap_atomic(page, KM_USER0);
+ void *kaddr = kmap_atomic(page);
clear_page(kaddr);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
/*
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index f9cde07..03a2042 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
{
void *kto, *kfrom;
- kto = kmap_atomic(to, KM_USER0);
- kfrom = kmap_atomic(from, KM_USER1);
+ kto = kmap_atomic(to);
+ kfrom = kmap_atomic(from);
flush_cache_page(vma, vaddr, page_to_pfn(from));
xsc3_mc_copy_user_page(kto, kfrom);
- kunmap_atomic(kfrom, KM_USER1);
- kunmap_atomic(kto, KM_USER0);
+ kunmap_atomic(kfrom);
+ kunmap_atomic(kto);
}
/*
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
*/
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+ void *ptr, *kaddr = kmap_atomic(page);
asm volatile ("\
mov r1, %2 \n\
mov r2, #0 \n\
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3");
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 610c24c..439d106 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to)
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
- void *kto = kmap_atomic(to, KM_USER1);
+ void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_unlock(&minicache_lock);
- kunmap_atomic(kto, KM_USER1);
+ kunmap_atomic(kto);
}
/*
@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+ void *ptr, *kaddr = kmap_atomic(page);
asm volatile(
"mov r1, %2 \n\
mov r2, #0 \n\
@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "ip");
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
struct cpu_user_fns xscale_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c057..5a21505 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,7 +36,7 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
{
unsigned int idx;
unsigned long vaddr;
@@ -81,7 +81,7 @@ void *__kmap_atomic(struct page *page)
return (void *)vaddr;
}
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index a8d6565..716956a 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -157,7 +157,7 @@ static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
pagefault_enable();
}
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index fd7fcd4..31902c9 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -37,7 +37,7 @@ struct page *kmap_atomic_to_page(void *ptr)
return virt_to_page(ptr);
}
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
{
unsigned long paddr;
int type;
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page)
return NULL;
}
}
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 77e6440..2d91888 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -47,7 +47,7 @@ extern void kunmap_high(struct page *page);
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4f9eb0b..c97087d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -498,7 +498,7 @@ static inline void local_r4k_flush_cache_page(void *args)
if (map_coherent)
vaddr = kmap_coherent(page, addr);
else
- vaddr = kmap_atomic(page, KM_USER0);
+ vaddr = kmap_atomic(page);
addr = (unsigned long)vaddr;
}
@@ -521,7 +521,7 @@ static inline void local_r4k_flush_cache_page(void *args)
if (map_coherent)
kunmap_coherent();
else
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
}
}
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 3634c7ea..aff5705 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(kunmap);
* kmaps are appropriate for short, tight code paths only.
*/
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
{
unsigned long vaddr;
int idx, type;
@@ -62,7 +62,7 @@ void *__kmap_atomic(struct page *page)
return (void*) vaddr;
}
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3b3ffd4..1a85ba9 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -207,21 +207,21 @@ void copy_user_highpage(struct page *to, struct page *from,
{
void *vfrom, *vto;
- vto = kmap_atomic(to, KM_USER1);
+ vto = kmap_atomic(to);
if (cpu_has_dc_aliases &&
page_mapped(from) && !Page_dcache_dirty(from)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent();
} else {
- vfrom = kmap_atomic(from, KM_USER0);
+ vfrom = kmap_atomic(from);
copy_page(vto, vfrom);
- kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vfrom);
}
if ((!cpu_has_ic_fills_f_dc) ||
pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
flush_data_cache_page((unsigned long)vto);
- kunmap_atomic(vto, KM_USER1);
+ kunmap_atomic(vto);
/* Make sure this page is cleared on other CPU's too before using it */
smp_wmb();
}
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index bfe2d88..7c137cd 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
-static inline unsigned long __kmap_atomic(struct page *page)
+static inline unsigned long kmap_atomic(struct page *page)
{
unsigned long vaddr;
int idx, type;
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index da601dd..9f21ab0 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -140,7 +140,7 @@ static inline void *kmap(struct page *page)
#define kunmap(page) kunmap_parisc(page_address(page))
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
{
pagefault_disable();
return page_address(page);
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index dbc2640..caaf6e0 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -79,7 +79,7 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e2cfb9e..220fcdf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -227,14 +227,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
hpage_offset /= 4;
get_page(hpage);
- page = kmap_atomic(hpage, KM_USER0);
+ page = kmap_atomic(hpage);
/* patch dcbz into reserved instruction, so we trap */
for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
if ((page[i] & 0xff0007ff) == INS_DCBZ)
page[i] &= 0xfffffff7;
- kunmap_atomic(page, KM_USER0);
+ kunmap_atomic(page);
put_page(hpage);
}
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 329be36..6747eec 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -365,12 +365,11 @@ static inline void __dma_sync_page_highmem(struct page *page,
local_irq_save(flags);
do {
- start = (unsigned long)kmap_atomic(page + seg_nr,
- KM_PPC_SYNC_PAGE) + seg_offset;
+ start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
/* Sync this buffer segment */
__dma_sync((void *)start, seg_size, direction);
- kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
+ kunmap_atomic((void *)start);
seg_nr++;
/* Calculate next buffer segment size */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a8b3cc7..57c7465 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -910,9 +910,9 @@ void flush_dcache_icache_hugepage(struct page *page)
if (!PageHighMem(page)) {
__flush_dcache_icache(page_address(page+i));
} else {
- start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
+ start = kmap_atomic(page+i);
__flush_dcache_icache(start);
- kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+ kunmap_atomic(start);
}
}
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d974b79..baaafde 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -458,9 +458,9 @@ void flush_dcache_icache_page(struct page *page)
#endif
#ifdef CONFIG_BOOKE
{
- void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+ void *start = kmap_atomic(page);
__flush_dcache_icache(start);
- kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+ kunmap_atomic(start);
}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
/* On 8xx there is no need to kmap since highmem is not supported */
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 92eb986..112fea1 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -244,7 +244,7 @@ static void sh4_flush_cache_page(void *args)
if (map_coherent)
vaddr = kmap_coherent(page, address);
else
- vaddr = kmap_atomic(page, KM_USER0);
+ vaddr = kmap_atomic(page);
address = (unsigned long)vaddr;
}
@@ -259,7 +259,7 @@ static void sh4_flush_cache_page(void *args)
if (map_coherent)
kunmap_coherent(vaddr);
else
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
}
}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5a580ea..616966a 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -95,7 +95,7 @@ void copy_user_highpage(struct page *to, struct page *from,
{
void *vfrom, *vto;
- vto = kmap_atomic(to, KM_USER1);
+ vto = kmap_atomic(to);
if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
test_bit(PG_dcache_clean, &from->flags)) {
@@ -103,16 +103,16 @@ void copy_user_highpage(struct page *to, struct page *from,
copy_page(vto, vfrom);
kunmap_coherent(vfrom);
} else {
- vfrom = kmap_atomic(from, KM_USER0);
+ vfrom = kmap_atomic(from);
copy_page(vto, vfrom);
- kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vfrom);
}
if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
(vma->vm_flags & VM_EXEC))
__flush_purge_region(vto, PAGE_SIZE);
- kunmap_atomic(vto, KM_USER1);
+ kunmap_atomic(vto);
/* Make sure this page is cleared on other CPU's too before using it */
smp_wmb();
}
@@ -120,14 +120,14 @@ EXPORT_SYMBOL(copy_user_highpage);
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *kaddr = kmap_atomic(page, KM_USER0);
+ void *kaddr = kmap_atomic(page);
clear_page(kaddr);
if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
__flush_purge_region(kaddr, PAGE_SIZE);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 3d7afbb..3b6e00d 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern struct page *kmap_atomic_to_page(void *vaddr);
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 77140a0..055c66c 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -30,7 +30,7 @@
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
{
unsigned long vaddr;
long idx, type;
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page)
return (void*) vaddr;
}
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index b2a6c5d..fc8429a 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -59,7 +59,7 @@ void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 31dbbd9..ef8e5a6 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -224,12 +224,12 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
{
/* PAGE_NONE is a magic value that tells us to check immutability. */
return kmap_atomic_prot(page, PAGE_NONE);
}
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 9fefd92..cd7df79 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -69,7 +69,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
return -1;
page = pte_page(*pte);
- addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
+ addr = (unsigned long) kmap_atomic(page) +
(addr & ~PAGE_MASK);
current->thread.fault_catcher = &buf;
@@ -82,7 +82,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
current->thread.fault_catcher = NULL;
- kunmap_atomic((void *)addr, KM_UML_USERCOPY);
+ kunmap_atomic((void *)addr);
return n;
}
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index b3350bd..c799352 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1108,12 +1108,12 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
scatterwalk_start(&assoc_sg_walk, req->assoc);
- src = scatterwalk_map(&src_sg_walk, 0);
- assoc = scatterwalk_map(&assoc_sg_walk, 0);
+ src = scatterwalk_map(&src_sg_walk);
+ assoc = scatterwalk_map(&assoc_sg_walk);
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk, 0);
+ dst = scatterwalk_map(&dst_sg_walk);
}
} else {
@@ -1137,11 +1137,11 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
* back to the packet. */
if (one_entry_in_sg) {
if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst, 0);
+ scatterwalk_unmap(dst);
scatterwalk_done(&dst_sg_walk, 0, 0);
}
- scatterwalk_unmap(src, 0);
- scatterwalk_unmap(assoc, 0);
+ scatterwalk_unmap(src);
+ scatterwalk_unmap(assoc);
scatterwalk_done(&src_sg_walk, 0, 0);
scatterwalk_done(&assoc_sg_walk, 0, 0);
} else {
@@ -1190,12 +1190,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
scatterwalk_start(&assoc_sg_walk, req->assoc);
- src = scatterwalk_map(&src_sg_walk, 0);
- assoc = scatterwalk_map(&assoc_sg_walk, 0);
+ src = scatterwalk_map(&src_sg_walk);
+ assoc = scatterwalk_map(&assoc_sg_walk);
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk, 0);
+ dst = scatterwalk_map(&dst_sg_walk);
}
} else {
@@ -1220,11 +1220,11 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
if (one_entry_in_sg) {
if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst, 0);
+ scatterwalk_unmap(dst);
scatterwalk_done(&dst_sg_walk, 0, 0);
}
- scatterwalk_unmap(src, 0);
- scatterwalk_unmap(assoc, 0);
+ scatterwalk_unmap(src);
+ scatterwalk_unmap(assoc);
scatterwalk_done(&src_sg_walk, 0, 0);
scatterwalk_done(&assoc_sg_walk, 0, 0);
} else {
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 3bd0402..302a323 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -61,7 +61,7 @@ void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 642f75a..11891ca 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -62,16 +62,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!userbuf) {
memcpy(buf, (vaddr + offset), csize);
- kunmap_atomic(vaddr, KM_PTE0);
+ kunmap_atomic(vaddr);
} else {
if (!kdump_buf_page) {
printk(KERN_WARNING "Kdump: Kdump buffer page not"
" allocated\n");
- kunmap_atomic(vaddr, KM_PTE0);
+ kunmap_atomic(vaddr);
return -EFAULT;
}
copy_page(kdump_buf_page, vaddr);
- kunmap_atomic(vaddr, KM_PTE0);
+ kunmap_atomic(vaddr);
if (copy_to_user(buf, (kdump_buf_page + offset), csize))
return -EFAULT;
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cfdc6e0..31bfc69 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1283,9 +1283,9 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
return;
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+ vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
- kunmap_atomic(vapic, KM_USER0);
+ kunmap_atomic(vapic);
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
@@ -1310,9 +1310,9 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+ vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
- kunmap_atomic(vapic, KM_USER0);
+ kunmap_atomic(vapic);
}
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1561028..df5a703 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -92,9 +92,9 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
if (unlikely(npages != 1))
return -EFAULT;
- table = kmap_atomic(page, KM_USER0);
+ table = kmap_atomic(page);
ret = CMPXCHG(&table[index], orig_pte, new_pte);
- kunmap_atomic(table, KM_USER0);
+ kunmap_atomic(table);
kvm_release_page_dirty(page);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9cbfc06..bb4fd263 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1162,12 +1162,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+ shared_kaddr = kmap_atomic(vcpu->time_page);
memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
sizeof(vcpu->hv_clock));
- kunmap_atomic(shared_kaddr, KM_USER0);
+ kunmap_atomic(shared_kaddr);
mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
return 0;
@@ -3848,7 +3848,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
goto emul_write;
}
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
kaddr += offset_in_page(gpa);
switch (bytes) {
case 1:
@@ -3866,7 +3866,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
default:
BUG();
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
kvm_release_page_dirty(page);
if (!exchanged)
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e218d5d..d9b094c 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -760,9 +760,9 @@ survive:
break;
}
- maddr = kmap_atomic(pg, KM_USER0);
+ maddr = kmap_atomic(pg);
memcpy(maddr + offset, from, len);
- kunmap_atomic(maddr, KM_USER0);
+ kunmap_atomic(maddr);
set_page_dirty_lock(pg);
put_page(pg);
up_read(&current->mm->mmap_sem);
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index f4f29b1..6f31ee5 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -51,11 +51,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
/*
* This is the same as kmap_atomic() but can map memory that doesn't