author		Geert Uytterhoeven <geert@linux-m68k.org>	2013-06-09 18:12:42 (GMT)
committer	Geert Uytterhoeven <geert@linux-m68k.org>	2013-06-24 17:44:19 (GMT)
commit		631d8b674f5f8235e9cb7e628b0fe9e5200e3158 (patch)
tree		94cfb3bcdfbff1ddea8ef4a511fe92ca06cf9e31 /arch/m68k/include
parent		957d6bf665462eb1e94f7c23b4bdf20a83fea4b2 (diff)
download	linux-631d8b674f5f8235e9cb7e628b0fe9e5200e3158.tar.xz
m68k/uaccess: Fix asm constraints for userspace access
When compiling an MMU kernel with CPU_HAS_ADDRESS_SPACES=n (e.g. "MMU=y
allnoconfig": "echo CONFIG_MMU=y > allno.config && make KCONFIG_ALLCONFIG=1
allnoconfig"), we use plain "move" instead of "moves", and I got:
CC arch/m68k/lib/uaccess.o
{standard input}: Assembler messages:
{standard input}:47: Error: operands mismatch -- statement `move.b %a0,(%a1)' ignored
This happens because plain "move" doesn't support byte transfers between
memory and address registers, while "moves" does.
Fix the asm constraints for __generic_copy_from_user(),
__generic_copy_to_user(), and __clear_user() to only use data registers
when accessing userspace.
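For illustration only (not part of the patch), here is a minimal standalone sketch of why the constraint letter matters. With GCC's m68k constraints, "r" lets the compiler pick any general register, including an address register, while "d" restricts the operand to a data register, which is the only register class plain "move.b" can transfer to or from memory. The helper name below is made up:

	/* Hypothetical helper, not kernel code: the "d" constraint forces
	 * 'val' into a data register (%d0-%d7), so "move.b" always
	 * assembles.  With "r" instead, GCC may choose an address register
	 * (%a0-%a7), and "move.b %a0,(%a1)" is not a valid 68k instruction,
	 * which is exactly the assembler error quoted above.
	 */
	static inline void store_byte(unsigned char val, volatile unsigned char *p)
	{
		__asm__ __volatile__("move.b %1,%0" : "=m" (*p) : "d" (val));
	}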
Also, relax the asm constraints for 16-bit userspace accesses in
__put_user() and __get_user(), as both "move" and "moves" do support
such transfers between memory and address registers.
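Again as a hypothetical sketch (the helper name is made up): word-sized transfers from address registers are legal, so the looser "r" constraint is safe for the 16-bit cases and gives the compiler one more register class to choose from:

	/* Hypothetical helper: both "move.w %d0,(%a1)" and "move.w %a0,(%a1)"
	 * assemble, so a 16-bit store can use the relaxed "r" constraint.
	 */
	static inline void store_word(unsigned short val, volatile unsigned short *p)
	{
		__asm__ __volatile__("move.w %1,%0" : "=m" (*p) : "r" (val));
	}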
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Diffstat (limited to 'arch/m68k/include')
-rw-r--r--	arch/m68k/include/asm/uaccess_mm.h	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 472c891..15901db 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -90,7 +90,7 @@ asm volatile ("\n"					\
 		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
 		break;							\
 	    case 2:							\
-		__put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT);	\
+		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT);	\
 		break;							\
 	    case 4:							\
 		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
@@ -158,7 +158,7 @@ asm volatile ("\n"					\
 		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
 		break;							\
 	    case 2:							\
-		__get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT);	\
+		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);	\
 		break;							\
 	    case 4:							\
 		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
@@ -245,7 +245,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 		__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
 		break;
 	case 2:
-		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
+		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2);
 		break;
 	case 3:
 		__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
@@ -326,7 +326,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
 		break;
 	case 2:
-		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
+		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
 		break;
 	case 3:
 		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);